From 7eb2333e8553817e7c775cb6719d61c199d9424e Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Mon, 9 Jun 2025 12:55:09 -0700 Subject: [PATCH 01/17] Add InvalidPreferredSlotId error for use in rot prep_image_update --- gateway-messages/src/lib.rs | 2 +- gateway-messages/src/sp_to_mgs.rs | 7 ++++++ gateway-messages/tests/versioning/mod.rs | 1 + gateway-messages/tests/versioning/v20.rs | 29 ++++++++++++++++++++++++ 4 files changed, 38 insertions(+), 1 deletion(-) create mode 100644 gateway-messages/tests/versioning/v20.rs diff --git a/gateway-messages/src/lib.rs b/gateway-messages/src/lib.rs index 7b5f376..9c8be97 100644 --- a/gateway-messages/src/lib.rs +++ b/gateway-messages/src/lib.rs @@ -69,7 +69,7 @@ pub const HF_PAGE_SIZE: usize = 256; /// for more detail and discussion. pub mod version { pub const MIN: u32 = 2; - pub const CURRENT: u32 = 19; + pub const CURRENT: u32 = 20; /// MGS protocol version in which SP watchdog messages were added pub const WATCHDOG_VERSION: u32 = 12; diff --git a/gateway-messages/src/sp_to_mgs.rs b/gateway-messages/src/sp_to_mgs.rs index c5d1da7..d442767 100644 --- a/gateway-messages/src/sp_to_mgs.rs +++ b/gateway-messages/src/sp_to_mgs.rs @@ -1294,6 +1294,7 @@ pub enum UpdateError { ImageMismatch, SignatureNotValidated, VersionNotSupported, + InvalidPreferredSlotId, } impl fmt::Display for UpdateError { @@ -1353,6 +1354,12 @@ impl fmt::Display for UpdateError { Self::InvalidComponent => { write!(f, "invalid component for operation") } + Self::InvalidPreferredSlotId => { + write!( + f, + "updating a bootloader preferred slot is not permitted" + ) + } } } } diff --git a/gateway-messages/tests/versioning/mod.rs b/gateway-messages/tests/versioning/mod.rs index be3ff2d..ca9fe89 100644 --- a/gateway-messages/tests/versioning/mod.rs +++ b/gateway-messages/tests/versioning/mod.rs @@ -25,6 +25,7 @@ mod v16; mod v17; mod v18; mod v19; +mod v20; pub fn assert_serialized( expected: &[u8], diff --git a/gateway-messages/tests/versioning/v20.rs b/gateway-messages/tests/versioning/v20.rs new file mode 100644 index 0000000..3bc765e --- /dev/null +++ b/gateway-messages/tests/versioning/v20.rs @@ -0,0 +1,29 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +//! This source file is named after the protocol version being tested, +//! e.g. v01.rs implements tests for protocol version 1. +//! The tested protocol version is represented by "$VERSION" below. +//! +//! The tests in this module check that the serialized form of messages from MGS +//! protocol version $VERSION have not changed. +//! +//! If a test in this module fails, _do not change the test_! This means you +//! have changed, deleted, or reordered an existing message type or enum +//! variant, and you should revert that change. This will remain true until we +//! bump the `version::MIN` to a value higher than $VERSION, at which point these +//! tests can be removed as we will stop supporting $VERSION. 
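+//!
+//! For context on the expected bytes below: hubpack encodes a fieldless enum
+//! variant as its variant index (a single byte here), so appending
+//! `InvalidPreferredSlotId` as a new final variant of `UpdateError` gives it
+//! index 34 without disturbing any previously assigned index. A minimal
+//! sketch, illustrative only and assuming hubpack's `serialize` helper:
+//!
+//! ```ignore
+//! let mut buf = [0u8; 8];
+//! let n = hubpack::serialize(&mut buf, &UpdateError::InvalidPreferredSlotId)
+//!     .unwrap();
+//! assert_eq!(n, 1);
+//! assert_eq!(buf[0], 34);
+//! ```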
+ +use super::assert_serialized; +use gateway_messages::UpdateError; + +#[test] +fn error_enums() { + + let response: [UpdateError; 1] = [ + UpdateError::InvalidPreferredSlotId, + ]; + let expected = vec![34]; + assert_serialized(&expected, &response); +} From cf7fcb5808b7a81794753bfb1be21dd1a0cf68c6 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 18 Jun 2025 10:29:30 -0700 Subject: [PATCH 02/17] Add InvalidPreferredSlotId error for use in rot prep_image_update (#398) --- gateway-messages/tests/versioning/v19.rs | 84 ++++++++++++++++++++++++ gateway-sp-comms/src/error.rs | 2 + 2 files changed, 86 insertions(+) diff --git a/gateway-messages/tests/versioning/v19.rs b/gateway-messages/tests/versioning/v19.rs index f724fa6..845abe9 100644 --- a/gateway-messages/tests/versioning/v19.rs +++ b/gateway-messages/tests/versioning/v19.rs @@ -16,6 +16,9 @@ //! tests can be removed as we will stop supporting $VERSION. use super::assert_serialized; +use gateway_messages::ImageError; +use gateway_messages::SpStateV3; +use gateway_messages::UpdateError; use gateway_messages::{HfError, MgsRequest, SpError, SpResponse}; #[test] @@ -60,3 +63,84 @@ fn read_host_flash() { assert_serialized(&[38, i as u8], &request); } } + +#[test] +fn sp_response() { + let response = SpResponse::SpStateV3(SpStateV3 { + hubris_archive_id: [1, 2, 3, 4, 5, 6, 7, 8], + serial_number: [ + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + ], + model: [ + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, + 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, + ], + revision: 0xf0f1f2f3, + base_mac_address: [73, 74, 75, 76, 77, 78], + power_state: gateway_messages::PowerState::A0, + }); + + #[rustfmt::skip] + let expected = vec![ + 44, // SpStateV3 + 1, 2, 3, 4, 5, 6, 7, 8, // hubris_archive_id + + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40, // serial_number + + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, + 71, 72, // model + + 0xf3, 0xf2, 0xf1, 0xf0, // revision + 73, 74, 75, 76, 77, 78, // base_mac_address + 0, // power_state + ]; + + assert_serialized(&expected, &response); +} + +#[test] +fn host_request() { + let request = MgsRequest::VersionedRotBootInfo { version: 3 }; + + #[rustfmt::skip] + let expected = vec![ + 45, // VersionedRotBootInfo + 3, // version + ]; + + assert_serialized(&expected, &request); +} + +#[test] +fn error_enums() { + let response: [ImageError; 13] = [ + ImageError::Unchecked, + ImageError::FirstPageErased, + ImageError::PartiallyProgrammed, + ImageError::InvalidLength, + ImageError::HeaderNotProgrammed, + ImageError::BootloaderTooSmall, + ImageError::BadMagic, + ImageError::HeaderImageSize, + ImageError::UnalignedLength, + ImageError::UnsupportedType, + ImageError::ResetVectorNotThumb2, + ImageError::ResetVector, + ImageError::Signature, + ]; + let expected = vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]; + assert_serialized(&expected, &response); + + let response: [UpdateError; 4] = [ + UpdateError::BlockOutOfOrder, + UpdateError::InvalidComponent, + UpdateError::InvalidSlotIdForOperation, + UpdateError::InvalidPreferredSlotId, + ]; + let expected = vec![27, 28, 29, 34]; + assert_serialized(&expected, &response); +} diff --git a/gateway-sp-comms/src/error.rs b/gateway-sp-comms/src/error.rs index 8c45978..1b3273b 100644 --- 
a/gateway-sp-comms/src/error.rs
+++ b/gateway-sp-comms/src/error.rs
@@ -116,6 +116,8 @@ pub enum UpdateError {
     InvalidComponent,
     #[error("an image was not found")]
     ImageNotFound,
+    #[error("updating a bootloader preferred slot is not permitted")]
+    InvalidPreferredSlotId,
 }
 
 #[derive(Debug, thiserror::Error, SlogInlineError)]

From 2ce3a035d9f7af672ddbed3e65f7ca3c9d01ecf0 Mon Sep 17 00:00:00 2001
From: Ben Stoltz
Date: Wed, 5 Feb 2025 14:12:31 -0800
Subject: [PATCH 03/17] Integrate a Rhai interpreter into faux-mgs.

Rust code changes:
- add Rhai scripting as a feature (--features=rhaiscript)
- add ArchiveInspector for access to RawHubrisArchive
- add "system(argv) -> #{exit_code, stdout, stderr}" from std::process::Command
- export faux-mgs parameters to Rhai main.
- run any faux-mgs command with "let result = faux_mgs(["arg0", .., "argN"]);"
- faux-mgs results are passed back to the script as a map, even if the
  underlying JSON is simpler.
- ChronoPackage for time handling.
- FilesystemPackage for file access.
- EnvironmentPackage for env var access.
- export "script_dir" so that a script can find other files relative to itself.
- "verify_rot_image()" to verify a RoT image against the CFPA and CMPA.
- vars available to main():
  - "argv" - passed into the script main's scope with all remaining CLI args.
  - "rbi_default" - expose the faux-mgs default "rot_boot_info" version.
  - "interface" - pass the "--interface INTERFACE" value.
  - "reset_watchdog_timeout_ms" - pass that value to the script.
- Route Rhai's debug function to the faux-mgs log.
  - The `debug("message")` function is routed to the faux-mgs slog logging.
    Prefixing a message with "crit|", "trace|", "error|", "warn|", "info|",
    or "debug|" will log at the corresponding level. Leaving off the prefix
    or using some other prefix will log at the debug level.
  - Rhai's `print()` still goes to stdout.

Rhai scripts:

scripts/util.rhai contains common script and faux-mgs support functions:
  - getopts()
  - to_hexstring()
  - cstring_to_string(a)
  - array_to_mac(a)
  - ab_to_01(v)
  - env_expand(s, override)
  - rot_boot_info()
  - state()
  - caboose_value(component, slot, key)
  - get_device_cabooses()
  - rkth_to_key_name(rkth)
  - array_to_blob(a)
  - get_cmpa()
  - get_cfpa()
  - get_rot_keyset()

scripts/upgrade-rollback.rhai
  - Uses only MGS messages for testing; no humility or other APIs.
  - Performs a happy-path update and rollback from baseline to under-test
    images.
scripts/targets.json - an example configuration script for scripts/update-rollback.rhai --- Cargo.lock | 270 ++++++++++++- Cargo.toml | 10 +- faux-mgs/Cargo.toml | 16 + faux-mgs/src/main.rs | 59 ++- faux-mgs/src/rhaiscript.rs | 639 ++++++++++++++++++++++++++++++ gateway-sp-comms/src/single_sp.rs | 4 + scripts/README.md | 127 ++++++ scripts/targets.json | 30 ++ scripts/upgrade-rollback.rhai | 595 ++++++++++++++++++++++++++++ scripts/util.rhai | 401 +++++++++++++++++++ 10 files changed, 2133 insertions(+), 18 deletions(-) create mode 100644 faux-mgs/src/rhaiscript.rs create mode 100644 scripts/README.md create mode 100644 scripts/targets.json create mode 100644 scripts/upgrade-rollback.rhai create mode 100644 scripts/util.rhai diff --git a/Cargo.lock b/Cargo.lock index dbb4a14..4bcd7b9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -19,13 +19,25 @@ checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.8.3" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", + "const-random", + "getrandom 0.3.1", "once_cell", "version_check", + "zerocopy 0.8.25", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", ] [[package]] @@ -119,6 +131,17 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "async-trait" version = "0.1.88" @@ -338,10 +361,33 @@ dependencies = [ "iana-time-zone", "js-sys", "num-traits", + "pure-rust-locales", + "serde", "wasm-bindgen", "windows-targets 0.52.6", ] +[[package]] +name = "chrono-tz" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "efdce149c370f133a071ca8ef6ea340b7b88748ab0810097a9e2976eaa34b4f3" +dependencies = [ + "chrono", + "chrono-tz-build", + "phf", +] + +[[package]] +name = "chrono-tz-build" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402" +dependencies = [ + "parse-zoneinfo", + "phf_codegen", +] + [[package]] name = "cipher" version = "0.4.4" @@ -432,6 +478,26 @@ version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "520fbf3c07483f94e3e3ca9d0cfd913d7718ef2483d2cfd91c0d9e91474ab913" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.7", + "once_cell", + "tiny-keccak", +] + [[package]] name 
= "constant_time_eq" version = "0.3.0" @@ -543,6 +609,12 @@ version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" +[[package]] +name = "crunchy" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43da5946c66ffcc7745f48db692ffbb10a83bfe0afd96235c5c2a4fb23994929" + [[package]] name = "crypto-bigint" version = "0.5.5" @@ -792,6 +864,7 @@ name = "faux-mgs" version = "0.1.1" dependencies = [ "anyhow", + "async-recursion", "async-trait", "clap", "futures", @@ -799,10 +872,17 @@ dependencies = [ "gateway-sp-comms", "glob", "hex", + "hubtools", "humantime", + "lpc55_areas", + "lpc55_sign", "nix", "parse_int", "rand", + "rhai", + "rhai-chrono", + "rhai-env", + "rhai-fs", "serde", "serde_json", "sha2", @@ -812,9 +892,11 @@ dependencies = [ "ssh-agent-client-rs", "ssh-key", "termios", + "thiserror", "tokio", "tokio-stream", "tokio-util", + "toml", "uuid", "zerocopy 0.8.25", ] @@ -1317,7 +1399,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.9", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -1526,7 +1608,7 @@ dependencies = [ [[package]] name = "lpc55_areas" version = "0.2.5" -source = "git+https://github.com/oxidecomputer/lpc55_support#17d04af60b3a4fd82c77b1a33ca5370943cd25d9" +source = "git+https://github.com/oxidecomputer/lpc55_support/#17d04af60b3a4fd82c77b1a33ca5370943cd25d9" dependencies = [ "bitfield", "clap", @@ -1537,7 +1619,7 @@ dependencies = [ [[package]] name = "lpc55_sign" version = "0.3.4" -source = "git+https://github.com/oxidecomputer/lpc55_support#17d04af60b3a4fd82c77b1a33ca5370943cd25d9" +source = "git+https://github.com/oxidecomputer/lpc55_support/#17d04af60b3a4fd82c77b1a33ca5370943cd25d9" dependencies = [ "byteorder", "const-oid", @@ -1808,6 +1890,9 @@ name = "once_cell" version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" +dependencies = [ + "portable-atomic", +] [[package]] name = "p256" @@ -1892,6 +1977,15 @@ dependencies = [ "windows-sys 0.36.1", ] +[[package]] +name = "parse-zoneinfo" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" +dependencies = [ + "regex", +] + [[package]] name = "parse_int" version = "0.6.0" @@ -1973,6 +2067,35 @@ dependencies = [ "sha2", ] +[[package]] +name = "phf" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_codegen" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a" +dependencies = [ + "phf_generator", + "phf_shared", +] + +[[package]] +name = "phf_generator" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" +dependencies = [ + "phf_shared", + "rand", +] + [[package]] name = "phf_shared" version = "0.11.3" @@ -2027,6 +2150,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +[[package]] +name = "portable-atomic" 
+version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483" + [[package]] name = "ppv-lite86" version = "0.2.16" @@ -2087,6 +2216,12 @@ dependencies = [ "unicode-ident", ] +[[package]] +name = "pure-rust-locales" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1190fd18ae6ce9e137184f207593877e70f39b015040156b1e05081cdfe3733a" + [[package]] name = "quote" version = "1.0.35" @@ -2171,6 +2306,35 @@ dependencies = [ "thiserror", ] +[[package]] +name = "regex" +version = "1.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12de2eff854e5fa4b1295edd650e227e9d8fb0c9e90b12e7f36d6a6811791a29" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49530408a136e16e5b486e883fbb6ba058e8e4e8ae6621a77b048b314336e629" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.7.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbb5fb1acd8a1a18b3dd5be62d25485eb770e05afb408a9627d14d451bae12da" + [[package]] name = "reqwest" version = "0.11.27" @@ -2224,6 +2388,69 @@ dependencies = [ "subtle", ] +[[package]] +name = "rhai" +version = "1.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2780e813b755850e50b178931aaf94ed24f6817f46aaaf5d21c13c12d939a249" +dependencies = [ + "ahash", + "bitflags 2.9.1", + "instant", + "num-traits", + "once_cell", + "rhai_codegen", + "serde", + "serde_json", + "smallvec 1.10.0", + "smartstring", + "thin-vec", +] + +[[package]] +name = "rhai-chrono" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22099214084b30d0fc59b98404ddaf0e9a6e9d85a5410f93d6e19639f166e524" +dependencies = [ + "chrono", + "chrono-tz", + "rhai", +] + +[[package]] +name = "rhai-env" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e328a0eea295867e893ee7888f161ed3197f8ab561b3cc050d493399c410bee8" +dependencies = [ + "rhai", + "serde", + "serde_json", +] + +[[package]] +name = "rhai-fs" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9af3f7717d7194033924473b3e4f0b3dbd569e3528837c3fbb00b4e1d1c6ff16" +dependencies = [ + "rhai", + "serde", + "serde_json", +] + +[[package]] +name = "rhai_codegen" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5a11a05ee1ce44058fa3d5961d05194fdbe3ad6b40f904af764d81b86450e6b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.87", +] + [[package]] name = "ring" version = "0.16.20" @@ -2669,6 +2896,21 @@ name = "smallvec" version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" +dependencies = [ + "serde", +] + +[[package]] +name = "smartstring" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29" +dependencies = [ + "autocfg", + "serde", + "static_assertions", + "version_check", +] [[package]] name = "smoltcp" @@ -2964,6 +3206,15 @@ dependencies = [ "libc", ] +[[package]] +name = "thin-vec" 
+version = "0.2.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "144f754d318415ac792f9d69fc87abbbfc043ce2ef041c60f16ad828f638717d" +dependencies = [ + "serde", +] + [[package]] name = "thiserror" version = "1.0.69" @@ -3022,6 +3273,15 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + [[package]] name = "tinyvec" version = "1.8.0" diff --git a/Cargo.toml b/Cargo.toml index 67c037c..0a02dc9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,7 @@ hubtools = { git = "https://github.com/oxidecomputer/hubtools.git", branch = "ma slog-error-chain = { git = "https://github.com/oxidecomputer/slog-error-chain.git", branch = "main", features = ["derive"] } anyhow = "1.0" +async-recursion = "1.1.0" async-trait = "0.1" backoff = { version = "0.4.0", features = ["tokio"] } base64 = "0.22.1" @@ -38,6 +39,8 @@ glob = "0.3.2" hex = "0.4.3" hubpack = "0.1.2" humantime = "2.2.0" +lpc55_areas = { git = "https://github.com/oxidecomputer/lpc55_support/", version = "0.2.3" } +lpc55_sign = { git = "https://github.com/oxidecomputer/lpc55_support/", version = "0.3.0" } lru-cache = "0.1.2" lzss = "0.8" nix = { version = "0.27.1", features = ["net"] } @@ -46,6 +49,10 @@ once_cell = "1.21.3" paste = "1.0.15" parse_int = "0.6" rand = "0.8.5" +rhai-chrono = { version = "^0" } +rhai-env = "0.1.2" +rhai-fs = { version = "0.1.3", features = ["metadata"] } +rhai = { version = "1.21.0", features = ["serde", "metadata", "debugging"]} serde = { version = "1.0", default-features = false, features = ["derive"] } serde-big-array = "0.5.1" serde_bytes = "0.11.17" @@ -65,10 +72,11 @@ strum = { version = "0.27.1", default-features = false } strum_macros = "0.27.1" string_cache = "0.8.9" termios = "0.3" -thiserror = "1.0.69" +thiserror = { version = "1.0.69", default-features = false } tokio = { version = "1.29", features = ["full"] } tokio-stream = { version = "0.1", features = ["fs"] } tokio-util = { version = "0.7", features = ["compat"] } +toml = { version = "0.7", default-features = false, features = ["parse", "display"] } usdt = "0.5.0" uuid = { version = "1.16", default-features = false } version_check = "0.9.5" diff --git a/faux-mgs/Cargo.toml b/faux-mgs/Cargo.toml index 75acd0f..ee28111 100644 --- a/faux-mgs/Cargo.toml +++ b/faux-mgs/Cargo.toml @@ -32,3 +32,19 @@ zerocopy.workspace = true gateway-messages = { workspace = true, features = ["std"] } gateway-sp-comms.workspace = true + +async-recursion = { workspace = true, optional = true } +hubtools = { workspace = true, optional = true } +lpc55_areas = { workspace = true, optional = true } +lpc55_sign= { workspace = true, optional = true } +rhai-chrono = { workspace = true, optional = true } +rhai-env = { workspace = true, optional = true } +rhai-fs = { workspace = true, optional = true } +rhai = { workspace = true, optional = true } +thiserror = { workspace = true, optional = true } +toml = { workspace = true, optional = true } + +[features] +# XXX remove rhaiscript as a defailt feature +default = ["rhaiscript"] +rhaiscript = [ "dep:async-recursion", "dep:hubtools", "dep:lpc55_areas", "dep:lpc55_sign", "dep:rhai", "dep:rhai-chrono", "dep:rhai-env", "dep:rhai-fs", "dep:thiserror", "dep:toml"] 
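The `rhaiscript` feature above pulls in its `dep:` crates only when enabled, and the code that uses them is gated with `cfg(feature = "rhaiscript")`, as the `faux-mgs/src/main.rs` changes below do. A minimal, self-contained sketch of the gating pattern (illustrative only, in a crate whose Cargo.toml declares the feature; the module body and strings here are stand-ins, not the real faux-mgs code):

```rust
// Stand-in for faux-mgs/src/rhaiscript.rs: compiled only when the crate is
// built with `--features=rhaiscript`, so the optional dependencies are only
// linked when the feature is enabled.
#[cfg(feature = "rhaiscript")]
mod rhaiscript {
    pub fn run(script: &str) {
        println!("would hand {script} to the Rhai interpreter");
    }
}

fn dispatch(cmd: &str) {
    match cmd {
        // This arm exists only in feature-enabled builds.
        #[cfg(feature = "rhaiscript")]
        "rhai" => rhaiscript::run("scripts/upgrade-rollback.rhai"),
        other => println!("unsupported command: {other}"),
    }
}

fn main() {
    dispatch("rhai");
}
```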
diff --git a/faux-mgs/src/main.rs b/faux-mgs/src/main.rs index c109bd0..b034601 100644 --- a/faux-mgs/src/main.rs +++ b/faux-mgs/src/main.rs @@ -65,6 +65,8 @@ use uuid::Uuid; use zerocopy::IntoBytes; mod picocom_map; +#[cfg(feature = "rhaiscript")] +mod rhaiscript; mod usart; /// Command line program that can send MGS messages to a single SP. @@ -137,6 +139,14 @@ struct Args { command: Command, } +/// Command line program that can send MGS messages to a single SP. +#[cfg(feature = "rhaiscript")] +#[derive(Parser, Debug)] +struct RhaiArgs { + #[clap(subcommand)] + command: Command, +} + fn level_from_str(s: &str) -> Result { if let Ok(level) = s.parse() { Ok(level) @@ -407,6 +417,16 @@ enum Command { disable_watchdog: bool, }, + /// Run a Rhai script within faux-mgs + #[cfg(feature = "rhaiscript")] + Rhai { + /// Path to Rhia script + script: PathBuf, + /// Additional arguments passed to Rhia scripe + #[clap(trailing_var_arg = true, allow_hyphen_values = true)] + script_args: Vec, + }, + /// Controls the system LED SystemLed { #[clap(subcommand)] @@ -891,7 +911,7 @@ async fn main() -> Result<()> { .into_iter() .map(|sp| { let interface = sp.interface().to_string(); - run_command( + run_any_command( sp, args.command.clone(), args.json.is_some(), @@ -965,11 +985,28 @@ fn ssh_list_keys(socket: &PathBuf) -> Result> { client.list_identities().context("failed to list identities") } -async fn run_command( +/// This function exists to break recursive calls to the Rhai interpreter. +/// main() calls here but Rhai{...} calls run_command(). +async fn run_any_command( sp: SingleSp, command: Command, json: bool, log: Logger, +) -> Result { + match command { + #[cfg(feature = "rhaiscript")] + Command::Rhai { script, script_args } => { + rhaiscript::interpreter(&sp, log, script, script_args).await + } + _ => run_command(&sp, command, json, log).await, + } +} + +async fn run_command( + sp: &SingleSp, + command: Command, + json: bool, + log: Logger, ) -> Result { match command { // Skip special commands handled by `main()` above. @@ -1328,7 +1365,7 @@ async fn run_command( let data = fs::read(&image).with_context(|| { format!("failed to read {}", image.display()) })?; - update(&log, &sp, component, slot, data).await.with_context( + update(&log, sp, component, slot, data).await.with_context( || { format!( "updating {} slot {} to {} failed", @@ -1465,7 +1502,10 @@ async fn run_command( Ok(Output::Lines(vec!["reset complete".to_string()])) } } - + #[cfg(feature = "rhaiscript")] + Command::Rhai { script, script_args } => { + rhaiscript::interpreter(sp, log, script, script_args).await + } Command::ResetComponent { component, disable_watchdog } => { sp.reset_component_prepare(component).await?; info!(log, "SP is prepared to reset component {component}",); @@ -1554,14 +1594,8 @@ async fn run_command( if time_sec == 0 { bail!("--time must be >= 1 second"); } - monorail_unlock( - &log, - &sp, - time_sec, - ssh_auth_sock, - key, - ) - .await?; + monorail_unlock(&log, sp, time_sec, ssh_auth_sock, key) + .await?; } } } @@ -2062,6 +2096,7 @@ async fn populate_phase2_images( Ok(()) } +#[derive(Clone)] enum Output { Json(serde_json::Value), Lines(Vec), diff --git a/faux-mgs/src/rhaiscript.rs b/faux-mgs/src/rhaiscript.rs new file mode 100644 index 0000000..dd34b06 --- /dev/null +++ b/faux-mgs/src/rhaiscript.rs @@ -0,0 +1,639 @@ +// +// file, You can obtain one at https://mozilla.org/MPL/2.0/. 
+ +use crate::anyhow; +use crate::debug; +use crate::info; +use crate::warn; +use slog::crit; +use slog::error; +use slog::trace; + +use crate::fs; +use crate::json; +use crate::run_command; +use crate::Context; +use crate::Logger; +use crate::Output; +use crate::Path; +use crate::PathBuf; +use crate::Result; +use crate::RhaiArgs; +use crate::RotBootInfo; +use crate::SingleSp; +use clap::Parser; +use std::sync::Arc; + +use async_recursion::async_recursion; +use hubtools::{Caboose, RawHubrisArchive}; +use lpc55_areas::{CFPAPage, CMPAPage}; +use rhai::packages::Package; +use rhai::{ + Array, CustomType, Dynamic, Engine, EvalAltResult, ImmutableString, Map, + NativeCallContext, Scope, TypeBuilder, +}; +use rhai_chrono::ChronoPackage; +use rhai_env::EnvironmentPackage; +use rhai_fs::FilesystemPackage; +use serde_json::Value as Json; +use toml::Value as Toml; + +#[derive(Debug, CustomType)] +#[rhai_type(extra = Self::build_archive_inspector)] +struct ArchiveInspector { + #[rhai_type(skip)] + inner: Arc, +} + +impl Clone for ArchiveInspector { + fn clone(&self) -> Self { + ArchiveInspector { inner: self.inner.clone() } + } +} + +impl ArchiveInspector { + fn new(inner: Arc) -> Self { + ArchiveInspector { inner } + } + + pub fn from_vec(contents: Vec) -> Result> { + match RawHubrisArchive::from_vec(contents) { + Ok(archive) => Ok(Self::new(Arc::new(archive))), + Err(e) => Err(format!("RawHubrisArchive::from_vec: {e}") + .to_string() + .into()), + } + } + + pub fn load(path: ImmutableString) -> Result> { + let path = PathBuf::from(path.into_owned()); + match RawHubrisArchive::load(&path) { + Ok(archive) => Ok(Self::new(Arc::new(archive))), + Err(e) => { + Err(format!("RawHubrisArchive::load: {e}").to_string().into()) + } + } + } + + fn u8_to_string(array: &[u8]) -> String { + String::from_utf8_lossy( + if let Some(p) = array.iter().position(|&x| x == 0) { + &array[0..p] + } else { + &array[0..] 
+ }, + ) + .to_string() + } + + pub fn indexer( + &mut self, + index: &str, + ) -> Result> { + match index { + // Copied from hubtools/src/caboose + "BORD" => { + let caboose: Caboose = self + .inner + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; + caboose + .board() + .map(|v| Ok(Self::u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + } + "GITC" => { + let caboose: Caboose = self + .inner + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; + caboose + .git_commit() + .map(|v| Ok(Self::u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + } + "NAME" => { + let caboose: Caboose = self + .inner + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; + caboose + .name() + .map(|v| Ok(Self::u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + } + "SIGN" => { + let caboose: Caboose = self + .inner + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; + caboose + .sign() + .map(|v| Ok(Self::u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + } + "VERS" => { + let caboose: Caboose = self + .inner + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; + caboose + .version() + .map(|v| Ok(Self::u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + } + "image_name" => { + if let Ok(iname) = self.inner.image_name() { + Ok(Dynamic::from(iname)) + } else { + Ok(Dynamic::UNIT) + } + } + // Only in Bootleby versions v1.3.1 and later + // "manufacturing_cfg" => {..} + _ => { + // Try to extract a file by this name + if let Ok(contents) = self.inner.extract_file(index) { + // contents: Vec + match Path::new(index) + .extension() + .and_then(|os| os.to_str()) + { + Some("bin") | Some("elf") => { + Ok(Dynamic::from_blob(contents)) + } + Some("toml") => { + // Adapted from toml crate example toml2json + fn toml2json(tv: Toml) -> Json { + match tv { + Toml::String(s) => Json::String(s), + Toml::Integer(i) => Json::Number(i.into()), + Toml::Float(f) => { + if let Some(n) = + serde_json::Number::from_f64(f) + { + Json::Number(n) + } else { + Json::Null + } + } + Toml::Boolean(b) => Json::Bool(b), + Toml::Array(arr) => Json::Array( + arr.into_iter() + .map(toml2json) + .collect(), + ), + Toml::Table(table) => Json::Object( + table + .into_iter() + .map(|(k, v)| (k, toml2json(v))) + .collect(), + ), + Toml::Datetime(dt) => { + Json::String(dt.to_string()) + } + } + } + let text = String::from_utf8_lossy(&contents[..]) + .to_string(); + if let Json::Object(json) = + toml2json(Toml::from(text)) + { + Ok(Dynamic::from(json)) + } else { + unreachable!(); + } + } + Some("json") => { + let text: String = + String::from_utf8_lossy(&contents[..]) + .to_string(); + if let Ok(json) = serde_json::to_value(text) { + Ok(Dynamic::from(json)) + } else { + let text = + String::from_utf8_lossy(&contents[..]) + .to_string(); + if let Json::Object(json) = json!({ "Err": serde_json::Value::String(text) }) + { + Ok(Dynamic::from(json)) + } else { + unreachable!() + } + } + } + _ => { + if index.starts_with("elf/") { + // All of these are binary files + Ok(Dynamic::from_blob(contents)) + } else { + // .txt, .fwid, .ron, .tlvc, .cfg, .gdb + // git-rev, .TXT, image-name + let text = + String::from_utf8_lossy(&contents[..]) + .to_string(); + Ok(Dynamic::from(text)) + } + } + } + } else { + Err(format!("unknown index: {:?}", index).into()) + } + } + } + } + + pub fn build_archive_inspector(builder: &mut TypeBuilder) { + builder + .with_name("archive") + .with_fn("new_archive", 
ArchiveInspector::from_vec) + .with_fn("new_archive", ArchiveInspector::load) + .with_indexer_get(ArchiveInspector::indexer); + } +} + +/// Use a Rhai interpreter per SingleSp that can maintain a connection. +#[async_recursion] +pub async fn interpreter( + sp: &SingleSp, + log: Logger, + script: PathBuf, + script_args: Vec, +) -> Result { + // Channel: Script -> Master + let (tx_script, rx_master) = std::sync::mpsc::sync_channel::(1); + // Channel: Master -> Script + let (tx_master, rx_script) = std::sync::mpsc::sync_channel::(1); + + let interface = sp.interface().to_string().to_owned(); + let reset_watchdog_timeout_ms = sp.reset_watchdog_timeout_ms() as i64; + + let thread_log = log.clone(); + let handle = std::thread::spawn(move || { + let log = thread_log; + // Create Engine + let mut engine = Engine::new(); + + // Setup file system access for scripts + let package = FilesystemPackage::new(); + package.register_into_engine(&mut engine); + + // Standard date formats + let package = ChronoPackage::new(); + package.register_into_engine(&mut engine); + + // Setup env access for scripts + let package = EnvironmentPackage::new(); + package.register_into_engine(&mut engine); + + // Don't limit resources for now. + engine.set_max_expr_depths(0, 0); + + // Access RawHubrisArchives + engine.build_type::(); + engine.register_fn("system", system); + + // Compile the script + let program = fs::read_to_string(&script) + .with_context(|| format!("failed to read {}", script.display())) + .unwrap(); + + // Construct argv for the script and canonicalize the script path. + let pb = fs::canonicalize(&script) + .context("Cannot canonicalize {&script}")?; + let script_dir = pb + .parent() + .context("Cannot get parent dir of {&script}")? + .display() + .to_string(); + let argv0 = pb.display().to_string(); + + engine + // faux_mgs thread consumes and produces JSON + .register_fn("faux_mgs", move |v: Dynamic| -> Dynamic { + match tx_script.send(serde_json::to_string(&v).unwrap()) { + Ok(()) => match rx_script.recv() { + Ok(v) => { + // println!("RECEIVED Ok: \"{:?}\"", v); + serde_json::from_str::(&v).unwrap() + } + Err(e) => { + // println!("RECEIVED Ok(Err): \"{:?}\"", v); + let err = format!("{{\"error\": \"{:?}\"}}", e) + .to_string(); + serde_json::from_str::(&err).unwrap() + } + }, + Err(e) => { + // println!("RECEIVED Err: \"{:?}\"", v); + let err = + format!("{{\"error\": \"{:?}\"}}", e).to_string(); + serde_json::from_str::(&err).unwrap() + } + } + }) + // Offer proper JSON to Dynamic::Map conversion + .register_fn("json_to_map", move |v: Dynamic| -> Dynamic { + match v.into_string() { + Ok(s) => match serde_json::from_str::(&s) { + Ok(v) => v, + Err(e) => { + let err = format!("{{\"error\": \"{:?}\"}}", e) + .to_string(); + serde_json::from_str::(&err).unwrap() + } + }, + Err(e) => { + let err = + format!("{{\"error\": \"{:?}\"}}", e).to_string(); + serde_json::from_str::(&err).unwrap() + } + } + }) + // lpc55_support RoT signature verification + .register_fn( + "verify_rot_image", + move |image: Dynamic, + cmpa: Dynamic, + cfpa: Dynamic| + -> Dynamic { + fn to_page(slice: &[u8]) -> Option<&[u8; 512]> { + slice.try_into().ok() + } + + let a = cmpa.as_blob_ref().unwrap().clone(); + let page = to_page(a.as_ref()).unwrap(); + let cmpapage = CMPAPage::from_bytes(page).unwrap(); + + let a = cfpa.as_blob_ref().unwrap().clone(); + let page = to_page(a.as_ref()).unwrap(); + let cfpapage = CFPAPage::from_bytes(page).unwrap(); + + let image = image.as_blob_ref().unwrap().clone(); + + if 
lpc55_sign::verify::verify_image( + &image, cmpapage, cfpapage, + ) + .is_ok() + { + true.into() + } else { + false.into() + } + }, + ); + + // A script can log via debug at any level: + // debug("INFO|log message at INFO level"); + // debug("CRIT|log message at CRIT level"); + // etc. + let rhai_log = log.clone(); + engine.on_debug(move |x, src, pos| { + let src = if src.is_some() { + format!("{}@", src.unwrap()) + } else { + "".to_string() + }; + let x: Vec<&str> = x.trim_matches('"').splitn(2, '|').collect(); + let (level, msg) = if x.len() == 1 { + ("info".to_string(), x[0].to_string()) + } else { + let level = x[0].to_string().to_lowercase(); + let msg = x[1].to_string(); + match level.as_str() { + "info" => ("info".to_string(), msg), + "crit" => ("crit".to_string(), msg), + "error" => ("error".to_string(), msg), + "trace" => ("trace".to_string(), msg), + "warn" => ("warn".to_string(), msg), + "debug" => ("debug".to_string(), msg), + _ => ("debug".to_string(), format!("{}|{}", level, msg)), + } + }; + let src = if src.is_empty() { + format!("{}@", src) + } else { + "".to_string() + }; + let msg = format!("{}pos={:?} {}", src, pos, msg); + match level.as_str() { + "crit" => crit!(rhai_log, "{msg}"), + "debug" => debug!(rhai_log, "{msg}"), + "error" => error!(rhai_log, "{msg}"), + "info" => info!(rhai_log, "{msg}"), + "trace" => trace!(rhai_log, "{msg}"), + "warn" => warn!(rhai_log, "{msg}"), + _ => unreachable!(), + } + }); + + // Print registered functions if you're interested. + + // engine.gen_fn_signatures(false).into_iter().for_each(|func| println!("{func}")); + + match engine.compile(program) { + Ok(ast) => { + // These variables are visible in the script main() + let mut scope = Scope::new(); + let mut argv = vec![]; + argv.push(argv0); + argv.extend(script_args); + scope.push_dynamic("argv", argv.clone().into()); + scope.push_dynamic( + "rbi_default", + RotBootInfo::HIGHEST_KNOWN_VERSION.to_string().into(), + ); + scope.push_dynamic("script_dir", script_dir.into()); + scope.push_dynamic("interface", interface.into()); + scope.push_dynamic( + "reset_watchdog_timeout_ms", + reset_watchdog_timeout_ms.into(), + ); + match engine.call_fn::(&mut scope, &ast, "main", ()) { + Ok(exit_value) => { + Ok(Output::Json(json!({"exit": exit_value}))) + } + Err(err) => Err(anyhow!("{err}")), + } + } + Err(e) => Err(anyhow!(format!( + "failed to parse {}: {:?}", + &script.display(), + e + ))), + } + }); + + while let Ok(command_args) = rx_master.recv() { + // Service the script's calls to "faux_mgs" + // from those commands. + // The script can only send use arrays of strings. + // println!("args=${:?}", &command_args); + let response = if let Ok(serde_json::Value::Array(j)) = + serde_json::from_str(&command_args) + { + // TODO: fix this so that user can put an i64 and we won't panic. + let a: Vec = + j.iter().map(|v| v.as_str().unwrap().to_string()).collect(); + debug!(log, "vec string: {:?}", a); + let mut ra = vec![]; + // The clap crate is expecting ARGV[0] as the program name, insert a dummy. 
+ ra.push("faux-mgs".to_string()); + ra.append(&mut a.clone()); + + let args = RhaiArgs::parse_from(&ra); + match run_command(sp, args.command.clone(), true, log.clone()).await + { + Ok(Output::Json(json)) => { + // Turn all results into a map for easy digestion + // println!("RESULT: Ok: {:?}", &json); + let obj = match json { + serde_json::Value::Object(map) => map, + _ => json!({ "Ok": json }) + .as_object() + .unwrap() + .to_owned(), + }; + match serde_json::to_string(&obj) { + Ok(s) => s, + // More verbose code, but don't need to worry about quoting. + Err(e) => serde_json::to_string(json!({ + "Err": serde_json::Value::String(format!("{:?}", e)) + }).as_object().unwrap()).unwrap(), + } + } + Ok(Output::Lines(_)) => { + // The --json=pretty option is hard-coded + unreachable!(); + } + Err(e) => { + // println!("RESULT: Err: {:?}", &e); + format!("{{\"error\": \"failed\", \"message\": \"{}\"}}", e) + } + } + } else { + "{{\"error\": \"cannot serialize faux_mgs args to json\"}}" + .to_string() + }; + if tx_master.send(response).is_err() { + break; + } + } + + match handle.join() { + Ok(result) => result, + Err(err) => Err(anyhow!("{:?}", err)), + } +} + +/// Allow Rhai scripts to run commands and capture stdout, stderr, and +/// exit code. +/// This function was generated with the following prompt to +/// gemini.google.com: +/// +/// Write a Rust function, `system`, that can be registered with the Rhai +/// scripting engine. The function should take an array of strings (`Array`) +/// as input, representing a command and its arguments, execute the command +/// using `std::process::Command`, and return a Rhai `Map` containing the +/// command's exit code, standard output, and standard error. +/// +/// The function should handle the following: +/// +/// * Convert the input `Array` to a `Vec`. +/// * Handle errors if the input `Array` is empty or if any element cannot +/// be converted to a `String`. +/// * Use `std::process::Command` with fully qualified names (e.g., +/// `std::process::Command::new`). +/// * Capture the command's standard output and standard error using +/// `std::process::Stdio::piped()`. +/// * Convert the captured output to Rhai `ImmutableString` values using +/// `String::from_utf8_lossy`. +/// * Return a Rhai `Map` with the keys "exit_code", "stdout", and "stderr". +/// * Handle errors during command execution and output capture. +/// * Use `EvalAltResult::ErrorInFunctionCall` for function call errors and +/// `EvalAltResult::ErrorRuntime` for runtime errors. +/// * Ensure that error messages passed to `EvalAltResult::ErrorRuntime` +/// are converted to `Dynamic` using `.into()`. +/// * Place the underlying error in the third position of the +/// `EvalAltResult::ErrorInFunctionCall` variant. +/// * Use `context.position()` to get the error position. +/// * Do not use the `mut` keyword on the `child` variable when calling +/// `command.spawn()`. +/// +/// Provide a complete Rust code example that includes the `system` function +/// and a `main` function that registers it with a Rhai engine and runs a +/// sample Rhai script. 
+fn system( + context: NativeCallContext, + argv: Array, +) -> Result> { + let mut string_argv: Vec = Vec::new(); + for arg in argv.iter() { + match arg.clone().into_string() { + Ok(s) => string_argv.push(s), + Err(_) => { + return Err(Box::new(EvalAltResult::ErrorRuntime( + "Arguments must be strings.".into(), + context.position(), + ))); + } + } + } + + if string_argv.is_empty() { + return Err(Box::new(EvalAltResult::ErrorInFunctionCall( + "system".to_string(), + "Expected at least one argument.".to_string(), + Box::new(EvalAltResult::ErrorRuntime( + "".into(), + context.position(), + )), + context.position(), + ))); + } + + let command_name = &string_argv[0]; + let args = &string_argv[1..]; + + let mut command = std::process::Command::new(command_name); + command.args(args); + + command + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()); + + let output = match command.spawn() { + Ok(child) => child.wait_with_output(), + Err(e) => { + return Err(Box::new(EvalAltResult::ErrorRuntime( + format!("Failed to spawn command: {}", e).into(), + context.position(), + ))); + } + }; + + let output = match output { + Ok(output) => output, + Err(e) => { + return Err(Box::new(EvalAltResult::ErrorRuntime( + format!("Failed to get command output: {}", e).into(), + context.position(), + ))); + } + }; + + let exit_code = output.status.code().unwrap_or(-1) as i64; + let stdout = ImmutableString::from( + String::from_utf8_lossy(&output.stdout).to_string(), + ); + let stderr = ImmutableString::from( + String::from_utf8_lossy(&output.stderr).to_string(), + ); + + let mut result = Map::new(); + result.insert("exit_code".into(), exit_code.into()); + result.insert("stdout".into(), stdout.into()); + result.insert("stderr".into(), stderr.into()); + + Ok(result) +} diff --git a/gateway-sp-comms/src/single_sp.rs b/gateway-sp-comms/src/single_sp.rs index 6f3298c..8b649fa 100644 --- a/gateway-sp-comms/src/single_sp.rs +++ b/gateway-sp-comms/src/single_sp.rs @@ -430,6 +430,10 @@ impl SingleSp { &self.interface } + pub fn reset_watchdog_timeout_ms(&self) -> u32 { + self.reset_watchdog_timeout_ms + } + /// Retrieve the [`watch::Receiver`] for notifications of discovery of an /// SP's address. pub fn sp_addr_watch( diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 0000000..d094777 --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,127 @@ +# Scripting in faux-mgs + +The `faux-mgs` utility is useful for testing the APIs and protocols used +between the control plane and service processor. + +The choices for testing over multiple messages with behavior conditional +on results against real hardware becomes more challenging. + +`Faux-mgs` itself can be extended to include any new command or +test. But for one-off scripting, personal development bench tests, or +other contexts, such as tying a CI test to a certain set of hardware, +the mix of scripts and faux-mgs commands can quickly become unwieldy. + +`Faux-mgs` supports a `--json pretty` output format for all commands. So, +using any scripting language, including bash, becomes much easier with +the language's JSON libraries or use of the `jq` program. + +Here, another option is provided; the embedded Rhai scripting language +is used to extend `faux-mgs`. + +The JSON output from `Faux-mgs` means that all commands already produce +an easy to parse output format. That format is easily translated to a Rhai +`map`. + +The `clap` parser used by `faux-mgs` isn't limited to parsing the `argv` +from the OS command line interface. 
It can also be invoked internally by
+`faux-mgs` on an arbitrary string array.
+
+This standardized command I/O means that the interpreter integration
+does not have to be aware of any particular `faux-mgs` command; the one
+exception is the `rhai` command itself, which is excluded to prevent
+recursive calls.
+
+Nice attributes:
+  - Faster, because the "connection" (discovery, etc.) to the SP is
+    reused between commands and multiple commands are run from the same
+    faux-mgs process.
+  - Command output is made available to scripts in a Rhai-native format.
+  - The script is as portable as `faux-mgs`.
+  - Because `faux-mgs` can run the same command against multiple SPs, a
+    script can also be run that way.
+
+## Rhai Integration
+
+Rhai calls the script's `main() -> i64 {}`.
+
+### Globals available to the script:
+
+  - `argv` array of string arguments that trail the `clap`/OS CLI `rhai` command.
+  - `interface` the value of the `faux-mgs` `--interface` argument.
+  - `reset_watchdog_timeout_ms` the reset watchdog timeout, in milliseconds.
+  - `rbi_default` is RotBootInfo::HIGHEST_KNOWN_VERSION
+  - `script_dir` is the canonical path to the directory of the main
+    script file.
+
+### Extra Rhai Packages used include:
+
+  - rhai_env::EnvironmentPackage - user environment variables
+  - rhai_fs::FilesystemPackage - file system access
+  - [rhai_chrono::ChronoPackage](https://github.com/iganev/rhai-chrono) - standard time formats.
+
+### Modified Rhai behavior
+  - The `debug("message")` function is routed to the faux-mgs slog logging.
+    Prefixing a message with "crit|", "trace|", "error|", "warn|", "info|", or "debug|"
+    will log at the corresponding level. Leaving off the prefix or using some other
+    prefix will log at the debug level.
+
+### Custom functions:
+
+  - faux_mgs(["arg0", .., "argN"]) -> #{} // Run any faux-mgs command -> map
+  - RawHubrisArchive
+    - new_archive(path) -> ArchiveInspector // RawHubrisArchive inspection
+    - indexer (get via var["index"]) for ArchiveInspector
+      - zip path name to blob or string as appropriate according to
+        internal rules (e.g. .bin, elf/*, etc. are blobs)
+  - verify_rot_image(image_blob, cmpa, cfpa) -> bool // verify RoT image signature
+  - json_to_map(string) -> #{} // convert any JSON to a Rhai map
+  - system(["argv0", .., "argvN"]) -> #{"exit_code": i64, "stdout": str, "stderr": str}
+    - run any command. Note: no shell expansion; this is std::process::Command
+
+### Script utility functions
+
+See `scripts/util.rhai` for additional utility functions.
+
+## Running a script
+
+In this example, a bash wrapper, "FM", saves typing when running a
+`faux-mgs` command against a particular SP (Grapefruit).
+
+```bash
+#!/usr/bin/bash
+if [[ "${1:-}" == "--too-quick" ]]
+then
+    shift
+    # too fast for update to succeed. Used to trigger update watchdog.
+    MAXATTEMPTS=5
+    MAXATTEMPTS_RESET=1
+    PER_ATTEMPT_MS=2000
+else
+    # Normal values
+    MAXATTEMPTS=5
+    MAXATTEMPTS_RESET=30
+    # PER_ATTEMPT_MS=2000
+    # 2165 is on the edge
+    PER_ATTEMPT_MS=2200
+fi
+
+cargo -q run --bin faux-mgs --features=rhaiscript -- \
+    --log-level=crit \
+    --interface=enp5s0 \
+    --json=pretty \
+    --max-attempts=${MAXATTEMPTS} \
+    --max-attempts-reset=${MAXATTEMPTS_RESET} \
+    --per-attempt-timeout-millis=${PER_ATTEMPT_MS} \
+    "$@"
+```
+A `getopts` utility function provides command line parsing within
+the script.
+
+For the upgrade-rollback script, a JSON configuration file supplies
+paths or other parameters needed to configure the script.
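+
+Under the hood, `faux_mgs(...)` results and `json_to_map(...)` both lean on
+Rhai's `serde` support to turn JSON text into a script-native map. A minimal
+sketch of that conversion (illustrative only, not part of faux-mgs; the JSON
+literal is made up):
+
+```rust
+use rhai::{Dynamic, Engine, Scope};
+
+fn main() {
+    // JSON text, e.g. what a faux-mgs command prints with --json.
+    let json = r#"{"ack": "reset"}"#;
+
+    // With rhai's "serde" feature enabled, a Dynamic deserializes directly
+    // from JSON; an object becomes a Rhai map.
+    let result: Dynamic = serde_json::from_str(json).unwrap();
+
+    // Hand the value to a script under the name `r`.
+    let mut scope = Scope::new();
+    scope.push_dynamic("r", result);
+
+    // The script can then index it like any other Rhai map.
+    let ack: String = Engine::new().eval_with_scope(&mut scope, "r.ack").unwrap();
+    assert_eq!(ack, "reset");
+}
+```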
+ +then for instance: +```bash +./FM rhai scripts/upgrade-rollback.rhai -- -c scripts/targets.json +``` + +See the scripts themselves for further information. diff --git a/scripts/targets.json b/scripts/targets.json new file mode 100644 index 0000000..164ac7d --- /dev/null +++ b/scripts/targets.json @@ -0,0 +1,30 @@ +{ + "repo-home": "${HOME}/Oxide/src", + "base_repo": "${repo-home}/hubris/master", + "ut_repo": "${repo-home}/hubris/epoch", + + "keyset": "bart", + "keyset-dvt-dock": "${repo-home}/dvt-dock/${keyset}", + "base-b-ver": "v1.3.1", + "ut-b-ver": "v1.3.3", + + "bord": { + "sp": "grapefruit-standalone", + "rot": "oxide-rot-1-selfsigned" + }, + + "images": { + "base": { + "sp": "${base_repo}/target/${bord.sp}/dist/default/build-${bord.sp}-image-default.zip", + "rot_a": "${base_repo}/target/${bord.rot}/dist/a/build-${bord.rot}-image-a.zip", + "rot_b": "${base_repo}/target/${bord.rot}/dist/b/build-${bord.rot}-image-b.zip", + "stage0": "${keyset-dvt-dock}/gimlet/bootleby-${base-b-ver}-${keyset}-gimlet.zip" + }, + "ut": { + "sp": "${ut_repo}/target/${bord.sp}/dist/default/build-${bord.sp}-image-default.zip", + "rot_a": "${ut_repo}/target/${bord.rot}/dist/a/build-${bord.rot}-image-a.zip", + "rot_b": "${ut_repo}/target/${bord.rot}/dist/b/build-${bord.rot}-image-b.zip", + "stage0": "${keyset-dvt-dock}/gimlet/bootleby-${ut-b-ver}-${keyset}-gimlet.zip" + } + } +} diff --git a/scripts/upgrade-rollback.rhai b/scripts/upgrade-rollback.rhai new file mode 100644 index 0000000..8baa9f5 --- /dev/null +++ b/scripts/upgrade-rollback.rhai @@ -0,0 +1,595 @@ +import `${script_dir}/util` as util; + +/// Print command line usage +fn usage(prog, error) { + if error != () { + print(`Error: ${error}`); + } + print(`Usage: faux-mgs ... rhai ${prog} [-v] [-h] [-c config.json]`); + print(" -c CONFIG.JSON # Path to configuration"); + print(" -v # be verbose"); + print(" -h # Help. Print this message"); + print(""); +} + +fn main() { + // Display start time + let start_ts = timestamp(); + let start_time = datetime_local(); + print(`Starting at ${start_time}`); + + let conf = process_cli(argv); + switch type_of(conf) { + "i64" => return conf, + "map" => (), + _ => return 1, + } + // Collect GITC and signature verification info. + let images = get_image_info(conf); + + print("\n## Determine initial FW state and update if needed"); + + // Generally, there should no updates in progress + // and we can proceed with installation of the + // baseline images if needed. + // . + // If a previous test was aborted, some recovery may + // be necessary. 
+ // + // A corner case that may require a hard-reset is described in: + // https://github.com/oxidecomputer/hubris/issues/1022 + + let problems = 0; + for attempt in 1..=2 { + problems = 0; + for component in ["rot", "sp"] { + let r = check_update_in_progress(component); + if r?.Err != () { + print(`Error check_update_in_progress=${r}`); + return 1; + } + debug(`check_update_in_progress(${component})=${r}`); + if r != () { + problems += 1; + let id = r?.id; + debug(`r=${r}`); + debug(`id=${id}`); + debug(`Failed: component ${component}: ${r}`); + print(`The SP probably needs to be reset before continuing`); + print(`Trying to abort the update with id=${id}`); + let r = faux_mgs(["update-abort", component, `${id}`]); + debug(`update-abort = ${r}`); + } + } + if problems == 0 { + break; + } + let r = reset_sp_and_rot(); + if r?["error"] != () { + print(`Cannot reset rot and sp: ${r.error}`); + print(`Check that RoT has no pending update and STLINK is off`); + print("Also see https://github.com/oxidecomputer/hubris/issues/1022"); + return 1; + } + } + if problems > 0 { + print(`Devices are stuck in update and need to be hard-reset before continuing`); + return 1; + } + + // Check initial vs desired state based on GITC values. + // + // Note that during development, GITC+"dirty" won't + // distinguish between your various iterations. + // It would be better to use FWID values all around. + // TODO: When hubtools has fwidgen integrated and when SP can report + // the FWID of its active and inactive flash banks, and old images + // without those features do not need to be supported, transition to + // FWID-based assessment. Note: The RoT can report SP Active bank FWID + // over IPCC. + let result = image_check("base", images); + let err = result?["error"]; + if err != () { + print(`image_check error: ${err}`); + return 1; + } + let flash_sp = result?.ok?.sp; + let flash_rot = result?.ok?.rot; + + print(""); + print(`Elapsed time: ${start_ts.elapsed}`); + print(`Now=${datetime_local()}`); + print(""); + + // Update SP with baseline image if needed. + if flash_sp && !update_sp(conf.base.sp) { + return 1; + } + + // Update RoT with baseline image + debug(`Flash baseline rot hubris`); + if flash_rot && !update_rot_hubris(conf.base.rot_a, conf.base.rot_b) { + return 1 + } + + // Both SP and RoT should have baseline images installed. + + // Verify that SP and RoT are running correct images. + // Check updated vs desired state based on GITC values. + if flash_sp || flash_rot { + let result = image_check("base", images); + if result?.error != () || result.ok.sp || result.ok.rot { + // Not able to check images or one or both base images not installed. + debug(`image_check error or failed BASE image updates: ${result}`); + return 1; + } + } + + // Upgrade to under-test then rollback to baseline images. 
+ for v in [ + #{ + "up_down": "upgrade", + "label": "under-test", + "branch": "ut", + "sp_path": conf.ut.sp, + "rot_a_path": conf.ut.rot_a, + "rot_b_path": conf.ut.rot_b, + }, + #{ + "up_down": "rollback", + "label": "baseline", + "branch": "base", + "sp_path": conf.base.sp, + "rot_a_path": conf.base.rot_a, + "rot_b_path": conf.base.rot_b, + } + ] { + print(""); + print(`## ${v.up_down} to ${v.label} images`); + debug(`${v.up_down} SP Hubris to ${v.label} image`); + if !update_sp(v.sp_path) { + print(`Failed to ${v.up_down} SP Hubris to ${v.label} image`); + return 1; + } + debug(`${v.up_down} Rot Hubris to ${v.label} image`); + if !update_rot_hubris(v.rot_a_path, v.rot_b_path) { + print(`Failed to ${v.up_down} RoT Hubris to ${v.label} image: ${r}`); + return 1 + } + let result = image_check(v.branch, images); + if result?.error != () || result.ok.sp || result.ok.rot { + // Not able to check images or one or both ${branch} images not installed. + debug(`image_check error or failed ${label} image updates: ${result}`); + return 1; + } + print(`### SUCCESS: ${v.up_down} to SP and RoT ${v.label} images`); + } + + print(""); + print(`Elapsed time so far: ${start_ts.elapsed}`); + print(`Now=${datetime_local()}`); + print("Done"); + // All is well. Exit with code 0 + 0 +} + +// TODO: Parameterize update orders for upgrade and rollback with defaults +// that match what omicron or wicket would do. +// e.g. in config.json: "upgrade-order": ["sp", "rot", "stage0"], +// "rollback-order": ["sp", "rot", "stage0"]; +// +// TODO: Take a TUF repo as a set of baseline or under-test images. +// Alternatively, write a script that will extract from a TUF repo(s) +// and write a configuration file for this script. + +/// Parse the Rhai script command line options including the required +/// JSON configuration file. +/// Return an exit code or the configuration map +fn process_cli(argv) { + // Process command line options + let prog = argv[0]; + let options = "hc:b:u:"; // Options with arguments are followed by ':' + let parsed = util::getopts(argv, "hc:v"); + if parsed?["error"] != () { + usage(prog, parsed?["error"]); + return 1; + } + + if parsed.result?["h"] == true { + usage(prog, ()); + return 0; + } + + // Build the configuration map + let conf = #{}; + + conf["verbose"] = false; + if parsed.result?["v"] == true { + conf.verbose=true; + } + + let conf_path = parsed["result"]?["c"]; + if conf_path == () { + usage(prog, "Missing option: -c config.json"); + return 1; + } + let conf_path = path(conf_path); + if !conf_path.is_file { + usage(prog, `${conf_path} is not a regular file`); + return 1 + } + conf["conf_path"] = conf_path; + + // Process the JSON format configuration file into a map. + let conf_file = open_file(conf_path); + let conf_json = conf_file.read_string(); + let config = json_to_map(conf_json); + if conf.verbose { + print(""); + print(`config=${config}`); + } + + // Expand the paths to archives so that the config file + // can use vars from the environment and the configuration file itself. 
+ conf["sp_bord"] = util::env_expand(config.bord.sp, config); + conf["rot_bord"] = util::env_expand(config.bord.rot, config); + + for branch in ["base", "ut"] { + conf[branch] = #{}; + for image in ["sp", "rot_a", "rot_b", "stage0"] { + let zip_path = util::env_expand(config.images[branch][image], config); + if zip_path == () { + print(`No configuration for image ${branch}.${image}`); + } else { + conf[branch][image] = zip_path; + debug(`conf.${branch}.${image}=${zip_path}`); + } + }; + } + + if conf.verbose { + print(""); + print(`Parsed conf=${conf}`); + } + conf +} + +// Check and organize the images mentioned in the configuration +fn get_image_info(conf) { + let images = #{}; + + // Get the current CFPA and CMPA in order to verify the RoT test images. + images["cmpa"] = util::get_cmpa(); + images["cfpa"] = util::get_cfpa(); + images["keyset"] = util::get_rot_keyset(images["cfpa"]); + + let gitc = #{}; + let error = false; + for branch in ["base", "ut"] { + images[branch] = #{}; + for image in ["sp", "rot_a", "rot_b", "stage0"] { + let zip_path = conf[branch][image]; + images[branch][image] = #{"path": zip_path}; + let current_ar = new_archive(zip_path); + if type_of(current_ar) != "archive" { + print(`Invalid archive path ${zip_path}`); + error = true; + continue; + } + // images[branch][image]["ar"] = current_ar; + images[branch][image]["caboose"] = #{}; + for key in ["BORD", "GITC", "NAME", "SIGN", "VERS"] { + images[branch][image]["caboose"][key] = current_ar?[key]; + } + if image == "sp" { + // TODO: Do some sanity checks to make sure BORD and NAME + // are appropriate for the attached SP. + images[branch][image]["verified"] = true; + } else { + // All others are RoT images and need signature verification. + // stage0, rot_a, and rot_b images are verified against the + // current RoT device's configured keys in its CMPA. + let final_bin = current_ar["img/final.bin"]; + let verified = verify_rot_image(final_bin, images.cmpa, images.cfpa); + images[branch][image]["verified"] = verified; + if !verified { + if images?["failed_to_verify"] == () { + images["failed_to_verify"] = []; + } + // The caller may want to test with bad images or not. + images["failed_to_verify"] += images[branch][image]["path"]; + } + } + // Reverse map GITC to the set of images built at that commit. + let image_gitc = images[branch][image]["caboose"]?["GITC"]; + if image_gitc != () { + if gitc?[image_gitc] == () { + gitc[image_gitc] = []; + } + + // TODO: There should be a warning if the base and ut image + // have the // same GITC. This is true for some stage0 images + // that differ only in packaging but not in the image + // themselves. Otherwise it represents a release engineering + // failure or a case where the test is not configured properly. + + gitc[image_gitc] += [ `${branch}_${image}` ]; + } + } + } + debug(`gitc=${gitc}`); + images["by_gitc"] = gitc; + // Display the accumulated information to document this run. + if conf.verbose { + print(""); + print(`## images=${images}`); + } + if error { + return 1; + } + images +} + +// Check for update in progress. +// This will normally return () indicating no update in progress. 
+// The result is a re-write of the `update-status` return value: +// +// #{"Ok":"None"} => () +// #{"Ok":{"Complete":[94,4,54,248,142,146,79,45,187,3,82,219,240,213,7,53]}] +// => #{"state": "Complete", "id": "5e0436f88e924f2dbb0352dbf0d50735"} +// #{"Ok":{"InProgress":{"bytes_received":53790, +// "id":[51,22,226,160,87,133,65,112,134,62,131,83,143,203,4,155], +// "total_size":201896}}} +// => #{"state": "InProgress", +// "bytes_received": 53790, +// "id": "3316e2a057854170863e83538fcb049b", +// "total_size":201896 +// } +// #{"Err": "some error message"} => #{"Err": "some error message"} +fn check_update_in_progress(component) { + let r = faux_mgs(["update-status", component]); + debug(`update_status(${component})=${r}`); + if r?.Err != () { + debug(`failed update-status: ${r}`); + return r; + } + if r?.Ok == "None" { + debug("update-status ok"); + return (); + } + // Status was retrieved + if r?.InProgress != () { + debug(`update-status: ${r}`); + return #{ + "state": "InProgress", + "id": util::to_hexstring(r.InProgress.id), + "bytes_received": r.InProgress.bytes_received, + "total_size": r.InProgress.total_size + }; + } + if r?.Complete != () { + debug(`update-status: ${r}`); + return #{ + "state": "Complete", + "id": util::to_hexstring(r.Complete), + }; + } + debug(`update-status: ${r}`); + return #{"Err": `unknown update-status: ${r}`}; +} + +/// Update SP with specified image +fn update_sp(sp_zip) { + print(""); + print(`### update_sp: ${sp_zip}`); + + let r = faux_mgs(["update", "sp", "0", sp_zip]); + debug(`update result = ${r}`); + if r?.ack == "updated" { + print("flash_sp updated"); + } else { + debug("#### FAILED flash_sp update"); + print("FAIL\n"); + return false; + } + + let r = check_update_in_progress("sp"); + if r?.Ok != () { + print(`ERROR: Update sp did not complete: r=${r}`); + print("FAIL\n"); + return false; + } + + /* + let r = faux_mgs(["component-active-slot", "--persist", "-s", "0", "sp"]); + debug(`persist result = ${r}`); + if r?["ack"] == () || r.ack != "set" || r.slot != 0 { + debug("Failed to persist"); + print("FAIL\n"); + return false; + } + */ + + // Reset the SP to boot into the new image. + let r = faux_mgs(["reset"]); + debug(`faux-mgs reset => ${r}`); + if r?.ack != "reset" { + debug(`unexpected sp reset response: ${r}`); + print("FAIL\n"); + return false; + } + sleep(10); + + let r = check_update_in_progress("sp"); + if r?.Ok != () { + print(`ERROR: Update sp did not complete: r=${r}`); + print(""); + return false; + } + print(`### SUCCESS update_sp: ${sp_zip}`); + print(""); + return true; +} + +fn update_rot_hubris(path_a, path_b) { + print(""); + print(`### update_rot_hubris: ${path_a} ${path_b}`); + let r = get_rot_active(); + if r?.error != () { + print(`get_rot_active failed: ${rot_active}`); + print(""); + return #{"error": `${rot_active["error"]}`}; + } + let rot_active = r.ok; + + // Assume that A is active + let rot_update_slot = 1; + let rot_update_image = path_b; + if rot_active == 1 { + // Nope, B is active. Need to update A. 
+ rot_update_slot = 0; + rot_update_image = path_a; + } + debug(`selected: rot_update_slot=${rot_update_slot} rot_update_image=${rot_update_image}`); + let r = faux_mgs(["update", "rot", `${rot_update_slot}`, rot_update_image]); + debug(`update result = ${r}`); + if r?.ack == "updated" { + print("SUCCESS"); + } else { + print(`update failed: ${r}`); + print(""); + return false; + } + let r = faux_mgs(["component-active-slot", "-p", "-s", `${rot_update_slot}`, "rot"]); + debug(`persist result = ${r}`); + if r?.ack == () || r.ack != "set" || r.slot != rot_update_slot { + debug(`Failed to persist rot: r=${r}`); + print(""); + return false; + } + let r = faux_mgs(["reset-component", "rot"]); + if r?.ack != "reset" { + debug(`reset failed: ${r}`); + print(""); + return false; + } + sleep(5); + print(`### SUCCESS update_rot_hubris: updated slot ${rot_update_slot} with ${rot_update_image}`); + print(""); + true +} + +fn get_caboose(component, slot) { + let caboose = #{}; + for key in ["BORD", "GITC", "VERS", "NAME", "SIGN"] { + let value = util::caboose_value(component, slot, key); + if value != () { + caboose[key] = value; + } + } + caboose +} + +fn reset_sp_and_rot() { + // The disable watchdog should not be required for the SP. + // The STLINK needs to be powered down. + // e.g. not: + // faux_mgs(["reset-component", "sp", "--disable-watchdog"]); + for params in [["rot", 3], ["sp", 5]] { + print(`### Reset ${params[0]}`); + let r = faux_mgs(["reset-component", params[0]]); + if r?.error != () { + return #{"error": `${r}`}; + } + sleep(params[1]); + } + #{"ok": ()} +} + +fn get_rot_active() { + // fetch RoT boot info + let rbi = util::rot_boot_info(); + if rbi == () { + return #{"error": "Cannot determine active RoT flash bank"}; + } + #{"ok": rbi?["active"]} +} + +// Check if SP and RoT images match desired branches. 
+// if no issues getting info, returns +// ${"ok": #{"sp": bool, "rot": bool}} +// on error, returns one or both of "sp" and "rot" error messages: +// ${"error": #{"sp": message, "rot": message}} +fn image_check(branch, images) { + let ok = #{}; + let error = #{}; + // debug(`image_check: images.by_gitc=${images.by_gitc}`); + + let need_flash = sp_needs_flashing(`${branch}_sp`, images.by_gitc); + if need_flash?.error != () { + print(`Error: ${need_flash}`); + error["sp"] = `${need_flash}`; + } else { + ok["sp"] = need_flash.ok; + } + + let need_flash = rot_needs_flashing(`${branch}`, images.by_gitc); + if need_flash?.error != () { + debug(`Error: ${need_flash}`); + error["rot"] = `${flash_rot}`; + } else { + ok["rot"] = need_flash.ok; + } + if ok.len() == 2 { + #{"ok": ok} + } else { + #{"error": error} + } +} + +fn sp_needs_flashing(name, gitc) { + let sp_gitc = util::caboose_value("sp", "active", "GITC"); + print(`### SP is running GITC=${sp_gitc}`); + debug(`gitc=${gitc}`); + debug(`gitc?[${sp_gitc}]=${gitc?[sp_gitc]}`); + let known_gitc = gitc?[sp_gitc]; + debug(`known_gitc=${known_gitc}`); + if known_gitc == () { + print(" the SP is not running a BASE or UNDER-TEST image."); + #{"ok": true} + } else if name in known_gitc { + print(` ${name} IS in ${known_gitc}`); + #{"ok": false} + } else { + print(` ${name} IS NOT in ${known_gitc}`); + #{"ok": true} + } +} + +fn rot_needs_flashing(branch, gitc) { + let rot_active = get_rot_active(); + if rot_active?.error != () { + return #{"error": `get_rot_active: ${rot_active}`}; + } + let rot_active = rot_active.ok; + + let branch_rot_name = `${branch}_rot_a`; + if rot_active == 1 { + branch_rot_name = `${branch}_rot_b`; + } + + let rot_gitc = util::caboose_value("rot", `${rot_active}`, "GITC"); + print(`RoT is running GITC=${rot_gitc}`); + debug(`gitc[${rot_gitc}]=${gitc?[rot_gitc]}`); + let known_gitc = gitc?[rot_gitc]; + if known_gitc == () { + print(" the RoT is not running a BASE or UNDER-TEST image."); + #{"ok": true} + } else if branch_rot_name in known_gitc { + debug(`${branch_rot_name} IS in ${known_gitc}`); + #{"ok": false} + } else { + debug(`${branch_rot_name} IS NOT in ${known_gitc}`); + #{"ok": true} + } +} diff --git a/scripts/util.rhai b/scripts/util.rhai new file mode 100644 index 0000000..b72e8cc --- /dev/null +++ b/scripts/util.rhai @@ -0,0 +1,401 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. +// Copyright 2025 Oxide Computer Company + +/// Print out commonly available environment variables to demonstrate +/// the env_expand function. +fn show_env() { + print(`cwd=${cwd().to_string()}`); + print("Common environment variables:"); + print(` ${env_expand("PWD=${PWD}", #{})}`); + print(` ${env_expand("HOME=${HOME}", #{})}`); + print(` ${env_expand("LOGNAME=${LOGNAME}", #{})}`); + print(` ${env_expand("SHELL=${SHELL}", #{})}`); + print(` ${env_expand("USER=${USER}", #{})}`); +} + +// Reformat a byte array as a hex byte string representation. +fn to_hexstring(a) { + let s = ""; + for b in a { + let h = b.to_hex(); + if h.len() == 1 { + s += "0"; + } + s += h; + } + s.to_string() +} + +// Reformat an ASCII byte array with 0x00 padding into a string +// The trailing NUL is optional. +fn cstring_to_string(a) { + let data = blob(); + for b in a { + if b == 0 { + break; + } + data += b; + } + data.as_string() +} + +// Format a byte array into an ASCII mac address. 
+fn array_to_mac(a) { + let mac = ""; + mac += a[0].to_hex(); + for i in 1..a.len() { + mac += ":"; + mac += a.get(i).to_hex(); + } + mac +} + +// Translate A/B RoT bank name to 0/1 equivalent +fn ab_to_01(v) { + switch v { + "A" => 0, + "B" => 1, + () => (), + } +} + +// Expand string using environment and values from the override map +// For no override, use #{} in place of the override map. +fn env_expand(s, override) { + if s == () { + print!("Warning: trying to expand ()"); + return (); + } + // Note: Using an ImmutableString when there are usually + // zero to three varable expansions on relatively small strings + // is not a big deal. + // Escaping '$' may be needed someday. + let out = ""; + + let remain = s; + let envmap = envs(); + while remain.len() > 0 { + if out.len() > 2048 { + print(`env_expand error: out.len() has reached ${out.len()}`); + return (); + } + let i = remain.index_of("${"); + if i == -1 { + out += remain; + return out; + } + out += remain[0..i]; + remain.crop(i+2); + let i = remain.index_of("}"); + if i == -1 { + out += "${" + remain; + remain = ""; + continue; + } + let key = remain[0..i]; + remain.crop(key.len() + 1); + // We have key from within a string, e.g. "abc${key}def" + let value = (); + // If there is a '.' in the key, then this is a nested reference + // within `override` + let dot = key.index_of("."); + if dot > 0 { + let orig_key = key; + let vars = override; + while dot > 0 { + let topkey = key[0..dot]; + key = key[dot+1..]; + vars = vars?[topkey]; + if vars == () { + print("Error: env_expand: out of vars!"); + print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + return (); + } + dot = key.index_of("."); + } + if key in vars { + value = vars[key]; + } else { + print(`Cannot expand ${orig_key}`); + print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + } + } else { + // No dot in key, top-level from `override` or + // from env() + if key in override { + value = override[key]; + } else if key in envmap { + value = env(key); + } else { + // TODO: tie into faux-mgs logging + print(`Cannot expand ${key}`); + print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + return () + } + } + remain = value + remain; + } + out +} + +// --- Wrapped faux_mgs functions to make them easier to digest in the main script. + +/// Make RotBootInfoV3 more rhai friendly. +fn rot_boot_info() { + let r = faux_mgs(["rot-boot-info", "--version", "3"]); + // print(`RESULTS: ${type_of(r)}:`); + // print(`${r}`); + if r.V3?.active == () { + return (#{}) + } + let v3 = r.V3; + let rbi = #{ + active: ab_to_01(v3.active), + persistent_boot_preference: ab_to_01(v3.persistent_boot_preference), + pending_persistent_boot_preference: ab_to_01(v3.pending_persistent_boot_preference), + transient_boot_preference: ab_to_01(v3.transient_boot_preference), + slot_a: #{ + fwid: to_hexstring(v3.slot_a_fwid.Sha3_256), + status: v3.slot_a_status, + }, + slot_b: #{ + fwid: to_hexstring(v3.slot_b_fwid.Sha3_256), + status: v3.slot_b_status, + }, + stage0: #{ + fwid: to_hexstring(v3.stage0_fwid.Sha3_256), + status: v3.stage0_status, + }, + stage0next: #{ + fwid: to_hexstring(v3.stage0next_fwid.Sha3_256), + status: v3.stage0next_status, + }, + }; + rbi +} + +// faux-mgs state without the redundant rot_boot_info struct. 
+fn state() { + let r = faux_mgs(["state"]); + let v2 = r.V2; + #{ + base_mac_address: array_to_mac(v2.base_mac_address), + hubris_archive_id: to_hexstring(v2.hubris_archive_id), + model: cstring_to_string(v2.model), + power_state: v2.power_state, + revision: v2.revision, + // Ignore the rot state in favor of rot_boot_info + } +} + +/// Read Caboose Value +// "stage0", "rot", and "sp" are the interesting components here though others may exist. +// "stage0" doesn't have a caboose. +fn caboose_value(component, slot, key) { + let r = faux_mgs(["read-component-caboose", "--component", component, "--slot", slot, key]); + r?.value +} + +// Connect though the SP to get caboose values for all RoT and SP images. +fn get_device_cabooses() { + let caboose = #{}; + for component in ["stage0", "rot", "sp"] { + for slot in ["0", "1"] { + if (component in caboose) == false { + caboose[component] = #{}; + } + if (slot in caboose[component]) == false { + caboose[component][slot] = #{}; + } + caboose[component][slot] = get_caboose(component, slot); +} +} +caboose +} + +// Translate the LPC55's Root Key Table Hash to a well-known keyset name or +// leave it as the original value. +fn rkth_to_key_name(rkth) { + switch rkth { + "84332ef8279df87fbb759dc3866cbc50cd246fbb5a64705a7e60ba86bf01c27d" => + "Bart", + "11594bb5548a757e918e6fe056e2ad9e084297c9555417a025d8788eacf55daf" => + "StagingDevGimlet", + "1432cc4cfe5688c51b55546fe37837c753cfbc89e8c3c6aabcf977fdf0c41e27" => + "StagingDevSidecar", + "f592d8f109b81881221eed5af6438abad9b5df8c220b9129c03763e7e10b22c7" => + "StagingDevPSC", + "31942f8d53dc908c5cb338bdcecb204785fa87834e8b18f706fc972a42886c8b" => + "ProdRelPSC", + "5796ee3433f840519c3bcde73e19ee82ccb6af3857eddaabb928b8d9726d93c0" => + "ProdRelGimlet", + "5c69a42ee1f1e6cd5f356d14f81d46f8dbee783bb28777334226c689f169c0eb" => + "ProdRelSidecar", + _ => rkth + } +} + +// Convert a byte array returned from faux-mgs using JSON into a native blob type. +fn array_to_blob(a) { + let out = blob(); + for byte in a { + out.push(byte); + } + out +} + +// Get the RoT device CMPA as a blob +fn get_cmpa() { + let cmpa = faux_mgs(["read-cmpa"])?["cmpa"]; + if cmpa != () { + array_to_blob(cmpa) + } else { + () + } +} + +// Get the RoT device CFPA as a blob +fn get_cfpa() { + let cfpa = faux_mgs(["read-cfpa"])?["cfpa"]; + if cfpa != () { + array_to_blob(cfpa) + } else { + () + } +} + +// Identify the keyset name from the RoT device's CMPA. 
+fn get_rot_keyset(cmpa) { + rkth_to_key_name(to_hexstring(cmpa.extract(80,32))) +} + +// fn rot_boot_info() { +// fn state() { +// fn caboose_value(component, slot, key) { +// fn get_device_cabooses + +// component-active-slot Get or set the active slot of a component (e.g., `host-boot-flash`) +// current-time Ask the SP for its current system time (interpreted as human time or as a raw +// dump List and read per-task crash dumps +// power-state Get or set the power state +// read-caboose Read a single key from the caboose +// read-cfpa Reads a CFPA slot from an attached Root of Trust +// read-cmpa Reads the CMPA from an attached Root of Trust +// read-component-caboose Read a single key from the caboose +// read-sensor-value Reads a single sensor by `SensorId`, returning a `f32` +// reset-component Reset a component +// reset Instruct the SP to reset +// rot-boot-info Read the RoT's boot-time information +// set-ipcc-key-value Set an IPCC key/value +// system-led Controls the system LED +// update-abort Abort an in-progress update +// update-status Get the status of an update to the specified component +// update Upload a new image to the SP or one of its components + + + +// getops - This code was generated by gemini.google.com. +// When asked, "Gemini" indicated that the code was free to use. +// +// The following prompt was given: +// +// Write a Rhai script that implements a command-line argument parser similar +// to the bash `getopts` function. The script should define a function +// `getopts(argv, options)` where: +// +// * `argv` is a vector of strings representing the command-line arguments +// (including the script name as the first element). +// * `options` is a string specifying the valid options. Options that +// require an argument are indicated by a trailing colon (e.g., "a:b:c"). +// * The function should parse `argv` and return a Rhai map containing two +// keys: "result" and "positional". +// * "result" should be a map containing the parsed options and their +// values. Short options (e.g., `-a`) and long options (e.g., `--long`) +// should be supported. +// * "positional" should be a vector of strings containing the positional +// arguments (those not starting with `-` or `--`). +// * Short options can be combined (e.g., `-abc`). +// * A double dash (`--`) should stop option processing; any remaining `argv` +// elements should be treated as positional arguments. +// * If an option requires an argument but it is missing, or if an unknown +// option is encountered, the function should return a map with an "error" +// key containing an error message. +// * Rhai does not have a `substr` function, use `sub_string` instead. +// * Rhai does not have a native `any` function for iterators, implement the +// functionality manually. +// * Rhai does not have an error function, return a map with an error key +// instead. +// +// Provide example usage demonstrating the function's functionality, including +// cases with combined short options, missing arguments, unknown options, +// and the double dash argument. 
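+//
+// Example usage (illustrative; the option string below matches the "hc:v"
+// string that upgrade-rollback.rhai passes to util::getopts):
+//
+//   let parsed = getopts(["prog", "-v", "-c", "conf.json", "extra"], "hc:v");
+//   // parsed.result     == #{"c": "conf.json", "v": true}
+//   // parsed.positional == ["extra"]
+//   // On an unknown option or a missing argument, a #{"error": "..."} map
+//   // is returned instead.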
+// +fn getopts(argv, options) { + let result = #{}; + let positional = []; + let i = 1; + + while i < argv.len() { + let arg = argv[i]; + + if arg == "--" { + i += 1; + while i < argv.len() { + positional.push(argv[i]); + i += 1; + } + break; + } else if arg.starts_with("--") { + let opt = arg.sub_string(2, arg.len()); + if options.contains(opt + ":") { + if i + 1 < argv.len() { + result[opt] = argv[i + 1]; + i += 2; + } else { + return #{ "error": "Option '" + opt + "' requires an argument." }; + } + } else if options.contains(opt) { + result[opt] = true; + i += 1; + } else { + return #{ "error": "Unknown option '" + opt + "'." }; + } + } else if arg.starts_with("-") { + let opts = arg.sub_string(1, arg.len()).chars(); + for opt in opts { + let opt_str = opt.to_string(); + if options.contains(opt_str + ":") { + if i + 1 < argv.len() { + result[opt_str] = argv[i + 1]; + i += 2; + break; + } else { + return #{ "error": "Option '" + opt_str + "' requires an argument." }; + } + } else if options.contains(opt_str) { + result[opt_str] = true; + } else { + return #{ "error": "Unknown option '" + opt_str + "'." }; + } + } + let short_opts = arg.sub_string(1, arg.len()).chars(); + let has_argument_required = false; + for o in short_opts{ + if options.contains(o.to_string()+":"){ + has_argument_required = true; + break; + } + } + if !has_argument_required{ + i += 1; + } + } else { + positional.push(arg); + i += 1; + } + } + + return #{ "result": result, "positional": positional }; +} From 48d53112972db47b02eb35dc667ba83569cbf175 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Mon, 24 Mar 2025 12:30:19 -0700 Subject: [PATCH 04/17] Update RoT before SP so it is ready to measure SP when SP resets. Fix `json_to_map()` so that JSON errors are reported properly. Some calls to `print()` change to logging to debug or info. Add configuration for `faux-ipcc`. - since system() needs to be used to run faux-ipcc, should there be a regex to parse or are Rhai's existing string functions sufficient to pull out certs and measurements? Since faux-ipcc doesn't handle attestations yet, we'll wait a bit. 
--- faux-mgs/Cargo.toml | 1 - faux-mgs/src/rhaiscript.rs | 5 +-- scripts/targets.json | 5 +++ scripts/upgrade-rollback.rhai | 76 ++++++++++++++++++++++------------- scripts/util.rhai | 54 ++++++++++++------------- 5 files changed, 82 insertions(+), 59 deletions(-) diff --git a/faux-mgs/Cargo.toml b/faux-mgs/Cargo.toml index ee28111..1c97102 100644 --- a/faux-mgs/Cargo.toml +++ b/faux-mgs/Cargo.toml @@ -45,6 +45,5 @@ thiserror = { workspace = true, optional = true } toml = { workspace = true, optional = true } [features] -# XXX remove rhaiscript as a defailt feature default = ["rhaiscript"] rhaiscript = [ "dep:async-recursion", "dep:hubtools", "dep:lpc55_areas", "dep:lpc55_sign", "dep:rhai", "dep:rhai-chrono", "dep:rhai-env", "dep:rhai-fs", "dep:thiserror", "dep:toml"] diff --git a/faux-mgs/src/rhaiscript.rs b/faux-mgs/src/rhaiscript.rs index dd34b06..b0bc52e 100644 --- a/faux-mgs/src/rhaiscript.rs +++ b/faux-mgs/src/rhaiscript.rs @@ -334,12 +334,11 @@ pub async fn interpreter( }) // Offer proper JSON to Dynamic::Map conversion .register_fn("json_to_map", move |v: Dynamic| -> Dynamic { - match v.into_string() { + match v.clone().into_string() { Ok(s) => match serde_json::from_str::(&s) { Ok(v) => v, Err(e) => { - let err = format!("{{\"error\": \"{:?}\"}}", e) - .to_string(); + let err = json!(e.to_string()).to_string(); serde_json::from_str::(&err).unwrap() } }, diff --git a/scripts/targets.json b/scripts/targets.json index 164ac7d..fff08ae 100644 --- a/scripts/targets.json +++ b/scripts/targets.json @@ -26,5 +26,10 @@ "rot_b": "${ut_repo}/target/${bord.rot}/dist/b/build-${bord.rot}-image-b.zip", "stage0": "${keyset-dvt-dock}/gimlet/bootleby-${ut-b-ver}-${keyset}-gimlet.zip" } + }, + + "ipcc": { + "faux_ipcc": "${HOME}/.cargo/bin/faux-ipcc", + "port": "/dev/ttyUSB0" } } diff --git a/scripts/upgrade-rollback.rhai b/scripts/upgrade-rollback.rhai index 8baa9f5..3320117 100644 --- a/scripts/upgrade-rollback.rhai +++ b/scripts/upgrade-rollback.rhai @@ -16,7 +16,7 @@ fn main() { // Display start time let start_ts = timestamp(); let start_time = datetime_local(); - print(`Starting at ${start_time}`); + debug(`info|Starting at ${start_time}`); let conf = process_cli(argv); switch type_of(conf) { @@ -58,7 +58,7 @@ fn main() { print(`The SP probably needs to be reset before continuing`); print(`Trying to abort the update with id=${id}`); let r = faux_mgs(["update-abort", component, `${id}`]); - debug(`update-abort = ${r}`); + debug(`info|update-abort = ${r}`); } } if problems == 0 { @@ -107,7 +107,7 @@ fn main() { } // Update RoT with baseline image - debug(`Flash baseline rot hubris`); + debug(`info|Flash baseline rot hubris`); if flash_rot && !update_rot_hubris(conf.base.rot_a, conf.base.rot_b) { return 1 } @@ -120,7 +120,7 @@ fn main() { let result = image_check("base", images); if result?.error != () || result.ok.sp || result.ok.rot { // Not able to check images or one or both base images not installed. 
- debug(`image_check error or failed BASE image updates: ${result}`); + debug(`warn|image_check error or failed BASE image updates: ${result}`); return 1; } } @@ -144,25 +144,25 @@ fn main() { "rot_b_path": conf.base.rot_b, } ] { - print(""); - print(`## ${v.up_down} to ${v.label} images`); - debug(`${v.up_down} SP Hubris to ${v.label} image`); - if !update_sp(v.sp_path) { - print(`Failed to ${v.up_down} SP Hubris to ${v.label} image`); - return 1; - } - debug(`${v.up_down} Rot Hubris to ${v.label} image`); + debug(`info|${v.up_down} Rot Hubris to ${v.label} image`); if !update_rot_hubris(v.rot_a_path, v.rot_b_path) { - print(`Failed to ${v.up_down} RoT Hubris to ${v.label} image: ${r}`); + debug(`error|Failed to ${v.up_down} RoT Hubris to ${v.label} image: ${r}`); return 1 } + + debug(`info|${v.up_down} SP Hubris to ${v.label} image`); + if !update_sp(v.sp_path) { + debug(`error|Failed to ${v.up_down} SP Hubris to ${v.label} image: ${r}`); + return 1; + } + let result = image_check(v.branch, images); if result?.error != () || result.ok.sp || result.ok.rot { // Not able to check images or one or both ${branch} images not installed. - debug(`image_check error or failed ${label} image updates: ${result}`); + debug(`error|image_check error or failed ${label} image updates: ${result}`); return 1; } - print(`### SUCCESS: ${v.up_down} to SP and RoT ${v.label} images`); + debug(`info|### SUCCESS: ${v.up_down} to SP and RoT ${v.label} images`); } print(""); @@ -242,15 +242,37 @@ fn process_cli(argv) { print(`No configuration for image ${branch}.${image}`); } else { conf[branch][image] = zip_path; - debug(`conf.${branch}.${image}=${zip_path}`); + debug(`info|conf.${branch}.${image}=${zip_path}`); } }; } + // If ipcc has been specified, then include that as well + let faux_ipcc_path = config?.ipcc?.faux_ipcc; + let x = 1; + let a = if x == 2 { + 3 + } else { + 4 + }; + print(x); + conf.ipcc = if faux_ipcc_path != () { + #{ + "use_ipcc": true, + "faux_ipcc": util::env_expand(config.ipcc.faux_ipcc, config), + "port": util::env_expand(config.ipcc?.port, config), + } + } else { + #{ + "use_ipcc": false, + } + }; + if conf.verbose { print(""); print(`Parsed conf=${conf}`); } + conf } @@ -350,7 +372,7 @@ fn check_update_in_progress(component) { let r = faux_mgs(["update-status", component]); debug(`update_status(${component})=${r}`); if r?.Err != () { - debug(`failed update-status: ${r}`); + debug(`error|failed update-status: ${r}`); return r; } if r?.Ok == "None" { @@ -374,7 +396,7 @@ fn check_update_in_progress(component) { "id": util::to_hexstring(r.Complete), }; } - debug(`update-status: ${r}`); + debug(`error|update-status: ${r}`); return #{"Err": `unknown update-status: ${r}`}; } @@ -388,7 +410,7 @@ fn update_sp(sp_zip) { if r?.ack == "updated" { print("flash_sp updated"); } else { - debug("#### FAILED flash_sp update"); + debug("error|#### FAILED flash_sp update"); print("FAIL\n"); return false; } @@ -404,7 +426,7 @@ fn update_sp(sp_zip) { let r = faux_mgs(["component-active-slot", "--persist", "-s", "0", "sp"]); debug(`persist result = ${r}`); if r?["ack"] == () || r.ack != "set" || r.slot != 0 { - debug("Failed to persist"); + debug("error|Failed to persist"); print("FAIL\n"); return false; } @@ -414,7 +436,7 @@ fn update_sp(sp_zip) { let r = faux_mgs(["reset"]); debug(`faux-mgs reset => ${r}`); if r?.ack != "reset" { - debug(`unexpected sp reset response: ${r}`); + debug(`warn|unexpected sp reset response: ${r}`); print("FAIL\n"); return false; } @@ -463,13 +485,13 @@ fn 
update_rot_hubris(path_a, path_b) { let r = faux_mgs(["component-active-slot", "-p", "-s", `${rot_update_slot}`, "rot"]); debug(`persist result = ${r}`); if r?.ack == () || r.ack != "set" || r.slot != rot_update_slot { - debug(`Failed to persist rot: r=${r}`); + debug(`error|Failed to persist rot: r=${r}`); print(""); return false; } let r = faux_mgs(["reset-component", "rot"]); if r?.ack != "reset" { - debug(`reset failed: ${r}`); + debug(`error|reset failed: ${r}`); print(""); return false; } @@ -535,7 +557,7 @@ fn image_check(branch, images) { let need_flash = rot_needs_flashing(`${branch}`, images.by_gitc); if need_flash?.error != () { - debug(`Error: ${need_flash}`); + debug(`error|${need_flash}`); error["rot"] = `${flash_rot}`; } else { ok["rot"] = need_flash.ok; @@ -583,13 +605,13 @@ fn rot_needs_flashing(branch, gitc) { debug(`gitc[${rot_gitc}]=${gitc?[rot_gitc]}`); let known_gitc = gitc?[rot_gitc]; if known_gitc == () { - print(" the RoT is not running a BASE or UNDER-TEST image."); + debug("info|the RoT is not running a BASE or UNDER-TEST image."); #{"ok": true} } else if branch_rot_name in known_gitc { - debug(`${branch_rot_name} IS in ${known_gitc}`); + debug(`info|${branch_rot_name} IS in ${known_gitc}`); #{"ok": false} } else { - debug(`${branch_rot_name} IS NOT in ${known_gitc}`); + debug(`info|${branch_rot_name} IS NOT in ${known_gitc}`); #{"ok": true} } } diff --git a/scripts/util.rhai b/scripts/util.rhai index b72e8cc..39b9210 100644 --- a/scripts/util.rhai +++ b/scripts/util.rhai @@ -6,13 +6,13 @@ /// Print out commonly available environment variables to demonstrate /// the env_expand function. fn show_env() { - print(`cwd=${cwd().to_string()}`); - print("Common environment variables:"); - print(` ${env_expand("PWD=${PWD}", #{})}`); - print(` ${env_expand("HOME=${HOME}", #{})}`); - print(` ${env_expand("LOGNAME=${LOGNAME}", #{})}`); - print(` ${env_expand("SHELL=${SHELL}", #{})}`); - print(` ${env_expand("USER=${USER}", #{})}`); + debug(`info|cwd=${cwd().to_string()}`); + debug("info|Common environment variables:"); + debug(`info| ${env_expand("PWD=${PWD}", #{})}`); + debug(`info| ${env_expand("HOME=${HOME}", #{})}`); + debug(`info| ${env_expand("LOGNAME=${LOGNAME}", #{})}`); + debug(`info| ${env_expand("SHELL=${SHELL}", #{})}`); + debug(`info| ${env_expand("USER=${USER}", #{})}`); } // Reformat a byte array as a hex byte string representation. @@ -65,7 +65,7 @@ fn ab_to_01(v) { // For no override, use #{} in place of the override map. 
fn env_expand(s, override) { if s == () { - print!("Warning: trying to expand ()"); + debug("error|Warning: trying to expand ()"); return (); } // Note: Using an ImmutableString when there are usually @@ -78,7 +78,7 @@ fn env_expand(s, override) { let envmap = envs(); while remain.len() > 0 { if out.len() > 2048 { - print(`env_expand error: out.len() has reached ${out.len()}`); + debug(`error|env_expand error: out.len() has reached ${out.len()}`); return (); } let i = remain.index_of("${"); @@ -109,8 +109,8 @@ fn env_expand(s, override) { key = key[dot+1..]; vars = vars?[topkey]; if vars == () { - print("Error: env_expand: out of vars!"); - print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + debug("error|env_expand: out of vars!"); + debug(`error|Expansion of "${s}" is "${out}" with remainder "${remain}"`); return (); } dot = key.index_of("."); @@ -118,8 +118,8 @@ fn env_expand(s, override) { if key in vars { value = vars[key]; } else { - print(`Cannot expand ${orig_key}`); - print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + debug(`error|Cannot expand ${orig_key}`); + debug(`error|Expansion of "${s}" is "${out}" with remainder "${remain}"`); } } else { // No dot in key, top-level from `override` or @@ -130,8 +130,8 @@ fn env_expand(s, override) { value = env(key); } else { // TODO: tie into faux-mgs logging - print(`Cannot expand ${key}`); - print(`Expansion of "${s}" is "${out}" with remainder "${remain}"`); + debug(`error|Cannot expand ${key}`); + debug(`error|Expansion of "${s}" is "${out}" with remainder "${remain}"`); return () } } @@ -145,8 +145,6 @@ fn env_expand(s, override) { /// Make RotBootInfoV3 more rhai friendly. fn rot_boot_info() { let r = faux_mgs(["rot-boot-info", "--version", "3"]); - // print(`RESULTS: ${type_of(r)}:`); - // print(`${r}`); if r.V3?.active == () { return (#{}) } @@ -200,19 +198,19 @@ fn caboose_value(component, slot, key) { // Connect though the SP to get caboose values for all RoT and SP images. fn get_device_cabooses() { - let caboose = #{}; - for component in ["stage0", "rot", "sp"] { + let caboose = #{}; + for component in ["stage0", "rot", "sp"] { for slot in ["0", "1"] { - if (component in caboose) == false { - caboose[component] = #{}; + if (component in caboose) == false { + caboose[component] = #{}; + } + if (slot in caboose[component]) == false { + caboose[component][slot] = #{}; + } + caboose[component][slot] = get_caboose(component, slot); } - if (slot in caboose[component]) == false { - caboose[component][slot] = #{}; - } - caboose[component][slot] = get_caboose(component, slot); -} -} -caboose + } + caboose } // Translate the LPC55's Root Key Table Hash to a well-known keyset name or From f572998c6c025b346039efd0c6f01b4ceb64b6f4 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Sat, 29 Mar 2025 01:16:13 -0700 Subject: [PATCH 05/17] Refactoring Move RawHubrisArchive knowledge to a separate source file. --- faux-mgs/src/main.rs | 7 +- faux-mgs/src/rhaiscript.rs | 403 ++++++------------------------ faux-mgs/src/rhaiscript/hubris.rs | 256 +++++++++++++++++++ scripts/upgrade-rollback.rhai | 10 +- scripts/util.rhai | 1 - 5 files changed, 346 insertions(+), 331 deletions(-) create mode 100644 faux-mgs/src/rhaiscript/hubris.rs diff --git a/faux-mgs/src/main.rs b/faux-mgs/src/main.rs index b034601..4fb902d 100644 --- a/faux-mgs/src/main.rs +++ b/faux-mgs/src/main.rs @@ -139,7 +139,7 @@ struct Args { command: Command, } -/// Command line program that can send MGS messages to a single SP. 
+/// Rhai program that can send MGS messages to a single SP. #[cfg(feature = "rhaiscript")] #[derive(Parser, Debug)] struct RhaiArgs { @@ -986,7 +986,9 @@ fn ssh_list_keys(socket: &PathBuf) -> Result> { } /// This function exists to break recursive calls to the Rhai interpreter. -/// main() calls here but Rhai{...} calls run_command(). +/// the faux-mgs main function calls here. However, the `rhai` subcommand +/// calls run_command() which does not include any calls to the +/// rhai subcommand. async fn run_any_command( sp: SingleSp, command: Command, @@ -1002,6 +1004,7 @@ async fn run_any_command( } } +/// Run faux-mgs commands except for the `rhai` subcommand. async fn run_command( sp: &SingleSp, command: Command, diff --git a/faux-mgs/src/rhaiscript.rs b/faux-mgs/src/rhaiscript.rs index b0bc52e..9bb1dd2 100644 --- a/faux-mgs/src/rhaiscript.rs +++ b/faux-mgs/src/rhaiscript.rs @@ -1,256 +1,29 @@ -// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at https://mozilla.org/MPL/2.0/. -use crate::anyhow; -use crate::debug; -use crate::info; -use crate::warn; use slog::crit; use slog::error; use slog::trace; -use crate::fs; -use crate::json; -use crate::run_command; -use crate::Context; -use crate::Logger; -use crate::Output; -use crate::Path; -use crate::PathBuf; -use crate::Result; -use crate::RhaiArgs; -use crate::RotBootInfo; -use crate::SingleSp; +use crate::{anyhow, debug, info, warn}; +use crate::{ + fs, json, run_command, Context, Logger, Output, PathBuf, Result, RhaiArgs, + RotBootInfo, SingleSp, +}; use clap::Parser; -use std::sync::Arc; use async_recursion::async_recursion; -use hubtools::{Caboose, RawHubrisArchive}; -use lpc55_areas::{CFPAPage, CMPAPage}; use rhai::packages::Package; use rhai::{ - Array, CustomType, Dynamic, Engine, EvalAltResult, ImmutableString, Map, - NativeCallContext, Scope, TypeBuilder, + Array, Dynamic, Engine, EvalAltResult, ImmutableString, Map, + NativeCallContext, Scope, }; use rhai_chrono::ChronoPackage; use rhai_env::EnvironmentPackage; use rhai_fs::FilesystemPackage; -use serde_json::Value as Json; -use toml::Value as Toml; - -#[derive(Debug, CustomType)] -#[rhai_type(extra = Self::build_archive_inspector)] -struct ArchiveInspector { - #[rhai_type(skip)] - inner: Arc, -} - -impl Clone for ArchiveInspector { - fn clone(&self) -> Self { - ArchiveInspector { inner: self.inner.clone() } - } -} -impl ArchiveInspector { - fn new(inner: Arc) -> Self { - ArchiveInspector { inner } - } - - pub fn from_vec(contents: Vec) -> Result> { - match RawHubrisArchive::from_vec(contents) { - Ok(archive) => Ok(Self::new(Arc::new(archive))), - Err(e) => Err(format!("RawHubrisArchive::from_vec: {e}") - .to_string() - .into()), - } - } - - pub fn load(path: ImmutableString) -> Result> { - let path = PathBuf::from(path.into_owned()); - match RawHubrisArchive::load(&path) { - Ok(archive) => Ok(Self::new(Arc::new(archive))), - Err(e) => { - Err(format!("RawHubrisArchive::load: {e}").to_string().into()) - } - } - } - - fn u8_to_string(array: &[u8]) -> String { - String::from_utf8_lossy( - if let Some(p) = array.iter().position(|&x| x == 0) { - &array[0..p] - } else { - &array[0..] 
- }, - ) - .to_string() - } - - pub fn indexer( - &mut self, - index: &str, - ) -> Result> { - match index { - // Copied from hubtools/src/caboose - "BORD" => { - let caboose: Caboose = self - .inner - .read_caboose() - .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; - caboose - .board() - .map(|v| Ok(Self::u8_to_string(v).into())) - .unwrap_or(Ok(Dynamic::UNIT)) - } - "GITC" => { - let caboose: Caboose = self - .inner - .read_caboose() - .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; - caboose - .git_commit() - .map(|v| Ok(Self::u8_to_string(v).into())) - .unwrap_or(Ok(Dynamic::UNIT)) - } - "NAME" => { - let caboose: Caboose = self - .inner - .read_caboose() - .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; - caboose - .name() - .map(|v| Ok(Self::u8_to_string(v).into())) - .unwrap_or(Ok(Dynamic::UNIT)) - } - "SIGN" => { - let caboose: Caboose = self - .inner - .read_caboose() - .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; - caboose - .sign() - .map(|v| Ok(Self::u8_to_string(v).into())) - .unwrap_or(Ok(Dynamic::UNIT)) - } - "VERS" => { - let caboose: Caboose = self - .inner - .read_caboose() - .map_err(|e| format!("RawArchive::read_caboose: {e:?}"))?; - caboose - .version() - .map(|v| Ok(Self::u8_to_string(v).into())) - .unwrap_or(Ok(Dynamic::UNIT)) - } - "image_name" => { - if let Ok(iname) = self.inner.image_name() { - Ok(Dynamic::from(iname)) - } else { - Ok(Dynamic::UNIT) - } - } - // Only in Bootleby versions v1.3.1 and later - // "manufacturing_cfg" => {..} - _ => { - // Try to extract a file by this name - if let Ok(contents) = self.inner.extract_file(index) { - // contents: Vec - match Path::new(index) - .extension() - .and_then(|os| os.to_str()) - { - Some("bin") | Some("elf") => { - Ok(Dynamic::from_blob(contents)) - } - Some("toml") => { - // Adapted from toml crate example toml2json - fn toml2json(tv: Toml) -> Json { - match tv { - Toml::String(s) => Json::String(s), - Toml::Integer(i) => Json::Number(i.into()), - Toml::Float(f) => { - if let Some(n) = - serde_json::Number::from_f64(f) - { - Json::Number(n) - } else { - Json::Null - } - } - Toml::Boolean(b) => Json::Bool(b), - Toml::Array(arr) => Json::Array( - arr.into_iter() - .map(toml2json) - .collect(), - ), - Toml::Table(table) => Json::Object( - table - .into_iter() - .map(|(k, v)| (k, toml2json(v))) - .collect(), - ), - Toml::Datetime(dt) => { - Json::String(dt.to_string()) - } - } - } - let text = String::from_utf8_lossy(&contents[..]) - .to_string(); - if let Json::Object(json) = - toml2json(Toml::from(text)) - { - Ok(Dynamic::from(json)) - } else { - unreachable!(); - } - } - Some("json") => { - let text: String = - String::from_utf8_lossy(&contents[..]) - .to_string(); - if let Ok(json) = serde_json::to_value(text) { - Ok(Dynamic::from(json)) - } else { - let text = - String::from_utf8_lossy(&contents[..]) - .to_string(); - if let Json::Object(json) = json!({ "Err": serde_json::Value::String(text) }) - { - Ok(Dynamic::from(json)) - } else { - unreachable!() - } - } - } - _ => { - if index.starts_with("elf/") { - // All of these are binary files - Ok(Dynamic::from_blob(contents)) - } else { - // .txt, .fwid, .ron, .tlvc, .cfg, .gdb - // git-rev, .TXT, image-name - let text = - String::from_utf8_lossy(&contents[..]) - .to_string(); - Ok(Dynamic::from(text)) - } - } - } - } else { - Err(format!("unknown index: {:?}", index).into()) - } - } - } - } - - pub fn build_archive_inspector(builder: &mut TypeBuilder) { - builder - .with_name("archive") - .with_fn("new_archive", 
ArchiveInspector::from_vec) - .with_fn("new_archive", ArchiveInspector::load) - .with_indexer_get(ArchiveInspector::indexer); - } -} +mod hubris; /// Use a Rhai interpreter per SingleSp that can maintain a connection. #[async_recursion] @@ -289,14 +62,22 @@ pub async fn interpreter( // Don't limit resources for now. engine.set_max_expr_depths(0, 0); - // Access RawHubrisArchives - engine.build_type::(); + // Access RawHubrisArchives and their Cabooses + engine.build_type::(); + engine.build_type::(); engine.register_fn("system", system); // Compile the script - let program = fs::read_to_string(&script) - .with_context(|| format!("failed to read {}", script.display())) - .unwrap(); + let program = match fs::read_to_string(&script) { + Ok(content) => content, + Err(e) => { + return Err(anyhow!( + "failed to read {}: {}", + script.display(), + e + )); + } + }; // Construct argv for the script and canonicalize the script path. let pb = fs::canonicalize(&script) @@ -310,7 +91,7 @@ pub async fn interpreter( engine // faux_mgs thread consumes and produces JSON - .register_fn("faux_mgs", move |v: Dynamic| -> Dynamic { + .register_fn("faux_mgs", move |v: Array| -> Dynamic { match tx_script.send(serde_json::to_string(&v).unwrap()) { Ok(()) => match rx_script.recv() { Ok(v) => { @@ -348,39 +129,7 @@ pub async fn interpreter( serde_json::from_str::(&err).unwrap() } } - }) - // lpc55_support RoT signature verification - .register_fn( - "verify_rot_image", - move |image: Dynamic, - cmpa: Dynamic, - cfpa: Dynamic| - -> Dynamic { - fn to_page(slice: &[u8]) -> Option<&[u8; 512]> { - slice.try_into().ok() - } - - let a = cmpa.as_blob_ref().unwrap().clone(); - let page = to_page(a.as_ref()).unwrap(); - let cmpapage = CMPAPage::from_bytes(page).unwrap(); - - let a = cfpa.as_blob_ref().unwrap().clone(); - let page = to_page(a.as_ref()).unwrap(); - let cfpapage = CFPAPage::from_bytes(page).unwrap(); - - let image = image.as_blob_ref().unwrap().clone(); - - if lpc55_sign::verify::verify_image( - &image, cmpapage, cfpapage, - ) - .is_ok() - { - true.into() - } else { - false.into() - } - }, - ); + }); // A script can log via debug at any level: // debug("INFO|log message at INFO level"); @@ -427,7 +176,6 @@ pub async fn interpreter( }); // Print registered functions if you're interested. - // engine.gen_fn_signatures(false).into_iter().for_each(|func| println!("{func}")); match engine.compile(program) { @@ -464,21 +212,28 @@ pub async fn interpreter( }); while let Ok(command_args) = rx_master.recv() { - // Service the script's calls to "faux_mgs" - // from those commands. - // The script can only send use arrays of strings. - // println!("args=${:?}", &command_args); - let response = if let Ok(serde_json::Value::Array(j)) = + // Service the script's calls to "faux_mgs". + // The script can only send arrays of string and i64 values. + let response = if let Ok(serde_json::Value::Array(script_args)) = serde_json::from_str(&command_args) { - // TODO: fix this so that user can put an i64 and we won't panic. - let a: Vec = - j.iter().map(|v| v.as_str().unwrap().to_string()).collect(); - debug!(log, "vec string: {:?}", a); + // TODO: Check for non-string non-i64 values in the + // script_args and return an error instead of executing the faux-mgs + // command. 
+ let faux_mgs_args: Vec = script_args + .iter() + .map(|v| { + v.as_str() + .map(|s| s.to_string()) + .or_else(|| v.as_i64().map(|i| i.to_string())) + .unwrap() + }) + .collect(); + debug!(log, "vec string: {:?}", faux_mgs_args); let mut ra = vec![]; // The clap crate is expecting ARGV[0] as the program name, insert a dummy. ra.push("faux-mgs".to_string()); - ra.append(&mut a.clone()); + ra.append(&mut faux_mgs_args.clone()); let args = RhaiArgs::parse_from(&ra); match run_command(sp, args.command.clone(), true, log.clone()).await @@ -525,43 +280,45 @@ pub async fn interpreter( } } -/// Allow Rhai scripts to run commands and capture stdout, stderr, and +// +// This function was generated with the following prompt to +// gemini.google.com: +// +// Write a Rust function, `system`, that can be registered with the Rhai +// scripting engine. The function should take an array of strings (`Array`) +// as input, representing a command and its arguments, execute the command +// using `std::process::Command`, and return a Rhai `Map` containing the +// command's exit code, standard output, and standard error. +// +// The function should handle the following: +// +// * Convert the input `Array` to a `Vec`. +// * Handle errors if the input `Array` is empty or if any element cannot +// be converted to a `String`. +// * Use `std::process::Command` with fully qualified names (e.g., +// `std::process::Command::new`). +// * Capture the command's standard output and standard error using +// `std::process::Stdio::piped()`. +// * Convert the captured output to Rhai `ImmutableString` values using +// `String::from_utf8_lossy`. +// * Return a Rhai `Map` with the keys "exit_code", "stdout", and "stderr". +// * Handle errors during command execution and output capture. +// * Use `EvalAltResult::ErrorInFunctionCall` for function call errors and +// `EvalAltResult::ErrorRuntime` for runtime errors. +// * Ensure that error messages passed to `EvalAltResult::ErrorRuntime` +// are converted to `Dynamic` using `.into()`. +// * Place the underlying error in the third position of the +// `EvalAltResult::ErrorInFunctionCall` variant. +// * Use `context.position()` to get the error position. +// * Do not use the `mut` keyword on the `child` variable when calling +// `command.spawn()`. +// +// Provide a complete Rust code example that includes the `system` function +// and a `main` function that registers it with a Rhai engine and runs a +// sample Rhai script. + +/// Allow Rhai scripts to run a command and capture the stdout, stderr, and /// exit code. -/// This function was generated with the following prompt to -/// gemini.google.com: -/// -/// Write a Rust function, `system`, that can be registered with the Rhai -/// scripting engine. The function should take an array of strings (`Array`) -/// as input, representing a command and its arguments, execute the command -/// using `std::process::Command`, and return a Rhai `Map` containing the -/// command's exit code, standard output, and standard error. -/// -/// The function should handle the following: -/// -/// * Convert the input `Array` to a `Vec`. -/// * Handle errors if the input `Array` is empty or if any element cannot -/// be converted to a `String`. -/// * Use `std::process::Command` with fully qualified names (e.g., -/// `std::process::Command::new`). -/// * Capture the command's standard output and standard error using -/// `std::process::Stdio::piped()`. -/// * Convert the captured output to Rhai `ImmutableString` values using -/// `String::from_utf8_lossy`. 
-/// * Return a Rhai `Map` with the keys "exit_code", "stdout", and "stderr". -/// * Handle errors during command execution and output capture. -/// * Use `EvalAltResult::ErrorInFunctionCall` for function call errors and -/// `EvalAltResult::ErrorRuntime` for runtime errors. -/// * Ensure that error messages passed to `EvalAltResult::ErrorRuntime` -/// are converted to `Dynamic` using `.into()`. -/// * Place the underlying error in the third position of the -/// `EvalAltResult::ErrorInFunctionCall` variant. -/// * Use `context.position()` to get the error position. -/// * Do not use the `mut` keyword on the `child` variable when calling -/// `command.spawn()`. -/// -/// Provide a complete Rust code example that includes the `system` function -/// and a `main` function that registers it with a Rhai engine and runs a -/// sample Rhai script. fn system( context: NativeCallContext, argv: Array, diff --git a/faux-mgs/src/rhaiscript/hubris.rs b/faux-mgs/src/rhaiscript/hubris.rs new file mode 100644 index 0000000..13a562b --- /dev/null +++ b/faux-mgs/src/rhaiscript/hubris.rs @@ -0,0 +1,256 @@ +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this +// file, You can obtain one at https://mozilla.org/MPL/2.0/. + +use crate::Path; +use crate::PathBuf; +use hubtools::{Caboose, RawHubrisArchive}; +use rhai::{CustomType, Dynamic, EvalAltResult, ImmutableString, TypeBuilder}; +use serde_json::Value as Json; +use std::sync::Arc; +use toml::Value as Toml; + +fn json_text_to_dynamic( + contents: &[u8], +) -> Result> { + let text = String::from_utf8_lossy(contents); + match serde_json::from_str::(&text) { + Ok(json) => Ok(Dynamic::from(json)), + // The Json error includes the original text with a marker + // indicating where the error is. 
+ Err(e) => Err(format!("Failed to parse JSON: {}", e).into()), + } +} + +// Adapted from toml crate example toml2json +fn toml2json(tv: Toml) -> Json { + match tv { + Toml::String(s) => Json::String(s), + Toml::Integer(i) => Json::Number(i.into()), + Toml::Float(f) => { + if let Some(n) = serde_json::Number::from_f64(f) { + Json::Number(n) + } else { + Json::Null + } + } + Toml::Boolean(b) => Json::Bool(b), + Toml::Array(arr) => { + Json::Array(arr.into_iter().map(toml2json).collect()) + } + Toml::Table(table) => Json::Object( + table.into_iter().map(|(k, v)| (k, toml2json(v))).collect(), + ), + Toml::Datetime(dt) => Json::String(dt.to_string()), + } +} + +#[derive(Debug, CustomType)] +#[rhai_type(name = "Archive", extra = Self::build_archive_inspector)] +pub struct ArchiveInspector { + #[rhai_type(skip)] + inner: Arc, +} + +impl Clone for ArchiveInspector { + fn clone(&self) -> Self { + ArchiveInspector { inner: self.inner.clone() } + } +} + +impl ArchiveInspector { + fn new(inner: Arc) -> Self { + ArchiveInspector { inner } + } + + pub fn from_vec(contents: Vec) -> Result> { + match RawHubrisArchive::from_vec(contents) { + Ok(archive) => Ok(Self::new(Arc::new(archive))), + Err(e) => Err(format!("RawHubrisArchive::from_vec: {e}") + .to_string() + .into()), + } + } + + pub fn load(path: ImmutableString) -> Result> { + let path = PathBuf::from(path.into_owned()); + match RawHubrisArchive::load(&path) { + Ok(archive) => Ok(Self::new(Arc::new(archive))), + Err(e) => { + Err(format!("RawHubrisArchive::load: {e}").to_string().into()) + } + } + } + + fn extract_and_convert( + &self, + index: &str, + ) -> Result> { + match self.inner.extract_file(index) { + Ok(contents) => match Path::new(index) + .extension() + .and_then(|os| os.to_str()) + { + Some("bin") | Some("elf") => Ok(Dynamic::from_blob(contents)), + Some("toml") => Self::toml_to_dynamic(&contents), + Some("json") => json_text_to_dynamic(&contents), + _ => { + // All remaining files that start with "\x7fELF" or are not valid UTF8 + // are blobs, everything else is text. + if contents[0..4] == *b"\x7fELF" { + Ok(Dynamic::from_blob(contents)) + } else { + match String::from_utf8(contents.clone()) { + Ok(text) => Ok(Dynamic::from(text)), + Err(_) => Ok(Dynamic::from_blob(contents)), + } + } + } + }, + Err(e) => Err(format!("hubtools error: {}", e).into()), + } + } + + pub fn indexer( + &mut self, + index: &str, + ) -> Result> { + match index { + "caboose" => Ok(Dynamic::from::( + CabooseInspector::from_archive(&self.inner)?, + )), + "image_name" => self + .inner + .image_name() + .map(Dynamic::from) + .map_or(Ok(Dynamic::UNIT), Ok), + _ => self.extract_and_convert(index), + } + } + + fn toml_to_dynamic(contents: &[u8]) -> Result> { + let text = String::from_utf8_lossy(contents).to_string(); + let toml_value = text + .parse::() + .map_err(|e| format!("Failed to parse TOML: {}", e))?; + match toml2json(toml_value.clone()) { + Json::Object(json) => Ok(Dynamic::from(json)), + _ => Err(format!( + "Failed to convert TOML to JSON object: {:?}", + toml_value + ) + .into()), + } + } + + fn decode_blob( + blob: Dynamic, + name: &str, + ) -> Result<[u8; N], Box> { + let bytes = blob + .read_lock::() + .ok_or_else(|| format!("invalid type {}", name))? 
+ .to_vec(); + if bytes.len() != N { + return Err(format!( + "invalid {} length {} != {}", + name, + bytes.len(), + N + ) + .into()); + } + bytes.try_into().map_err(|_| format!("invalid {}", name).into()) + } + + pub fn verify_rot_image( + &mut self, + cmpa: Dynamic, + cfpa: Dynamic, + ) -> Result> { + let cmpa = Self::decode_blob::<512>(cmpa, "CMPA")?; + let cfpa = Self::decode_blob::<512>(cfpa, "CFPA")?; + if let Err(e) = self.inner.verify(&cmpa, &cfpa) { + return Err(Box::new(EvalAltResult::from(format!("{:?}", e)))); + } + Ok(true.into()) + } + + pub fn build_archive_inspector(builder: &mut TypeBuilder) { + builder + .with_name("Archive") + .with_fn("new_archive", ArchiveInspector::from_vec) + .with_fn("new_archive", ArchiveInspector::load) + .with_fn("verify_rot_image", ArchiveInspector::verify_rot_image) + .with_indexer_get(ArchiveInspector::indexer); + } +} + +macro_rules! caboose_tag { + ($caboose: ident, $method:ident) => { + $caboose + .inner + .$method() + .map(|v| Ok(u8_to_string(v).into())) + .unwrap_or(Ok(Dynamic::UNIT)) + }; +} + +#[derive(Debug, CustomType)] +#[rhai_type(name = "Caboose", extra = Self::build_caboose_inspector)] +pub struct CabooseInspector { + #[rhai_type(skip)] + inner: Arc, +} + +impl Clone for CabooseInspector { + fn clone(&self) -> Self { + CabooseInspector { inner: self.inner.clone() } + } +} + +impl CabooseInspector { + fn new(inner: Arc) -> Self { + CabooseInspector { inner } + } + + pub fn from_archive( + archive: &RawHubrisArchive, + ) -> Result> { + let caboose = archive + .read_caboose() + .map_err(|e| format!("RawArchive::read_caboose: {:?}", e))?; + Ok(CabooseInspector::new(Arc::new(caboose))) + } + + pub fn indexer( + &mut self, + index: &str, + ) -> Result> { + match index { + "BORD" => caboose_tag!(self, board), + "GITC" => caboose_tag!(self, git_commit), + "NAME" => caboose_tag!(self, name), + "SIGN" => caboose_tag!(self, sign), + "VERS" => caboose_tag!(self, version), + _ => Err(format!("unknown index: {:?}", index).into()), + } + } + + pub fn build_caboose_inspector(builder: &mut TypeBuilder) { + builder + .with_name("Caboose") + .with_indexer_get(CabooseInspector::indexer); + } +} + +fn u8_to_string(array: &[u8]) -> String { + String::from_utf8_lossy( + if let Some(p) = array.iter().position(|&x| x == 0) { + &array[0..p] + } else { + &array[0..] + }, + ) + .to_string() +} diff --git a/scripts/upgrade-rollback.rhai b/scripts/upgrade-rollback.rhai index 3320117..5bb89ba 100644 --- a/scripts/upgrade-rollback.rhai +++ b/scripts/upgrade-rollback.rhai @@ -293,15 +293,16 @@ fn get_image_info(conf) { let zip_path = conf[branch][image]; images[branch][image] = #{"path": zip_path}; let current_ar = new_archive(zip_path); - if type_of(current_ar) != "archive" { + if type_of(current_ar) != "Archive" { print(`Invalid archive path ${zip_path}`); error = true; continue; } // images[branch][image]["ar"] = current_ar; images[branch][image]["caboose"] = #{}; + let caboose = current_ar?["caboose"]; for key in ["BORD", "GITC", "NAME", "SIGN", "VERS"] { - images[branch][image]["caboose"][key] = current_ar?[key]; + images[branch][image]["caboose"][key] = caboose[key]; } if image == "sp" { // TODO: Do some sanity checks to make sure BORD and NAME @@ -311,8 +312,7 @@ fn get_image_info(conf) { // All others are RoT images and need signature verification. // stage0, rot_a, and rot_b images are verified against the // current RoT device's configured keys in its CMPA. 
-                let final_bin = current_ar["img/final.bin"];
-                let verified = verify_rot_image(final_bin, images.cmpa, images.cfpa);
+                let verified = current_ar.verify_rot_image(images.cmpa, images.cfpa);
                 images[branch][image]["verified"] = verified;
                 if !verified {
                     if images?["failed_to_verify"] == () {
@@ -330,7 +330,7 @@ fn get_image_info(conf) {
            }

            // TODO: There should be a warning if the base and ut image
-           // have the // same GITC. This is true for some stage0 images
+           // have the same GITC. This is true for some stage0 images
            // that differ only in packaging but not in the image
            // themselves. Otherwise it represents a release engineering
            // failure or a case where the test is not configured properly.
diff --git a/scripts/util.rhai b/scripts/util.rhai
index 39b9210..682a7c3 100644
--- a/scripts/util.rhai
+++ b/scripts/util.rhai
@@ -129,7 +129,6 @@ fn env_expand(s, override) {
         } else if key in envmap {
             value = env(key);
         } else {
-            // TODO: tie into faux-mgs logging
             debug(`error|Cannot expand ${key}`);
             debug(`error|Expansion of "${s}" is "${out}" with remainder "${remain}"`);
             return ()

From 4d1b4aca3a5551f5cdb17e3143929219ee93fa70 Mon Sep 17 00:00:00 2001
From: Ben Stoltz
Date: Tue, 29 Apr 2025 17:01:34 -0700
Subject: [PATCH 06/17] specify repo on the command line to override configuration

---
 scripts/README.md             | 17 ++++++++++++++---
 scripts/targets.json          |  2 +-
 scripts/upgrade-rollback.rhai | 22 ++++++++++++++++++++--
 3 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/scripts/README.md b/scripts/README.md
index d094777..d5dc539 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -58,7 +58,7 @@ Rhai calls the script's `main() -> i64 {}`.
 - rhai_fs::FilesystemPackage - file system access
 - [rhai_chrono::ChronoPackage](https://github.com/iganev/rhai-chrono) -
   standard time formats.
-### Modified Rhai behaviod
+### Modified Rhai behavior
 - The `debug("message")` function is routed to the faux-mgs slog logging.
   Prefixing a message with "crit|", "trace|", "error|", "warn|", "error|", or
   "debug|" will log at that corresponding level. Leaving off the prefix or using some other
@@ -113,15 +113,26 @@ cargo -q run --bin faux-mgs --features=rhaiscript -- \
     --per-attempt-timeout-millis=${PER_ATTEMPT_MS} \
     "$@"
 ```
+
+A `getopts` utility function provides command line parsing within the script.
+
+### An update/rollback test
+
 For the upgrade-rollback script, a JSON configuration file supplies paths
 or other parameters needed to configure the script.

-then for instance:
+For convenience, it is assumed that there are two repos with their
+respective Grapefruit SP and RoT images built.
+
+#### Running update-rollback between the master branch and your new code
+
 ```bash
-./FM rhai scripts/upgrade-rollback.rhai -- -c scripts/targets.json
+BASELINE=$HOME/Oxide/src/hubris/master
+UNDER_TEST=$HOME/Oxide/src/hubris/my-new-branch
+./FM rhai scripts/upgrade-rollback.rhai -- \
+    -c scripts/targets.json ${BASELINE} ${UNDER_TEST}
 ```

 See the scripts themselves for further information.
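+
+For reference, here is a minimal sketch of the `Archive`/`Caboose` scripting
+interface that the update scripts build on. It is illustrative only: the
+archive path is a placeholder, and the zeroed CMPA/CFPA blobs created with
+`blob(512)` stand in for the bytes a real script takes from its configuration
+and the attached RoT (see `upgrade-rollback.rhai`).
+
+```rhai
+fn main() {
+    // Placeholder path; real paths come from targets.json.
+    let ar = new_archive("grapefruit-rot-a.zip");
+    if type_of(ar) != "Archive" {
+        debug("error|could not open Hubris archive");
+        return 1;
+    }
+    let caboose = ar["caboose"];            // "caboose" indexer -> Caboose
+    for key in ["BORD", "GITC", "NAME", "SIGN", "VERS"] {
+        debug(`info|${key}=${caboose[key]}`);
+    }
+    // Placeholder blobs; a real script passes the CMPA/CFPA bytes read
+    // from the attached RoT.
+    let cmpa = blob(512);
+    let cfpa = blob(512);
+    try {
+        ar.verify_rot_image(cmpa, cfpa);
+        debug("info|RoT image signature verified");
+    } catch (err) {
+        debug(`error|verify_rot_image: ${err}`);
+    }
+    0
+}
+```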
diff --git a/scripts/targets.json b/scripts/targets.json index fff08ae..64de193 100644 --- a/scripts/targets.json +++ b/scripts/targets.json @@ -1,7 +1,7 @@ { "repo-home": "${HOME}/Oxide/src", "base_repo": "${repo-home}/hubris/master", - "ut_repo": "${repo-home}/hubris/epoch", + "ut_repo": "${repo-home}/hubris/${UT_WORKTREE}", "keyset": "bart", "keyset-dvt-dock": "${repo-home}/dvt-dock/${keyset}", diff --git a/scripts/upgrade-rollback.rhai b/scripts/upgrade-rollback.rhai index 5bb89ba..f5c83a1 100644 --- a/scripts/upgrade-rollback.rhai +++ b/scripts/upgrade-rollback.rhai @@ -5,10 +5,12 @@ fn usage(prog, error) { if error != () { print(`Error: ${error}`); } - print(`Usage: faux-mgs ... rhai ${prog} [-v] [-h] [-c config.json]`); + print(`Usage: faux-mgs ... rhai ${prog} [-v] [-h] [-c config.json] [path0] [path1]`); print(" -c CONFIG.JSON # Path to configuration"); print(" -v # be verbose"); print(" -h # Help. Print this message"); + print("path0 - optionally override configuration base_repo value"); + print("path1 - optionally override configuration ut_repo value"); print(""); } @@ -152,7 +154,7 @@ fn main() { debug(`info|${v.up_down} SP Hubris to ${v.label} image`); if !update_sp(v.sp_path) { - debug(`error|Failed to ${v.up_down} SP Hubris to ${v.label} image: ${r}`); + debug(`error|Failed to ${v.up_down} SP Hubris to ${v.label} image`); return 1; } @@ -229,6 +231,22 @@ fn process_cli(argv) { print(`config=${config}`); } + // Optionally override baseline and under test repo from command line + let repo_path = parsed?.positional[0]; + if repo_path != () { + print("override config base_repo:"); + print(` was: ${config?.base_repo}`); + config.base_repo = repo_path; + print(` now: ${config?.base_repo}`); + } + let repo_path = parsed?.positional[1]; + if repo_path != () { + print("override config ut_repo:"); + print(` was: ${config?.ut_repo}`); + config.ut_repo = repo_path; + print(` now: ${config?.ut_repo}`); + } + // Expand the paths to archives so that the config file // can use vars from the environment and the configuration file itself. conf["sp_bord"] = util::env_expand(config.bord.sp, config); From ae503a449843f79260d2330ff91aead711b68fc0 Mon Sep 17 00:00:00 2001 From: Ben Stoltz Date: Wed, 30 Apr 2025 12:09:52 -0700 Subject: [PATCH 07/17] Testing upgrade/rollback with transient preference Also work around baseline not implementing transient boot preference --- scripts/TODO.md | 114 ++++ scripts/upgrade-rollback-transient.rhai | 773 ++++++++++++++++++++++++ scripts/upgrade-rollback.rhai | 2 +- scripts/util.rhai | 84 ++- 4 files changed, 971 insertions(+), 2 deletions(-) create mode 100644 scripts/TODO.md create mode 100644 scripts/upgrade-rollback-transient.rhai diff --git a/scripts/TODO.md b/scripts/TODO.md new file mode 100644 index 0000000..771b416 --- /dev/null +++ b/scripts/TODO.md @@ -0,0 +1,114 @@ +# TODO List for Rhai scripting in the `faux-mgs` utility + + - Prioritize this list in favor of code correctness, clarity, security, and testability. + +## Refactoring, Code Quality & Maintainability + + - Refactor any direct calls to `faux_mgs()` into the `scripts/util.rhai` file. + * **Solution:** Identify all `faux_mgs([...])` calls in `*.rhai` scripts (e.g., `upgrade-rollback-transient.rhai`) and create corresponding wrapper functions in `util.rhai` that provide a clearer interface and handle potential errors more gracefully. + - Refactor any repeated code snippets in Rhai scripts into reasonable functions, likely within `util.rhai` or script-specific helper functions. 
+ * **Solution:** Analyze scripts for duplicated logic (e.g., common setup sequences, polling loops, result parsing) and abstract them into reusable functions. + - Remove dead code and useless comments from all scripts and Rust files. + * **Solution:** Perform a thorough code review to identify and remove unused variables, functions, and comments that no longer add value or are out of date. + - The calling convention and error reporting for functions in `scripts/util.rhai` seems clunky. Can it be improved? + * **Solution:** Design a consistent error handling pattern for `util.rhai` functions. For example, functions could return a map like `#{ ok: result, err: error_message }` or leverage Rhai's error throwing/catching mechanisms more consistently. Document this pattern. + - The top level flow in `main()` of `upgrade-rollback-transient.rhai` (and other complex scripts) should be easy to read and understand. + * **Solution:** Break down large `main()` functions into smaller, well-named functions that represent logical stages of the script. Aim for a declarative style at the top level. + - Consider breaking down very long Rhai scripts (e.g., `upgrade-rollback-transient.rhai`) into smaller, more manageable modules or by importing other Rhai scripts. + * **Solution:** Explore Rhai's `import` capabilities further to logically segment parts of complex scripts (e.g., CLI parsing, image handling, update logic specific to components). + - Standardize logging practices within Rhai scripts. + * **Solution:** Define guidelines for log levels (using the `debug("level|message")` convention) and message formats to ensure consistent and useful log output. + - (From `rhaiscript.rs`) Check for non-string non-i64 values in the `faux_mgs` `script_args` from Rhai and return an error instead of potentially panicking during execution. + * **Solution:** In `faux-mgs/src/rhaiscript.rs`, before converting `script_args` to `Vec`, iterate and validate each `serde_json::Value` to ensure it's a string or number. If not, send an appropriate error JSON back to the Rhai script. + +## Error Handling & Robustness + + - The new transient boot preference selection feature is not present in the current baseline RoT Hubris image. The `upgrade-rollback-transient.rhai` test doesn't handle this as well as it could. + * **Solution:** Modify the script to gracefully detect if the transient boot preference feature is supported by the current RoT. If not, either skip transient-specific tests with a clear message or use an alternative validation path. + - (From `upgrade-rollback-transient.rhai`) Need a better liveness test and decision on failure for RoT reset in `update_rot_hubris`. + * **Solution:** Instead of a fixed `sleep()`, implement a polling mechanism (e.g., trying to read `rot_boot_info()` or another status indicator) with a timeout to confirm RoT is responsive after reset. Define clear actions for timeout/failure. + - (From `upgrade-rollback-transient.rhai`) Implement fault insertion and test recovery paths, especially for transient boot failures during RoT updates. + * **Solution:** Design specific test scenarios that intentionally cause failures (e.g., using a corrupted image if verification is bypassed, or power cycling at critical moments if power control is available). Verify that the script's recovery mechanisms (or manual recovery steps) work. + - Improve error reporting from Rust Rhai integration back to the script and user. 
+ * **Solution:** Ensure that errors originating in `rhaiscript.rs` or `hubris.rs` (e.g., file access, command execution, archive parsing) are propagated to Rhai as structured error objects/maps rather than generic strings, allowing scripts to make better decisions. + +## Testing & Validation + + - Develop a test suite or test harness for Rhai scripts. + * **Solution:** Create a framework (perhaps another Rhai script or a Rust test module) that can execute test scripts. This might involve mocking `faux_mgs` calls to simulate different hardware responses and test script logic in isolation. + - Add unit tests for utility functions in `scripts/util.rhai`. + * **Solution:** Write Rhai test functions within `util.rhai` or in separate test scripts that call and verify the behavior of functions like `env_expand`, `to_hexstring`, `getopts`, etc., with various inputs. + - Implement schema validation for `scripts/targets.json` and any other JSON configuration files used by scripts. + * **Solution:** Define a JSON schema. This could be validated by a Rhai function at script startup using `json_to_map` and manual checks, or by an external tool during a linting/CI step. + - Review the `getopts` function in `util.rhai` (generated by Gemini) for completeness, edge cases, and adherence to common `getopts` behavior. + * **Solution:** Test `getopts` with a comprehensive set of argument patterns, including combined short options, options with and without arguments, optional arguments, various uses of `--`, and error conditions. Compare behavior with standard `getopts`. + +## Features & Enhancements + + - (From `upgrade-rollback-transient.rhai`) When `hubtools` has `fwidgen` integrated and SP/RoT can report FWID for active/inactive banks, transition to FWID-based assessment instead of relying solely on GITC/VERS. + * **Solution:** Monitor `hubtools` and firmware developments. Once available, update `image_check` and related functions to fetch and use FWIDs for more precise image identification. + - (From `upgrade-rollback-transient.rhai`) Parameterize update orders (e.g., SP then RoT, or RoT then SP) for upgrade and rollback scenarios, potentially via `targets.json`. + * **Solution:** Add a configuration option in `targets.json` (e.g., `"update_sequence": ["sp", "rot"]`) and modify the `upgrade-rollback-transient.rhai` script to respect this order. + - (From `upgrade-rollback-transient.rhai`) Allow specifying a TUF repository as a source for baseline or under-test images. + * **Solution:** This is a larger feature. It would likely involve adding new `faux_mgs` commands or Rhai functions to interact with TUF (e.g., download artifacts, verify metadata). An alternative is a separate tool that prepares a `targets.json`-compatible structure from a TUF repo. + - (From `upgrade-rollback-transient.rhai`) Do some sanity checks in `get_image_info` to make sure `BORD` and `NAME` from image cabooses are appropriate for the attached SP/hardware. + * **Solution:** Fetch expected `BORD`/`NAME` from the connected SP (if possible via `faux-mgs state` or similar) and compare against values from the image caboose being processed. Log warnings or errors if mismatched. + - (From `upgrade-rollback-transient.rhai`) Add a warning in `get_image_info` if the base and under-test images (especially SP or RoT images) have the same GITC, as this might indicate a misconfiguration unless it's for components like stage0 that might only differ in packaging. + * **Solution:** Store GITCs seen for base images and compare them against UT images. 
If a match is found for critical components, issue a prominent warning. + - Add a power control function for testing recovery after a failed RoT Hubris update. Power control is through a configured shell command run via Rhai `system()`. + * **Solution:** + 1. Define a structure in `targets.json` for power control commands (e.g., `power_control: { rot: { on: "cmd_rot_on", off: "cmd_rot_off", status: "cmd_rot_status" } }`). + 2. Create functions in `util.rhai` (e.g., `power_cycle_rot(conf)`) that use `system()` to execute these configured commands. + 3. Integrate these into test scripts where power cycling is needed for recovery. + - Add a power control function for controlling the STLINK probe attached to the SP, similar to the RoT power control. + * **Solution:** + 1. Extend `targets.json` for STLINK power commands (e.g., `power_control: { stlink: { ... } }`). + 2. Add corresponding functions in `util.rhai`. + - Develop a library of common pre-flight checks for scripts. + * **Solution:** Create functions in `util.rhai` that check for SP connectivity, required tools (`jq` if used by `system` calls), minimum `faux-mgs` version, etc. Scripts can call these at the beginning. + - Explore creating a template or skeleton for new Rhai test scripts. + * **Solution:** Develop a basic `.rhai` file that includes common imports (`util.rhai`), `main()` structure, `usage()` function, CLI argument parsing setup, and placeholders for test logic. + - Investigate providing more context/globals from `faux-mgs` (Rust) to Rhai scripts if useful. + * **Solution:** Review `faux-mgs/src/rhaiscript.rs` and identify if additional information from the `SingleSp` struct or global `faux-mgs` settings would be beneficial to scripts, then add them to the Rhai `Scope`. + - Expose more `hubtools::RawHubrisArchive` or `hubtools::Caboose` functionality through Rhai custom types/functions if needed. + * **Solution:** If scripts frequently need to perform complex operations on archives/cabooses currently done with intricate Rhai logic, consider adding new methods to `ArchiveInspector` or `CabooseInspector` in `faux-mgs/src/rhaiscript/hubris.rs`. + +## Documentation + + - Update the `scripts/README.md` file. + * **Solution:** Review and update `README.md` to reflect new features, functions added to `util.rhai`, changes in script execution, and new scripts. Add a section on error handling and debugging. + - Properly document any non-obvious functions in Rhai scripts and in `util.rhai`. + * **Solution:** Add comments explaining the purpose, arguments, return values, and any non-trivial logic for functions. Use a consistent documentation style. + - Document the schema for `scripts/targets.json` thoroughly. + * **Solution:** Create a section in `README.md` or a separate `CONFIG_SCHEMA.md` detailing all possible keys in `targets.json`, their purpose, expected values, and if they are optional or required. + - Expand `README.md` with more examples, advanced usage scenarios, and a guide on writing new scripts. + * **Solution:** Add sections covering common patterns, best practices for scripting with `faux-mgs`, how to debug scripts, and step-by-step examples of creating a new test script. + +## Configuration Management + + - Allow easier local overrides for `scripts/targets.json` without direct modification. + * **Solution:** Implement logic in `process_cli` (or a dedicated config loading function in `util.rhai`) to look for an optional `targets.local.json` and merge its values over the base `targets.json`. 
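+   A possible shape for that merge, as a sketch only (the `load_json` helper
+   is hypothetical and would wrap `rhai_fs` file reading plus `json_to_map`;
+   keys present in `targets.local.json` win over the base configuration):
+
+   ```rhai
+   // Sketch: shallow-merge optional local overrides over the base config.
+   fn load_config(base_path, local_path) {
+       let config = load_json(base_path);   // hypothetical helper
+       let local = load_json(local_path);   // assumed to return () if missing
+       if local != () {
+           for key in local.keys() {
+               config[key] = local[key];    // local value overrides base
+           }
+           debug(`info|applied overrides from ${local_path}`);
+       }
+       config
+   }
+   // let conf = load_config("scripts/targets.json", "scripts/targets.local.json");
+   ```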
+ - Consider a `faux-mgs` subcommand or a utility script to validate a script's configuration (`targets.json`). + * **Solution:** This could be a Rhai script itself (`check_config.rhai`) that loads `targets.json`, performs schema checks, and verifies path existence for images. + +## Security + + - Harden the `system()` function in `rhaiscript.rs` if scripts are ever run in less trusted environments or with externally sourced configurations. + * **Solution:** Options: + 1. Add a `faux-mgs` CLI flag to disable `system()` altogether. + 2. Introduce a configuration setting (e.g., in `faux-mgs` config or an environment variable) to provide a whitelist or blacklist of allowed commands for `system()`. + 3. Log all `system()` calls prominently. + - Review security implications of file system access granted to Rhai scripts, especially if script sources or configurations could be untrusted. + * **Solution:** Document the trust model for Rhai scripts. If necessary, explore options to restrict `rhai_fs::FilesystemPackage` (e.g., to subdirectories of the main script or project). + +## CI/CD (Continuous Integration / Continuous Delivery) + + - Integrate static analysis or linting for Rhai scripts into a CI pipeline. + * **Solution:** While specific Rhai linters might be rare, a CI step could check for basic syntax validity (`rhai-cli check