diff --git a/Cargo.lock b/Cargo.lock index 34fe644..0f2c3d9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,43 @@ # It is not intended for manual editing. version = 4 +[[package]] +name = "bare-metal" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5deb64efa5bd81e31fcd1938615a6d98c82eafcbcd787162b6f63b91d6bac5b3" +dependencies = [ + "rustc_version", +] + +[[package]] +name = "bitfield" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719" + +[[package]] +name = "cortex-m" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9" +dependencies = [ + "bare-metal", + "bitfield", + "embedded-hal 0.2.7", + "volatile-register", +] + +[[package]] +name = "embedded-hal" +version = "0.2.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff" +dependencies = [ + "nb 0.1.3", + "void", +] + [[package]] name = "embedded-hal" version = "1.0.0" @@ -14,7 +51,7 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c4c685bbef7fe13c3c6dd4da26841ed3980ef33e841cddfa15ce8a8fb3f1884" dependencies = [ - "embedded-hal", + "embedded-hal 1.0.0", ] [[package]] @@ -23,8 +60,17 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fba4268c14288c828995299e59b12babdbe170f6c6d73731af1b4648142e8605" dependencies = [ - "embedded-hal", - "nb", + "embedded-hal 1.0.0", + "nb 1.1.0", +] + +[[package]] +name = "nb" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f" +dependencies = [ + "nb 1.1.0", ] [[package]] @@ -48,7 +94,8 @@ dependencies = [ name 
= "openprot-hal-blocking" version = "0.1.0" dependencies = [ - "embedded-hal", + "embedded-hal 1.0.0", + "zerocopy", ] [[package]] @@ -56,7 +103,7 @@ name = "openprot-hal-nb" version = "0.1.0" dependencies = [ "embedded-hal-nb", - "nb", + "nb 1.1.0", ] [[package]] @@ -73,6 +120,15 @@ dependencies = [ "openprot-platform-traits", ] +[[package]] +name = "openprot-platform-mock" +version = "0.1.0" +dependencies = [ + "cortex-m", + "embedded-hal 1.0.0", + "openprot-hal-blocking", +] + [[package]] name = "openprot-platform-tock" version = "0.1.0" @@ -92,6 +148,33 @@ version = "0.1.0" name = "openprot-services-telemetry" version = "0.1.0" +[[package]] +name = "proc-macro2" +version = "1.0.97" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d61789d7719defeb74ea5fe81f2fdfdbd28a803847077cecce2ff14e1472f6f1" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + [[package]] name = "same-file" version = "1.0.6" @@ -101,6 +184,59 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "syn" +version = "2.0.105" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7bc3fcb250e53458e712715cf74285c1f889686520d79294a9ef3bd7aa1fc619" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "unicode-ident" +version = "1.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" + +[[package]] +name = "vcell" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002" + +[[package]] +name = "void" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" + +[[package]] +name = "volatile-register" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc" +dependencies = [ + "vcell", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -215,3 +351,23 @@ dependencies = [ "walkdir", "xshell", ] + +[[package]] +name = "zerocopy" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1039dd0d3c310cf05de012d8a39ff557cb0d23087fd44cad61df08fc31907a2f" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.26" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ecf5b4cc5364572d7f4c329661bcc82724222973f2cab6f050a4e5c22f75181" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] diff --git a/Cargo.toml b/Cargo.toml index 34a542b..7f9cb04 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -8,6 +8,7 @@ members = [ "hal/async", "hal/nb", "platform/traits", + "platform/impls/baremetal/mock", "platform/impls/linux", "platform/impls/tock", "platform/impls/hubris", @@ -15,3 +16,6 @@ members = [ "services/storage", ] resolver = "2" + 
+[workspace.dependencies] +zerocopy = { version = "0.8.25", features = ["derive"] } diff --git a/docs/src/hash-driver-hubris.md b/docs/src/hash-driver-hubris.md new file mode 100644 index 0000000..a836a9d --- /dev/null +++ b/docs/src/hash-driver-hubris.md @@ -0,0 +1,1604 @@ +# Generic Digest Server Design Document + +This document describes the design and architecture of a generic digest server for Hubris OS that supports both SPDM and PLDM protocol implementations. + +## Requirements + +### Primary Requirement + +**Enable SPDM and PLDM Protocol Support**: The digest server must provide cryptographic hash services to support both SPDM (Security Protocol and Data Model) and PLDM (Platform Level Data Model) protocol implementations in Hubris OS. + +### Derived Requirements + +#### R1: Algorithm Support +- **R1.1**: Support SHA-256 for basic SPDM operations and PLDM firmware integrity validation +- **R1.2**: Support SHA-384 for enhanced security profiles in both SPDM and PLDM +- **R1.3**: Support SHA-512 for maximum security assurance +- **R1.4**: Reject unsupported algorithms (SHA-3) with clear error codes + +#### R2: Session Management +- **R2.1**: Support incremental hash computation for large certificate chains and firmware images +- **R2.2**: Support multiple concurrent digest sessions (hardware-dependent capacity) +- **R2.3**: Provide session isolation between different SPDM and PLDM protocol flows +- **R2.4**: Automatic session cleanup to prevent resource exhaustion +- **R2.5**: Session timeout mechanism for abandoned operations + +#### R3: SPDM and PLDM Use Cases +- **R3.1**: Certificate chain verification (hash large X.509 certificate data) +- **R3.2**: Measurement verification (hash firmware measurement data) +- **R3.3**: Challenge-response authentication (compute transcript hashes) +- **R3.4**: Session key derivation (hash key exchange material) +- **R3.5**: Message authentication (hash SPDM message sequences) +- **R3.6**: PLDM firmware image integrity 
validation (hash received firmware chunks) +- **R3.7**: PLDM component image verification (validate assembled image against manifest digest) +- **R3.8**: PLDM signature verification support (hash image data for signature validation) + +#### R4: Performance and Resource Constraints +- **R4.1**: Memory-efficient operation suitable for embedded systems +- **R4.2**: Zero-copy data processing using Hubris leased memory +- **R4.3**: Deterministic resource allocation (no dynamic allocation) +- **R4.4**: Bounded execution time for real-time guarantees + +#### R5: Hardware Abstraction +- **R5.1**: Generic interface supporting any hardware digest accelerator +- **R5.2**: Mock implementation for testing and development +- **R5.3**: Type-safe hardware abstraction with compile-time verification +- **R5.4**: Consistent API regardless of underlying hardware + +#### R6: Error Handling and Reliability +- **R6.1**: Comprehensive error reporting for SPDM protocol diagnostics +- **R6.2**: Graceful handling of hardware failures +- **R6.3**: Session state validation and corruption detection +- **R6.4**: Clear error propagation to SPDM layer + +#### R7: Integration Requirements +- **R7.1**: Synchronous IPC interface compatible with Hubris task model +- **R7.2**: Idol-generated API stubs for type-safe inter-process communication +- **R7.3**: Integration with Hubris memory management and scheduling +- **R7.4**: No dependency on async runtime or futures + +#### R8: Supervisor Integration Requirements +- **R8.1**: Configure appropriate task disposition (Restart recommended for production) +- **R8.2**: SPDM clients handle task generation changes transparently (no complex recovery logic needed) +- **R8.3**: Digest server fails fast on unrecoverable hardware errors rather than returning complex error states +- **R8.4**: Support debugging via jefe external interface during development + +## Design Overview + +This digest server provides a generic implementation that can work with any device 
implementing the required digest traits from `openprot-hal-blocking`. The design supports both single-context and multi-context hardware through hardware-adaptive session management. + +## Architecture + +### System Context + +```mermaid +graph LR + subgraph "SPDM Client Task" + SC[SPDM Client] + SCV[• Certificate verification
• Transcript hashing
• Challenge-response
• Key derivation] + end + + subgraph "PLDM Client Task" + PC[PLDM Firmware Update] + PCV[• Image integrity validation
• Component verification
• Signature validation
• Running digest computation] + end + + subgraph "Digest Server" + DS[ServerImpl<D>] + DSV[• Session management
• Generic implementation
• Resource management
• Error handling] + end + + subgraph "Hardware Backend" + HW[Hardware Device] + HWV[• MockDigestDevice
• Actual HW accelerator
• Any device with traits] + end + + SC ---|Synchronous
IPC/Idol| DS + PC ---|Synchronous
IPC/Idol| DS + DS ---|HAL Traits| HW + + SC -.-> SCV + PC -.-> PCV + DS -.-> DSV + HW -.-> HWV +``` + +### Component Architecture + +``` +ServerImpl +├── Generic Type Parameter D +│ └── Trait Bounds: DigestInit +├── Session Management +│ ├── Static session storage (hardware-dependent capacity) +│ ├── Session lifecycle (init → update → finalize) +│ └── Automatic timeout and cleanup +└── Hardware Abstraction + ├── Static dispatch (no runtime polymorphism) + ├── Algorithm-specific methods + └── Error translation layer +``` + +### Data Flow + +``` +SPDM Client Request + ↓ + Idol-generated stub + ↓ + ServerImpl method + ↓ + Session validation/allocation + ↓ + Hardware context management (save/restore) + ↓ + Direct hardware streaming + ↓ + Result processing + ↓ + Response to client +``` + +### Hardware-Adaptive Implementation + +#### Platform-Specific Trait Implementations +```rust +// Single-context hardware (ASPEED HACE) - context management happens in OpContext +impl DigestInit for Ast1060HashDevice { + type OpContext<'a> = Ast1060DigestContext<'a> where Self: 'a; + type Output = Digest<8>; + + fn init<'a>(&'a mut self, _: Sha2_256) -> Result, Self::Error> { + // Direct hardware initialization - no session management needed + Ok(Ast1060DigestContext::new_sha256(self)) + } +} + +impl DigestOp for Ast1060DigestContext<'_> { + fn update(&mut self, data: &[u8]) -> Result<(), Self::Error> { + // Direct streaming to hardware - blocking until complete + self.hardware.stream_data(data) + } + + fn finalize(self) -> Result { + // Complete and return result - hardware auto-resets + self.hardware.finalize_sha256() + } +} + +// Multi-context hardware (hypothetical) - context switching hidden in traits +impl DigestInit for MultiContextDevice { + type OpContext<'a> = MultiContextDigestContext<'a> where Self: 'a; + type Output = Digest<8>; + + fn init<'a>(&'a mut self, _: Sha2_256) -> Result, Self::Error> { + // Complex session allocation happens here, hidden from server + let 
context_id = self.allocate_hardware_context()?; + Ok(MultiContextDigestContext::new(self, context_id)) + } +} + +impl DigestOp for MultiContextDigestContext<'_> { + fn update(&mut self, data: &[u8]) -> Result<(), Self::Error> { + // Context switching happens transparently here + self.hardware.ensure_context_active(self.context_id)?; + self.hardware.stream_data(data) + } +} +``` + +#### Hardware-Specific Processing Patterns + +**Single-Context Hardware (ASPEED HACE Pattern)** +```mermaid +sequenceDiagram + participant C1 as SPDM Client + participant C2 as PLDM Client + participant DS as Digest Server + participant HW as ASPEED HACE + + Note over C1,HW: Clients naturally serialize via blocking IPC + + C1->>DS: init_sha256() + DS->>HW: Initialize SHA-256 (direct hardware access) + HW-->>DS: Context initialized + DS-->>C1: session_id = 1 + + par Client 2 blocks waiting + C2->>DS: init_sha384() (BLOCKS until C1 finishes) + end + + C1->>DS: update(session_id=1, data_chunk_1) + DS->>HW: Stream data directly to hardware + HW->>HW: Process data incrementally + HW-->>DS: Update complete + DS-->>C1: Success + + C1->>DS: finalize_sha256(session_id=1) + DS->>HW: Finalize computation + HW->>HW: Complete hash calculation + HW-->>DS: Final digest result + DS-->>C1: SHA-256 digest + + Note over DS,HW: Hardware available for next client + + DS->>HW: Initialize SHA-384 for Client 2 + HW-->>DS: Context initialized + DS-->>C2: session_id = 2 (C2 unblocks) +``` + +**Multi-Context Hardware Pattern (Hypothetical)** +```mermaid +sequenceDiagram + participant C1 as SPDM Client + participant C2 as PLDM Client + participant DS as Digest Server + participant HW as Multi-Context Hardware + participant RAM as Context Storage + + Note over C1,RAM: Complex session management with context switching + + C1->>DS: init_sha256() + DS->>HW: Initialize SHA-256 context + DS->>DS: current_session = 0 + DS-->>C1: session_id = 1 + + C1->>DS: update(session_id=1, data_chunk_1) + DS->>HW: Stream data to active 
context + HW-->>DS: Update complete + DS-->>C1: Success + + C2->>DS: init_sha384() + Note over DS,RAM: Context switching required + DS->>RAM: Save session 0 context (SHA-256 state) + DS->>HW: Initialize SHA-384 context + DS->>DS: current_session = 1 + DS-->>C2: session_id = 2 + + C2->>DS: update(session_id=2, data_chunk_2) + DS->>HW: Stream data to active context + HW-->>DS: Update complete + DS-->>C2: Success + + C1->>DS: update(session_id=1, data_chunk_3) + Note over DS,RAM: Switch back to session 0 + DS->>RAM: Save session 1 context (SHA-384 state) + DS->>RAM: Restore session 0 context (SHA-256 state) + DS->>HW: Load SHA-256 context to hardware + DS->>DS: current_session = 0 + DS->>HW: Stream data to restored context + HW-->>DS: Update complete + DS-->>C1: Success + + C1->>DS: finalize_sha256(session_id=1) + DS->>HW: Finalize computation + HW-->>DS: Final digest result + DS-->>C1: SHA-256 digest + DS->>DS: current_session = None +``` + +## IPC Interface Definition + +The digest server exposes its functionality through a Hubris Idol IPC interface that provides both session-based streaming operations and one-shot convenience methods. 
+ +### Idol Interface Specification + +```rust +// digest.idol - Hubris IPC interface definition +Interface( + name: "Digest", + ops: { + // Session-based streaming operations (enabled by owned API) + "init_sha256": ( + args: {}, + reply: Result( + ok: "u32", // Returns session ID for the digest context + err: CLike("DigestError"), + ), + ), + "init_sha384": ( + args: {}, + reply: Result( + ok: "u32", // Returns session ID for the digest context + err: CLike("DigestError"), + ), + ), + "init_sha512": ( + args: {}, + reply: Result( + ok: "u32", // Returns session ID for the digest context + err: CLike("DigestError"), + ), + ), + "update": ( + args: { + "session_id": "u32", + "len": "u32", + }, + leases: { + "data": (type: "[u8]", read: true, max_len: Some(1024)), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "finalize_sha256": ( + args: { + "session_id": "u32", + }, + leases: { + "digest_out": (type: "[u32; 8]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "finalize_sha384": ( + args: { + "session_id": "u32", + }, + leases: { + "digest_out": (type: "[u32; 12]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "finalize_sha512": ( + args: { + "session_id": "u32", + }, + leases: { + "digest_out": (type: "[u32; 16]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "reset": ( + args: { + "session_id": "u32", + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + + // One-shot convenience operations (using scoped API internally) + "digest_oneshot_sha256": ( + args: { + "len": "u32", + }, + leases: { + "data": (type: "[u8]", read: true, max_len: Some(1024)), + "digest_out": (type: "[u32; 8]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "digest_oneshot_sha384": ( + args: { + "len": "u32", + }, + leases: { + "data": (type: "[u8]", read: true, max_len: 
Some(1024)), + "digest_out": (type: "[u32; 12]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + "digest_oneshot_sha512": ( + args: { + "len": "u32", + }, + leases: { + "data": (type: "[u8]", read: true, max_len: Some(1024)), + "digest_out": (type: "[u32; 16]", write: true), + }, + reply: Result( + ok: "()", + err: CLike("DigestError"), + ), + ), + }, +) +``` + +### IPC Design Rationale + +#### Session-Based Operations +- **init_sha256/384/512()**: Creates new session using owned API, returns session ID for storage +- **update(session_id, data)**: Updates specific session using move-based context operations +- **finalize_sha256/384/512(session_id)**: Completes session and recovers controller for reuse +- **reset(session_id)**: Cancels session early and recovers controller + +#### One-Shot Operations +- **digest_oneshot_sha256/384/512()**: Complete digest computation in single IPC call using scoped API +- **Convenience methods**: For simple use cases that don't need streaming + +#### Zero-Copy Data Transfer +- **Leased memory**: All data transfer uses Hubris leased memory system +- **Read leases**: Input data (`data`) passed by reference, no copying +- **Write leases**: Output digests (`digest_out`) written directly to client memory +- **Bounded transfers**: Maximum 1024 bytes per update for deterministic behavior + +#### Type Safety +- **Algorithm-specific finalize**: `finalize_sha256` only works with SHA-256 sessions +- **Sized output arrays**: `[u32; 8]` for SHA-256, `[u32; 12]` for SHA-384, `[u32; 16]` for SHA-512 +- **Session validation**: Invalid session IDs return `DigestError::InvalidSession` + +### IPC Usage Patterns + +#### SPDM Certificate Verification (Streaming) +```rust +// Client code using generated Idol stubs +let digest = Digest::from(DIGEST_SERVER_TASK_ID); + +let session_id = digest.init_sha256()?; +for chunk in certificate_data.chunks(1024) { + digest.update(session_id, chunk.len() as u32, chunk)?; +} 
+let mut cert_hash = [0u32; 8]; +digest.finalize_sha256(session_id, &mut cert_hash)?; +``` + +#### Simple Hash Computation (One-Shot) +```rust +// Client code for simple operations +let digest = Digest::from(DIGEST_SERVER_TASK_ID); +let mut hash_output = [0u32; 8]; +digest.digest_oneshot_sha256(data.len() as u32, data, &mut hash_output)?; +``` + +## Detailed Design + +### Session Model + +#### Session Lifecycle +``` +┌─────────┐ init_sha256/384/512() ┌─────────┐ +│ FREE │ ────────────────────────→ │ ACTIVE │ +│ │ │ │ +└─────────┘ └─────────┘ + ↑ │ + │ finalize_sha256/384/512() │ update(data) + │ reset() │ (stream to hardware) + │ timeout_cleanup() │ + └───────────────────────────────────────┘ +``` + +#### Hardware-Specific Session Management + +Different hardware platforms have varying capabilities for concurrent session support: + +```rust +// Platform-specific capability trait +pub trait DigestHardwareCapabilities { + const MAX_CONCURRENT_SESSIONS: usize; + const SUPPORTS_HARDWARE_CONTEXT_SWITCHING: bool; +} + +// AST1060 implementation - single session, simple and efficient +impl DigestHardwareCapabilities for Ast1060HashDevice { + const MAX_CONCURRENT_SESSIONS: usize = 1; // Work with hardware, not against it + const SUPPORTS_HARDWARE_CONTEXT_SWITCHING: bool = false; +} + +// Example hypothetical multi-context implementation +impl DigestHardwareCapabilities for HypotheticalMultiContextDevice { + const MAX_CONCURRENT_SESSIONS: usize = 16; // Hardware-dependent capacity + const SUPPORTS_HARDWARE_CONTEXT_SWITCHING: bool = true; +} + +// Generic server implementation +pub struct ServerImpl { + sessions: FnvIndexMap, + hardware: D, + next_session_id: u32, +} + +pub struct DigestSession { + algorithm: SessionAlgorithm, + timeout: Option, + // Hardware-specific context data only if supported +} +``` + +### Generic Hardware Abstraction with Platform-Adaptive Session Management + +#### Trait Requirements +The server is generic over type `D` where: +```rust +D: 
DigestInit + DigestInit + DigestInit + ErrorType +``` + +With the actual `openprot-hal-blocking` trait structure: +```rust +// Hardware device implements DigestInit for each algorithm +impl DigestInit for MyDigestDevice { + type OpContext<'a> = MyDigestContext<'a> where Self: 'a; + type Output = Digest<8>; + + fn init<'a>(&'a mut self, _: Sha2_256) -> Result, Self::Error> { + // All hardware complexity (context management, save/restore) handled here + } +} + +// The context handles streaming operations +impl DigestOp for MyDigestContext<'_> { + type Output = Digest<8>; + + fn update(&mut self, data: &[u8]) -> Result<(), Self::Error> { + // Hardware-specific streaming implementation + // Context switching (if needed) happens transparently + } + + fn finalize(self) -> Result { + // Complete digest computation + // Context cleanup happens automatically + } +} +``` + +#### Hardware-Adaptive Architecture +- **Single-Context Hardware**: Direct operations, clients naturally serialize via blocking IPC +- **Multi-Context Hardware**: Native hardware session switching when supported +- **Compile-time optimization**: Session management code only included when needed +- **Platform-specific limits**: `MAX_CONCURRENT_SESSIONS` based on hardware capabilities +- **Synchronous IPC alignment**: Works naturally with Hubris blocking message passing + +#### Concurrency Patterns by Hardware Type + +**Single-Context Hardware (ASPEED HACE):** +``` +Client A calls init_sha256() → Blocks until complete → Returns session_id +Client B calls init_sha384() → Blocks waiting for A to finish → Still blocked +Client A calls update(session_id) → Blocks until complete → Returns success +Client B calls update(session_id) → Still blocked waiting for A to finalize +Client A calls finalize() → Releases hardware → Client B can now proceed +``` + +**Multi-Context Hardware (Hypothetical):** +``` +Client A calls init_sha256() → Creates session context → Returns immediately +Client B calls init_sha384() → 
Creates different context → Returns immediately +Client A calls update(session_id) → Uses session context → Returns immediately +Client B calls update(session_id) → Uses different context → Returns immediately +``` + +#### Session Management Flow (Hardware-Dependent) +``` +Single-Context Hardware: Direct Operation → Hardware → Result +Multi-Context Hardware: Session Request → Hardware Context → Process → Save Context → Result +``` + +#### Static Dispatch Pattern +- **Compile-time algorithm selection**: No runtime algorithm switching +- **Type safety**: Associated type constraints ensure output size compatibility +- **Zero-cost abstraction**: No virtual function calls or dynamic dispatch +- **Hardware flexibility**: Any device implementing the traits can be used + +### Memory Management + +#### Static Allocation Strategy (Hardware-Adaptive) +```rust +// Session storage sized based on hardware capabilities +static mut SESSION_STORAGE: [SessionData; D::MAX_CONCURRENT_SESSIONS] = [...]; +``` +- **Hardware-aligned limits**: Session count matches hardware capabilities +- **Single-context optimization**: No session overhead for simple hardware +- **Multi-context support**: Full session management when hardware supports it +- **Deterministic memory usage**: No dynamic allocation +- **Real-time guarantees**: Bounded memory access patterns + +#### Hardware-Adaptive Data Flow +- **Zero-copy IPC**: Uses Hubris leased memory system +- **Platform optimization**: Direct operations for single-context hardware +- **Session management**: Only when hardware supports multiple contexts +- **Bounded updates**: Maximum 1024 bytes per update call (hardware limitation) +- **Memory safety**: All buffer accesses bounds-checked +- **Synchronous semantics**: Natural blocking behavior with Hubris IPC + +#### Platform-Specific Processing +``` +Single-Context: Client Request → Direct Hardware → Result → Client Response +Multi-Context: Client Request → Session Management → Hardware Context → 
Result → Client Response +``` + +### Error Handling Strategy + +#### Hardware-Adaptive Error Model +``` +Hardware Layer Error → DigestError → RequestError → Client Response +``` + +#### Platform-Specific Error Categories +- **Hardware failures**: `DigestError::HardwareFailure` (all platforms) +- **Session management**: `DigestError::InvalidSession`, `DigestError::TooManySessions` (multi-context only) +- **Input validation**: `DigestError::InvalidInputLength` (hardware-specific limits) +- **Algorithm support**: `DigestError::UnsupportedAlgorithm` (capability-dependent) + +### Hardware-Adaptive Session Architecture + +Instead of imposing a complex context management layer, the digest server adapts to hardware capabilities: + +```mermaid +graph TB + subgraph "Single-Context Hardware (ASPEED HACE)" + SC1[Client Request] + SC2[Direct Hardware Operation] + SC3[Immediate Response] + SC1 --> SC2 --> SC3 + end + + subgraph "Multi-Context Hardware (Hypothetical)" + MC1[Session Pool] + MC2[Context Scheduler] + MC3[Hardware Contexts] + MC4[Session Management] + MC1 --> MC2 --> MC3 --> MC4 + end +``` + +#### Hardware Capability Detection + +The digest server adapts to different hardware capabilities through compile-time trait bounds: + +```rust +pub trait DigestHardwareCapabilities { + const MAX_CONCURRENT_SESSIONS: usize; // Hardware-dependent: 1 for single-context, 16+ for multi-context + const SUPPORTS_CONTEXT_SWITCHING: bool; + const MAX_UPDATE_SIZE: usize; +} +``` + +Examples of hardware-specific session limits: +- **ASPEED AST1060**: `MAX_CONCURRENT_SESSIONS = 1` (single hardware context) +- **Multi-context accelerators**: `MAX_CONCURRENT_SESSIONS = 16` (or higher based on hardware design) +- **Software implementations**: Can support many concurrent sessions limited by memory + +#### Session Management Strategy +- **Single-context platforms**: Direct hardware operations, no session state +- **Multi-context platforms**: Full session management with context switching +- 
**Compile-time optimization**: Dead code elimination for unused features + + + +#### Clean Server Implementation + +With proper trait encapsulation, the server implementation becomes much simpler: + +```rust +impl<D> ServerImpl<D> +where + D: DigestInit<Sha2_256> + DigestInit<Sha2_384> + DigestInit<Sha2_512> + ErrorType +{ + fn update_session(&mut self, session_id: u32, data: &[u8]) -> Result<(), DigestError> { + let session = self.get_session_mut(session_id)?; + + // Generic trait call - all hardware complexity hidden + session.op_context.update(data) + .map_err(|_| DigestError::HardwareFailure)?; + + Ok(()) + } + + fn finalize_session(&mut self, session_id: u32) -> Result<Digest<8>, DigestError> { + let session = self.take_session(session_id)?; + + // Trait handles finalization and automatic cleanup + session.op_context.finalize() + .map_err(|_| DigestError::HardwareFailure) + } +} +``` + +#### Hardware Complexity Encapsulation +- **No save/restore methods**: All context management hidden in trait implementations +- **No platform-specific code**: Server only calls generic trait methods +- **Automatic optimization**: Single-context hardware avoids unnecessary overhead +- **Transparent complexity**: Multi-context hardware handles switching internally + +### Concurrency Model + +#### Session Isolation +- Each session operates independently +- No shared mutable state between sessions +- Session IDs provide access control +- Timeout mechanism prevents resource leaks + +#### SPDM and PLDM Integration Points +1. **SPDM Certificate Verification**: Hash certificate chains incrementally +2. **SPDM Transcript Computation**: Hash sequences of SPDM messages +3. **SPDM Challenge Processing**: Compute authentication hashes +4. **SPDM Key Derivation**: Hash key exchange material +5. **PLDM Firmware Integrity**: Hash received firmware image chunks during transfer +6. **PLDM Component Validation**: Verify assembled components against manifest digests +7. 
**PLDM Multi-Component Updates**: Concurrent digest computation for multiple firmware components + +## Failure Scenarios + +### Session Management Failures + +#### Session Exhaustion Scenarios + +**Single-Context Hardware (ASPEED HACE) - No Exhaustion Possible** +```mermaid +sequenceDiagram + participant S1 as SPDM Client 1 + participant S2 as SPDM Client 2 + participant DS as Digest Server + participant HW as ASPEED HACE + + Note over DS,HW: Hardware only supports one active session + + S1->>DS: init_sha256() + DS->>HW: Direct hardware initialization + DS-->>S1: session_id = 1 + + S2->>DS: init_sha384() (BLOCKS on IPC until S1 finishes) + Note over S2: Client automatically waits - no error needed + + S1->>DS: finalize_sha256(session_id=1) + DS->>HW: Complete and release hardware + DS-->>S1: digest result + + Note over DS,HW: Hardware now available + DS->>HW: Initialize SHA-384 for S2 + DS-->>S2: session_id = 2 (S2 unblocks) +``` + +**Multi-Context Hardware (Hypothetical) - True Session Exhaustion** +```mermaid +sequenceDiagram + participant S1 as Client 1 + participant S2 as Client 9 + participant DS as Digest Server + participant HW as Multi-Context Hardware + + Note over DS: Hardware capacity reached, all contexts active + + S2->>DS: init_sha256() + DS->>DS: find_free_hardware_context() + DS-->>S2: Error: TooManySessions + + Note over S2: Client must wait for context to free up + S2->>DS: init_sha256() (retry after delay) + DS->>HW: Allocate available hardware context + DS-->>S2: session_id = 9 +``` + +#### Session Timeout Recovery +```mermaid +sequenceDiagram + participant SC as SPDM Client + participant DS as Digest Server + participant T as Timer + + SC->>DS: init_sha256() + DS-->>SC: session_id = 3 + + Note over T: 10,000 ticks pass + T->>DS: timer_tick + DS->>DS: cleanup_expired_sessions() + DS->>DS: session[3].timeout expired + DS->>DS: session[3] = FREE + + SC->>DS: update(session_id=3, data) + DS->>DS: validate_session(3) + DS-->>SC: Error: 
InvalidSession + + Note over SC: Client must reinitialize + SC->>DS: init_sha256() + DS-->>SC: session_id = 3 (reused) +``` + +### Hardware Failure Scenarios + +#### Hardware Device Failure +```mermaid +flowchart TD + A[SPDM/PLDM Client Request] --> B[Digest Server] + B --> C{Hardware Available?} + + C -->|Yes| D[Call hardware.init] + C -->|No| E[panic! - Hardware unavailable] + + D --> F{Hardware Response} + F -->|Success| G[Process normally] + F -->|Error| H[panic! - Hardware failure] + + G --> I[Return result to client] + E --> J[Task fault → Jefe supervision] + H --> J + + style E fill:#ffcccc + style H fill:#ffcccc + style J fill:#fff2cc +``` + + +### Resource Exhaustion Scenarios + +#### Memory Pressure Handling +```mermaid +flowchart LR + A[Large Data Update] --> B{Buffer Space Available?} + + B -->|Yes| C[Accept data into session buffer] + B -->|No| D[Return InvalidInputLength] + + C --> E{Session Buffer Full?} + E -->|No| F[Continue accepting updates] + E -->|Yes| G[Client must finalize before more updates] + + D --> H[Client must use smaller chunks] + G --> I[finalize_sha256/384/512] + H --> J[Retry with smaller data] + + style D fill:#ffcccc + style G fill:#fff2cc + style H fill:#ccffcc +``` + +#### Session Lifecycle Error States +```mermaid +stateDiagram-v2 + [*] --> FREE + FREE --> ACTIVE_SHA256: init_sha256() + hardware context init + FREE --> ACTIVE_SHA384: init_sha384() + hardware context init + FREE --> ACTIVE_SHA512: init_sha512() + hardware context init + + ACTIVE_SHA256 --> ACTIVE_SHA256: update(data) → stream to hardware + ACTIVE_SHA384 --> ACTIVE_SHA384: update(data) → stream to hardware + ACTIVE_SHA512 --> ACTIVE_SHA512: update(data) → stream to hardware + + ACTIVE_SHA256 --> FREE: finalize_sha256() → hardware result + ACTIVE_SHA384 --> FREE: finalize_sha384() → hardware result + ACTIVE_SHA512 --> FREE: finalize_sha512() → hardware result + + ACTIVE_SHA256 --> FREE: reset() + context cleanup + ACTIVE_SHA384 --> FREE: reset() + context cleanup 
+ ACTIVE_SHA512 --> FREE: reset() + context cleanup + + ACTIVE_SHA256 --> FREE: timeout + context cleanup + ACTIVE_SHA384 --> FREE: timeout + context cleanup + ACTIVE_SHA512 --> FREE: timeout + context cleanup + + state ERROR_STATES { + [*] --> InvalidSession: Wrong session ID + [*] --> WrongAlgorithm: finalize_sha384() on SHA256 session + [*] --> ContextSwitchError: Hardware context save/restore failure + [*] --> HardwareError: Hardware streaming failure + } + + ACTIVE_SHA256 --> ERROR_STATES: Error conditions + ACTIVE_SHA384 --> ERROR_STATES: Error conditions + ACTIVE_SHA512 --> ERROR_STATES: Error conditions +``` + +### SPDM Protocol Impact Analysis + +#### Certificate Verification Failure Recovery + +**Single-Context Hardware (ASPEED HACE) - No Session Exhaustion** +```mermaid +sequenceDiagram + participant SPDM as SPDM Protocol + participant DS as Digest Server + participant HW as ASPEED HACE + + SPDM->>DS: verify_certificate_chain() + DS->>HW: Direct hardware operation (blocks until complete) + HW-->>DS: Certificate hash result + DS-->>SPDM: Success + + Note over SPDM: No session management complexity needed +``` + +**Multi-Context Hardware (Hypothetical) - True Session Management** +```mermaid +sequenceDiagram + participant SPDM as SPDM Protocol + participant DS as Digest Server + participant HW as Multi-Context Hardware + + SPDM->>DS: verify_certificate_chain() + + alt Hardware context available + DS->>HW: Allocate context and process + HW-->>DS: Certificate hash result + DS-->>SPDM: Success + else All contexts busy + DS-->>SPDM: Error: TooManySessions + Note over SPDM: Client retry logic or wait + SPDM->>DS: verify_certificate_chain() (retry) + DS-->>SPDM: Success (context now available) + end +``` + +#### Transcript Hash Failure Impact +```mermaid +flowchart TD + A[SPDM Message Exchange] --> B[Compute Transcript Hash] + B --> C{Digest Server Available?} + + C -->|Yes| D[Normal transcript computation] + C -->|No| E[Digest server failure] + + E --> 
F{Failure Type} + F -->|Session Exhausted| G[Retry with backoff] + F -->|Hardware Failure| H[Abort authentication] + F -->|Timeout| I[Reinitialize session] + + G --> J{Retry Successful?} + J -->|Yes| D + J -->|No| K[Authentication failure] + + H --> K + I --> L{Reinit Successful?} + L -->|Yes| D + L -->|No| K + + D --> M[Continue SPDM protocol] + K --> N[Report to security policy] + + style E fill:#ffcccc + style K fill:#ff9999 + style N fill:#ffcccc +``` + +### Failure Recovery Strategies + +#### Error Propagation Chain +```mermaid +flowchart LR + HW[Hardware Layer] -->|Any Error| PANIC[Task Panic] + + DS[Digest Server] -->|Recoverable DigestError| RE[RequestError wrapper] + RE -->|IPC| CLIENTS[SPDM/PLDM Clients] + CLIENTS -->|Simple Retry| POL[Security Policy] + + PANIC -->|Task Fault| JEFE[Jefe Supervisor] + JEFE -->|Task Restart| DS_NEW[Fresh Digest Server] + DS_NEW -->|Next IPC| CLIENTS + + subgraph "Recoverable Error Types" + E1[InvalidSession] + E2[TooManySessions] + E3[InvalidInputLength] + end + + subgraph "Simple Client Recovery" + R1[Session Cleanup] + R2[Retry with Backoff] + R3[Use One-shot API] + R4[Authentication Failure] + end + + DS --> E1 + DS --> E2 + DS --> E3 + + CLIENTS --> R1 + CLIENTS --> R2 + CLIENTS --> R3 + CLIENTS --> R4 + + style PANIC fill:#ffcccc + style DS_NEW fill:#ccffcc +``` + +#### System-Level Failure Handling +```mermaid +graph TB + subgraph "Digest Server Internal Failures" + F1[Session Exhaustion] + F2[Recoverable Hardware Failure] + F3[Input Validation Errors] + end + + subgraph "Task-Level Failures" + T1[Unrecoverable Hardware Failure] + T2[Memory Corruption] + T3[Syscall Faults] + T4[Explicit Panics] + end + + subgraph "SPDM Client Responses" + S1[Retry with Backoff] + S2[Fallback to One-shot] + S3[Graceful Degradation] + S4[Abort Authentication] + end + + subgraph "Jefe Supervisor Actions" + J1[Task Restart - Restart Disposition] + J2[Hold for Debug - Hold Disposition] + J3[Log Fault Information] + J4[External Debug 
Interface] + end + + subgraph "System-Level Responses" + R1[Continue with Fresh Task Instance] + R2[Debug Analysis Mode] + R3[System Reboot - Jefe Fault] + end + + F1 --> S1 + F2 --> S1 + F3 --> S4 + + T1 --> J1 + T2 --> J1 + T3 --> J1 + T4 --> J1 + + J1 --> R1 + J2 --> R2 + + S1 --> R1 + S2 --> R1 + S3 --> R1 + + R2 --> R3 + R1 --> R4 + R2 --> R4 +``` + +## Supervisor Integration and System-Level Failure Handling + +### Jefe Supervisor Role + +The digest server operates under the supervision of Hubris OS's supervisor task ("jefe"), which provides system-level failure management beyond the server's internal error handling. + +#### Supervisor Architecture +```mermaid +graph TB + subgraph "Supervisor Domain (Priority 0)" + JEFE[Jefe Supervisor Task] + JEFE_FEATURES[• Fault notification handling
• Task restart decisions
• Debugging interface
• System restart capability] + end + + subgraph "Application Domain" + DS[Digest Server] + SPDM[SPDM Client] + OTHER[Other Tasks] + end + + KERNEL[Hubris Kernel] -->|Fault Notifications| JEFE + JEFE -->|reinit_task| KERNEL + JEFE -->|system_restart| KERNEL + + DS -.->|Task Fault| KERNEL + SPDM -.->|Task Fault| KERNEL + OTHER -.->|Task Fault| KERNEL + + JEFE -.-> JEFE_FEATURES +``` + +#### Task Disposition Management + +Each task, including the digest server, has a configured disposition that determines jefe's response to failures: + +- **Restart Disposition**: Automatic recovery via `kipc::reinit_task()` +- **Hold Disposition**: Task remains faulted for debugging inspection + +#### Failure Escalation Hierarchy + +```mermaid +sequenceDiagram + participant HW as Hardware + participant DS as Digest Server + participant SPDM as SPDM Client + participant K as Kernel + participant JEFE as Jefe Supervisor + + Note over DS: Fail immediately on any hardware failure + HW->>DS: Hardware fault + DS->>DS: panic!("Hardware failure detected") + DS->>K: Task fault occurs + K->>JEFE: Fault notification (bit 0) + + JEFE->>K: find_faulted_task() + K-->>JEFE: task_index (digest server) + + alt Restart disposition (production) + JEFE->>K: reinit_task(digest_server, true) + K->>DS: Task reinitialized with fresh hardware state + Note over SPDM: Next IPC gets fresh task, no special handling needed + else Hold disposition (debug) + JEFE->>JEFE: Mark holding_fault = true + Note over DS: Task remains faulted for debugging + Note over SPDM: IPC returns generation mismatch error + end +``` + +### System Failure Categories and Responses + +#### Recoverable Failures (Handled by Digest Server) +- **Session Management**: `TooManySessions`, `InvalidSession` → Return error to client +- **Input Validation**: `InvalidInputLength` → Return error to client + +#### Task-Level Failures (Handled by Jefe) +- **Any Hardware Failure**: Hardware errors of any kind → Task panic → Jefe restart +- **Hardware 
Resource Exhaustion**: Hardware cannot allocate resources → Task panic → Jefe restart +- **Memory Corruption**: Stack overflow, heap corruption → Task fault → Jefe restart +- **Syscall Faults**: Invalid kernel IPC usage → Task fault → Jefe restart +- **Explicit Panics**: `panic!()` in digest server code → Task fault → Jefe restart + +#### System-Level Failures (Handled by Kernel) +- **Supervisor Fault**: Jefe task failure → System reboot +- **Kernel Panic**: Critical kernel failure → System reset +- **Watchdog Timeout**: System hang detection → Hardware reset + +**Key Design Principle**: The digest server fails immediately on any hardware error without attempting recovery. This maximally simplifies the implementation and ensures consistent system behavior through jefe's supervision. + +### External Debugging Interface + +Jefe provides an external interface for debugging digest server failures: + +```rust +// External control commands available via debugger (Humility) +enum JefeRequest { + Hold, // Stop automatic restart of digest server + Start, // Manually restart digest server + Release, // Resume automatic restart behavior + Fault, // Force digest server to fault for testing +} +``` + +This enables development workflows like: +1. **Hold faulting server**: Examine failure state without automatic restart +2. **Analyze dump data**: Extract task memory and register state +3. **Test recovery**: Manually trigger restart after fixes +4. 
**Fault injection**: Test SPDM client resilience + +### Integration Requirements Update + +#### R8: Supervisor Integration Requirements +- **R8.1**: Configure appropriate task disposition (Restart recommended for production) +- **R8.2**: SPDM clients handle task generation changes transparently (no complex recovery logic needed) +- **R8.3**: Digest server fails fast on unrecoverable hardware errors rather than returning complex error states +- **R8.4**: Support debugging via jefe external interface during development + +## SPDM Integration Examples + +### Certificate Chain Verification (Requirement R3.1) +```rust +// SPDM task verifying a certificate chain +fn verify_certificate_chain(&mut self, cert_chain: &[u8]) -> Result { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Create session for certificate hash (R2.1: incremental computation) + let session_id = digest.init_sha256()?; // R1.1: SHA-256 support + + // Process certificate data incrementally (R4.2: zero-copy processing) + for chunk in cert_chain.chunks(512) { + digest.update(session_id, chunk.len() as u32, chunk)?; + } + + // Get final certificate hash + let mut cert_hash = [0u32; 8]; + digest.finalize_sha256(session_id, &mut cert_hash)?; + + // Verify against policy + self.verify_hash_against_policy(&cert_hash) +} +``` + +### SPDM Transcript Hash Computation (Requirement R3.3) +```rust +// Computing hash of SPDM message sequence for authentication +fn compute_transcript_hash(&mut self, messages: &[SpdmMessage]) -> Result<[u32; 8], SpdmError> { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + let session_id = digest.init_sha256()?; // R2.3: session isolation + + // Hash all messages in the SPDM transcript (R3.5: message authentication) + for msg in messages { + let msg_bytes = msg.serialize()?; + digest.update(session_id, msg_bytes.len() as u32, &msg_bytes)?; + } + + let mut transcript_hash = [0u32; 8]; + digest.finalize_sha256(session_id, &mut transcript_hash)?; // R7.1: synchronous IPC + 
Ok(transcript_hash) +} +``` + +### Sequential SPDM Operations (Requirement R2.1) +```rust +// SPDM task performing sequential operations using incremental hashing +impl SpdmResponder { + fn handle_certificate_and_transcript(&mut self, cert_data: &[u8], messages: &[SpdmMessage]) -> Result<(), SpdmError> { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Operation 1: Certificate verification (R2.1: incremental computation) + let cert_session = digest.init_sha256()?; // R1.1: SHA-256 support + + // Process certificate incrementally + for chunk in cert_data.chunks(512) { + digest.update(cert_session, chunk.len() as u32, chunk)?; + } + + let mut cert_hash = [0u32; 8]; + digest.finalize_sha256(cert_session, &mut cert_hash)?; + + // Operation 2: Transcript hash computation (sequential, after cert verification) + let transcript_session = digest.init_sha256()?; // R2.3: new isolated session + + // Hash all SPDM messages in sequence + for msg in messages { + let msg_bytes = msg.serialize()?; + digest.update(transcript_session, msg_bytes.len() as u32, &msg_bytes)?; + } + + let mut transcript_hash = [0u32; 8]; + digest.finalize_sha256(transcript_session, &mut transcript_hash)?; + + // Use both hashes for SPDM protocol + self.process_verification_results(&cert_hash, &transcript_hash) + } +} +``` + +## PLDM Integration Examples + +### PLDM Firmware Image Integrity Validation (Requirement R3.6) +```rust +// PLDM task validating received firmware chunks +fn validate_firmware_image(&mut self, image_chunks: &[&[u8]], expected_digest: &[u32; 8]) -> Result { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Create session for running digest computation (R2.1: incremental computation) + let session_id = digest.init_sha256()?; // R1.1: SHA-256 commonly used in PLDM + + // Process firmware image incrementally as chunks are received (R4.2: zero-copy processing) + for chunk in image_chunks { + digest.update(session_id, chunk.len() as u32, chunk)?; + } + + // Get final 
image digest + let mut computed_digest = [0u32; 8]; + digest.finalize_sha256(session_id, &mut computed_digest)?; + + // Compare with manifest digest + Ok(computed_digest == *expected_digest) +} +``` + +### PLDM Component Verification During Transfer (Requirement R3.7) +```rust +// PLDM task computing running digest during TransferFirmware +fn transfer_firmware_with_validation(&mut self, component_id: u16) -> Result<(), PldmError> { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Initialize digest session for this component transfer (R2.3: session isolation) + let session_id = digest.init_sha384()?; // R1.2: SHA-384 for enhanced security + + // Store session for this component transfer + self.component_sessions.insert(component_id, session_id); + + // Firmware chunks will be processed via update() calls as they arrive + // This enables real-time validation during transfer rather than after + + Ok(()) +} + +fn process_firmware_chunk(&mut self, component_id: u16, chunk: &[u8]) -> Result<(), PldmError> { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Retrieve session for this component + let session_id = self.component_sessions.get(&component_id) + .ok_or(PldmError::InvalidComponent)?; + + // Add chunk to running digest (R3.6: firmware image integrity) + digest.update(*session_id, chunk.len() as u32, chunk)?; + + Ok(()) +} +``` + +### PLDM Multi-Component Concurrent Updates (Requirement R2.2) +```rust +// PLDM task handling multiple concurrent firmware updates +impl PldmFirmwareUpdate { + fn handle_concurrent_updates(&mut self) -> Result<(), PldmError> { + let digest = Digest::from(DIGEST_SERVER_TASK_ID); + + // Component 1: Main firmware using SHA-256 + let main_fw_session = digest.init_sha256()?; + + // Component 2: Boot loader using SHA-384 + let bootloader_session = digest.init_sha384()?; // R1.2: SHA-384 support + + // Component 3: FPGA bitstream using SHA-512 + let fpga_session = digest.init_sha512()?; // R1.3: SHA-512 support + + // All 
components can be updated concurrently (hardware-dependent capacity - R2.2) + // Each maintains independent digest state (R2.3: isolation) + + // Store sessions for component tracking + self.component_sessions.insert(MAIN_FW_COMPONENT, main_fw_session); + self.component_sessions.insert(BOOTLOADER_COMPONENT, bootloader_session); + self.component_sessions.insert(FPGA_COMPONENT, fpga_session); + + Ok(()) + } +} +``` + +## Requirements Validation + +### ✅ Requirements Satisfied + +| Requirement | Status | Implementation | +|-------------|--------|----------------| +| **R1.1** SHA-256 support | ✅ | `init_sha256()`, `finalize_sha256()` with hardware context | +| **R1.2** SHA-384 support | ✅ | `init_sha384()`, `finalize_sha384()` with hardware context | +| **R1.3** SHA-512 support | ✅ | `init_sha512()`, `finalize_sha512()` with hardware context | +| **R1.4** Reject unsupported algorithms | ✅ | SHA-3 functions return `UnsupportedAlgorithm` | +| **R2.1** Incremental hash computation | ✅ | True streaming via `update_hardware_context()` | +| **R2.2** Multiple concurrent sessions | ✅ | Hardware-dependent capacity with context switching | +| **R2.3** Session isolation | ✅ | Independent hardware contexts in non-cacheable RAM | +| **R2.4** Automatic cleanup | ✅ | `cleanup_expired_sessions()` with context cleanup | +| **R2.5** Session timeout | ✅ | `SESSION_TIMEOUT_TICKS` with hardware context release | +| **R3.1-R3.5** SPDM use cases | ✅ | All supported via streaming session-based API | +| **R3.6-R3.8** PLDM use cases | ✅ | Firmware validation, component verification, streaming support | +| **R4.1** Memory efficient | ✅ | Static allocation, hardware context simulation | +| **R4.2** Zero-copy processing | ✅ | Direct streaming to hardware, no session buffering | +| **R4.3** Deterministic allocation | ✅ | No dynamic memory allocation | +| **R4.4** Bounded execution | ✅ | Fixed context switch costs, predictable timing | +| **R5.1** Generic hardware interface | ✅ | `ServerImpl` with 
context management traits | +| **R5.2** Mock implementation | ✅ | `MockDigestDevice` with context simulation | +| **R5.3** Type-safe abstraction | ✅ | Associated type constraints + context safety | +| **R5.4** Consistent API | ✅ | Same streaming interface regardless of hardware | +| **R6.1** Comprehensive errors | ✅ | Full `DigestError` enumeration + context errors | +| **R6.2** Hardware failure handling | ✅ | `HardwareFailure` error propagation + context cleanup | +| **R6.3** Session state validation | ✅ | `validate_session()` + context state checks | +| **R6.4** Clear error propagation | ✅ | `RequestError` wrapper | +| **R7.1** Synchronous IPC | ✅ | No async/futures dependencies | +| **R7.2** Idol-generated stubs | ✅ | Type-safe IPC interface | +| **R7.3** Hubris integration | ✅ | Uses userlib, leased memory | +| **R7.4** No async runtime | ✅ | Pure synchronous implementation | +| **R8.1** Task disposition configuration | ✅ | Configured in app.toml | +| **R8.2** Transparent task generation handling | ✅ | SPDM clients get fresh task transparently | +| **R8.3** Fail-fast hardware error handling | ✅ | Task panic on unrecoverable hardware errors | +| **R8.4** Debugging support | ✅ | Jefe external interface available | + +## Generic Design Summary + +The `ServerImpl` struct is now generic over any device `D` that implements: + +## Key Features + +1. **True Hardware Streaming**: Data flows directly to hardware contexts with proper save/restore +2. **Context Management**: Multiple sessions share hardware via non-cacheable RAM context switching +3. **Type Safety**: Associated type constraints ensure digest output sizes match expectations +4. **Zero Runtime Cost**: Uses static dispatch for optimal performance +5. **Memory Efficient**: Static session storage with hardware context simulation +6. 
**Concurrent Sessions**: Hardware-dependent concurrent digest operations with automatic context switching + +## Usage Example + +To use with a custom hardware device that supports context management: + +```rust +// Your hardware device must implement the required traits +struct MyDigestDevice { + // Hardware-specific context management fields + current_context: Option<DigestContext>, + context_save_addr: *mut u8, // Non-cacheable RAM base +} + +impl DigestInit<Sha2_256> for MyDigestDevice { + type Output = Digest<8>; + + fn init(&mut self, _: Sha2_256) -> Result<DigestContext, HardwareError> { + // Initialize hardware registers for SHA-256 + // Set up context for streaming operations + Ok(DigestContext::new_sha256()) + } +} + +impl DigestInit<Sha2_384> for MyDigestDevice { + type Output = Digest<12>; + // Similar implementation for SHA-384 +} + +impl DigestInit<Sha2_512> for MyDigestDevice { + type Output = Digest<16>; + // Similar implementation for SHA-512 +} + +impl DigestCtrlReset for MyDigestDevice { + fn reset(&mut self) -> Result<(), HardwareError> { + // Reset hardware to clean state + // Clear any active contexts + Ok(()) + } +} + +// Context management methods (hardware-specific) +impl MyDigestDevice { + fn save_context_to_ram(&mut self, session_id: usize) -> Result<(), HardwareError> { + // Save current hardware context to non-cacheable RAM + // Hardware-specific register read and memory write operations + } + + fn restore_context_from_ram(&mut self, session_id: usize) -> Result<(), HardwareError> { + // Restore session context from non-cacheable RAM to hardware + // Hardware-specific memory read and register write operations + } +} + +// Then use it with the streaming server +let server = ServerImpl::new(MyDigestDevice::new()); +``` + +--- + +# Implementation Status and Development Notes + +## Critical Findings and Resolutions + +### Trait Lifetime Incompatibility with Session-Based Operations - RESOLVED + +During implementation, a fundamental incompatibility was discovered between the `openprot-hal-blocking` digest traits and 
the session-based streaming operations described in this design document. **This issue has been resolved through the implementation of a dual API structure with owned context variants.** + +#### The Original Problem + +The `openprot-hal-blocking` digest traits were originally designed for **scoped operations**, but the digest server API expected **persistent sessions**. These requirements were fundamentally incompatible due to lifetime constraints. + +#### Root Cause: Lifetime Constraints in Scoped API + +The original scoped trait definition created lifetime constraints: + +```rust +pub trait DigestInit<T>: ErrorType { + type OpContext<'a>: DigestOp + where Self: 'a; + + fn init(&mut self, init_params: T) -> Result<Self::OpContext<'_>, Self::Error>; +} +``` + +The `OpContext<'a>` had a lifetime tied to `&'a mut self`, meaning: +- Context could not outlive the function call that created it +- Context could not be stored in a separate struct +- Context could not persist across IPC boundaries +- Sessions could not maintain persistent state between operations + +#### The Solution: Dual API with Move-Based Resource Management + +The incompatibility has been **completely resolved** through implementation of a dual API structure: + +**1. Scoped API (Original)** - For simple, one-shot operations: +```rust +pub mod scoped { + pub trait DigestInit<T>: ErrorType { + type OpContext<'a>: DigestOp + where Self: 'a; + + fn init<'a>(&'a mut self, init_params: T) -> Result<Self::OpContext<'a>, Self::Error>; + } +} +``` + +**2. 
Owned API (New)** - For session-based, streaming operations: +```rust +pub mod owned { + pub trait DigestInit<T>: ErrorType { + type OwnedContext: DigestOp; + + fn init_owned(self, init_params: T) -> Result<Self::OwnedContext, Self::Error>; + } + + pub trait DigestOp: ErrorType + Sized { + type Output; + type Controller; + + fn update(self, data: &[u8]) -> Result<Self, Self::Error>; + fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error>; + fn cancel(self) -> Self::Controller; + } +} +``` + +#### How the Owned API Enables Sessions + +The owned API uses **move-based resource management** to solve the lifetime problem: + +```rust +// ✅ NOW POSSIBLE: Digest server with owned contexts and controller +use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + +struct DigestServer<H, C> { + controller: Option<H>, // Hardware controller + active_session: Option<C>, // Single active session +} + +impl<H, C> DigestServer<H, C> +where + H: DigestInit<Sha2_256, OwnedContext = C>, + C: DigestOp<Controller = H>, +{ + fn init_session(&mut self) -> Result<(), Error> { + let controller = self.controller.take().ok_or(Error::Busy)?; + let context = controller.init_owned(Sha2_256)?; // ✅ Owned context + self.active_session = Some(context); // ✅ Store in server + Ok(()) + } + + fn update_session(&mut self, data: &[u8]) -> Result<(), Error> { + let context = self.active_session.take().ok_or(Error::NoSession)?; + let updated_context = context.update(data)?; // ✅ Move-based update + self.active_session = Some(updated_context); // ✅ Store updated state + Ok(()) + } + + fn finalize_session(&mut self) -> Result<C::Output, Error> { + let context = self.active_session.take().ok_or(Error::NoSession)?; + let (digest, controller) = context.finalize()?; + self.controller = Some(controller); // ✅ Controller recovery + Ok(digest) + } +} +``` + +#### Key Benefits of the Move-Based Solution + +1. **True Streaming Support**: Contexts can be stored and updated incrementally +2. **Session Isolation**: Each session owns its context independently +3. **Resource Recovery**: `cancel()` method allows controller recovery +4. 
**Rust Ownership Safety**: Move semantics prevent use-after-finalize +5. **Backward Compatibility**: Scoped API remains unchanged for simple use cases + +#### Implementation Examples + +**Session-Based Streaming (Now Possible)**: +```rust +// SPDM certificate chain verification with streaming +let session_id = digest_server.init_sha256()?; + +for cert_chunk in certificate_chain.chunks(1024) { + digest_server.update(session_id, cert_chunk)?; +} + +let cert_digest = digest_server.finalize_sha256(session_id)?; +``` + +**One-Shot Operations (Still Supported)**: +```rust +// Simple hash computation using scoped API +let digest = digest_device.compute_sha256(complete_data)?; +``` + +#### Current Implementation Status + +The dual API solution is **fully implemented and working**: + +- ✅ **Scoped API**: Original lifetime-constrained API for simple operations +- ✅ **Owned API**: New move-based API enabling persistent sessions +- ✅ **Mock Implementation**: Both APIs implemented in baremetal mock platform +- ✅ **Comprehensive Testing**: Session storage patterns validated +- ✅ **Documentation**: Complete analysis comparing both approaches + +#### Architectural Resolution + +The dual API approach resolves all original limitations: + +1. ✅ **Session-based streaming is now possible** with the owned API +2. ✅ **Both one-shot and streaming operations supported** via appropriate API choice +3. ✅ **Design document architecture is now implementable** using owned contexts +4. ✅ **Streaming large data sets fully supported** with persistent session state + +This demonstrates how API design evolution can solve fundamental architectural constraints while maintaining backward compatibility. The move-based resource management pattern provides the persistent contexts needed for server applications while preserving the simplicity of scoped operations for basic use cases. 
diff --git a/docs/src/rust-trait-to-idl-conversion b/docs/src/rust-trait-to-idl-conversion new file mode 100644 index 0000000..7be7022 --- /dev/null +++ b/docs/src/rust-trait-to-idl-conversion @@ -0,0 +1,670 @@ +# Converting Rust HAL Traits to Idol Interfaces: A Practical Guide + +## Overview + +This guide explains how to transform Rust Hardware Abstraction Layer (HAL) traits into Idol interface definitions for use in Hubris-based systems. Based on practical experience converting the digest traits, this guide covers the key patterns, challenges, and solutions. + +## Table of Contents + +1. [Understanding the Transformation](#understanding-the-transformation) +2. [Core Design Patterns](#core-design-patterns) +3. [Step-by-Step Conversion Process](#step-by-step-conversion-process) +4. [Common Challenges and Solutions](#common-challenges-and-solutions) +5. [Type System Considerations](#type-system-considerations) +6. [Error Handling Patterns](#error-handling-patterns) +7. [Performance Considerations](#performance-considerations) +8. 
[Testing and Validation](#testing-and-validation) + +## Understanding the Transformation + +### From Trait-Based to IPC-Based + +**Rust HAL traits** provide compile-time polymorphism with: +- Associated types +- Lifetime parameters +- Generic parameters +- Zero-cost abstractions +- Direct memory access + +**Idol interfaces** provide runtime communication with: +- Concrete types +- Message passing +- Serialization boundaries +- Process isolation +- Memory leases for data transfer + +### Key Conceptual Shifts + +| Rust Trait Concept | Idol Equivalent | Transformation Strategy | +|-------------------|-----------------|------------------------| +| `&mut self` methods | Session-based operations | Use session IDs | +| Associated types | Concrete types | Define enums/structs | +| Lifetimes | Ownership transfer | Memory leases | +| Generic parameters | Multiple operations | One operation per type | +| Zero-cost abstractions | IPC overhead | Optimize message structure | + +## Core Design Patterns + +### 1. Session-Based State Management + +**Problem**: Rust traits use `&mut self` for stateful operations. +**Solution**: Use session IDs to track state across IPC boundaries. + +```rust +// Original Trait +pub trait DigestOp: ErrorType { + fn update(&mut self, input: &[u8]) -> Result<(), Self::Error>; + fn finalize(self) -> Result; +} +``` + +```ron +// Idol Interface +Interface( + name: "Digest", + ops: { + "init_sha256": ( + reply: Result(ok: "u32", err: CLike("DigestError")), // Returns session ID + ), + "update": ( + args: { "session_id": "u32", "len": "u32" }, + leases: { "data": (type: "[u8]", read: true, max_len: Some(1024)) }, + reply: Result(ok: "()", err: CLike("DigestError")), + ), + "finalize_sha256": ( + args: { "session_id": "u32" }, + leases: { "digest_out": (type: "[u32; 8]", write: true) }, + reply: Result(ok: "()", err: CLike("DigestError")), + ), + }, +) +``` + +### 2. Generic Type Expansion + +**Problem**: Rust traits use generics to support multiple types. 
+**Solution**: Create separate operations for each concrete type. + +```rust +// Original Generic Trait +pub trait DigestInit: ErrorType { + fn init(&mut self, params: T) -> Result, Self::Error>; +} +``` + +```ron +// Idol Interface - Expanded Operations +"init_sha256": (/* ... */), +"init_sha384": (/* ... */), +"init_sha512": (/* ... */), +"init_sha3_256": (/* ... */), +// etc. +``` + +### 3. Memory Lease Patterns + +**Problem**: Rust uses references and slices for zero-copy operations. +**Solution**: Use Idol memory leases for efficient data transfer. + +| Rust Pattern | Idol Lease Pattern | Use Case | +|-------------|-------------------|----------| +| `&[u8]` | `read: true` | Input data | +| `&mut [u8]` | `write: true` | Output buffers | +| `&T` | `read: true` | Configuration structs | +| `&mut T` | `write: true` | Result structs | + +### 4. Error Type Consolidation + +**Problem**: Traits use associated error types and generic error handling. +**Solution**: Define comprehensive concrete error enums. + +```rust +// Original - Generic Error +pub trait ErrorType { + type Error: Error; +} + +pub trait Error: core::fmt::Debug { + fn kind(&self) -> ErrorKind; +} +``` + +```rust +// Idol - Concrete Error Enum +#[derive(Copy, Clone, Debug, FromPrimitive, Eq, PartialEq, IdolError, counters::Count)] +#[repr(u32)] +pub enum DigestError { + InvalidInputLength = 1, + UnsupportedAlgorithm = 2, + // ... comprehensive error cases + #[idol(server_death)] + ServerRestarted = 100, +} +``` + +## Step-by-Step Conversion Process + +### Step 1: Analyze the Original Trait + +1. **Identify State Management Patterns** + - Methods that take `&mut self` → Need session management + - Methods that consume `self` → Need session cleanup + - Associated types → Need concrete type definitions + +2. **Map Data Flow** + - Input parameters → Idol args + read leases + - Output parameters → Idol return values + write leases + - Mutable references → Write leases + +3. 
**Catalog Error Cases** + - Collect all possible error conditions + - Map generic `ErrorKind` to specific error variants + +### Step 2: Design the Idol Interface + +1. **Create the IDL File** + ```bash + mkdir -p hubris/idl/ + touch hubris/idl/my_trait.idol + ``` + +2. **Define Operations Structure** + ```ron + Interface( + name: "MyTrait", + ops: { + // Initialization operations + "init_*": (/* ... */), + + // State manipulation operations + "operation_*": (/* ... */), + + // Cleanup operations + "reset": (/* ... */), + + // Convenience operations + "oneshot_*": (/* ... */), + }, + ) + ``` + +3. **Design Session Management** + - Use `u32` session IDs + - Return session ID from init operations + - Pass session ID to subsequent operations + +### Step 3: Create the API Package + +1. **Directory Structure** + ``` + hubris/drv/my-trait-api/ + ├── Cargo.toml + ├── build.rs + └── src/ + └── lib.rs + ``` + +2. **Configure Cargo.toml** + ```toml + [package] + name = "drv-my-trait-api" + version = "0.1.0" + edition = "2021" + + [dependencies] + idol-runtime.workspace = true + num-traits.workspace = true + zerocopy.workspace = true + zerocopy-derive.workspace = true + counters = { path = "../../lib/counters" } + derive-idol-err = { path = "../../lib/derive-idol-err" } + userlib = { path = "../../sys/userlib" } + + [build-dependencies] + idol.workspace = true + + [lib] + test = false + doctest = false + bench = false + + [lints] + workspace = true + ``` + +3. **Create build.rs** + ```rust + fn main() -> Result<(), Box> { + idol::client::build_client_stub("../../idl/my_trait.idol", "client_stub.rs")?; + Ok(()) + } + ``` + +### Step 4: Implement Type Definitions + +1. 
**Create Zerocopy-Compatible Types** + ```rust + #[derive( + Copy, Clone, Debug, PartialEq, Eq, + zerocopy::IntoBytes, + zerocopy::FromBytes, + zerocopy::Immutable, + zerocopy::KnownLayout, + )] + #[repr(C, packed)] // Use packed for complex structs + pub struct MyConfig { + pub field1: u32, + pub field2: u8, + // Avoid bool - use u8 instead + pub enabled: u8, + } + ``` + +2. **Define Error Types** + ```rust + #[derive( + Copy, Clone, Debug, FromPrimitive, Eq, PartialEq, + IdolError, counters::Count, + )] + #[repr(u32)] + pub enum MyTraitError { + // Map from original ErrorKind + InvalidInput = 1, + HardwareFailure = 2, + // ... + #[idol(server_death)] + ServerRestarted = 100, + } + ``` + +3. **Create Enum Types for IPC** + ```rust + #[derive( + Copy, Clone, Debug, PartialEq, Eq, + zerocopy::IntoBytes, + zerocopy::Immutable, + zerocopy::KnownLayout, + FromPrimitive, + )] + #[repr(u32)] // Use u32 for enums + pub enum MyAlgorithm { + Algorithm1 = 0, + Algorithm2 = 1, + } + ``` + +### Step 5: Handle Memory Management + +1. **Input Data Patterns** + ```ron + "process_data": ( + args: { "len": "u32" }, + leases: { + "input_data": (type: "[u8]", read: true, max_len: Some(4096)), + }, + ), + ``` + +2. **Output Data Patterns** + ```ron + "get_result": ( + args: { "session_id": "u32" }, + leases: { + "output_buffer": (type: "[u8]", write: true, max_len: Some(1024)), + }, + ), + ``` + +3. **Configuration Patterns** + ```ron + "configure": ( + args: { "session_id": "u32" }, + leases: { + "config": (type: "MyConfig", read: true), + }, + ), + ``` + +## Common Challenges and Solutions + +### Challenge 1: Associated Types + +**Problem**: Rust traits use associated types for flexibility. +```rust +pub trait DigestAlgorithm { + const OUTPUT_BITS: usize; + type Digest; +} +``` + +**Solution**: Define concrete types and use constants. 
+```rust +pub const SHA256_WORDS: usize = 8; +pub type Sha256Digest = DigestOutput; + +#[repr(C)] +pub struct DigestOutput { + pub value: [u32; N], +} +``` + +### Challenge 2: Lifetime Parameters + +**Problem**: Rust contexts have lifetime dependencies. +```rust +pub trait DigestInit: ErrorType { + type OpContext<'a>: DigestOp where Self: 'a; + fn init<'a>(&'a mut self, params: T) -> Result, Self::Error>; +} +``` + +**Solution**: Replace with session-based state management. +```rust +// Server maintains context mapping +struct DigestServer { + contexts: HashMap, + next_session_id: u32, +} +``` + +### Challenge 3: Generic Methods + +**Problem**: Single generic method supports multiple types. +```rust +fn process(&mut self, data: &[u8], algo: T) -> Result; +``` + +**Solution**: Create type-specific operations. +```ron +"process_sha256": (/* ... */), +"process_sha384": (/* ... */), +"process_aes": (/* ... */), +``` + +### Challenge 4: Complex Return Types + +**Problem**: Rust can return complex generic types. +```rust +fn finalize(self) -> Result; +``` + +**Solution**: Use output leases for complex types. +```ron +"finalize": ( + args: { "session_id": "u32" }, + leases: { "result": (type: "MyResult", write: true) }, + reply: Result(ok: "()", err: CLike("MyError")), +), +``` + +## Type System Considerations + +### Zerocopy Compatibility + +All types used in Idol interfaces must be zerocopy-compatible: + +```rust +// ✅ Good - Zerocopy compatible +#[derive(zerocopy::IntoBytes, zerocopy::FromBytes, zerocopy::Immutable)] +#[repr(C)] +pub struct GoodConfig { + pub value: u32, + pub enabled: u8, // Not bool! 
+ pub _padding: [u8; 3], // Explicit padding +} + +// ❌ Bad - Not zerocopy compatible +pub struct BadConfig { + pub value: u32, + pub enabled: bool, // bool doesn't implement FromBytes + pub data: Vec, // Dynamic allocation +} +``` + +### Enum Representations + +```rust +// ✅ Good - Use u32 for enums +#[derive(FromPrimitive)] +#[repr(u32)] +pub enum MyEnum { + Variant1 = 0, + Variant2 = 1, +} + +// ❌ Bad - u8 enums with FromBytes need 256 variants +#[repr(u8)] +pub enum SmallEnum { + A = 0, + B = 1, // Only 2 variants - FromBytes won't work +} +``` + +### Padding and Alignment + +```rust +// ✅ Good - Use packed for complex layouts +#[repr(C, packed)] +pub struct PackedStruct { + pub field1: u8, + pub field2: u32, // No padding issues +} + +// ✅ Good - Manual padding control +#[repr(C)] +pub struct PaddedStruct { + pub field1: u8, + pub _pad: [u8; 3], // Explicit padding + pub field2: u32, +} +``` + +## Error Handling Patterns + +### Comprehensive Error Mapping + +Map all possible error conditions from the original trait: + +```rust +// Original trait error kinds +pub enum ErrorKind { + InvalidInputLength, + UnsupportedAlgorithm, + HardwareFailure, + // ... 
+} + +// Idol error enum - comprehensive mapping +#[derive(Copy, Clone, Debug, FromPrimitive, IdolError, counters::Count)] +#[repr(u32)] +pub enum MyTraitError { + // Map each ErrorKind to a specific variant + InvalidInputLength = 1, + UnsupportedAlgorithm = 2, + HardwareFailure = 3, + + // Add IPC-specific errors + InvalidSession = 10, + TooManySessions = 11, + + // Required for Hubris + #[idol(server_death)] + ServerRestarted = 100, +} +``` + +### Error Context Preservation + +```rust +// Add context-specific error variants +pub enum MyTraitError { + // Operation-specific errors + InitializationFailed = 20, + UpdateFailed = 21, + FinalizationFailed = 22, + + // Resource-specific errors + OutOfMemory = 30, + BufferTooSmall = 31, + InvalidConfiguration = 32, +} +``` + +## Performance Considerations + +### Minimize Message Overhead + +1. **Batch Operations**: Combine related parameters into single calls + ```ron + // ✅ Good - Single call with all parameters + "configure_and_start": ( + args: { + "algorithm": "MyAlgorithm", + "buffer_size": "u32", + "timeout_ms": "u32", + }, + ), + + // ❌ Bad - Multiple round trips + "set_algorithm": (args: {"algo": "MyAlgorithm"}), + "set_buffer_size": (args: {"size": "u32"}), + "set_timeout": (args: {"timeout": "u32"}), + "start": (), + ``` + +2. **Efficient Data Transfer**: Use appropriate lease sizes + ```ron + leases: { + // Size limits based on expected usage + "small_data": (type: "[u8]", read: true, max_len: Some(256)), + "large_data": (type: "[u8]", read: true, max_len: Some(4096)), + } + ``` + +### Memory Lease Optimization + +1. **Right-size Buffers**: Don't over-allocate +2. **Reuse Sessions**: Avoid constant init/cleanup +3. **Batch Updates**: Process multiple chunks in one call when possible + +## Testing and Validation + +### Build Verification + +1. **ARM Target Build**: + ```bash + cargo build -p drv-my-trait-api --target thumbv7em-none-eabihf + ``` + +2. 
**Generated Code Inspection**: + ```bash + ls target/thumbv7em-none-eabihf/debug/build/drv-my-trait-api*/out/ + head -50 target/thumbv7em-none-eabihf/debug/build/drv-my-trait-api*/out/client_stub.rs + ``` + +### API Surface Validation + +1. **Check Generated Operations**: Verify all expected operations are present +2. **Type Safety**: Ensure all types compile correctly +3. **Error Handling**: Verify error propagation works + +### Integration Testing + +1. **Mock Server**: Create a simple server implementation +2. **Client Testing**: Test all operation patterns +3. **Error Scenarios**: Test error handling paths + +## Example: Complete Conversion + +Here's a complete example showing the transformation of a simple trait: + +### Original Rust Trait + +```rust +pub trait Crypto: ErrorType { + type Algorithm: CryptoAlgorithm; + type Context<'a>: CryptoOp where Self: 'a; + + fn init<'a>(&'a mut self, algo: Self::Algorithm) -> Result, Self::Error>; +} + +pub trait CryptoOp: ErrorType { + type Output; + fn process(&mut self, data: &[u8]) -> Result<(), Self::Error>; + fn finalize(self) -> Result; +} +``` + +### Converted Idol Interface + +```ron +Interface( + name: "Crypto", + ops: { + "init_aes": ( + reply: Result(ok: "u32", err: CLike("CryptoError")), + ), + "init_chacha": ( + reply: Result(ok: "u32", err: CLike("CryptoError")), + ), + "process": ( + args: { "session_id": "u32", "len": "u32" }, + leases: { "data": (type: "[u8]", read: true, max_len: Some(1024)) }, + reply: Result(ok: "()", err: CLike("CryptoError")), + ), + "finalize_aes": ( + args: { "session_id": "u32" }, + leases: { "output": (type: "[u8; 16]", write: true) }, + reply: Result(ok: "()", err: CLike("CryptoError")), + ), + "finalize_chacha": ( + args: { "session_id": "u32" }, + leases: { "output": (type: "[u8; 32]", write: true) }, + reply: Result(ok: "()", err: CLike("CryptoError")), + ), + }, +) +``` + +### API Package Implementation + +```rust +// drv/crypto-api/src/lib.rs +#![no_std] + +use 
derive_idol_err::IdolError; +use userlib::{sys_send, FromPrimitive}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, zerocopy::IntoBytes, zerocopy::Immutable, FromPrimitive)] +#[repr(u32)] +pub enum CryptoAlgorithm { + Aes = 0, + ChaCha = 1, +} + +#[derive(Copy, Clone, Debug, FromPrimitive, Eq, PartialEq, IdolError, counters::Count)] +#[repr(u32)] +pub enum CryptoError { + InvalidInput = 1, + InvalidSession = 2, + HardwareFailure = 3, + #[idol(server_death)] + ServerRestarted = 100, +} + +include!(concat!(env!("OUT_DIR"), "/client_stub.rs")); +``` + +## Conclusion + +Converting Rust HAL traits to Idol interfaces requires careful consideration of: + +1. **State Management**: Sessions instead of lifetimes +2. **Type Systems**: Concrete types instead of generics +3. **Memory Management**: Leases instead of references +4. **Error Handling**: Comprehensive concrete error enums +5. **Performance**: Efficient message design + +The key is to preserve the semantic meaning and safety guarantees of the original trait while adapting to the constraints and patterns of the Hubris IPC system. + +By following these patterns and guidelines, you can successfully transform complex Rust HAL traits into efficient, type-safe Idol interfaces that maintain the robustness and performance characteristics expected in embedded systems. diff --git a/hal/blocking/Cargo.toml b/hal/blocking/Cargo.toml index 6671e98..42b65f2 100644 --- a/hal/blocking/Cargo.toml +++ b/hal/blocking/Cargo.toml @@ -8,4 +8,5 @@ description = "Blocking/synchronous HAL traits for OpenPRoT" license = "Apache-2.0" [dependencies] -embedded-hal = "1.0" \ No newline at end of file +embedded-hal = "1.0" +zerocopy = { workspace = true } \ No newline at end of file diff --git a/hal/blocking/src/digest.rs b/hal/blocking/src/digest.rs new file mode 100644 index 0000000..742b368 --- /dev/null +++ b/hal/blocking/src/digest.rs @@ -0,0 +1,934 @@ +// Licensed under the Apache-2.0 license + +//! # Digest HAL Traits +//! +//! 
This module provides blocking/synchronous Hardware Abstraction Layer (HAL) traits +//! for cryptographic digest operations. It defines a common interface for hash functions +//! and message authentication codes that can be implemented by various hardware and +//! software backends. +//! +//! ## API Evolution +//! +//! This module provides two complementary APIs: +//! +//! ### Scoped API (Current) +//! - **Use case**: One-shot operations, simple baremetal applications +//! - **Pattern**: Borrowed contexts with lifetime constraints +//! - **Benefits**: Minimal overhead, direct hardware mapping +//! - **Limitations**: Cannot store contexts, no persistent sessions +//! +//! ### Owned API (New - Move-based Resource Management) +//! - **Use case**: Server applications, persistent sessions, IPC boundaries +//! - **Pattern**: Owned contexts with resource recovery +//! - **Benefits**: Persistent storage, multiple concurrent contexts, IPC-safe +//! - **Limitations**: Slightly more complex ownership model +//! +//! ## Key Components +//! +//! - [`Digest`] - A generic container for digest output values +//! - [`DigestAlgorithm`] - Trait defining digest algorithm properties +//! +//! ### Scoped API +//! - [`scoped::DigestInit`] - Trait for initializing digest operations (borrowed contexts) +//! - [`scoped::DigestOp`] - Trait for performing digest computations (borrowed contexts) +//! - [`scoped::DigestCtrlReset`] - Trait for resetting digest contexts +//! +//! ### Owned API (Typestate) +//! - [`owned::DigestInit`] - Trait for initializing digest operations (owned contexts) +//! - [`owned::DigestOp`] - Trait for performing digest computations (owned contexts) +//! +//! ## Supported Algorithms +//! +//! This module includes support for: +//! - SHA-2 family: SHA-256, SHA-384, SHA-512 +//! - SHA-3 family: SHA3-224, SHA3-256, SHA3-384, SHA3-512 +//! +//! ## Example Usage +//! +//! ### Scoped API (Traditional) +//! ```rust,no_run +//! # use openprot_hal_blocking::digest::*; +//! 
# use openprot_hal_blocking::digest::scoped::*; +//! # struct MyDigestImpl; +//! # impl ErrorType for MyDigestImpl { type Error = core::convert::Infallible; } +//! # impl DigestInit for MyDigestImpl { +//! # type OpContext<'a> = MyContext<'a> where Self: 'a; +//! # type Output = Digest<8>; +//! # fn init<'a>(&'a mut self, _: Sha2_256) -> Result, Self::Error> { todo!() } +//! # } +//! # struct MyContext<'a>(&'a mut MyDigestImpl); +//! # impl ErrorType for MyContext<'_> { type Error = core::convert::Infallible; } +//! # impl DigestOp for MyContext<'_> { +//! # type Output = Digest<8>; +//! # fn update(&mut self, _: &[u8]) -> Result<(), Self::Error> { Ok(()) } +//! # fn finalize(self) -> Result { +//! # Ok(Digest { value: [0u32; 8] }) +//! # } +//! # } +//! let mut hasher = MyDigestImpl; +//! let mut ctx = hasher.init(Sha2_256)?; +//! ctx.update(b"hello world")?; +//! let digest = ctx.finalize()?; +//! # Ok::<(), core::convert::Infallible>(()) +//! ``` +//! +//! ### Owned API (Move-based - for servers/sessions) +//! ```rust,no_run +//! # use openprot_hal_blocking::digest::*; +//! # use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; +//! # struct MyDigestController; +//! # impl ErrorType for MyDigestController { type Error = core::convert::Infallible; } +//! # impl DigestInit for MyDigestController { +//! # type Context = MyOwnedContext; +//! # type Output = Digest<8>; +//! # fn init(self, _: Sha2_256) -> Result { todo!() } +//! # } +//! # struct MyOwnedContext; +//! # impl ErrorType for MyOwnedContext { type Error = core::convert::Infallible; } +//! # impl DigestOp for MyOwnedContext { +//! # type Output = Digest<8>; +//! # type Controller = MyDigestController; +//! # fn update(self, _: &[u8]) -> Result { todo!() } +//! # fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error> { todo!() } +//! # fn cancel(self) -> Self::Controller { todo!() } +//! # } +//! let controller = MyDigestController; +//! 
let context = controller.init(Sha2_256)?; +//! let context = context.update(b"hello world")?; +//! let (digest, recovered_controller) = context.finalize()?; +//! // Controller can be reused for new operations +//! # Ok::<(), core::convert::Infallible>(()) +//! ``` + +use core::fmt::Debug; +use core::result::Result; +use zerocopy::{FromBytes, Immutable, IntoBytes}; + +/// A generic digest output container. +/// +/// This structure represents the output of a cryptographic digest operation. +/// It uses a const generic parameter `N` to specify the number of 32-bit words +/// in the digest output, allowing it to accommodate different digest sizes. +/// +/// The structure is marked with `#[repr(C)]` to ensure a predictable memory layout, +/// making it suitable for zero-copy operations and hardware interfaces. +/// +/// ## Integration Benefits +/// +/// The `Digest` type solves several critical integration challenges: +/// +/// ### 1. Concrete vs Opaque Types +/// Unlike opaque associated types (`type Output: IntoBytes`), `Digest` provides +/// a **concrete type** that generic code can work with directly: +/// +/// ```rust +/// # use openprot_hal_blocking::digest::Digest; +/// // ✅ CONCRETE: We know exactly what this is +/// fn process_digest(digest: Digest<8>) -> [u32; 8] { +/// digest.into_array() // Safe, direct conversion +/// } +/// +/// // ❌ OPAQUE: We don't know what D::Output actually is +/// // fn process_generic(output: D::Output) -> /* Unknown type */ { +/// // // Cannot convert to [u32; 8] safely +/// // } +/// ``` +/// +/// ### 2. 
Safe Type Conversions
+/// Provides safe methods to access underlying data without unsafe code:
+///
+/// ```rust
+/// # use openprot_hal_blocking::digest::Digest;
+/// let digest = Digest::<8> { value: [1, 2, 3, 4, 5, 6, 7, 8] };
+///
+/// // Safe conversions - no unsafe code needed
+/// let array: [u32; 8] = digest.into_array();      // Owned conversion
+/// let array_ref: &[u32; 8] = digest.as_array();   // Borrowed conversion
+/// let bytes: &[u8] = digest.as_bytes();           // Byte slice access
+/// ```
+///
+/// ### 3. IPC Integration
+/// Designed specifically for Hubris IPC leased memory operations:
+///
+/// ```rust,no_run
+/// # use openprot_hal_blocking::digest::Digest;
+/// # struct Leased<T, U>(core::marker::PhantomData<(T, U)>);
+/// # impl<T, U> Leased<T, U> { fn write(&self, data: U) -> Result<(), ()> { Ok(()) } }
+/// # let digest_out: Leased<(), [u32; 8]> = Leased(core::marker::PhantomData);
+/// # let digest = Digest::<8> { value: [0; 8] };
+/// // Direct write to IPC lease - no conversion needed
+/// digest_out.write(digest.into_array())?;
+/// # Ok::<(), ()>(())
+/// ```
+///
+/// ### 4. Server Application Support
+/// Enables servers to store and manipulate digest results safely:
+///
+/// ```rust
+/// # use openprot_hal_blocking::digest::Digest;
+/// struct DigestCache {
+///     sha256_results: Vec<Digest<8>>,   // Can store concrete types
+///     sha384_results: Vec<Digest<12>>,  // Different sizes supported
+/// }
+///
+/// impl DigestCache {
+///     fn store_sha256(&mut self, digest: Digest<8>) {
+///         self.sha256_results.push(digest); // Direct storage
+///     }
+///
+///     fn get_as_array(&self, index: usize) -> [u32; 8] {
+///         self.sha256_results[index].into_array() // Safe access
+///     }
+/// }
+/// ```
+///
+/// ### 5. 
Zero-Copy Operations
+/// Full zerocopy trait support enables efficient memory operations:
+///
+/// ```rust
+/// # use openprot_hal_blocking::digest::Digest;
+/// let digest = Digest::<8> { value: [1, 2, 3, 4, 5, 6, 7, 8] };
+///
+/// // Zero-copy byte access via zerocopy traits
+/// let bytes: &[u8] = zerocopy::IntoBytes::as_bytes(&digest);
+///
+/// // Safe transmutation between compatible layouts
+/// // (enabled by FromBytes + Immutable derives)
+/// ```
+///
+/// ## Comparison with Opaque Output Types
+///
+/// | Feature | `Digest<N>` (Concrete) | `D::Output` (Opaque) |
+/// |---------|-------------------------|----------------------|
+/// | **Type Known at Compile Time** | ✅ Always `Digest<N>` | ❌ Unknown until runtime |
+/// | **Safe Array Access** | ✅ `into_array()`, `as_array()` | ❌ Requires unsafe casting |
+/// | **IPC Integration** | ✅ Direct `[u32; N]` conversion | ❌ Complex type bridging |
+/// | **Server Storage** | ✅ Can store in structs | ❌ Difficult generic storage |
+/// | **Zero-Copy Support** | ✅ Full zerocopy traits | ❌ Implementation dependent |
+/// | **Embedded Friendly** | ✅ Known size, no allocation | ❌ Unknown size, complex |
+///
+/// # Type Parameters
+///
+/// * `N` - The number of 32-bit words in the digest output
+///
+/// # Examples
+///
+/// ```rust
+/// # use openprot_hal_blocking::digest::Digest;
+/// // A 256-bit digest (8 words of 32 bits each)
+/// let sha256_digest = Digest::<8> {
+///     value: [0x12345678, 0x9abcdef0, 0x11111111, 0x22222222,
+///             0x33333333, 0x44444444, 0x55555555, 0x66666666],
+/// };
+///
+/// // Safe conversion to array for IPC
+/// let array = sha256_digest.into_array();
+///
+/// // Access as bytes for serialization
+/// let bytes = sha256_digest.as_bytes();
+/// assert_eq!(bytes.len(), 32);
+/// ```
+#[derive(Copy, Clone, PartialEq, Eq, IntoBytes, FromBytes, Immutable)]
+#[repr(C)]
+pub struct Digest<const N: usize> {
+    /// The digest value as an array of 32-bit words
+    pub value: [u32; N],
+}
+
+impl<const N: usize> Digest<N> {
+    /// Create 
a new digest from an array of words
+    pub fn new(value: [u32; N]) -> Self {
+        Self { value }
+    }
+
+    /// Get the digest as an array of words
+    ///
+    /// This provides safe access to the underlying array without any conversions.
+    pub fn into_array(self) -> [u32; N] {
+        self.value
+    }
+
+    /// Get a reference to the digest as an array of words
+    pub fn as_array(&self) -> &[u32; N] {
+        &self.value
+    }
+
+    /// Get the digest as a byte slice
+    pub fn as_bytes(&self) -> &[u8] {
+        zerocopy::IntoBytes::as_bytes(self)
+    }
+}
+
+impl<const N: usize> AsRef<[u8]> for Digest<N> {
+    fn as_ref(&self) -> &[u8] {
+        zerocopy::IntoBytes::as_bytes(self)
+    }
+}
+
+/// Trait defining the properties of a cryptographic digest algorithm.
+///
+/// This trait provides compile-time information about digest algorithms,
+/// including their output size and associated digest type. It serves as
+/// a type-level specification that can be used with generic digest operations.
+///
+/// # Requirements
+///
+/// Implementing types must be `Copy` and `Debug` to support easy cloning
+/// and debugging capabilities.
+///
+/// # Examples
+///
+/// ```rust
+/// # use openprot_hal_blocking::digest::{DigestAlgorithm, Digest};
+/// # use core::fmt::Debug;
+/// #[derive(Clone, Copy, Debug)]
+/// struct MyCustomAlgorithm;
+///
+/// impl DigestAlgorithm for MyCustomAlgorithm {
+///     const OUTPUT_BITS: usize = 256;
+///     type Digest = Digest<8>; // 256 bits / 32 bits per word = 8 words
+/// }
+/// ```
+pub trait DigestAlgorithm: Copy + Debug {
+    /// The output size of the digest algorithm in bits.
+    ///
+    /// This constant defines the total number of bits in the digest output.
+    /// For example, SHA-256 would have `OUTPUT_BITS = 256`.
+    const OUTPUT_BITS: usize;
+
+    /// The digest output type for this algorithm.
+    ///
+    /// This associated type specifies the concrete digest type that will be
+    /// produced by this algorithm. Typically this will be a [`Digest`]
+    /// where `N` is calculated from `OUTPUT_BITS`. 
+ type Digest; +} + +/// SHA-256 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA-256 cryptographic hash algorithm, +/// which produces a 256-bit (32-byte) digest output. +/// +/// SHA-256 is part of the SHA-2 family and is widely used for cryptographic +/// applications requiring strong collision resistance. +#[derive(Clone, Copy, Debug)] +pub struct Sha2_256; +impl DigestAlgorithm for Sha2_256 { + const OUTPUT_BITS: usize = 256usize; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA-384 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA-384 cryptographic hash algorithm, +/// which produces a 384-bit (48-byte) digest output. +/// +/// SHA-384 is part of the SHA-2 family and provides a larger output size +/// than SHA-256 for applications requiring additional security margin. +#[derive(Clone, Copy, Debug)] +pub struct Sha2_384; +impl DigestAlgorithm for Sha2_384 { + const OUTPUT_BITS: usize = 384usize; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA-512 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA-512 cryptographic hash algorithm, +/// which produces a 512-bit (64-byte) digest output. +/// +/// SHA-512 is part of the SHA-2 family and provides the largest standard +/// output size, offering maximum collision resistance. +#[derive(Clone, Copy, Debug)] +pub struct Sha2_512; +impl DigestAlgorithm for Sha2_512 { + const OUTPUT_BITS: usize = 512; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA3-224 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA3-224 cryptographic hash algorithm, +/// which produces a 224-bit (28-byte) digest output. +/// +/// SHA3-224 is part of the SHA-3 (Keccak) family and offers an alternative +/// to SHA-2 with different underlying mathematical foundations. 
+#[derive(Clone, Copy, Debug)] +pub struct Sha3_224; +impl DigestAlgorithm for Sha3_224 { + const OUTPUT_BITS: usize = 224usize; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA3-256 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA3-256 cryptographic hash algorithm, +/// which produces a 256-bit (32-byte) digest output. +/// +/// SHA3-256 is part of the SHA-3 (Keccak) family and provides equivalent +/// security to SHA-256 with different algorithmic properties. +#[derive(Clone, Copy, Debug)] +pub struct Sha3_256; +impl DigestAlgorithm for Sha3_256 { + const OUTPUT_BITS: usize = 256usize; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA3-384 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA3-384 cryptographic hash algorithm, +/// which produces a 384-bit (48-byte) digest output. +/// +/// SHA3-384 is part of the SHA-3 (Keccak) family and provides equivalent +/// security to SHA-384 with different algorithmic properties. +#[derive(Clone, Copy, Debug)] +pub struct Sha3_384; +impl DigestAlgorithm for Sha3_384 { + const OUTPUT_BITS: usize = 384usize; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// SHA3-512 digest algorithm marker type. +/// +/// This zero-sized type represents the SHA3-512 cryptographic hash algorithm, +/// which produces a 512-bit (64-byte) digest output. +/// +/// SHA3-512 is part of the SHA-3 (Keccak) family and provides equivalent +/// security to SHA-512 with different algorithmic properties. +#[derive(Clone, Copy, Debug)] +pub struct Sha3_512; +impl DigestAlgorithm for Sha3_512 { + const OUTPUT_BITS: usize = 512; + type Digest = Digest<{ Self::OUTPUT_BITS / 32 }>; +} + +/// Error kind. +/// +/// This represents a common set of digest operation errors. Implementations are +/// free to define more specific or additional error types. However, by providing +/// a mapping to these common errors, generic code can still react to them. 
+#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[non_exhaustive] +pub enum ErrorKind { + /// The input data length is not valid for the hash function. + InvalidInputLength, + + /// The specified hash algorithm is not supported by the hardware or software implementation. + UnsupportedAlgorithm, + + /// Failed to allocate memory for the hash computation. + MemoryAllocationFailure, + + /// Failed to initialize the hash computation context. + InitializationError, + + /// Error occurred while updating the hash computation with new data. + UpdateError, + + /// Error occurred while finalizing the hash computation. + FinalizationError, + + /// The hardware accelerator is busy and cannot process the hash computation. + Busy, + + /// General hardware failure during hash computation. + HardwareFailure, + + /// The specified output size is not valid for the hash function. + InvalidOutputSize, + + /// Insufficient permissions to access the hardware or perform the hash computation. + PermissionDenied, + + /// The hash computation context has not been initialized. 
+ NotInitialized, +} + +impl core::fmt::Display for ErrorKind { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + match self { + Self::InvalidInputLength => write!(f, "invalid input data length"), + Self::UnsupportedAlgorithm => write!(f, "unsupported hash algorithm"), + Self::MemoryAllocationFailure => write!(f, "memory allocation failed"), + Self::InitializationError => write!(f, "failed to initialize hash computation"), + Self::UpdateError => write!(f, "error updating hash computation"), + Self::FinalizationError => write!(f, "error finalizing hash computation"), + Self::Busy => write!(f, "hardware accelerator is busy"), + Self::HardwareFailure => write!(f, "hardware failure during hash computation"), + Self::InvalidOutputSize => write!(f, "invalid output size for hash function"), + Self::PermissionDenied => write!(f, "insufficient permissions to access hardware"), + Self::NotInitialized => write!(f, "hash computation context not initialized"), + } + } +} + +/// Trait for digest operation errors. +/// +/// This trait provides a common interface for all error types that can occur +/// during digest operations. It allows for categorization of errors into +/// common types while still permitting implementation-specific error details. +/// +/// All digest error types must implement `Debug` for debugging purposes and +/// provide a method to convert to a generic [`ErrorKind`]. 
+/// +/// # Examples +/// +/// ```rust +/// # use openprot_hal_blocking::digest::{Error, ErrorKind}; +/// # use core::fmt::Debug; +/// #[derive(Debug)] +/// struct MyDigestError { +/// message: &'static str, +/// } +/// +/// impl Error for MyDigestError { +/// fn kind(&self) -> ErrorKind { +/// ErrorKind::HardwareFailure +/// } +/// } +/// ``` +pub trait Error: core::fmt::Debug { + /// Convert error to a generic error kind + /// + /// By using this method, errors freely defined by HAL implementations + /// can be converted to a set of generic errors upon which generic + /// code can act. + fn kind(&self) -> ErrorKind; +} + +impl Error for core::convert::Infallible { + fn kind(&self) -> ErrorKind { + match *self {} + } +} + +/// Trait for types that have an associated error type. +/// +/// This trait provides a standard way for digest-related types to specify +/// their error type. It's used throughout the digest HAL to maintain +/// type safety while allowing different implementations to use their own +/// specific error types. +/// +/// # Examples +/// +/// ```rust +/// # use openprot_hal_blocking::digest::{ErrorType, Error, ErrorKind}; +/// # use core::fmt::Debug; +/// # #[derive(Debug)] +/// # struct MyError; +/// # impl Error for MyError { +/// # fn kind(&self) -> ErrorKind { ErrorKind::HardwareFailure } +/// # } +/// struct MyDigestDevice; +/// +/// impl ErrorType for MyDigestDevice { +/// type Error = MyError; +/// } +/// ``` +pub trait ErrorType { + /// Error type. + type Error: Error; +} + +/// Trait for initializing digest operations. +/// +/// This trait provides the interface for creating new digest computation contexts. +/// It is parameterized by a [`DigestAlgorithm`] type to ensure type safety and +/// allow different algorithms to have different initialization parameters. 
+///
+/// # Type Parameters
+///
+/// * `T` - The digest algorithm type that implements [`DigestAlgorithm`]
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// # use openprot_hal_blocking::digest::*;
+/// # struct MyDigestImpl;
+/// # impl ErrorType for MyDigestImpl { type Error = core::convert::Infallible; }
+/// # impl DigestInit<Sha2_256> for MyDigestImpl {
+/// #     type OpContext<'a> = MyContext<'a> where Self: 'a;
+/// #     type Output = Digest<8>;
+/// #     fn init<'a>(&'a mut self, _: Sha2_256) -> Result<Self::OpContext<'a>, Self::Error> { todo!() }
+/// # }
+/// # struct MyContext<'a>(&'a mut MyDigestImpl);
+/// # impl ErrorType for MyContext<'_> { type Error = core::convert::Infallible; }
+/// # impl DigestOp for MyContext<'_> {
+/// #     type Output = Digest<8>;
+/// #     fn update(&mut self, _: &[u8]) -> Result<(), Self::Error> { Ok(()) }
+/// #     fn finalize(self) -> Result<Self::Output, Self::Error> {
+/// #         Ok(Digest { value: [0u32; 8] })
+/// #     }
+/// # }
+/// let mut device = MyDigestImpl;
+/// let context = device.init(Sha2_256)?;
+/// # Ok::<(), core::convert::Infallible>(())
+/// ```
+pub trait DigestInit<T: DigestAlgorithm>: ErrorType {
+    /// The operation context type that will handle the digest computation.
+    ///
+    /// This associated type represents the stateful context returned by [`init`](Self::init)
+    /// that can be used to perform the actual digest operations via [`DigestOp`].
+    /// The lifetime parameter ensures the context cannot outlive the device that created it.
+    type OpContext<'a>: DigestOp
+    where
+        Self: 'a;
+
+    /// The output type produced by this digest implementation.
+    ///
+    /// This type must implement [`IntoBytes`] to allow conversion to byte arrays
+    /// for interoperability with other systems and zero-copy operations.
+    type Output: IntoBytes;
+
+    /// Init instance of the crypto function with the given context.
+    ///
+    /// # Parameters
+    ///
+    /// - `init_params`: The context or configuration parameters for the crypto function.
+    ///
+    /// # Returns
+    ///
+    /// A new instance of the hash function. 
+    fn init(&mut self, init_params: T) -> Result<Self::OpContext<'_>, Self::Error>;
+}
+
+/// Trait for resetting digest computation contexts.
+///
+/// This trait provides the ability to reset a digest device or context back to
+/// its initial state, allowing it to be reused for new digest computations
+/// without needing to create a new instance.
+///
+/// # Examples
+///
+/// ```rust,no_run
+/// # use openprot_hal_blocking::digest::*;
+/// # struct MyDigestImpl;
+/// # impl ErrorType for MyDigestImpl { type Error = core::convert::Infallible; }
+/// # impl DigestCtrlReset for MyDigestImpl {
+/// #     fn reset(&mut self) -> Result<(), Self::Error> { Ok(()) }
+/// # }
+/// let mut device = MyDigestImpl;
+/// device.reset()?;
+/// # Ok::<(), core::convert::Infallible>(())
+/// ```
+pub trait DigestCtrlReset: ErrorType {
+    /// Reset instance to its initial state.
+    ///
+    /// # Returns
+    ///
+    /// A `Result` indicating success or failure. On success, returns `Ok(())`. On failure, returns a `CryptoError`.
+    fn reset(&mut self) -> Result<(), Self::Error>;
+}
+
+/// Trait for performing digest operations.
+///
+/// This trait provides the core interface for digest computation operations:
+/// updating the digest state with input data and finalizing the computation
+/// to produce the digest output.
+///
+/// This trait is typically implemented by context types returned from
+/// [`DigestInit::init`] and represents an active digest computation.
+///
+/// # State Machine
+///
+/// Digest operations follow a simple state machine:
+/// 1. **Update**: Call [`update`](Self::update) zero or more times with input data
+/// 2. **Finalize**: Call [`finalize`](Self::finalize) once to produce the final digest
+///
+/// After finalization, the context is consumed and cannot be reused. 
+/// +/// # Examples +/// +/// ```rust,no_run +/// # use openprot_hal_blocking::digest::*; +/// # struct MyContext; +/// # impl ErrorType for MyContext { type Error = core::convert::Infallible; } +/// # impl DigestOp for MyContext { +/// # type Output = Digest<8>; +/// # fn update(&mut self, _: &[u8]) -> Result<(), Self::Error> { Ok(()) } +/// # fn finalize(self) -> Result { +/// # Ok(Digest { value: [0u32; 8] }) +/// # } +/// # } +/// let mut context = MyContext; +/// context.update(b"hello")?; +/// context.update(b" world")?; +/// let digest = context.finalize()?; +/// # Ok::<(), core::convert::Infallible>(()) +/// ``` +pub trait DigestOp: ErrorType { + /// The digest output type. + /// + /// This type represents the final digest value produced by [`finalize`](Self::finalize). + /// It must implement [`IntoBytes`] to enable zero-copy conversion to byte arrays. + type Output: IntoBytes; + + /// Update state using provided input data. + /// + /// # Parameters + /// + /// - `input`: The input data to be hashed. This can be any type that implements `AsRef<[u8]>`. + /// + /// # Returns + /// + /// A `Result` indicating success or failure. On success, returns `Ok(())`. On failure, returns a `CryptoError`. + fn update(&mut self, input: &[u8]) -> Result<(), Self::Error>; + + /// Finalize the computation and produce the output. + /// + /// # Parameters + /// + /// - `out`: A mutable slice to store the hash output. The length of the slice must be at least `MAX_OUTPUT_SIZE`. + /// + /// # Returns + /// + /// A `Result` indicating success or failure. On success, returns `Ok(())`. On failure, returns a `CryptoError`. + fn finalize(self) -> Result; +} + +pub mod scoped { + //! Scoped digest API with borrowed contexts (current) + //! + //! This module contains the original OpenPRoT HAL digest traits that use + //! borrowed contexts with lifetime constraints. These traits are ideal for: + //! - One-shot digest operations + //! - Simple embedded applications + //! 
- Direct hardware mapping + //! - Minimal memory overhead + //! + //! **Limitation**: Contexts cannot be stored or persist across function boundaries + //! due to lifetime constraints. + + pub use super::{DigestAlgorithm, DigestCtrlReset, DigestInit, DigestOp, ErrorType}; +} + +pub mod owned { + //! Owned digest API with move-based resource management + //! + //! This module provides a move-based digest API where contexts are owned + //! rather than borrowed. This enables: + //! - Persistent session storage + //! - Multiple concurrent contexts + //! - IPC boundary crossing + //! - Resource recovery patterns + //! - Compile-time prevention of use-after-finalize + //! + //! This API is specifically designed for server applications like Hubris + //! digest servers that need to maintain long-lived sessions. + + use super::{DigestAlgorithm, ErrorType, IntoBytes}; + use core::result::Result; + + /// Trait for initializing digest operations with owned contexts. + /// + /// This trait takes ownership of the controller and returns an owned context + /// that can be stored, moved, and persisted across function boundaries. + /// Unlike the scoped API, there are no lifetime constraints. 
+ /// + /// # Type Parameters + /// + /// * `T` - The digest algorithm type that implements [`DigestAlgorithm`] + /// + /// # Examples + /// + /// ```rust,no_run + /// # use openprot_hal_blocking::digest::*; + /// # use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + /// # struct MyController; + /// # impl ErrorType for MyController { type Error = core::convert::Infallible; } + /// # struct MyContext; + /// # impl ErrorType for MyContext { type Error = core::convert::Infallible; } + /// # impl DigestOp for MyContext { + /// # type Output = Digest<8>; + /// # type Controller = MyController; + /// # fn update(self, _: &[u8]) -> Result { Ok(self) } + /// # fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error> { todo!() } + /// # fn cancel(self) -> Self::Controller { todo!() } + /// # } + /// # impl DigestInit for MyController { + /// # type Context = MyContext; + /// # type Output = Digest<8>; + /// # fn init(self, _: Sha2_256) -> Result { todo!() } + /// # } + /// let controller = MyController; + /// let context = controller.init(Sha2_256)?; + /// // Context can be stored in structs, moved across functions, etc. + /// # Ok::<(), core::convert::Infallible>(()) + /// ``` + pub trait DigestInit: ErrorType + Sized { + /// The owned context type that will handle the digest computation. + /// + /// This context has no lifetime constraints and can be stored in structs, + /// moved between functions, and persisted across IPC boundaries. + type Context: DigestOp; + + /// The output type produced by this digest implementation. + /// + /// This type must implement [`IntoBytes`] to allow conversion to byte arrays + /// for interoperability with other systems and zero-copy operations. + type Output: IntoBytes; + + /// Initialize a new digest computation context. + /// + /// Takes ownership of the controller and returns an owned context. + /// The controller will be returned when the context is finalized or cancelled. 
+        ///
+        /// # Parameters
+        ///
+        /// - `init_params`: Algorithm-specific initialization parameters
+        ///
+        /// # Returns
+        ///
+        /// An owned context that can be used for digest operations.
+        fn init(self, init_params: T) -> Result<Self::Context, Self::Error>;
+    }
+
+    /// Trait for performing digest operations with owned contexts.
+    ///
+    /// This trait uses move semantics where each operation consumes the
+    /// context and returns a new context (for `update`) or the final result
+    /// with a recovered controller (for `finalize`/`cancel`).
+    ///
+    /// # Move-based Safety
+    ///
+    /// The move-based pattern provides compile-time guarantees:
+    /// - Cannot use a context after finalization
+    /// - Cannot finalize the same context twice
+    /// - Controller is always recovered for reuse
+    ///
+    /// # Examples
+    ///
+    /// ```rust,no_run
+    /// # use openprot_hal_blocking::digest::*;
+    /// # use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp};
+    /// # struct MyContext;
+    /// # impl ErrorType for MyContext { type Error = core::convert::Infallible; }
+    /// # struct MyController;
+    /// # impl DigestOp for MyContext {
+    /// #     type Output = Digest<8>;
+    /// #     type Controller = MyController;
+    /// #     fn update(self, _: &[u8]) -> Result<Self, Self::Error> { Ok(self) }
+    /// #     fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error> { todo!() }
+    /// #     fn cancel(self) -> Self::Controller { todo!() }
+    /// # }
+    /// # fn get_context() -> MyContext { todo!() }
+    /// let context = get_context(); // MyContext
+    /// let context = context.update(b"hello")?;
+    /// let context = context.update(b" world")?;
+    /// let (digest, controller) = context.finalize()?;
+    /// // Controller recovered for reuse
+    /// # Ok::<(), core::convert::Infallible>(())
+    /// ```
+    pub trait DigestOp: ErrorType + Sized {
+        /// The digest output type.
+        ///
+        /// This type represents the final digest value produced by [`finalize`](Self::finalize).
+        /// It must implement [`IntoBytes`] to enable zero-copy conversion to byte arrays.
+ type Output: IntoBytes; + + /// The controller type that will be recovered after finalization or cancellation. + /// + /// This enables resource recovery and reuse patterns essential for server applications. + type Controller; + + /// Update the digest state with input data. + /// + /// This method consumes the current context and returns a new context with + /// the updated state. This prevents use-after-update bugs at compile time + /// through move semantics. + /// + /// # Parameters + /// + /// - `data`: Input data to be processed by the digest algorithm + /// + /// # Returns + /// + /// A new context with updated state, or an error + fn update(self, data: &[u8]) -> Result; + + /// Finalize the digest computation and recover the controller. + /// + /// This method consumes the context and returns both the final digest output + /// and the original controller, enabling resource reuse. + /// + /// # Returns + /// + /// A tuple containing the digest output and the recovered controller + fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error>; + + /// Cancel the digest computation and recover the controller. + /// + /// This method cancels the current computation and returns the controller + /// in a clean state, ready for reuse. Unlike `finalize`, this cannot fail. 
+ /// + /// # Returns + /// + /// The recovered controller in a clean state + fn cancel(self) -> Self::Controller; + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_digest_output_conversions() { + // Test safe conversion methods on Digest type + let sha256_digest = Digest::<8> { + value: [1, 2, 3, 4, 5, 6, 7, 8], + }; + + // Test into_array() method + let array = sha256_digest.into_array(); + assert_eq!(array, [1, 2, 3, 4, 5, 6, 7, 8]); + + // Test as_array() method + let sha256_digest = Digest::<8> { + value: [1, 2, 3, 4, 5, 6, 7, 8], + }; + let array_ref = sha256_digest.as_array(); + assert_eq!(array_ref, &[1, 2, 3, 4, 5, 6, 7, 8]); + + // Test as_bytes() method + let bytes = sha256_digest.as_bytes(); + assert_eq!(bytes.len(), 32); // 8 words * 4 bytes each + + // Verify the bytes match the expected layout (little endian) + let expected_bytes = [ + 1, 0, 0, 0, // word 1 + 2, 0, 0, 0, // word 2 + 3, 0, 0, 0, // word 3 + 4, 0, 0, 0, // word 4 + 5, 0, 0, 0, // word 5 + 6, 0, 0, 0, // word 6 + 7, 0, 0, 0, // word 7 + 8, 0, 0, 0, // word 8 + ]; + assert_eq!(bytes, &expected_bytes); + } + + #[test] + fn test_output_type_sizes() { + use core::mem; + + // Verify that digest output types have correct sizes for IPC + assert_eq!(mem::size_of::>(), 32); // SHA-256: 8 words * 4 bytes + assert_eq!(mem::size_of::>(), 48); // SHA-384: 12 words * 4 bytes + assert_eq!(mem::size_of::>(), 64); // SHA-512: 16 words * 4 bytes + + // Test alignment requirements + assert_eq!(mem::align_of::>(), 4); // Aligned to u32 + } + + #[test] + fn test_digest_new_constructor() { + let array = [ + 0x12345678, 0x9abcdef0, 0x11111111, 0x22222222, 0x33333333, 0x44444444, 0x55555555, + 0x66666666, + ]; + let digest = Digest::new(array); + assert_eq!(digest.value, array); + assert_eq!(digest.into_array(), array); + } +} diff --git a/hal/blocking/src/lib.rs b/hal/blocking/src/lib.rs index 721a89f..85ed139 100644 --- a/hal/blocking/src/lib.rs +++ b/hal/blocking/src/lib.rs @@ -14,8 
+14,12 @@ #![forbid(unsafe_code)] #![deny(missing_docs)] +/// Cryptographic digest operations (hashing) +pub mod digest; /// Gpio port module pub mod gpio_port; +/// Message Authentication Code (MAC) traits and implementations +pub mod mac; /// Reset and clocking traits for OpenPRoT HAL pub mod system_control; diff --git a/hal/blocking/src/mac.rs b/hal/blocking/src/mac.rs new file mode 100644 index 0000000..ebec7c9 --- /dev/null +++ b/hal/blocking/src/mac.rs @@ -0,0 +1,169 @@ +// Licensed under the Apache-2.0 license + +use crate::digest::Digest; +use core::fmt::Debug; +use zerocopy::IntoBytes; + +/// Common error kinds for MAC operations (reused from digest operations). +#[derive(Debug, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Hash)] +#[non_exhaustive] +pub enum ErrorKind { + /// The input data length is not valid for the MAC function. + InvalidInputLength, + /// The specified MAC algorithm is not supported by the hardware or software implementation. + UnsupportedAlgorithm, + /// Failed to allocate memory for the MAC computation. + MemoryAllocationFailure, + /// Failed to initialize the MAC computation context. + InitializationError, + /// Error occurred while updating the MAC computation with new data. + UpdateError, + /// Error occurred while finalizing the MAC computation. + FinalizationError, + /// The hardware accelerator is busy and cannot process the MAC computation. + Busy, + /// General hardware failure during MAC computation. + HardwareFailure, + /// The specified output size is not valid for the MAC function. + InvalidOutputSize, + /// Insufficient permissions to access the hardware or perform the MAC computation. + PermissionDenied, + /// The MAC computation context has not been initialized. + NotInitialized, +} + +/// Trait for converting implementation-specific errors into a common error kind. +pub trait Error: Debug { + /// Returns a generic error kind corresponding to the specific error. 
+    fn kind(&self) -> ErrorKind;
+}
+
+impl Error for core::convert::Infallible {
+    fn kind(&self) -> ErrorKind {
+        match *self {}
+    }
+}
+
+/// Trait for types that associate with a specific error type.
+pub trait ErrorType {
+    /// The associated error type.
+    type Error: Error;
+}
+
+/// Trait representing a MAC algorithm and its output characteristics.
+pub trait MacAlgorithm: Copy + Debug {
+    /// The number of bits in the MAC output.
+    const OUTPUT_BITS: usize;
+
+    /// The type representing the MAC output.
+    type MacOutput: IntoBytes;
+
+    /// The type representing the key used for MAC computation.
+    type Key;
+}
+
+/// Trait for initializing a MAC operation for a specific algorithm.
+pub trait MacInit<A: MacAlgorithm>: ErrorType {
+    /// The type representing the operational context for the MAC.
+    type OpContext<'a>: MacOp
+    where
+        Self: 'a;
+
+    /// Initializes the MAC operation with the specified algorithm and key.
+    ///
+    /// # Parameters
+    ///
+    /// - `algo`: A zero-sized type representing the MAC algorithm to use.
+    /// - `key`: A reference to the key used for the MAC computation.
+    ///
+    /// # Returns
+    ///
+    /// A result containing the operational context for the MAC, or an error.
+    fn init<'a>(&'a mut self, algo: A, key: &A::Key) -> Result<Self::OpContext<'a>, Self::Error>;
+}
+
+/// Optional trait for resetting a MAC context to its initial state.
+pub trait MacCtrlReset: ErrorType {
+    /// Resets the MAC context.
+    ///
+    /// # Returns
+    ///
+    /// A result indicating success or failure.
+    fn reset(&mut self) -> Result<(), Self::Error>;
+}
+
+/// Trait for performing MAC operations.
+pub trait MacOp: ErrorType {
+    /// The type of the MAC output.
+    type Output: IntoBytes;
+
+    /// Updates the MAC state with the provided input data.
+    ///
+    /// # Parameters
+    ///
+    /// - `input`: A byte slice containing the data to authenticate.
+    ///
+    /// # Returns
+    ///
+    /// A result indicating success or failure.
+ fn update(&mut self, input: &[u8]) -> Result<(), Self::Error>; + + /// Finalizes the MAC computation and returns the result. + /// + /// # Returns + /// + /// A result containing the MAC output, or an error. + fn finalize(self) -> Result; +} + +// ============================================================================= +// MAC Algorithm Marker Types +// ============================================================================= + +/// HMAC-SHA-256 MAC algorithm marker type. +/// +/// This zero-sized type represents the HMAC-SHA-256 message authentication code +/// algorithm, which produces a 256-bit (32-byte) MAC output using SHA-256 as +/// the underlying hash function. +/// +/// HMAC-SHA-256 combines the SHA-256 hash function with a secret key to provide +/// both data integrity and authentication. +#[derive(Clone, Copy, Debug)] +pub struct HmacSha2_256; +impl MacAlgorithm for HmacSha2_256 { + const OUTPUT_BITS: usize = 256; + type MacOutput = Digest<{ Self::OUTPUT_BITS / 32 }>; + type Key = [u8; 32]; +} + +/// HMAC-SHA-384 MAC algorithm marker type. +/// +/// This zero-sized type represents the HMAC-SHA-384 message authentication code +/// algorithm, which produces a 384-bit (48-byte) MAC output using SHA-384 as +/// the underlying hash function. +/// +/// HMAC-SHA-384 provides a larger output size than HMAC-SHA-256 for applications +/// requiring additional security margin. +#[derive(Clone, Copy, Debug)] +pub struct HmacSha2_384; +impl MacAlgorithm for HmacSha2_384 { + const OUTPUT_BITS: usize = 384; + type MacOutput = Digest<{ Self::OUTPUT_BITS / 32 }>; + type Key = [u8; 48]; +} + +/// HMAC-SHA-512 MAC algorithm marker type. +/// +/// This zero-sized type represents the HMAC-SHA-512 message authentication code +/// algorithm, which produces a 512-bit (64-byte) MAC output using SHA-512 as +/// the underlying hash function. 
+/// +/// HMAC-SHA-512 provides the largest standard output size, offering maximum +/// collision resistance and authentication strength. +#[derive(Clone, Copy, Debug)] +pub struct HmacSha2_512; +impl MacAlgorithm for HmacSha2_512 { + const OUTPUT_BITS: usize = 512; + type MacOutput = Digest<{ Self::OUTPUT_BITS / 32 }>; + type Key = [u8; 64]; +} diff --git a/platform/impls/baremetal/mock/Cargo.toml b/platform/impls/baremetal/mock/Cargo.toml new file mode 100644 index 0000000..cea15ce --- /dev/null +++ b/platform/impls/baremetal/mock/Cargo.toml @@ -0,0 +1,18 @@ +# Licensed under the Apache-2.0 license + +[package] +name = "openprot-platform-mock" +version = "0.1.0" +edition = "2021" +description = "Mock/stub implementation of OpenPRoT platform traits for testing" + +[features] +default = [] + +[dependencies] +# OpenPRoT HAL traits +openprot-hal-blocking = { path = "../../../../hal/blocking" } + +# Core dependencies for embedded development +cortex-m = "0.7" +embedded-hal = "1.0" diff --git a/platform/impls/baremetal/mock/src/hash.rs b/platform/impls/baremetal/mock/src/hash.rs new file mode 100644 index 0000000..4c3f866 --- /dev/null +++ b/platform/impls/baremetal/mock/src/hash.rs @@ -0,0 +1,348 @@ +// Licensed under the Apache-2.0 license + +//! Mock Hash/Digest Accelerator Implementation +//! +//! Provides a stub implementation of digest operations that can be used +//! for testing when real hardware acceleration is not available. +//! +//! This module demonstrates both the scoped and owned digest APIs: +//! - **Scoped API**: Traditional lifetime-constrained contexts for simple use cases +//! 
- **Owned API**: Move-based resource management for server applications + +use openprot_hal_blocking::digest::{ + DigestAlgorithm, ErrorKind, ErrorType, Sha2_256, Sha2_384, Sha2_512, +}; + +// Import both API modules +use openprot_hal_blocking::digest::scoped::{DigestCtrlReset, DigestInit, DigestOp}; + +/// Mock digest accelerator device +/// +/// This is a software-only stub implementation of the digest hardware traits. +/// It provides working digest operations using simple algorithms or dummy outputs +/// for testing purposes. +#[derive(Default)] +pub struct MockDigestDevice; + +impl MockDigestDevice { + /// Create a new mock digest device + pub fn new() -> Self { + Self + } +} + +/// Mock digest error type +#[derive(Debug, Clone, Copy)] +pub struct MockDigestError; + +impl openprot_hal_blocking::digest::Error for MockDigestError { + fn kind(&self) -> ErrorKind { + // Mock implementation never fails, but we can simulate errors if needed + ErrorKind::HardwareFailure + } +} + +impl ErrorType for MockDigestDevice { + type Error = MockDigestError; +} + +impl DigestCtrlReset for MockDigestDevice { + fn reset(&mut self) -> Result<(), Self::Error> { + // Mock reset always succeeds + Ok(()) + } +} + +// +// SCOPED API IMPLEMENTATION (Original) +// + +/// Mock hasher context that tracks the algorithm type and lifetime of the device. +/// This mimics the pattern from the reference implementation where the hasher holds a reference +/// to the hardware device (even though we don't use it in the mock) +/// and the algorithm parameters for type safety. +pub struct MockHasher<'a, T> { + #[allow(dead_code)] // Mock implementation doesn't need to use the device reference + hw: &'a mut MockDigestDevice, + _alg: T, + data_processed: u64, +} + +impl ErrorType for MockHasher<'_, T> { + type Error = MockDigestError; +} + +/// Macro to implement scoped digest traits for each algorithm +macro_rules! 
impl_scoped_sha2 { + ($algo:ident) => { + impl DigestInit<$algo> for MockDigestDevice { + type OpContext<'a> = MockHasher<'a, $algo>; + type Output = <$algo as DigestAlgorithm>::Digest; + + fn init(&mut self, init_params: $algo) -> Result, Self::Error> { + // In a real implementation, we'd configure the hardware here + Ok(Self::OpContext { + hw: self, + _alg: init_params, + data_processed: 0, + }) + } + } + + impl DigestOp for MockHasher<'_, $algo> { + type Output = <$algo as DigestAlgorithm>::Digest; + + fn update(&mut self, input: &[u8]) -> Result<(), Self::Error> { + // Track the amount of data processed + self.data_processed += input.len() as u64; + // In a real implementation, we'd process the input data + Ok(()) + } + + fn finalize(self) -> Result { + // Generate a deterministic but fake digest based on the data length and algorithm + const OUTPUT_WORDS: usize = <$algo as DigestAlgorithm>::OUTPUT_BITS / 32; + let mut value = [0u32; OUTPUT_WORDS]; + for (i, word) in value.iter_mut().enumerate() { + *word = 0x12345678u32 + .wrapping_add(self.data_processed as u32) + .wrapping_add(i as u32); + } + Ok(Self::Output { value }) + } + } + }; +} + +impl_scoped_sha2!(Sha2_256); +impl_scoped_sha2!(Sha2_384); +impl_scoped_sha2!(Sha2_512); + +// +// OWNED API IMPLEMENTATION (Move-based Resource Management) +// + +/// Demonstrates the new owned API with move-based resource management for persistent sessions +pub mod owned { + use super::*; + use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + + /// Controller for owned digest operations + /// + /// This represents the hardware controller that can create owned contexts. + /// Unlike the scoped API, this can be moved into and out of contexts. 
+ #[derive(Debug)] + pub struct MockDigestController { + // Hardware state could go here + // For AST1060, this might track the single hardware context + #[allow(dead_code)] // Mock implementation + hardware_id: u32, + } + + impl Default for MockDigestController { + fn default() -> Self { + Self::new() + } + } + + impl MockDigestController { + pub fn new() -> Self { + Self { + hardware_id: 0xDEADBEEF, + } + } + } + + impl ErrorType for MockDigestController { + type Error = MockDigestError; + } + + /// Owned digest context for a specific algorithm + /// + /// This context owns the controller and can be stored in structs, + /// moved across function boundaries, and persist across IPC calls. + pub struct MockOwnedContext { + controller: MockDigestController, + #[allow(dead_code)] // Algorithm type for type safety + algorithm: T, + data_processed: u64, + } + + impl ErrorType for MockOwnedContext { + type Error = MockDigestError; + } + + /// Macro to implement owned digest traits for each algorithm + macro_rules! 
impl_owned_sha2 { + ($algo:ident) => { + impl DigestInit<$algo> for MockDigestController { + type Context = MockOwnedContext<$algo>; + type Output = <$algo as DigestAlgorithm>::Digest; + + fn init(self, init_params: $algo) -> Result { + // Controller moves into the context + // In hardware implementation, this might claim hardware resources + Ok(MockOwnedContext { + controller: self, + algorithm: init_params, + data_processed: 0, + }) + } + } + + impl DigestOp for MockOwnedContext<$algo> { + type Output = <$algo as DigestAlgorithm>::Digest; + type Controller = MockDigestController; + + fn update(mut self, data: &[u8]) -> Result { + // Process data and return updated context + self.data_processed += data.len() as u64; + // In hardware implementation, this might feed data to hardware + Ok(self) + } + + fn finalize(self) -> Result<(Self::Output, Self::Controller), Self::Error> { + // Generate digest and return both result and controller + const OUTPUT_WORDS: usize = <$algo as DigestAlgorithm>::OUTPUT_BITS / 32; + let mut value = [0u32; OUTPUT_WORDS]; + for (i, word) in value.iter_mut().enumerate() { + *word = 0x87654321u32 // Different pattern to distinguish from scoped + .wrapping_add(self.data_processed as u32) + .wrapping_add(i as u32); + } + + let result = Self::Output { value }; + let controller = self.controller; // Move controller back + + Ok((result, controller)) + } + + fn cancel(self) -> Self::Controller { + // Clean cancellation - return controller without producing output + // In hardware implementation, this might reset hardware state + self.controller + } + } + }; + } + + impl_owned_sha2!(Sha2_256); + impl_owned_sha2!(Sha2_384); + impl_owned_sha2!(Sha2_512); +} + +#[cfg(test)] +mod tests { + use super::*; + use openprot_hal_blocking::digest::{Digest, Sha2_256}; + + #[test] + fn test_scoped_api() { + let mut device = MockDigestDevice::new(); + + // Test the scoped API + let mut ctx = device.init(Sha2_256).unwrap(); + ctx.update(b"hello").unwrap(); + 
ctx.update(b" world").unwrap(); + let digest = ctx.finalize().unwrap(); + + // Verify we got a digest with the expected pattern + assert_eq!(digest.value[0], 0x12345678 + 11); // 11 bytes processed + } + + #[test] + fn test_owned_api() { + use crate::hash::owned::MockDigestController; + use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + + let controller = MockDigestController::new(); + + // Test the owned API - contexts can be stored and moved + let ctx = controller.init(Sha2_256).unwrap(); + let ctx = ctx.update(b"hello").unwrap(); + let ctx = ctx.update(b" world").unwrap(); + let (digest, recovered_controller) = ctx.finalize().unwrap(); + + // Verify we got a digest with the expected pattern (different from scoped) + assert_eq!(digest.value[0], 0x87654321 + 11); // 11 bytes processed + + // Controller is recovered and can be reused + let _new_ctx = recovered_controller.init(Sha2_256).unwrap(); + } + + #[test] + fn test_owned_api_cancel() { + use crate::hash::owned::MockDigestController; + use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + + let controller = MockDigestController::new(); + + let ctx = controller.init(Sha2_256).unwrap(); + let ctx = ctx.update(b"some data").unwrap(); + + // Cancel the operation and recover controller + let recovered_controller = ctx.cancel(); + + // Controller can be reused after cancellation + let _new_ctx = recovered_controller.init(Sha2_256).unwrap(); + } + + #[test] + fn test_session_storage_pattern() { + use crate::hash::owned::{MockDigestController, MockOwnedContext}; + use openprot_hal_blocking::digest::owned::{DigestInit, DigestOp}; + + // Demonstrate session storage pattern (impossible with scoped API) + // This simulates what a server would do to store contexts + struct SimpleSessionManager { + session: Option>, + controller: Option, + } + + impl SimpleSessionManager { + fn new() -> Self { + Self { + session: None, + controller: Some(MockDigestController::new()), + } + } + + fn 
create_session(&mut self) -> Result<(), MockDigestError> { + let controller = self.controller.take().unwrap(); + let context = controller.init(Sha2_256)?; + self.session = Some(context); + Ok(()) + } + + fn update_session(&mut self, data: &[u8]) -> Result<(), MockDigestError> { + let context = self.session.take().unwrap(); + let updated_context = context.update(data)?; + self.session = Some(updated_context); + Ok(()) + } + + fn finalize_session(&mut self) -> Result, MockDigestError> { + let context = self.session.take().unwrap(); + let (result, controller) = context.finalize()?; + self.controller = Some(controller); // Resource recovered + Ok(result) + } + } + + let mut manager = SimpleSessionManager::new(); + + // Create a session and process data across multiple calls + manager.create_session().unwrap(); + manager.update_session(b"hello").unwrap(); + manager.update_session(b" world").unwrap(); + let result = manager.finalize_session().unwrap(); + + // Verify we got a meaningful result + assert_eq!(result.value[0], 0x87654321 + 11); // 11 bytes processed + + // Controller was recovered and can be reused + manager.create_session().unwrap(); + let result2 = manager.finalize_session().unwrap(); + assert_eq!(result2.value[0], 0x87654321); // 0 bytes processed + } +} diff --git a/platform/impls/baremetal/mock/src/lib.rs b/platform/impls/baremetal/mock/src/lib.rs new file mode 100644 index 0000000..e8cca72 --- /dev/null +++ b/platform/impls/baremetal/mock/src/lib.rs @@ -0,0 +1,11 @@ +// Licensed under the Apache-2.0 license + +//! Mock/Stub Platform Implementation +//! +//! This module provides stub implementations of OpenPRoT platform traits +//! for testing and development purposes when real hardware is not available. 
+ +#![no_std] + +pub mod hash; +pub mod mac; diff --git a/platform/impls/baremetal/mock/src/mac.rs b/platform/impls/baremetal/mock/src/mac.rs new file mode 100644 index 0000000..98b5c80 --- /dev/null +++ b/platform/impls/baremetal/mock/src/mac.rs @@ -0,0 +1,200 @@ +// Licensed under the Apache-2.0 license + +//! Mock MAC (Message Authentication Code) Implementation +//! +//! Provides a stub implementation of MAC operations that can be used +//! for testing when real hardware acceleration is not available. + +use openprot_hal_blocking::mac::{ + Error, ErrorKind, ErrorType, HmacSha2_256, HmacSha2_384, HmacSha2_512, MacAlgorithm, + MacCtrlReset, MacInit, MacOp, +}; + +/// Mock MAC accelerator device +/// +/// This is a software-only stub implementation of the MAC hardware traits. +/// It provides working MAC operations using simple algorithms or dummy outputs +/// for testing purposes. +pub struct MockMacDevice; + +impl Default for MockMacDevice { + fn default() -> Self { + Self::new() + } +} + +impl MockMacDevice { + /// Create a new mock MAC device + pub fn new() -> Self { + Self + } +} + +/// Mock MAC error type +#[derive(Debug, Clone, Copy)] +pub struct MockMacError; + +impl Error for MockMacError { + fn kind(&self) -> ErrorKind { + // Mock implementation never fails, but we can simulate errors if needed + ErrorKind::HardwareFailure + } +} + +impl ErrorType for MockMacDevice { + type Error = MockMacError; +} + +impl MacCtrlReset for MockMacDevice { + fn reset(&mut self) -> Result<(), Self::Error> { + // Mock reset always succeeds + Ok(()) + } +} + +/// Mock MAC context that tracks the algorithm type, key, and lifetime of the device. +/// This mimics the pattern from the reference implementation where the MAC context holds a reference +/// to the hardware device and the algorithm parameters for type safety. 
+pub struct MockMac<'a, A> { + #[allow(dead_code)] // Mock implementation doesn't need to use the device reference + hw: &'a mut MockMacDevice, + _alg: A, + #[allow(dead_code)] // Key is stored but not used in mock implementation + key_hash: u64, // Simple hash of the key for deterministic output + data_processed: u64, +} + +impl ErrorType for MockMac<'_, A> { + type Error = MockMacError; +} + +/// Helper function to create a simple hash of a byte slice for deterministic mock output +fn simple_hash(data: &[u8]) -> u64 { + let mut hash = 0xcbf29ce484222325u64; // FNV offset basis + for &byte in data { + hash ^= byte as u64; + hash = hash.wrapping_mul(0x100000001b3); // FNV prime + } + hash +} + +/// Macro to implement MAC traits for each algorithm, following the reference pattern +macro_rules! impl_hmac { + ($algo:ident) => { + impl MacInit<$algo> for MockMacDevice { + type OpContext<'a> = MockMac<'a, $algo>; + + fn init<'a>( + &'a mut self, + init_params: $algo, + key: &<$algo as MacAlgorithm>::Key, + ) -> Result, Self::Error> { + // In a real implementation, we'd configure the hardware here + let key_bytes: &[u8] = unsafe { + core::slice::from_raw_parts( + key.as_ptr() as *const u8, + core::mem::size_of_val(key), + ) + }; + Ok(Self::OpContext { + hw: self, + _alg: init_params, + key_hash: simple_hash(key_bytes), + data_processed: 0, + }) + } + } + + impl MacOp for MockMac<'_, $algo> { + type Output = <$algo as MacAlgorithm>::MacOutput; + + fn update(&mut self, input: &[u8]) -> Result<(), Self::Error> { + // Track the amount of data processed + self.data_processed += input.len() as u64; + // In a real implementation, we'd process the input data with the key + Ok(()) + } + + fn finalize(self) -> Result { + // Generate a deterministic but fake MAC based on the data length, key hash, and algorithm + const OUTPUT_WORDS: usize = <$algo as MacAlgorithm>::OUTPUT_BITS / 32; + let mut value = [0u32; OUTPUT_WORDS]; + for (i, word) in value.iter_mut().enumerate() { + *word = 
0x12345678u32 + .wrapping_add(self.data_processed as u32) + .wrapping_add(self.key_hash as u32) + .wrapping_add((self.key_hash >> 32) as u32) + .wrapping_add(i as u32); + } + Ok(Self::Output { value }) + } + } + }; +} + +impl_hmac!(HmacSha2_256); +impl_hmac!(HmacSha2_384); +impl_hmac!(HmacSha2_512); + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_mock_mac_device_creation() { + let device = MockMacDevice::new(); + let device_default = MockMacDevice; + // Both should be valid instances + assert_eq!( + core::mem::size_of_val(&device), + core::mem::size_of_val(&device_default) + ); + } + + #[test] + fn test_mock_mac_reset() { + let mut device = MockMacDevice::new(); + assert!(device.reset().is_ok()); + } + + #[test] + fn test_hmac_sha256_mock() { + let mut device = MockMacDevice::new(); + let key = [0u8; 32]; + + let mut mac_ctx = device + .init(HmacSha2_256, &key) + .expect("Failed to initialize MAC"); + mac_ctx.update(b"hello").expect("Failed to update MAC"); + mac_ctx.update(b" world").expect("Failed to update MAC"); + let result = mac_ctx.finalize().expect("Failed to finalize MAC"); + + // The result should be deterministic for the same input and key + assert_eq!(result.value.len(), 8); // 256 bits / 32 bits per word = 8 words + } + + #[test] + fn test_hmac_different_keys_different_output() { + let mut device1 = MockMacDevice::new(); + let mut device2 = MockMacDevice::new(); + + let key1 = [0u8; 32]; + let key2 = [1u8; 32]; + + let mut mac_ctx1 = device1 + .init(HmacSha2_256, &key1) + .expect("Failed to initialize MAC"); + mac_ctx1.update(b"test data").expect("Failed to update MAC"); + let result1 = mac_ctx1.finalize().expect("Failed to finalize MAC"); + + let mut mac_ctx2 = device2 + .init(HmacSha2_256, &key2) + .expect("Failed to initialize MAC"); + mac_ctx2.update(b"test data").expect("Failed to update MAC"); + let result2 = mac_ctx2.finalize().expect("Failed to finalize MAC"); + + // Different keys should produce different outputs (in a real 
implementation) + // Our mock should also produce different outputs due to key_hash + assert_ne!(result1.value, result2.value); + } +}