diff --git a/Cargo.lock b/Cargo.lock
index daeb0822782..4794425d4a0 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -171,11 +171,22 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"
 
 [[package]]
 name = "bincode"
-version = "1.3.3"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
+checksum = "36eaf5d7b090263e8150820482d5d93cd964a81e4019913c972f4edcc6edb740"
 dependencies = [
+ "bincode_derive",
  "serde",
+ "unty",
+]
+
+[[package]]
+name = "bincode_derive"
+version = "2.0.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bf95709a440f45e986983918d0e8a1f30a9b1df04918fc828670606804ac3c09"
+dependencies = [
+ "virtue",
 ]
 
 [[package]]
@@ -1503,6 +1514,12 @@ version = "0.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a"
 
+[[package]]
+name = "unty"
+version = "0.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d49784317cd0d1ee7ec5c716dd598ec5b4483ea832a2dced265471cc0f690ae"
+
 [[package]]
 name = "userfaultfd"
 version = "0.8.1"
@@ -1585,6 +1602,12 @@ dependencies = [
  "vmm-sys-util",
 ]
 
+[[package]]
+name = "virtue"
+version = "0.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "051eb1abcf10076295e815102942cc58f9d5e3b4560e46e53c21e8ff6f3af7b1"
+
 [[package]]
 name = "vm-allocator"
 version = "0.1.1"
diff --git a/src/firecracker/examples/seccomp/jailer.rs b/src/firecracker/examples/seccomp/jailer.rs
index 62e4b84ea52..53822d377fa 100644
--- a/src/firecracker/examples/seccomp/jailer.rs
+++ b/src/firecracker/examples/seccomp/jailer.rs
@@ -13,7 +13,7 @@ fn main() {
     let bpf_path = &args[2];
 
     let filter_file = File::open(bpf_path).unwrap();
-    let map = deserialize_binary(&filter_file, None).unwrap();
+    let map = deserialize_binary(&filter_file).unwrap();
 
     // Loads filters.
     apply_filter(map.get("main").unwrap()).unwrap();
diff --git a/src/firecracker/examples/seccomp/panic.rs b/src/firecracker/examples/seccomp/panic.rs
index 315899872f4..db0446cf882 100644
--- a/src/firecracker/examples/seccomp/panic.rs
+++ b/src/firecracker/examples/seccomp/panic.rs
@@ -11,7 +11,7 @@ fn main() {
     let filter_thread = &args[2];
 
     let filter_file = File::open(bpf_path).unwrap();
-    let map = deserialize_binary(&filter_file, None).unwrap();
+    let map = deserialize_binary(&filter_file).unwrap();
     apply_filter(map.get(filter_thread).unwrap()).unwrap();
     panic!("Expected panic.");
 }
diff --git a/src/firecracker/src/seccomp.rs b/src/firecracker/src/seccomp.rs
index 22069def685..421220a7b5f 100644
--- a/src/firecracker/src/seccomp.rs
+++ b/src/firecracker/src/seccomp.rs
@@ -9,12 +9,6 @@ use vmm::seccomp::{BpfThreadMap, DeserializationError, deserialize_binary, get_e
 
 const THREAD_CATEGORIES: [&str; 3] = ["vmm", "api", "vcpu"];
 
-// This byte limit is passed to `bincode` to guard against a potential memory
-// allocation DOS caused by binary filters that are too large.
-// This limit can be safely determined since the maximum length of a BPF
-// filter is 4096 instructions and Firecracker has a finite number of threads.
-const DESERIALIZATION_BYTES_LIMIT: Option<u64> = Some(100_000);
-
 /// Error retrieving seccomp filters.
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
 pub enum FilterError {
@@ -72,15 +66,13 @@ pub fn get_filters(config: SeccompConfig) -> Result<BpfThreadMap, FilterError>
 
 fn get_default_filters() -> Result<BpfThreadMap, FilterError> {
     // Retrieve, at compile-time, the serialized binary filter generated with seccompiler.
     let bytes: &[u8] = include_bytes!(concat!(env!("OUT_DIR"), "/seccomp_filter.bpf"));
-    let map = deserialize_binary(bytes, DESERIALIZATION_BYTES_LIMIT)
-        .map_err(FilterError::Deserialization)?;
+    let map = deserialize_binary(bytes).map_err(FilterError::Deserialization)?;
     filter_thread_categories(map)
 }
 
 /// Retrieve custom seccomp filters.
 fn get_custom_filters<R: Read>(reader: R) -> Result<BpfThreadMap, FilterError> {
-    let map = deserialize_binary(BufReader::new(reader), DESERIALIZATION_BYTES_LIMIT)
-        .map_err(FilterError::Deserialization)?;
+    let map = deserialize_binary(BufReader::new(reader)).map_err(FilterError::Deserialization)?;
     filter_thread_categories(map)
 }
diff --git a/src/seccompiler/Cargo.toml b/src/seccompiler/Cargo.toml
index 09472f8b45e..f1f67a47d7a 100644
--- a/src/seccompiler/Cargo.toml
+++ b/src/seccompiler/Cargo.toml
@@ -16,7 +16,7 @@ path = "src/bin.rs"
 bench = false
 
 [dependencies]
-bincode = "1.2.1"
+bincode = { version = "2.0.1", features = ["serde"] }
 clap = { version = "4.5.32", features = ["derive", "string"] }
 displaydoc = "0.2.5"
 libc = "0.2.171"
diff --git a/src/seccompiler/src/lib.rs b/src/seccompiler/src/lib.rs
index 3fd62106275..ecd157c4449 100644
--- a/src/seccompiler/src/lib.rs
+++ b/src/seccompiler/src/lib.rs
@@ -8,7 +8,9 @@ use std::os::fd::{AsRawFd, FromRawFd};
 use std::os::unix::fs::MetadataExt;
 use std::str::FromStr;
 
-use bincode::Error as BincodeError;
+use bincode::config;
+use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
+use bincode::error::EncodeError as BincodeError;
 
 mod bindings;
 use bindings::*;
@@ -17,6 +19,18 @@ pub mod types;
 pub use types::*;
 use zerocopy::IntoBytes;
 
+// This byte limit is passed to `bincode` to guard against a potential memory
+// allocation DOS caused by binary filters that are too large.
+// This limit can be safely determined since the maximum length of a BPF
+// filter is 4096 instructions and Firecracker has a finite number of threads.
+const DESERIALIZATION_BYTES_LIMIT: usize = 100_000;
+
+pub const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
+    config::standard()
+        .with_fixed_int_encoding()
+        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
+        .with_little_endian();
+
 /// Binary filter compilation errors.
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
 pub enum CompilationError {
@@ -174,8 +188,9 @@ pub fn compile_bpf(
         bpf_map.insert(name.clone(), bpf);
     }
 
-    let output_file = File::create(out_path).map_err(CompilationError::OutputCreate)?;
+    let mut output_file = File::create(out_path).map_err(CompilationError::OutputCreate)?;
 
-    bincode::serialize_into(output_file, &bpf_map).map_err(CompilationError::BincodeSerialize)?;
+    bincode::encode_into_std_write(&bpf_map, &mut output_file, BINCODE_CONFIG)
+        .map_err(CompilationError::BincodeSerialize)?;
 
     Ok(())
 }
diff --git a/src/vmm/Cargo.toml b/src/vmm/Cargo.toml
index 1b941b86416..b566b2a8c91 100644
--- a/src/vmm/Cargo.toml
+++ b/src/vmm/Cargo.toml
@@ -14,7 +14,7 @@ aes-gcm = { version = "0.10.1", default-features = false, features = ["aes"] }
 arrayvec = { version = "0.7.6", optional = true }
 aws-lc-rs = { version = "1.12.6", features = ["bindgen"] }
 base64 = "0.22.1"
-bincode = "1.2.1"
+bincode = { version = "2.0.1", features = ["serde"] }
 bitflags = "2.9.0"
 crc64 = "2.0.0"
 derive_more = { version = "2.0.1", default-features = false, features = ["from", "display"] }
@@ -30,7 +30,6 @@ log = { version = "0.4.27", features = ["std", "serde"] }
 log-instrument = { path = "../log-instrument", optional = true }
 memfd = "0.6.3"
 micro_http = { git = "https://github.com/firecracker-microvm/micro-http" }
-
 semver = { version = "1.0.26", features = ["serde"] }
 serde = { version = "1.0.219", features = ["derive", "rc"] }
 serde_json = "1.0.140"
diff --git a/src/vmm/src/mmds/token.rs b/src/vmm/src/mmds/token.rs
index 64c65e9015d..e902303593d 100644
--- a/src/vmm/src/mmds/token.rs
+++ b/src/vmm/src/mmds/token.rs
@@ -10,7 +10,8 @@ use std::{fmt, io};
 
 use aes_gcm::{AeadInPlace, Aes256Gcm, Key, KeyInit, Nonce};
 use base64::Engine;
-use bincode::{DefaultOptions, Error as BincodeError, Options};
+use bincode::config;
+use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
 use serde::{Deserialize, Serialize};
 use utils::time::{ClockType, get_time_ms};
 
@@ -45,6 +46,12 @@ const TOKEN_LENGTH_LIMIT: usize = 70;
 /// too much memory when deserializing tokens.
 const DESERIALIZATION_BYTES_LIMIT: usize = std::mem::size_of::<Token>();
 
+const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
+    config::standard()
+        .with_fixed_int_encoding()
+        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
+        .with_little_endian();
+
 #[rustfmt::skip]
 #[derive(Debug, thiserror::Error, displaydoc::Display)]
 pub enum MmdsTokenError {
@@ -57,7 +64,7 @@ pub enum MmdsTokenError {
     /// Invalid time to live value provided for token: {0}. Please provide a value between {MIN_TOKEN_TTL_SECONDS:} and {MAX_TOKEN_TTL_SECONDS:}.
     InvalidTtlValue(u32),
     /// Bincode serialization failed: {0}.
-    Serialization(#[from] BincodeError),
+    Serialization(#[from] bincode::error::EncodeError),
     /// Failed to encrypt token.
     TokenEncryption,
 }
@@ -287,7 +294,7 @@ impl Token {
 
     /// Encode token structure into a string using base64 encoding.
     fn base64_encode(&self) -> Result<String, MmdsTokenError> {
-        let token_bytes: Vec<u8> = bincode::serialize(self)?;
+        let token_bytes: Vec<u8> = bincode::serde::encode_to_vec(self, BINCODE_CONFIG)?;
 
         // Encode token structure bytes into base64.
         Ok(base64::engine::general_purpose::STANDARD.encode(token_bytes))
@@ -299,12 +306,9 @@ impl Token {
             .decode(encoded_token)
             .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
 
-        let token: Token = DefaultOptions::new()
-            .with_fixint_encoding()
-            .allow_trailing_bytes()
-            .with_limit(DESERIALIZATION_BYTES_LIMIT as u64)
-            .deserialize(&token_bytes)
-            .map_err(|_| MmdsTokenError::ExpiryExtraction)?;
+        let token: Token = bincode::serde::decode_from_slice(&token_bytes, BINCODE_CONFIG)
+            .map_err(|_| MmdsTokenError::ExpiryExtraction)?
+            .0;
         Ok(token)
     }
 }
@@ -518,45 +522,4 @@ mod tests {
         assert!(!token_authority.is_valid(&token0));
         assert!(!token_authority.is_valid(&token1));
     }
-
-    #[test]
-    fn test_error_display() {
-        assert_eq!(
-            MmdsTokenError::EntropyPool(io::Error::from_raw_os_error(0)).to_string(),
-            format!(
-                "Failed to extract entropy from /dev/urandom entropy pool: {}.",
-                io::Error::from_raw_os_error(0)
-            )
-        );
-
-        assert_eq!(
-            MmdsTokenError::ExpiryExtraction.to_string(),
-            "Failed to extract expiry value from token."
-        );
-
-        assert_eq!(
-            MmdsTokenError::InvalidState.to_string(),
-            "Invalid token authority state."
-        );
-
-        assert_eq!(
-            MmdsTokenError::InvalidTtlValue(0).to_string(),
-            format!(
-                "Invalid time to live value provided for token: 0. Please provide a value between \
-                 {} and {}.",
-                MIN_TOKEN_TTL_SECONDS, MAX_TOKEN_TTL_SECONDS
-            )
-        );
-
-        assert_eq!(
-            MmdsTokenError::Serialization(BincodeError::new(bincode::ErrorKind::SizeLimit))
-                .to_string(),
-            "Bincode serialization failed: the size limit has been reached."
-        );
-
-        assert_eq!(
-            MmdsTokenError::TokenEncryption.to_string(),
-            "Failed to encrypt token."
-        );
-    }
 }
diff --git a/src/vmm/src/seccomp.rs b/src/vmm/src/seccomp.rs
index 3da974e6027..56e30908a62 100644
--- a/src/vmm/src/seccomp.rs
+++ b/src/vmm/src/seccomp.rs
@@ -5,7 +5,20 @@ use std::collections::HashMap;
 use std::io::Read;
 use std::sync::Arc;
 
-use bincode::{DefaultOptions, Options};
+use bincode::config;
+use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
+
+// This byte limit is passed to `bincode` to guard against a potential memory
+// allocation DOS caused by binary filters that are too large.
+// This limit can be safely determined since the maximum length of a BPF
+// filter is 4096 instructions and Firecracker has a finite number of threads.
+const DESERIALIZATION_BYTES_LIMIT: usize = 100_000;
+
+const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
+    config::standard()
+        .with_fixed_int_encoding()
+        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
+        .with_little_endian();
 
 /// Each BPF instruction is 8 bytes long and 4 byte aligned.
 /// This alignment needs to be satisfied in order for a BPF code to be accepted
@@ -22,7 +35,7 @@ pub type BpfProgramRef<'a> = &'a [BpfInstruction];
 pub type BpfThreadMap = HashMap<String, Arc<BpfProgram>>;
 
 /// Binary filter deserialization errors.
-pub type DeserializationError = bincode::Error;
+pub type DeserializationError = bincode::error::DecodeError;
 
 /// Retrieve empty seccomp filters.
 pub fn get_empty_filters() -> BpfThreadMap {
@@ -34,19 +47,8 @@ pub fn get_empty_filters() -> BpfThreadMap {
 }
 
 /// Deserialize binary with bpf filters
-pub fn deserialize_binary<R: Read>(
-    reader: R,
-    bytes_limit: Option<u64>,
-) -> Result<BpfThreadMap, DeserializationError> {
-    let result = match bytes_limit {
-        Some(limit) => DefaultOptions::new()
-            .with_fixint_encoding()
-            .allow_trailing_bytes()
-            .with_limit(limit)
-            .deserialize_from::<R, HashMap<String, BpfProgram>>(reader),
-        // No limit is the default.
-        None => bincode::deserialize_from::<R, HashMap<String, BpfProgram>>(reader),
-    }?;
+pub fn deserialize_binary<R: Read>(mut reader: R) -> Result<BpfThreadMap, DeserializationError> {
+    let result: HashMap<String, BpfProgram> = bincode::decode_from_std_read(&mut reader, BINCODE_CONFIG)?;
 
     Ok(result
         .into_iter()
@@ -134,49 +136,28 @@ mod tests {
     #[test]
     fn test_deserialize_binary() {
         // Malformed bincode binary.
-        {
-            let data = "adassafvc".to_string();
-            deserialize_binary(data.as_bytes(), None).unwrap_err();
-        }
+        let data = "adassafvc".to_string();
+        deserialize_binary(data.as_bytes()).unwrap_err();
 
         // Test that the binary deserialization is correct, and that the thread keys
         // have been lowercased.
-        {
-            let bpf_prog = vec![0; 2];
-            let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
-            filter_map.insert("VcpU".to_string(), bpf_prog.clone());
-            let bytes = bincode::serialize(&filter_map).unwrap();
-
-            let mut expected_res = BpfThreadMap::new();
-            expected_res.insert("vcpu".to_string(), Arc::new(bpf_prog));
-            assert_eq!(deserialize_binary(&bytes[..], None).unwrap(), expected_res);
-        }
-
-        // Test deserialization with binary_limit.
-        {
-            let bpf_prog = vec![0; 2];
-
-            let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
-            filter_map.insert("t1".to_string(), bpf_prog.clone());
-
-            let bytes = bincode::serialize(&filter_map).unwrap();
-
-            // Binary limit too low.
-            assert!(matches!(
-                deserialize_binary(&bytes[..], Some(20)).unwrap_err(),
-                error
-                if error.to_string() == "the size limit has been reached"
-            ));
-
-            let mut expected_res = BpfThreadMap::new();
-            expected_res.insert("t1".to_string(), Arc::new(bpf_prog));
-
-            // Correct binary limit.
-            assert_eq!(
-                deserialize_binary(&bytes[..], Some(50)).unwrap(),
-                expected_res
-            );
-        }
+        let bpf_prog = vec![0; 2];
+        let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
+        filter_map.insert("VcpU".to_string(), bpf_prog.clone());
+        let bytes = bincode::serde::encode_to_vec(&filter_map, BINCODE_CONFIG).unwrap();
+
+        let mut expected_res = BpfThreadMap::new();
+        expected_res.insert("vcpu".to_string(), Arc::new(bpf_prog));
+        assert_eq!(deserialize_binary(&bytes[..]).unwrap(), expected_res);
+
+        let bpf_prog = vec![0; DESERIALIZATION_BYTES_LIMIT + 1];
+        let mut filter_map: HashMap<String, BpfProgram> = HashMap::new();
+        filter_map.insert("VcpU".to_string(), bpf_prog.clone());
+        let bytes = bincode::serde::encode_to_vec(&filter_map, BINCODE_CONFIG).unwrap();
+        assert!(matches!(
+            deserialize_binary(&bytes[..]).unwrap_err(),
+            bincode::error::DecodeError::LimitExceeded
+        ));
     }
 
     #[test]
diff --git a/src/vmm/src/snapshot/mod.rs b/src/vmm/src/snapshot/mod.rs
index a4900ee6eff..57ad3980215 100644
--- a/src/vmm/src/snapshot/mod.rs
+++ b/src/vmm/src/snapshot/mod.rs
@@ -28,19 +28,27 @@ mod persist;
 use std::fmt::Debug;
 use std::io::{Read, Write};
 
-use bincode::Options;
+use bincode::config;
+use bincode::config::{Configuration, Fixint, Limit, LittleEndian};
 use semver::Version;
 use serde::de::DeserializeOwned;
 use serde::{Deserialize, Serialize};
 
 use crate::snapshot::crc::{CRC64Reader, CRC64Writer};
 pub use crate::snapshot::persist::Persist;
+use crate::utils::mib_to_bytes;
 
 #[cfg(target_arch = "x86_64")]
 const SNAPSHOT_MAGIC_ID: u64 = 0x0710_1984_8664_0000u64;
 
 /// Constant bounding how much memory bincode may allocate during vmstate file deserialization
-const VM_STATE_DESERIALIZE_LIMIT: u64 = 10_485_760; // 10MiB
+const DESERIALIZATION_BYTES_LIMIT: usize = mib_to_bytes(10);
+
+const BINCODE_CONFIG: Configuration<LittleEndian, Fixint, Limit<DESERIALIZATION_BYTES_LIMIT>> =
+    config::standard()
+        .with_fixed_int_encoding()
+        .with_limit::<DESERIALIZATION_BYTES_LIMIT>()
+        .with_little_endian();
 
 #[cfg(target_arch = "aarch64")]
 const SNAPSHOT_MAGIC_ID: u64 = 0x0710_1984_AAAA_0000u64;
@@ -110,13 +118,7 @@ impl Snapshot {
         T: Read,
         O: DeserializeOwned + Debug,
     {
-        // flags below are those used by default by bincode::deserialize_from, plus `with_limit`.
-        bincode::DefaultOptions::new()
-            .with_limit(VM_STATE_DESERIALIZE_LIMIT)
-            .with_fixint_encoding()
-            .allow_trailing_bytes() // need this because we deserialize header and snapshot from the same file, so after
-            // reading the header, there will be trailing bytes.
-            .deserialize_from(reader)
+        bincode::serde::decode_from_std_read(reader, BINCODE_CONFIG)
             .map_err(|err| SnapshotError::Serde(err.to_string()))
     }
 
@@ -126,7 +128,10 @@ impl Snapshot {
         T: Write,
         O: Serialize + Debug,
     {
-        bincode::serialize_into(writer, data).map_err(|err| SnapshotError::Serde(err.to_string()))
+        bincode::serde::encode_into_std_write(data, writer, BINCODE_CONFIG)
+            .map_err(|err| SnapshotError::Serde(err.to_string()))?;
+
+        Ok(())
     }
 
     /// Attempts to load an existing snapshot without performing CRC or version validation.
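
Illustrative sketch, not part of the patch above: assuming the `bincode = { version = "2.0.1", features = ["serde"] }` dependency introduced in the diff, the snippet below mirrors the updated `test_deserialize_binary` unit test. A const-generic `Limit` in the configuration makes oversized input fail early with `DecodeError::LimitExceeded` instead of allocating, while correctly sized data round-trips through `bincode::serde::encode_to_vec` / `decode_from_slice`. The `BYTES_LIMIT` value is only an example.

use std::collections::HashMap;

use bincode::config::{self, Configuration, Fixint, Limit, LittleEndian};

// Example limit; the patch uses 100_000 for seccomp filters and other values elsewhere.
const BYTES_LIMIT: usize = 100_000;
const CONFIG: Configuration<LittleEndian, Fixint, Limit<BYTES_LIMIT>> = config::standard()
    .with_fixed_int_encoding()
    .with_limit::<BYTES_LIMIT>()
    .with_little_endian();

fn main() {
    // A filter-map-like value, encoded through bincode's serde bridge.
    let mut map: HashMap<String, Vec<u64>> = HashMap::new();
    map.insert("vcpu".to_string(), vec![0; 2]);
    let bytes = bincode::serde::encode_to_vec(&map, CONFIG).unwrap();

    // Decoding with the same configuration round-trips the value.
    let (decoded, _read): (HashMap<String, Vec<u64>>, usize) =
        bincode::serde::decode_from_slice(&bytes, CONFIG).unwrap();
    assert_eq!(decoded, map);

    // A payload larger than the limit still encodes, but decoding rejects it up front.
    let mut big: HashMap<String, Vec<u64>> = HashMap::new();
    big.insert("vcpu".to_string(), vec![0; BYTES_LIMIT + 1]);
    let big_bytes = bincode::serde::encode_to_vec(&big, CONFIG).unwrap();
    let res: Result<(HashMap<String, Vec<u64>>, usize), bincode::error::DecodeError> =
        bincode::serde::decode_from_slice(&big_bytes, CONFIG);
    assert!(matches!(res, Err(bincode::error::DecodeError::LimitExceeded)));
}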
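
Also not part of the patch: the comment removed from src/vmm/src/snapshot/mod.rs notes that `allow_trailing_bytes()` was needed because the snapshot header and the vmstate are deserialized from the same file. With bincode 2, `decode_from_std_read` reads only the bytes of the value it is decoding from the `Read` stream, so two values can be decoded back to back. The sketch below demonstrates that pattern with made-up `Header` and `State` types rather than Firecracker's real snapshot structures.

use std::io::Cursor;

use bincode::config;
use serde::{Deserialize, Serialize};

// Illustrative stand-ins, not Firecracker's actual snapshot types.
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct Header {
    magic: u64,
    version: u16,
}

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct State {
    regs: Vec<u64>,
}

fn main() {
    let cfg = config::standard()
        .with_fixed_int_encoding()
        .with_little_endian();

    // Write header and state back to back into one buffer, as the snapshot file does.
    let mut buf: Vec<u8> = Vec::new();
    let header = Header { magic: 0x0710, version: 9 };
    let state = State { regs: vec![1, 2, 3] };
    bincode::serde::encode_into_std_write(&header, &mut buf, cfg).unwrap();
    bincode::serde::encode_into_std_write(&state, &mut buf, cfg).unwrap();

    // Each decode consumes exactly the bytes of its own value and leaves the rest
    // in the reader, so no trailing-bytes opt-in is required.
    let mut reader = Cursor::new(buf);
    let decoded_header: Header = bincode::serde::decode_from_std_read(&mut reader, cfg).unwrap();
    let decoded_state: State = bincode::serde::decode_from_std_read(&mut reader, cfg).unwrap();

    assert_eq!(decoded_header, header);
    assert_eq!(decoded_state, state);
}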