From bc6da1b78d62fda497637a166edfd2b8c2ee692f Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Wed, 7 May 2025 09:29:20 -0600 Subject: [PATCH 1/9] Moving seepage for sq gates down --- .../src/engines/noise/general.rs | 36 ++++++++++--------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index e296f41f2..e56549417 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -901,31 +901,33 @@ impl GeneralNoiseModel { for &qubit in &gate.qubits { if has_leakage { add_original_gate = false; + } - // If qubit has leaked and spontaneous emission has occurred... seep the qubit - if self.rng.occurs(self.p1_emission_ratio) { - if let Some(gates) = self.seep(qubit) { - noise.extend(gates); - } - } - } else if self.rng.occurs(self.p1) { + if self.rng.occurs(self.p1) { // Spontaneous emission if self.rng.occurs(self.p1_emission_ratio) { - add_original_gate = false; + // If qubit has leaked and spontaneous emission has occurred... seep the qubit + if has_leakage { + if let Some(gates) = self.seep(qubit) { + noise.extend(gates); + } + } else { + add_original_gate = false; - let result = self.p1_emission_model.sample_gates(&self.rng, qubit); + let result = self.p1_emission_model.sample_gates(&self.rng, qubit); - if result.has_leakage() { - // Handle leakage - if let Some(gate) = self.leak(qubit) { + if result.has_leakage() { + // Handle leakage + if let Some(gate) = self.leak(qubit) { + noise.push(gate); + } + } else if let Some(gate) = result.gate { + // Handle Pauli gate noise.push(gate); + trace!("Applied Pauli error to qubit {}", qubit); } - } else if let Some(gate) = result.gate { - // Handle Pauli gate - noise.push(gate); - trace!("Applied Pauli error to qubit {}", qubit); } - } else { + } else if !has_leakage { // Pauli noise let result = self.p1_pauli_model.sample_gates(&self.rng, qubit); if let Some(gate) = result.gate { From 34208ca35c83c5e30e0ea17dc182d4e1fdea7031 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ciar=C3=A1n=20Ryan-Anderson?= Date: Wed, 7 May 2025 09:33:50 -0600 Subject: [PATCH 2/9] Rng refactor (#135) * refactor rng in noise --- .../src/byte_message/gate_type.rs | 2 +- .../pecos-engines/src/byte_message/message.rs | 34 ++ crates/pecos-engines/src/engines/noise.rs | 60 ++- .../src/engines/noise/biased_depolarizing.rs | 23 +- .../src/engines/noise/biased_measurement.rs | 25 +- .../src/engines/noise/depolarizing.rs | 21 +- .../src/engines/noise/general.rs | 64 ++- .../src/engines/noise/noise_rng.rs | 411 +++++++++++++++++ .../pecos-engines/src/engines/noise/utils.rs | 316 +------------ .../src/engines/noise/weighted_sampler.rs | 430 +++++++++++++++++- .../pecos-engines/tests/noise_determinism.rs | 290 ++++++++++++ crates/pecos-engines/tests/noise_test.rs | 1 + 12 files changed, 1263 insertions(+), 414 deletions(-) create mode 100644 crates/pecos-engines/src/engines/noise/noise_rng.rs create mode 100644 crates/pecos-engines/tests/noise_determinism.rs diff --git a/crates/pecos-engines/src/byte_message/gate_type.rs b/crates/pecos-engines/src/byte_message/gate_type.rs index b96d69a8e..be3f96c78 100644 --- a/crates/pecos-engines/src/byte_message/gate_type.rs +++ b/crates/pecos-engines/src/byte_message/gate_type.rs @@ -75,7 +75,7 @@ impl fmt::Display for GateType { /// This struct is designed to replace `QuantumCommand` with a more FFI-friendly /// representation. 
It contains all the information needed to represent a quantum /// gate operation. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct QuantumGate { /// The type of the gate pub gate_type: GateType, diff --git a/crates/pecos-engines/src/byte_message/message.rs b/crates/pecos-engines/src/byte_message/message.rs index 805210ff1..60f2d9a79 100644 --- a/crates/pecos-engines/src/byte_message/message.rs +++ b/crates/pecos-engines/src/byte_message/message.rs @@ -347,6 +347,17 @@ impl ByteMessage { builder.add_measurements(&[qubit], &[result_id]); } } + Some(&"P") => { + if parts.len() >= 2 { + let qubit = parts[1].parse::().map_err(|_| { + QueueError::OperationError(format!( + "Invalid qubit in P command: {}", + parts[1] + )) + })?; + builder.add_prep(&[qubit]); + } + } _ => { return Err(QueueError::OperationError(format!( "Unknown command type: {}", @@ -1011,4 +1022,27 @@ mod tests { .build(); assert!(!non_empty_message.is_empty().unwrap()); } + + #[test] + fn test_parse_command_to_builder() { + // Test various commands including the new "P" command + let commands = [ + "H 0", "CX 0 1", "RZ 0.5 2", "P 3", // Test the new P command + "M 4 0", + ]; + + let message = ByteMessage::create_from_commands(&commands).unwrap(); + + // Parse the quantum operations from the message + let operations = message.parse_quantum_operations().unwrap(); + + // We should have 5 operations + assert_eq!(operations.len(), 5); + + // Check the P command was correctly parsed + assert_eq!(operations[3].gate_type, GateType::Prep); + assert_eq!(operations[3].qubits, vec![3]); + assert!(operations[3].params.is_empty()); + assert_eq!(operations[3].result_id, None); + } } diff --git a/crates/pecos-engines/src/engines/noise.rs b/crates/pecos-engines/src/engines/noise.rs index 132433f46..982ecb2be 100644 --- a/crates/pecos-engines/src/engines/noise.rs +++ b/crates/pecos-engines/src/engines/noise.rs @@ -20,6 +20,7 @@ pub mod biased_depolarizing; pub mod biased_measurement; pub mod depolarizing; pub mod general; +pub mod noise_rng; pub mod pass_through; pub mod utils; pub mod weighted_sampler; @@ -28,8 +29,9 @@ pub use self::biased_depolarizing::BiasedDepolarizingNoiseModel; pub use self::biased_measurement::BiasedMeasurementNoiseModel; pub use self::depolarizing::DepolarizingNoiseModel; pub use self::general::GeneralNoiseModel; +pub use self::noise_rng::NoiseRng; pub use self::pass_through::PassThroughNoiseModel; -pub use self::utils::{NoiseRng, NoiseUtils, ProbabilityValidator}; +pub use self::utils::{NoiseUtils, ProbabilityValidator}; pub use self::weighted_sampler::{ SingleQubitWeightedSampler, TwoQubitWeightedSampler, WeightedSampler, }; @@ -84,7 +86,7 @@ dyn_clone::clone_trait_object!(NoiseModel); /// reducing code duplication and improving maintainability. 
pub struct BaseNoiseModel { /// The random number generator for the noise model - rng: NoiseRng, + rng: NoiseRng, } impl BaseNoiseModel { @@ -92,7 +94,7 @@ impl BaseNoiseModel { #[must_use] pub fn new() -> Self { Self { - rng: NoiseRng::new(), + rng: NoiseRng::default(), } } @@ -106,10 +108,16 @@ impl BaseNoiseModel { /// Get a reference to the random number generator #[must_use] - pub fn rng(&self) -> &NoiseRng { + pub fn rng(&self) -> &NoiseRng { &self.rng } + /// Get a mutable reference to the random number generator + #[must_use] + pub fn rng_mut(&mut self) -> &mut NoiseRng { + &mut self.rng + } + /// Check if a message contains measurement results /// /// # Arguments @@ -141,15 +149,16 @@ impl RngManageable for BaseNoiseModel { type Rng = ChaCha8Rng; fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng.set_rng(rng) + self.rng = NoiseRng::new(rng); + Ok(()) } fn rng(&self) -> &Self::Rng { - self.rng.rng() + self.rng.inner() } fn rng_mut(&mut self) -> &mut Self::Rng { - self.rng.rng_mut() + self.rng.inner_mut() } } @@ -182,36 +191,39 @@ impl ControlEngine for Box { #[cfg(test)] mod base_tests { use super::*; - use crate::byte_message::ByteMessageBuilder; + use rand::SeedableRng; #[test] fn test_base_noise_model_construction() { - // Create a noise model with default seed let model = BaseNoiseModel::new(); - assert!(model.rng().random_float() >= 0.0); + // Verify RNG is initialized, not checking for null since from_ref is never null + assert!( + model.rng().inner() != &ChaCha8Rng::seed_from_u64(0), + "Default RNG should be randomly seeded" + ); - // Create a noise model with specific seed let model = BaseNoiseModel::with_seed(42); - assert!(model.rng().random_float() >= 0.0); + // Check the model has a properly seeded RNG + assert_eq!( + *model.rng().inner(), + ChaCha8Rng::seed_from_u64(42), + "RNG should be initialized with seed 42" + ); } #[test] fn test_base_noise_model_has_measurements() { let model = BaseNoiseModel::new(); - // Create a message with measurements - let mut builder = ByteMessageBuilder::new(); - let _ = builder.for_measurement_results(); - builder.add_measurement_results(&[0], &[0]); - let message = builder.build(); - assert!(model.has_measurements(&message)); + // Test with a message that has no measurements + let empty_msg = ByteMessage::new(Vec::new()); + assert!(!model.has_measurements(&empty_msg)); - // Create a message without measurements - let mut builder = ByteMessageBuilder::new(); - let _ = builder.for_quantum_operations(); - builder.add_x(&[0]); - let message = builder.build(); - assert!(!model.has_measurements(&message)); + // Test with a message that has measurements + let mut builder = ByteMessage::measurement_results_builder(); + builder.add_measurement_results(&[0], &[0]); + let measure_msg = builder.build(); + assert!(model.has_measurements(&measure_msg)); } } diff --git a/crates/pecos-engines/src/engines/noise/biased_depolarizing.rs b/crates/pecos-engines/src/engines/noise/biased_depolarizing.rs index aadc69249..796acf307 100644 --- a/crates/pecos-engines/src/engines/noise/biased_depolarizing.rs +++ b/crates/pecos-engines/src/engines/noise/biased_depolarizing.rs @@ -68,7 +68,7 @@ pub struct BiasedDepolarizingNoiseModel { /// Probability of applying an error after two-qubit gates p2: f64, /// Random number generator - rng: NoiseRng, + rng: NoiseRng, } impl ProbabilityValidator for BiasedDepolarizingNoiseModel {} @@ -90,7 +90,7 @@ impl BiasedDepolarizingNoiseModel { p_meas_1, p1, p2, - rng: NoiseRng::new(), + rng: 
NoiseRng::default(), } } @@ -152,7 +152,7 @@ impl BiasedDepolarizingNoiseModel { } /// Apply noise to a list of quantum gates - fn apply_noise_to_gates(&self, gates: &[QuantumGate]) -> ByteMessage { + fn apply_noise_to_gates(&mut self, gates: &[QuantumGate]) -> ByteMessage { let mut builder = NoiseUtils::create_quantum_builder(); for gate in gates { @@ -198,7 +198,7 @@ impl BiasedDepolarizingNoiseModel { /// /// # Returns /// The potentially biased measurement outcome - fn apply_bias_to_measurement(&self, result_id: u32, outcome: u32) -> (u32, u32) { + fn apply_bias_to_measurement(&mut self, result_id: u32, outcome: u32) -> (u32, u32) { // Generate a random number to determine if we should flip let should_flip = if outcome == 0 { // Flip from 0 to 1 with probability p_meas_0 @@ -227,7 +227,7 @@ impl BiasedDepolarizingNoiseModel { /// /// # Errors /// Returns a `QueueError` if applying bias fails - fn apply_bias_to_message(&self, message: ByteMessage) -> Result { + fn apply_bias_to_message(&mut self, message: ByteMessage) -> Result { // Parse the message to extract the measurement results let measurements = message.parse_measurements()?; @@ -252,14 +252,14 @@ impl BiasedDepolarizingNoiseModel { )) } - fn apply_prep_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_prep_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p_prep) { trace!("Applying prep fault on qubits {:?}", gate.qubits); NoiseUtils::apply_x(builder, gate.qubits[0]); } } - fn apply_sq_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_sq_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p1) { let fault_type = self.rng.random_int(0..3); let qubit = gate.qubits[0]; @@ -281,7 +281,7 @@ impl BiasedDepolarizingNoiseModel { } } - fn apply_tq_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_tq_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p2) { let fault_type = self.rng.random_int(0..15); let qubit0 = gate.qubits[0]; @@ -430,15 +430,16 @@ impl RngManageable for BiasedDepolarizingNoiseModel { type Rng = ChaCha8Rng; fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng.set_rng(rng) + self.rng = NoiseRng::new(rng); + Ok(()) } fn rng(&self) -> &Self::Rng { - self.rng.rng() + self.rng.inner() } fn rng_mut(&mut self) -> &mut Self::Rng { - self.rng.rng_mut() + self.rng.inner_mut() } } diff --git a/crates/pecos-engines/src/engines/noise/biased_measurement.rs b/crates/pecos-engines/src/engines/noise/biased_measurement.rs index c8c0ef418..71bb355cc 100644 --- a/crates/pecos-engines/src/engines/noise/biased_measurement.rs +++ b/crates/pecos-engines/src/engines/noise/biased_measurement.rs @@ -47,7 +47,7 @@ pub struct BiasedMeasurementNoiseModel { /// The probability of flipping a 1 measurement to 0 prob_flip_from_1: f64, /// Random number generator - rng: NoiseRng, + rng: NoiseRng, } impl ProbabilityValidator for BiasedMeasurementNoiseModel {} @@ -70,7 +70,7 @@ impl BiasedMeasurementNoiseModel { Self { prob_flip_from_0, prob_flip_from_1, - rng: NoiseRng::new(), + rng: NoiseRng::default(), } } @@ -122,7 +122,7 @@ impl BiasedMeasurementNoiseModel { /// /// # Returns /// The potentially biased measurement outcome - fn apply_bias_to_measurement(&self, result_id: u32, outcome: u32) -> (u32, u32) { + fn apply_bias_to_measurement(&mut self, result_id: u32, outcome: u32) -> (u32, u32) { // Generate a 
random number to determine if we should flip let should_flip = if outcome == 0 { // Flip from 0 to 1 with probability prob_flip_from_0 @@ -151,7 +151,7 @@ impl BiasedMeasurementNoiseModel { /// /// # Errors /// Returns a `QueueError` if applying bias fails - fn apply_bias_to_message(&self, message: ByteMessage) -> Result { + fn apply_bias_to_message(&mut self, message: ByteMessage) -> Result { // Parse the message to extract the measurement results let measurements = message.parse_measurements()?; @@ -297,17 +297,16 @@ impl RngManageable for BiasedMeasurementNoiseModel { type Rng = ChaCha8Rng; fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng.set_rng(rng) + self.rng = NoiseRng::new(rng); + Ok(()) } fn rng(&self) -> &Self::Rng { - // Delegate to the NoiseRng implementation - self.rng.rng() + self.rng.inner() } fn rng_mut(&mut self) -> &mut Self::Rng { - // Delegate to the NoiseRng implementation - self.rng.rng_mut() + self.rng.inner_mut() } } @@ -318,19 +317,19 @@ mod tests { #[test] fn test_builder_pattern() { // Create with builder - let noise1 = BiasedMeasurementNoiseModel::builder() + let mut noise1 = BiasedMeasurementNoiseModel::builder() .with_prob_flip_from_0(0.1) .with_prob_flip_from_1(0.2) .with_seed(42) .build(); // Create directly - let noise2 = BiasedMeasurementNoiseModel::with_seed(0.1, 0.2, 42); + let mut noise2 = BiasedMeasurementNoiseModel::with_seed(0.1, 0.2, 42); // Verify the builder works by checking they produce the same randomness sequence let noise1_ref = noise1 - .as_any() - .downcast_ref::() + .as_any_mut() + .downcast_mut::() .unwrap(); for _ in 0..10 { diff --git a/crates/pecos-engines/src/engines/noise/depolarizing.rs b/crates/pecos-engines/src/engines/noise/depolarizing.rs index 6ed454ef8..62e659fe1 100644 --- a/crates/pecos-engines/src/engines/noise/depolarizing.rs +++ b/crates/pecos-engines/src/engines/noise/depolarizing.rs @@ -62,7 +62,7 @@ pub struct DepolarizingNoiseModel { /// Probability of applying an error after two-qubit gates p2: f64, /// Random number generator - rng: NoiseRng, + rng: NoiseRng, } impl ProbabilityValidator for DepolarizingNoiseModel {} @@ -82,7 +82,7 @@ impl DepolarizingNoiseModel { p_meas, p1, p2, - rng: NoiseRng::new(), + rng: NoiseRng::default(), } } @@ -123,7 +123,7 @@ impl DepolarizingNoiseModel { } /// Apply noise to a list of quantum gates - fn apply_noise_to_gates(&self, gates: &[QuantumGate]) -> ByteMessage { + fn apply_noise_to_gates(&mut self, gates: &[QuantumGate]) -> ByteMessage { let mut builder = NoiseUtils::create_quantum_builder(); for gate in gates { @@ -161,21 +161,21 @@ impl DepolarizingNoiseModel { builder.build() } - fn apply_prep_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_prep_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p_prep) { trace!("Applying prep fault on qubits {:?}", gate.qubits); NoiseUtils::apply_x(builder, gate.qubits[0]); } } - fn apply_meas_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_meas_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p_meas) { trace!("Applying meas fault on qubits {:?}", gate.qubits); NoiseUtils::apply_x(builder, gate.qubits[0]); } } - fn apply_sq_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_sq_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p1) { let fault_type = self.rng.random_int(0..3); let qubit 
= gate.qubits[0]; @@ -197,7 +197,7 @@ impl DepolarizingNoiseModel { } } - fn apply_tq_faults(&self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { + fn apply_tq_faults(&mut self, builder: &mut ByteMessageBuilder, gate: &QuantumGate) { if self.rng.occurs(self.p2) { let fault_type = self.rng.random_int(0..15); let qubit0 = gate.qubits[0]; @@ -307,15 +307,16 @@ impl RngManageable for DepolarizingNoiseModel { type Rng = ChaCha8Rng; fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng.set_rng(rng) + self.rng = NoiseRng::new(rng); + Ok(()) } fn rng(&self) -> &Self::Rng { - self.rng.rng() + self.rng.inner() } fn rng_mut(&mut self) -> &mut Self::Rng { - self.rng.rng_mut() + self.rng.inner_mut() } } diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index e56549417..dab677d76 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -79,8 +79,9 @@ use std::collections::HashMap; use std::collections::HashSet; use crate::byte_message::{ByteMessage, ByteMessageBuilder, QuantumGate, gate_type::GateType}; +use crate::engines::noise::noise_rng::NoiseRng; use crate::engines::noise::utils::NoiseUtils; -use crate::engines::noise::utils::{NoiseRng, ProbabilityValidator}; +use crate::engines::noise::utils::ProbabilityValidator; use crate::engines::noise::weighted_sampler::{ SingleQubitWeightedSampler, TwoQubitWeightedSampler, }; @@ -88,7 +89,6 @@ use crate::engines::noise::{NoiseModel, RngManageable}; use crate::engines::{ControlEngine, EngineStage}; use crate::errors::QueueError; use log::trace; -use rand::SeedableRng; use rand_chacha::ChaCha8Rng; /// General noise model implementation that includes parameterized error channels for various quantum operations @@ -253,7 +253,7 @@ pub struct GeneralNoiseModel { leaked_qubits: HashSet, /// Random number generator for stochastic noise processes - rng: NoiseRng, + rng: NoiseRng, /// Overall scaling factor for error probabilities /// @@ -439,16 +439,17 @@ impl NoiseModel for GeneralNoiseModel { impl RngManageable for GeneralNoiseModel { type Rng = ChaCha8Rng; - fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng.set_rng(rng) + fn set_rng(&mut self, rng: Self::Rng) -> Result<(), Box> { + self.rng = NoiseRng::new(rng); + Ok(()) } fn rng(&self) -> &Self::Rng { - self.rng.rng() + self.rng.inner() } fn rng_mut(&mut self) -> &mut Self::Rng { - self.rng.rng_mut() + self.rng.inner_mut() } } @@ -557,7 +558,7 @@ impl GeneralNoiseModel { przz_d: 1.0, przz_power: 1.0, leaked_qubits: HashSet::new(), - rng: NoiseRng::new(), + rng: NoiseRng::default(), scale: 1.0, memory_scale: 1.0, prep_scale: 1.0, @@ -914,7 +915,7 @@ impl GeneralNoiseModel { } else { add_original_gate = false; - let result = self.p1_emission_model.sample_gates(&self.rng, qubit); + let result = self.p1_emission_model.sample_gates(&mut self.rng, qubit); if result.has_leakage() { // Handle leakage @@ -929,7 +930,7 @@ impl GeneralNoiseModel { } } else if !has_leakage { // Pauli noise - let result = self.p1_pauli_model.sample_gates(&self.rng, qubit); + let result = self.p1_pauli_model.sample_gates(&mut self.rng, qubit); if let Some(gate) = result.gate { noise.push(gate); trace!("Applied Pauli error to qubit {}", qubit); @@ -990,9 +991,9 @@ impl GeneralNoiseModel { // Spontaneous emission noise add_original_gate = false; - let result = self - .p2_emission_model - .sample_gates(&self.rng, qubits[0], qubits[1]); + let result = + 
self.p2_emission_model + .sample_gates(&mut self.rng, qubits[0], qubits[1]); if result.has_leakage() { for (qubit, leaked) in qubits.iter().zip(result.has_leakages().iter()) { @@ -1013,9 +1014,9 @@ impl GeneralNoiseModel { } } else { // Pauli noise - let result = self - .p2_pauli_model - .sample_gates(&self.rng, qubits[0], qubits[1]); + let result = + self.p2_pauli_model + .sample_gates(&mut self.rng, qubits[0], qubits[1]); if let Some(gates) = result.gates { noise.extend(gates); trace!( @@ -1112,7 +1113,7 @@ impl GeneralNoiseModel { // TODO: If qubits are in |1>, leak them again with some probability. // Maybe move L -> |1> + noise to first round of noise... - // Get the biased measurement results message + // Get the biased measurement results results_builder.build() } @@ -1124,7 +1125,7 @@ impl GeneralNoiseModel { if self.leak2depolar { // Apply completely depolarizing noise instead of leakage trace!("Replaced leakage with Pauli error on qubit {}", qubit); - NoiseUtils::random_pauli_or_none(&self.rng, qubit) + self.rng.random_pauli_or_none(qubit) } else { // Mark qubit as leaked trace!("Marking qubit {} as leaked", qubit); @@ -1169,7 +1170,7 @@ impl GeneralNoiseModel { noise.push(gate); } - if let Some(gate) = NoiseUtils::random_pauli_or_none(&self.rng, qubit) { + if let Some(gate) = self.rng.random_pauli_or_none(qubit) { noise.push(gate); } @@ -1186,7 +1187,9 @@ impl GeneralNoiseModel { /// Reset the noise model for a new shot fn reset_noise_model(&mut self) { + // Clear leaked qubits self.leaked_qubits.clear(); + // RNG state is intentionally not reset to maintain natural randomness } /// Scale error probabilities based on scaling factors @@ -1893,12 +1896,6 @@ impl GeneralNoiseModel { self.p_crosstalk_prep_rescale = scale; } - /// Set the seed for the random number generator - pub fn set_seed(&mut self, seed: u64) -> Result<(), Box> { - let rng = ChaCha8Rng::seed_from_u64(seed); - self.set_rng(rng) - } - /// Accessor for the p1 Pauli distribution #[must_use] pub fn p1_pauli_model(&self) -> &SingleQubitWeightedSampler { @@ -1922,6 +1919,23 @@ impl GeneralNoiseModel { pub fn p2_emission_model(&self) -> &TwoQubitWeightedSampler { &self.p2_emission_model } + + /// Reset the noise model and then set a new seed for the RNG + /// + /// This is a convenience method that combines calling `reset_noise_model()` + /// followed by `set_seed()` in a single call. + /// + /// # Parameters + /// * `seed` - The seed to set for the RNG + /// + /// # Returns + /// Result indicating success or failure + pub fn reset_with_seed(&mut self, seed: u64) -> Result<(), Box> { + // First reset the noise model + self.reset_noise_model(); + // Then set the seed + self.set_seed(seed) + } } /// Builder for creating general noise models diff --git a/crates/pecos-engines/src/engines/noise/noise_rng.rs b/crates/pecos-engines/src/engines/noise/noise_rng.rs new file mode 100644 index 000000000..5490e0423 --- /dev/null +++ b/crates/pecos-engines/src/engines/noise/noise_rng.rs @@ -0,0 +1,411 @@ +// Copyright 2025 The PECOS Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License.You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. 
See the License for the specific language governing permissions and limitations under +// the License. + +//! Random number generator wrapper for noise models. +//! +//! This module provides a common interface for random number generation +//! in noise models through the `NoiseRng` wrapper. + +use rand::prelude::Distribution; +use rand::{Rng, SeedableRng}; +use rand_chacha::ChaCha8Rng; +use std::ops::Range; + +use crate::byte_message::QuantumGate; + +/// Wrapper for random number generator used by noise models +/// +/// Provides a common interface to random number generator functionality +/// for all noise models. +#[derive(Debug, Clone)] +pub struct NoiseRng { + rng: R, +} + +impl NoiseRng { + /// Create a new `NoiseRng` with the given RNG + pub fn new(rng: R) -> Self { + Self { rng } + } + + /// Create a new `NoiseRng` with a seeded `ChaCha8Rng` + #[must_use] + pub fn with_seed(seed: u64) -> Self + where + R: SeedableRng, + { + Self { + rng: R::seed_from_u64(seed), + } + } + + /// Generate a random float in the range [0, 1) + pub fn random_float(&mut self) -> f64 { + self.rng.random::() + } + + /// Determines if an event occurs with the given probability + /// + /// # Arguments + /// + /// * `probability` - The probability of the event occurring, in the range [0, 1] + /// + /// # Returns + /// + /// `true` if the event occurs, `false` otherwise + pub fn occurs(&mut self, probability: f64) -> bool { + debug_assert!((0.0..=1.0).contains(&probability)); + self.rng.random_bool(probability) + } + + /// Generate a random integer in the given range + pub fn random_int(&mut self, range: Range) -> usize { + self.rng.random_range(range) + } + + /// Sample a value from any distribution + /// + /// # Arguments + /// + /// * `distribution` - The distribution to sample from + /// + /// # Returns + /// + /// A value sampled from the distribution + pub fn sample>(&mut self, distribution: &D) -> T { + distribution.sample(&mut self.rng) + } + + /// Sample from a weighted distribution + /// + /// # Arguments + /// + /// * `distribution` - The weighted distribution to sample from + /// + /// # Returns + /// + /// The index of the sampled item + pub fn sample_from_distribution(&mut self, distribution: &D) -> T + where + D: Distribution, + { + self.sample(distribution) + } + + /// Generate a random u32 in the given range + pub fn random_u32(&mut self, range: Range) -> u32 { + self.rng.random_range(range) + } + + /// Get a reference to the inner RNG + pub fn inner(&self) -> &R { + &self.rng + } + + /// Get a mutable reference to the inner RNG + pub fn inner_mut(&mut self) -> &mut R { + &mut self.rng + } + + /// Generate a random Pauli gate (X, Y, Z) or none with equal probability + /// + /// # Arguments + /// + /// * `qubit` - The qubit to apply the Pauli gate to + /// + /// # Returns + /// + /// A `QuantumGate` representing the Pauli operation, or `None` if no operation + pub fn random_pauli_or_none(&mut self, qubit: usize) -> Option { + // Generate a random int from 0 to 3 + // 0: No operation (identity) + // 1: X gate + // 2: Y gate + // 3: Z gate + match self.random_int(0..4) { + 0 => None, + 1 => Some(QuantumGate::x(qubit)), + 2 => Some(QuantumGate::y(qubit)), + 3 => Some(QuantumGate::z(qubit)), + _ => unreachable!(), + } + } +} + +impl Default for NoiseRng +where + R: SeedableRng, +{ + fn default() -> Self { + // Using from_entropy() to seed the RNG from the OS + Self { + rng: R::try_from_os_rng().expect("Failed to create RNG from OS entropy"), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + 
use rand::distr::Uniform; + use rand::distr::weighted::WeightedIndex; + + const SAMPLE_SIZE: usize = 100; + // Epsilon for float comparisons + const FLOAT_EPSILON: f64 = f64::EPSILON; + + // Helper function to compare floats with an epsilon + fn float_eq(a: f64, b: f64) -> bool { + (a - b).abs() < FLOAT_EPSILON + } + + #[test] + fn test_noise_rng_random_float() { + let mut rng = NoiseRng::::with_seed(42); + let value = rng.random_float(); + assert!((0.0..=1.0).contains(&value)); + + // Test with multiple calls to ensure we get different values + let values: Vec = (0..10).map(|_| rng.random_float()).collect(); + + // Don't use a HashSet for floats, instead check that at least some values are different + let mut all_same = true; + for i in 1..values.len() { + if (values[0] - values[i]).abs() > f64::EPSILON { + all_same = false; + break; + } + } + assert!(!all_same, "Random values should vary"); + } + + #[test] + fn test_noise_rng_occurs() { + let mut rng = NoiseRng::::with_seed(42); + + // With probability 0, should never occur + for _ in 0..100 { + assert!(!rng.occurs(0.0)); + } + + // With probability 1, should always occur + for _ in 0..100 { + assert!(rng.occurs(1.0)); + } + + // With probability 0.5, should occur roughly half the time + let occurs_count = (0..1000).filter(|_| rng.occurs(0.5)).count(); + assert!(occurs_count > 400 && occurs_count < 600); + } + + #[test] + fn test_noise_rng_random_int() { + let mut rng = NoiseRng::::with_seed(42); + + // Test with a range of 0..3 + for _ in 0..100 { + let value = rng.random_int(0..3); + assert!(value < 3); + } + + // Check distribution with a larger number of samples + let counts = (0..1000) + .map(|_| rng.random_int(0..3)) + .fold([0, 0, 0], |mut acc, val| { + acc[val] += 1; + acc + }); + + // Each value should appear roughly 1/3 of the time + for count in &counts { + assert!(*count > 250 && *count < 400); + } + } + + #[test] + fn test_random_pauli_or_none() { + let mut rng = NoiseRng::::with_seed(42); + + // Count occurrences of each gate type + let mut none_count = 0; + let mut x_count = 0; + let mut y_count = 0; + let mut z_count = 0; + + // Generate a large number of samples to test distribution + for _ in 0..1000 { + match rng.random_pauli_or_none(0) { + None => none_count += 1, + Some(gate) => match gate.gate_type { + crate::byte_message::GateType::X => x_count += 1, + crate::byte_message::GateType::Y => y_count += 1, + crate::byte_message::GateType::Z => z_count += 1, + _ => panic!("Unexpected gate type: {:?}", gate.gate_type), + }, + } + } + + // Each outcome should occur roughly 1/4 of the time (250 times) + // Allow a reasonable margin of error (±50) + assert!( + none_count > 200 && none_count < 300, + "None count: {none_count}" + ); + assert!(x_count > 200 && x_count < 300, "X count: {x_count}"); + assert!(y_count > 200 && y_count < 300, "Y count: {y_count}"); + assert!(z_count > 200 && z_count < 300, "Z count: {z_count}"); + } + + #[test] + fn test_seed_determinism_basic() { + // Test that the same seed produces the same sequence of random numbers + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + for _ in 0..SAMPLE_SIZE { + assert!( + float_eq(rng1.random_float(), rng2.random_float()), + "Random floats should be identical with same seed" + ); + } + } + + #[test] + fn test_seed_determinism_multiple_seeds() { + // Test multiple seed pairs to ensure determinism + let seed_pairs = [(42, 42), (123, 123), (999, 999), (0, 0)]; + + for (seed1, seed2) in seed_pairs { + let mut rng1 = 
NoiseRng::::with_seed(seed1); + let mut rng2 = NoiseRng::::with_seed(seed2); + + for _ in 0..SAMPLE_SIZE { + assert!( + float_eq(rng1.random_float(), rng2.random_float()), + "Random floats should be identical with seed pair ({seed1}, {seed2})" + ); + } + } + } + + #[test] + fn test_seed_determinism_different_seeds() { + // Test that different seeds produce different sequences + let seed_pairs = [(42, 43), (123, 124), (999, 1000), (0, 1)]; + + for (seed1, seed2) in seed_pairs { + let mut rng1 = NoiseRng::::with_seed(seed1); + let mut rng2 = NoiseRng::::with_seed(seed2); + + let mut found_difference = false; + for _ in 0..SAMPLE_SIZE { + if !float_eq(rng1.random_float(), rng2.random_float()) { + found_difference = true; + break; + } + } + assert!( + found_difference, + "Different seeds ({seed1}, {seed2}) should produce different sequences" + ); + } + } + + #[test] + fn test_seed_determinism_reset() { + // Test that resetting with the same seed produces the same sequence + let seed = 42; + let mut rng = NoiseRng::::with_seed(seed); + + // First sequence + let results1: Vec = (0..SAMPLE_SIZE).map(|_| rng.random_float()).collect(); + + // Reset and get second sequence + rng = NoiseRng::::with_seed(seed); + let results2: Vec = (0..SAMPLE_SIZE).map(|_| rng.random_float()).collect(); + + // Compare the floats with epsilon tolerance + for i in 0..results1.len() { + assert!( + float_eq(results1[i], results2[i]), + "Random sequences should be identical after reset with same seed" + ); + } + } + + #[test] + fn test_seed_determinism_distribution() { + // Test that the same seed produces the same sequence for different distributions + let seed = 42; + let mut rng1 = NoiseRng::::with_seed(seed); + let mut rng2 = NoiseRng::::with_seed(seed); + + // Test uniform distribution + let uniform = Uniform::new(0.0, 1.0).unwrap(); + for _ in 0..SAMPLE_SIZE { + let sample1 = rng1.sample(&uniform); + let sample2 = rng2.sample(&uniform); + assert!( + float_eq(sample1, sample2), + "Uniform distribution samples should be identical with same seed" + ); + } + + // Reset RNGs + rng1 = NoiseRng::::with_seed(seed); + rng2 = NoiseRng::::with_seed(seed); + + // Test weighted index distribution + let weights = vec![0.3, 0.7]; + let weighted = WeightedIndex::new(&weights).unwrap(); + for _ in 0..SAMPLE_SIZE { + assert_eq!( + rng1.sample(&weighted), + rng2.sample(&weighted), + "Weighted distribution samples should be identical with same seed" + ); + } + } + + #[test] + fn test_seed_determinism_interleaved() { + // Test that interleaved operations maintain determinism + let seed = 42; + let mut rng1 = NoiseRng::::with_seed(seed); + let mut rng2 = NoiseRng::::with_seed(seed); + + let uniform = Uniform::new(0.0, 1.0).unwrap(); + let weights = vec![0.3, 0.7]; + let weighted = WeightedIndex::new(&weights).unwrap(); + + for _ in 0..SAMPLE_SIZE { + // Interleave different types of random operations + assert!( + float_eq(rng1.random_float(), rng2.random_float()), + "Random floats should be identical" + ); + + let sample1 = rng1.sample(&uniform); + let sample2 = rng2.sample(&uniform); + assert!( + float_eq(sample1, sample2), + "Uniform samples should be identical" + ); + + assert_eq!( + rng1.sample(&weighted), + rng2.sample(&weighted), + "Weighted samples should be identical" + ); + } + } +} diff --git a/crates/pecos-engines/src/engines/noise/utils.rs b/crates/pecos-engines/src/engines/noise/utils.rs index 8c5a3c7dc..9a89471c8 100644 --- a/crates/pecos-engines/src/engines/noise/utils.rs +++ 
b/crates/pecos-engines/src/engines/noise/utils.rs @@ -19,173 +19,6 @@ #![allow(clippy::missing_panics_doc)] use crate::byte_message::{ByteMessage, ByteMessageBuilder, QuantumGate}; -use crate::errors::QueueError; -use pecos_core::RngManageable; -use rand::Rng; -use rand::SeedableRng; -use rand::distr::weighted::WeightedIndex; -use rand::prelude::Distribution; -use rand_chacha::ChaCha8Rng; -use std::ops::Range; -use std::sync::{Arc, Mutex, MutexGuard}; - -/// A thread-safe wrapper for random number generators used in noise models -/// -/// This struct encapsulates the common pattern of using an Arc> -/// for thread-safe access to the random number generator across all noise models. -/// -/// It provides methods for common RNG operations and implements the `RngManageable` trait. -#[derive(Clone, Debug)] -pub struct NoiseRng { - rng: Arc>, -} - -impl NoiseRng { - /// Create a new `NoiseRng` with a random seed - #[must_use] - pub fn new() -> Self { - Self { - rng: Arc::new(Mutex::new(ChaCha8Rng::from_os_rng())), - } - } - - /// Create a new `NoiseRng` with a specific seed - #[must_use] - pub fn with_seed(seed: u64) -> Self { - Self { - rng: Arc::new(Mutex::new(ChaCha8Rng::seed_from_u64(seed))), - } - } - - pub fn get_guard(&self) -> MutexGuard<'_, ChaCha8Rng> { - self.rng - .lock() - .expect("Failed to lock RNG mutex in sample_from_distribution") - } - - /// Generate a random float between 0.0 and 1.0 - /// - /// # Returns - /// A random f64 value between 0.0 and 1.0 - /// - /// # Panics - /// Panics if the mutex is poisoned - #[must_use] - pub fn random_float(&self) -> f64 { - let mut rng = self.rng.lock().unwrap(); - rng.random::() - } - - /// Check if an event should occur with the given probability - /// - /// # Arguments - /// * `probability` - The probability of the event occurring (between 0.0 and 1.0) - /// - /// # Returns - /// true if the event should occur, false otherwise - /// - /// # Panics - /// Panics if the mutex is poisoned - #[must_use] - pub fn occurs(&self, probability: f64) -> bool { - self.random_float() < probability - } - - /// Generate a random integer in the given range - /// - /// # Arguments - /// * `range` - The range of values to choose from (inclusive start, exclusive end) - /// - /// # Returns - /// A random integer in the specified range - /// - /// # Panics - /// Panics if the mutex is poisoned - #[must_use] - pub fn random_int(&self, range: Range) -> usize { - let mut rng = self.rng.lock().unwrap(); - rng.random_range(range) - } - - /// Sample from a precomputed `WeightedIndex` distribution with f64 weights - /// - /// # Arguments - /// * `distribution` - A precomputed `WeightedIndex` distribution with f64 weights - /// - /// # Returns - /// A random index selected according to the weights - /// - /// # Panics - /// Panics if the mutex is poisoned, with a descriptive error message - #[must_use] - pub fn sample_from_distribution(&self, distribution: &WeightedIndex) -> usize { - let mut rng = self - .rng - .lock() - .expect("Failed to lock RNG mutex in sample_from_distribution"); - distribution.sample(&mut *rng) - } - - /// Set the seed for the random number generator - /// - /// This is a convenience method that wraps `RngManageable::set_seed` but returns - /// a `QueueError` instead of `Box` for backward compatibility. 
- /// - /// # Arguments - /// * `seed` - The seed value - /// - /// # Returns - /// `Ok(())` if successful - /// - /// # Panics - /// Panics if the mutex is poisoned - pub fn set_seed(&mut self, seed: u64) -> Result<(), QueueError> { - // This implementation directly sets the RNG rather than using RngManageable::set_seed - // to avoid unwrapping the Arc> which would cause thread-safety issues - let new_rng = ChaCha8Rng::seed_from_u64(seed); - self.rng = Arc::new(Mutex::new(new_rng)); - Ok(()) - } - - /// Generate a random u32 in the given range - /// - /// # Arguments - /// * `range` - The range of values to choose from (inclusive start, exclusive end) - /// - /// # Returns - /// A random u32 in the specified range - /// - /// # Panics - /// Panics if the mutex is poisoned - #[must_use] - pub fn random_u32(&self, range: Range) -> u32 { - let mut rng = self.rng.lock().unwrap(); - rng.random_range(range) - } -} - -impl Default for NoiseRng { - fn default() -> Self { - Self::new() - } -} - -impl RngManageable for NoiseRng { - type Rng = ChaCha8Rng; - - fn set_rng(&mut self, rng: ChaCha8Rng) -> Result<(), Box> { - self.rng = Arc::new(Mutex::new(rng)); - Ok(()) - } - - fn rng(&self) -> &Self::Rng { - panic!("NoiseRng uses Arc> and cannot provide a direct reference") - } - - fn rng_mut(&mut self) -> &mut Self::Rng { - panic!("NoiseRng uses Arc> and cannot provide a direct mutable reference") - } -} /// Helper trait for validating probability values pub trait ProbabilityValidator { @@ -535,105 +368,18 @@ impl NoiseUtils { builder.add_prep(&[qubit]); builder.add_x(&[qubit]); } - - /// Randomly selects a single-qubit Pauli gate (X, Y, Z) or no gate (Identity) with equal probability - /// - /// # Arguments - /// * `rng` - The random number generator to use for sampling - /// * `qubit` - The target qubit for the gate - /// - /// # Returns - /// An `Option` which may contain a Pauli gate (X, Y, Z) or None (representing identity) - /// - /// Each of the four outcomes (X, Y, Z, Identity) has a 25% probability. 
- #[must_use] - pub fn random_pauli_or_none(rng: &NoiseRng, qubit: usize) -> Option { - // Generate a random number between 0 and 3 - let choice = rng.random_int(0..4); - - match choice { - 0 => Some(QuantumGate::x(qubit)), - 1 => Some(QuantumGate::y(qubit)), - 2 => Some(QuantumGate::z(qubit)), - _ => None, // Identity: no gate applied - } - } } #[cfg(test)] mod tests { use super::*; use crate::byte_message::GateType; + use crate::engines::noise::noise_rng::NoiseRng; use crate::engines::noise::weighted_sampler::SingleQubitWeightedSampler; + use rand_chacha::ChaCha8Rng; use std::collections::HashMap; use std::panic::{AssertUnwindSafe, catch_unwind}; - // Constants used in multiple tests - const SAMPLE_SIZE: usize = 10000; - - #[test] - fn test_noise_rng_random_float() { - let rng = NoiseRng::with_seed(42); - let value = rng.random_float(); - assert!((0.0..=1.0).contains(&value)); - - // Test with multiple calls to ensure we get different values - let values: Vec = (0..10).map(|_| rng.random_float()).collect(); - - // Don't use a HashSet for floats, instead check that at least some values are different - let mut all_same = true; - for i in 1..values.len() { - if (values[0] - values[i]).abs() > f64::EPSILON { - all_same = false; - break; - } - } - assert!(!all_same, "Random values should vary"); - } - - #[test] - fn test_noise_rng_occurs() { - let rng = NoiseRng::with_seed(42); - - // With probability 0, should never occur - for _ in 0..100 { - assert!(!rng.occurs(0.0)); - } - - // With probability 1, should always occur - for _ in 0..100 { - assert!(rng.occurs(1.0)); - } - - // With probability 0.5, should occur roughly half the time - let occurs_count = (0..1000).filter(|_| rng.occurs(0.5)).count(); - assert!(occurs_count > 400 && occurs_count < 600); - } - - #[test] - fn test_noise_rng_random_int() { - let rng = NoiseRng::with_seed(42); - - // Test with a range of 0..3 - for _ in 0..100 { - let value = rng.random_int(0..3); - assert!(value < 3); - } - - // Check distribution with a larger number of samples - let counts = (0..1000) - .map(|_| rng.random_int(0..3)) - .fold([0, 0, 0], |mut acc, val| { - acc[val] += 1; - acc - }); - - // Each value should appear roughly 1/3 of the time - for count in &counts { - assert!(*count > 250 && *count < 400); - } - } - #[test] fn test_noise_utils_create_quantum_builder() { let mut builder = NoiseUtils::create_quantum_builder(); @@ -700,7 +446,7 @@ mod tests { #[test] fn test_sample_paulis() { - let rng = NoiseRng::with_seed(42); + let mut rng = NoiseRng::::with_seed(42); // Test with a valid model // Note: Weights must sum to exactly 1.0 to pass the strict normalization check @@ -731,7 +477,7 @@ mod tests { for _ in 0..1000 { // Use the sampler to generate quantum gates based on the weighted probabilities - let result = sampler.sample_gates(&rng, 0); + let result = sampler.sample_gates(&mut rng, 0); // Only check gates (no leakage in this test) match result.gate { @@ -782,56 +528,6 @@ mod tests { assert!(result.is_err(), "Should panic for invalid keys"); } - #[test] - fn test_random_pauli_or_none() { - use crate::byte_message::GateType; - - // Define margin for tests - let margin = SAMPLE_SIZE / 20; // Allow 5% margin of error - let expected = SAMPLE_SIZE / 4; // With equal 25% probability - - let rng = NoiseRng::with_seed(42); - - // Sample many times to check the distribution - let mut x_count = 0; - let mut y_count = 0; - let mut z_count = 0; - let mut none_count = 0; - - for _ in 0..SAMPLE_SIZE { - match NoiseUtils::random_pauli_or_none(&rng, 0) { 
- Some(gate) => match gate.gate_type { - GateType::X => x_count += 1, - GateType::Y => y_count += 1, - GateType::Z => z_count += 1, - _ => panic!("Unexpected gate type"), - }, - None => none_count += 1, - } - } - - // Calculate absolute difference without using .abs() - assert!( - x_count.max(expected) - x_count.min(expected) < margin, - "X count {x_count} deviates too much from expected {expected}" - ); - assert!( - y_count.max(expected) - y_count.min(expected) < margin, - "Y count {y_count} deviates too much from expected {expected}" - ); - assert!( - z_count.max(expected) - z_count.min(expected) < margin, - "Z count {z_count} deviates too much from expected {expected}" - ); - assert!( - none_count.max(expected) - none_count.min(expected) < margin, - "None count {none_count} deviates too much from expected {expected}" - ); - - // Verify the sum is correct - assert_eq!(x_count + y_count + z_count + none_count, SAMPLE_SIZE); - } - #[test] #[allow( clippy::cast_possible_truncation, @@ -846,7 +542,7 @@ mod tests { // Define constants at the beginning const SAMPLE_SIZE: usize = 10000; - let rng = NoiseRng::with_seed(42); + let mut rng = NoiseRng::::with_seed(42); // Test with a valid model including leakage // Note: Weights must sum to exactly 1.0 to pass the strict normalization check @@ -879,7 +575,7 @@ mod tests { for _ in 0..SAMPLE_SIZE { // Sample gates and check for both gate operations and leakage - let result = sampler.sample_gates(&rng, 0); + let result = sampler.sample_gates(&mut rng, 0); if result.qubit_leaked { leakage_count += 1; diff --git a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs index f3a27f2d2..45feb3fd5 100644 --- a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs +++ b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs @@ -1,8 +1,21 @@ +// Copyright 2025 The PECOS Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except +// in compliance with the License.You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software distributed under the License +// is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express +// or implied. See the License for the specific language governing permissions and limitations under +// the License. 
+ +use std::collections::HashMap; + use crate::byte_message::QuantumGate; -use crate::engines::noise::NoiseRng; +use crate::engines::noise::noise_rng::NoiseRng; use crate::engines::noise::utils::{SingleQubitNoiseResult, TwoQubitNoiseResult}; use rand::distr::weighted::WeightedIndex; -use std::collections::HashMap; /// Tolerance for weight normalization - total weights should be within this amount of 1.0 const NORMALIZATION_TOLERANCE: f64 = 1e-5; @@ -18,7 +31,9 @@ pub struct WeightedSampler { } impl WeightedSampler { - /// Create a new sampler from a `HashMap` with default tolerance + /// Create a new weighted sampler from a map of keys to weights + /// + /// The weights are normalized to sum to 1.0 with a default tolerance of 1e-10 /// /// # Panics /// - If the weighted map is empty @@ -30,13 +45,12 @@ impl WeightedSampler { Self::new_with_tolerance(weighted_map, NORMALIZATION_TOLERANCE) } - /// Create a new sampler with custom tolerance + /// Create a new weighted sampler with a specific tolerance for weight normalization /// /// # Panics /// - If the weighted map is empty /// - If the total weight is not positive /// - If the total weight deviates from 1.0 by more than the tolerance - /// - If the weighted index distribution cannot be created #[must_use] pub fn new_with_tolerance(weighted_map: &HashMap, tolerance: f64) -> Self { let (normalized_weighted_map, normalized_weights) = @@ -102,17 +116,15 @@ impl WeightedSampler { (normalized_map, normalized_weights) } - /// Sample from the weighted distribution and return the corresponding key + /// Sample a key from the distribution /// - /// # Arguments - /// * `rng` - Random number generator for sampling - /// - /// # Returns - /// A random key selected according to the weights + /// # Panics + /// - If the keys vector is empty (should never happen if constructed properly) + /// - If the distribution sampling fails #[must_use] - pub fn sample(&self, rng: &NoiseRng) -> K { - let idx = rng.sample_from_distribution(&self.distribution); - self.keys[idx].clone() + pub fn sample(&self, rng: &mut NoiseRng) -> K { + let index = rng.sample(&self.distribution); + self.keys[index].clone() } /// Get a reference to the normalized weighted map @@ -122,13 +134,14 @@ impl WeightedSampler { } } -/// Helper function to create a Pauli gate for a qubit +/// Create a Pauli gate based on the Pauli operator character fn create_pauli_gate(op: char, qubit: usize) -> Option { match op { 'X' => Some(QuantumGate::x(qubit)), 'Y' => Some(QuantumGate::y(qubit)), 'Z' => Some(QuantumGate::z(qubit)), - _ => None, + 'I' => None, // Identity - no operation + _ => panic!("Invalid Pauli operator '{op}'"), } } @@ -177,7 +190,7 @@ impl SingleQubitWeightedSampler { /// Sample a raw key from the distribution #[must_use] - pub fn sample_keys(&self, rng: &NoiseRng) -> String { + pub fn sample_keys(&self, rng: &mut NoiseRng) -> String { self.sampler.sample(rng) } @@ -186,7 +199,7 @@ impl SingleQubitWeightedSampler { /// # Panics /// - If the sampled key is invalid (this should never happen if the sampler was created properly) #[must_use] - pub fn sample_gates(&self, rng: &NoiseRng, qubit: usize) -> SingleQubitNoiseResult { + pub fn sample_gates(&self, rng: &mut NoiseRng, qubit: usize) -> SingleQubitNoiseResult { let key = self.sample_keys(rng); match key.as_str() { @@ -277,7 +290,8 @@ impl TwoQubitWeightedSampler { } /// Sample a raw key from the distribution - fn sample_keys(&self, rng: &NoiseRng) -> String { + #[must_use] + pub fn sample_keys(&self, rng: &mut NoiseRng) -> String { 
self.sampler.sample(rng) } @@ -288,7 +302,7 @@ impl TwoQubitWeightedSampler { #[must_use] pub fn sample_gates( &self, - rng: &NoiseRng, + rng: &mut NoiseRng, qubit0: usize, qubit1: usize, ) -> TwoQubitNoiseResult { @@ -329,3 +343,379 @@ impl TwoQubitWeightedSampler { TwoQubitNoiseResult::with_leakage(qubit0_leaked, qubit1_leaked, gates) } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::engines::noise::noise_rng::NoiseRng; + use rand_chacha::ChaCha8Rng; + use std::collections::HashMap; + + const SAMPLE_SIZE: usize = 100; + + #[test] + fn test_deterministic_sampling_basic() { + // Test basic deterministic sampling with same seed + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 0.3); + weights.insert("B".to_string(), 0.7); + + let sampler = WeightedSampler::new(&weights); + + // Create two RNGs with the same seed + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Sample from both RNGs + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng2)) + .collect(); + + // Verify exact sequence match + assert_eq!( + results1, results2, + "Sampling results should be identical with same seed" + ); + } + + #[test] + fn test_deterministic_sampling_multiple_seeds() { + // Test deterministic sampling with multiple different seeds + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 0.3); + weights.insert("B".to_string(), 0.7); + + let sampler = WeightedSampler::new(&weights); + + // Test multiple seed pairs + let seed_pairs = [(42, 42), (123, 123), (999, 999), (0, 0)]; + + for (seed1, seed2) in seed_pairs { + let mut rng1 = NoiseRng::::with_seed(seed1); + let mut rng2 = NoiseRng::::with_seed(seed2); + + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng2)) + .collect(); + + assert_eq!( + results1, results2, + "Sampling results should be identical with same seed pair ({seed1}, {seed2})" + ); + } + } + + #[test] + fn test_deterministic_sampling_different_seeds() { + // Test that different seeds produce different sequences + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 0.3); + weights.insert("B".to_string(), 0.7); + + let sampler = WeightedSampler::new(&weights); + + // Test multiple different seed pairs + let seed_pairs = [(42, 43), (123, 124), (999, 1000), (0, 1)]; + + for (seed1, seed2) in seed_pairs { + let mut rng1 = NoiseRng::::with_seed(seed1); + let mut rng2 = NoiseRng::::with_seed(seed2); + + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng2)) + .collect(); + + assert_ne!( + results1, results2, + "Sampling results should differ with different seed pair ({seed1}, {seed2})" + ); + } + } + + #[test] + fn test_deterministic_sampling_single_qubit() { + // Test deterministic sampling with single qubit sampler + let mut weights = HashMap::new(); + weights.insert("X".to_string(), 0.25); + weights.insert("Y".to_string(), 0.25); + weights.insert("Z".to_string(), 0.25); + weights.insert("L".to_string(), 0.25); + + let sampler = SingleQubitWeightedSampler::new(&weights); + + // Create two RNGs with the same seed + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Sample from both RNGs + let results1: Vec = (0..SAMPLE_SIZE) + 
.map(|_| sampler.sample_gates(&mut rng1, 0)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng2, 0)) + .collect(); + + // Verify exact sequence match + for (i, (r1, r2)) in results1.iter().zip(results2.iter()).enumerate() { + assert_eq!( + r1.qubit_leaked, r2.qubit_leaked, + "Leakage mismatch at index {i}" + ); + match (&r1.gate, &r2.gate) { + (Some(g1), Some(g2)) => assert_eq!( + g1.gate_type, g2.gate_type, + "Gate type mismatch at index {i}" + ), + (None, None) => (), + _ => panic!("Gate presence mismatch at index {i}"), + } + } + } + + #[test] + fn test_deterministic_sampling_two_qubit() { + // Test deterministic sampling with two qubit sampler + let mut weights = HashMap::new(); + weights.insert("XX".to_string(), 0.2); + weights.insert("YY".to_string(), 0.2); + weights.insert("ZZ".to_string(), 0.2); + weights.insert("XL".to_string(), 0.2); + weights.insert("LX".to_string(), 0.2); + + let sampler = TwoQubitWeightedSampler::new(&weights); + + // Create two RNGs with the same seed + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Sample from both RNGs + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng1, 0, 1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng2, 0, 1)) + .collect(); + + // Verify exact sequence match + for (i, (r1, r2)) in results1.iter().zip(results2.iter()).enumerate() { + assert_eq!( + r1.qubit0_leaked, r2.qubit0_leaked, + "Qubit 0 leakage mismatch at index {i}" + ); + assert_eq!( + r1.qubit1_leaked, r2.qubit1_leaked, + "Qubit 1 leakage mismatch at index {i}" + ); + match (&r1.gates, &r2.gates) { + (Some(g1), Some(g2)) => { + assert_eq!(g1.len(), g2.len(), "Gate count mismatch at index {i}"); + for (j, (gate1, gate2)) in g1.iter().zip(g2.iter()).enumerate() { + assert_eq!( + gate1.gate_type, gate2.gate_type, + "Gate type mismatch at index {i} for gate {j}" + ); + } + } + (None, None) => (), + _ => panic!("Gate presence mismatch at index {i}"), + } + } + } + + #[test] + fn test_deterministic_sampling_reset() { + // Test that resetting the RNG and using the same seed produces the same sequence + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 0.3); + weights.insert("B".to_string(), 0.7); + + let sampler = WeightedSampler::new(&weights); + let seed = 42; + + // First sequence + let mut rng = NoiseRng::::with_seed(seed); + let results1: Vec = (0..SAMPLE_SIZE).map(|_| sampler.sample(&mut rng)).collect(); + + // Reset RNG with same seed + rng = NoiseRng::::with_seed(seed); + let results2: Vec = (0..SAMPLE_SIZE).map(|_| sampler.sample(&mut rng)).collect(); + + // Verify exact sequence match + assert_eq!( + results1, results2, + "Sampling results should be identical after RNG reset with same seed" + ); + } + + #[test] + fn test_deterministic_sampling_consecutive() { + // Test that consecutive samples from the same RNG are deterministic + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 0.3); + weights.insert("B".to_string(), 0.7); + + let sampler = WeightedSampler::new(&weights); + let mut rng = NoiseRng::::with_seed(42); + + // Take two consecutive samples + let result1 = sampler.sample(&mut rng); + let result2 = sampler.sample(&mut rng); + + // Reset RNG and take the same two samples + rng = NoiseRng::::with_seed(42); + let result3 = sampler.sample(&mut rng); + let result4 = sampler.sample(&mut rng); + + // Verify the sequences match + assert_eq!(result1, result3, "First sample 
should be deterministic"); + assert_eq!(result2, result4, "Second sample should be deterministic"); + } + + #[test] + fn test_deterministic_sampling_interleaved() { + // Test that interleaved sampling from different samplers is deterministic + let mut weights1 = HashMap::new(); + weights1.insert("A".to_string(), 0.3); + weights1.insert("B".to_string(), 0.7); + + let mut weights2 = HashMap::new(); + weights2.insert("X".to_string(), 0.4); + weights2.insert("Y".to_string(), 0.6); + + let sampler1 = WeightedSampler::new(&weights1); + let sampler2 = WeightedSampler::new(&weights2); + + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Interleaved sampling + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| { + if rng1.random_float() < 0.5 { + sampler1.sample(&mut rng1) + } else { + sampler2.sample(&mut rng2) + } + }) + .collect(); + + // Reset RNGs and repeat + rng1 = NoiseRng::::with_seed(42); + rng2 = NoiseRng::::with_seed(42); + + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| { + if rng1.random_float() < 0.5 { + sampler1.sample(&mut rng1) + } else { + sampler2.sample(&mut rng2) + } + }) + .collect(); + + assert_eq!( + results1, results2, + "Interleaved sampling should be deterministic" + ); + } + + #[test] + fn test_deterministic_sampling_edge_cases() { + // Test edge cases for sampling + let mut weights = HashMap::new(); + weights.insert("A".to_string(), 1.0); // Single outcome with probability 1.0 + + let sampler = WeightedSampler::new(&weights); + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Should always get "A" regardless of RNG state + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample(&mut rng2)) + .collect(); + + assert_eq!( + results1, results2, + "Sampling should be deterministic even with single outcome" + ); + assert!( + results1.iter().all(|x| x == "A"), + "All results should be 'A'" + ); + } + + #[test] + fn test_deterministic_sampling_single_qubit_edge_cases() { + // Test edge cases for single qubit sampling + let mut weights = HashMap::new(); + weights.insert("L".to_string(), 1.0); // Always leak + + let sampler = SingleQubitWeightedSampler::new(&weights); + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng1, 0)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng2, 0)) + .collect(); + + // Verify exact sequence match + for (i, (r1, r2)) in results1.iter().zip(results2.iter()).enumerate() { + assert_eq!( + r1.qubit_leaked, r2.qubit_leaked, + "Leakage mismatch at index {i}" + ); + assert!(r1.qubit_leaked, "All results should indicate leakage"); + assert!(r1.gate.is_none(), "No gates should be present"); + } + } + + #[test] + fn test_deterministic_sampling_two_qubit_edge_cases() { + // Test edge cases for two qubit sampling + let mut weights = HashMap::new(); + weights.insert("LL".to_string(), 1.0); // Always leak both qubits + + let sampler = TwoQubitWeightedSampler::new(&weights); + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng1, 0, 1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler.sample_gates(&mut rng2, 0, 1)) + .collect(); + + // Verify exact sequence match + for (i, (r1, r2)) in 
results1.iter().zip(results2.iter()).enumerate() { + assert_eq!( + r1.qubit0_leaked, r2.qubit0_leaked, + "Qubit 0 leakage mismatch at index {i}" + ); + assert_eq!( + r1.qubit1_leaked, r2.qubit1_leaked, + "Qubit 1 leakage mismatch at index {i}" + ); + assert!( + r1.qubit0_leaked && r1.qubit1_leaked, + "Both qubits should leak" + ); + assert!(r1.gates.is_none(), "No gates should be present"); + } + } +} diff --git a/crates/pecos-engines/tests/noise_determinism.rs b/crates/pecos-engines/tests/noise_determinism.rs new file mode 100644 index 000000000..cb0a90f9f --- /dev/null +++ b/crates/pecos-engines/tests/noise_determinism.rs @@ -0,0 +1,290 @@ +use log::info; +use pecos_engines::{ + byte_message::ByteMessage, + engines::ControlEngine, + engines::noise::{NoiseModel, general::GeneralNoiseModel}, +}; +use std::collections::HashMap; + +/// Reset a noise model and set its seed in one operation +/// +/// This function works with boxed noise models and takes care of +/// downcasting to `GeneralNoiseModel` to use the `reset_with_seed` method. +fn reset_model_with_seed( + model: &mut Box, + seed: u64, +) -> Result<(), Box> { + let general_noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); + general_noise.reset_with_seed(seed) +} + +fn create_noise_model() -> Box { + info!("Creating noise model with moderate error rates"); + // Create a noise model with moderate error rates + let mut model = GeneralNoiseModel::new(0.1, 0.1, 0.1, 0.1, 0.1); + + // Set single-qubit error rates with uniform distribution + let mut single_qubit_weights = HashMap::new(); + single_qubit_weights.insert("X".to_string(), 0.25); + single_qubit_weights.insert("Y".to_string(), 0.25); + single_qubit_weights.insert("Z".to_string(), 0.25); + single_qubit_weights.insert("L".to_string(), 0.25); + info!("Setting single-qubit Pauli model"); + model.set_p1_pauli_model(&single_qubit_weights); + + // Set two-qubit error rates with uniform distribution + let mut two_qubit_weights = HashMap::new(); + two_qubit_weights.insert("XX".to_string(), 0.2); + two_qubit_weights.insert("YY".to_string(), 0.2); + two_qubit_weights.insert("ZZ".to_string(), 0.2); + two_qubit_weights.insert("XL".to_string(), 0.2); + two_qubit_weights.insert("LX".to_string(), 0.2); + info!("Setting two-qubit Pauli model"); + model.set_p2_pauli_model(&two_qubit_weights); + + // Set emission ratios to ensure errors are introduced + info!("Setting emission ratios"); + model.set_p1_emission_ratio(0.5); + model.set_p2_emission_ratio(0.5); + model.set_prep_leak_ratio(0.5); + + // Scale parameters before using the model + info!("Scaling parameters"); + model.scale_parameters(); + + // Reset the model to ensure clean state + info!("Resetting model"); + model.reset().unwrap(); + + Box::new(model) +} + +fn apply_noise(model: &mut Box, msg: &ByteMessage) -> ByteMessage { + info!("Applying noise to message"); + match model.start(msg.clone()).unwrap() { + pecos_engines::engines::EngineStage::NeedsProcessing(noisy_msg) => { + info!("Processing noisy message"); + match model.continue_processing(noisy_msg).unwrap() { + pecos_engines::engines::EngineStage::Complete(result) => result, + pecos_engines::engines::EngineStage::NeedsProcessing(_) => { + panic!("Expected Complete stage") + } + } + } + pecos_engines::engines::EngineStage::Complete(_) => { + panic!("Expected NeedsProcessing stage") + } + } +} + +fn compare_messages(msg1: &ByteMessage, msg2: &ByteMessage) -> bool { + let ops1 = msg1.parse_quantum_operations().unwrap_or_default(); + let ops2 = 
msg2.parse_quantum_operations().unwrap_or_default(); + ops1 == ops2 +} + +#[test] +fn test_prep_determinism() { + let seed = 42; + info!("Creating noise models with identical seeds"); + let mut model1 = create_noise_model(); + + // Apply noise to model1 + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Create a message with multiple prep gates + let mut builder = ByteMessage::quantum_operations_builder(); + for _ in 0..6 { + builder.add_prep(&[0]); + } + let msg = builder.build(); + + // Apply noise to the message + let noisy1 = apply_noise(&mut model1, &msg); + + // Reset model1 with the same seed for deterministic behavior + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Apply noise again to the message + let noisy2 = apply_noise(&mut model1, &msg); + + // Now these should be identical + info!("Comparing noisy1 and noisy2 - should be identical with same seed and model"); + assert!( + compare_messages(&noisy1, &noisy2), + "Messages should be identical with same seed and model" + ); + + // Now create a completely different model to verify we see different noise + info!("Creating a model with a different seed"); + let mut model3 = create_noise_model(); + reset_model_with_seed(&mut model3, seed + 1).unwrap(); // different seed + + // Apply noise with different model + let noisy3 = apply_noise(&mut model3, &msg); + + // These should be different + info!("Comparing noisy1 and noisy3 - should be different with different seeds"); + assert!( + !compare_messages(&noisy1, &noisy3), + "Different seeds should produce different messages" + ); +} + +#[test] +fn test_single_qubit_gate_determinism() { + let seed = 42; + info!("Creating noise model with seed"); + let mut model1 = create_noise_model(); + + // Apply noise to model1 + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Create a message with multiple single-qubit gates + let mut builder = ByteMessage::quantum_operations_builder(); + for _ in 0..10 { + // Repeat pattern to increase chance of errors + builder.add_h(&[0]); + builder.add_rz(0.5, &[0]); + builder.add_r1xy(0.5, 0.5, &[0]); + builder.add_h(&[1]); + builder.add_rz(0.5, &[1]); + } + let msg = builder.build(); + + // Apply noise the first time + info!("Applying noise first time"); + let noisy1 = apply_noise(&mut model1, &msg); + + // Reset model with the same seed for deterministic behavior + info!("Resetting model with same seed"); + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Apply noise again with the same model + info!("Applying noise second time"); + let noisy2 = apply_noise(&mut model1, &msg); + + // Verify determinism + info!("Comparing results - should be identical with same seed"); + assert!( + compare_messages(&noisy1, &noisy2), + "Results should be identical with same seed" + ); + + // Verify that we get some errors due to noise + info!("Comparing original and noisy messages"); + assert!( + !compare_messages(&msg, &noisy1), + "Original message should be different from noisy message" + ); +} + +#[test] +fn test_two_qubit_gate_determinism() { + let seed = 42; + info!("Creating noise models with identical seeds"); + let mut model1 = create_noise_model(); + + // Apply noise to model1 + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Create a message with many two-qubit gates to increase chance of errors + let mut builder = ByteMessage::quantum_operations_builder(); + for _ in 0..20 { + // Repeat pattern multiple times + builder.add_cx(&[0], &[1]); + builder.add_cx(&[1], &[2]); + builder.add_cx(&[2], &[3]); + builder.add_cx(&[3], 
&[0]); + } + let msg = builder.build(); + + // Apply noise to the message + let noisy1 = apply_noise(&mut model1, &msg); + + // Reset model1 with the same seed for deterministic behavior + reset_model_with_seed(&mut model1, seed).unwrap(); + + // Apply noise again to the message + let noisy2 = apply_noise(&mut model1, &msg); + + // Now these should be identical + info!("Comparing noisy1 and noisy2 - should be identical with same seed and model"); + assert!( + compare_messages(&noisy1, &noisy2), + "Messages should be identical with same seed and model" + ); + + // Verify that the message is actually being modified by the noise model + info!("Verifying that noise is being applied"); + assert!( + !compare_messages(&msg, &noisy1), + "Original message should be different from noisy message" + ); +} + +#[test] +fn test_measurement_determinism() { + let seed = 42; + let mut model1 = create_noise_model(); + let mut model2 = create_noise_model(); + + reset_model_with_seed(&mut model1, seed).unwrap(); + reset_model_with_seed(&mut model2, seed).unwrap(); + + // Create a message with measurements + let mut builder = ByteMessage::quantum_operations_builder(); + builder.add_h(&[0]); + builder.add_h(&[1]); + builder.add_cx(&[0], &[1]); + builder.add_measurements(&[0], &[0]); + builder.add_measurements(&[1], &[1]); + let msg = builder.build(); + + // Apply noise multiple times + let noisy1 = apply_noise(&mut model1, &msg); + + reset_model_with_seed(&mut model1, seed).unwrap(); + + let noisy2 = apply_noise(&mut model2, &msg); + + // Verify determinism in the quantum operations + assert!(compare_messages(&noisy1, &noisy2)); +} + +#[test] +fn test_different_seeds_produce_different_results() { + let seed1 = 42; + let seed2 = 43; // Different seed + let mut model1 = create_noise_model(); + let mut model2 = create_noise_model(); + + reset_model_with_seed(&mut model1, seed1).unwrap(); + reset_model_with_seed(&mut model2, seed2).unwrap(); + + // Create a larger circuit to increase the chance of errors + let mut builder = ByteMessage::quantum_operations_builder(); + for _ in 0..15 { + // Repeat pattern to create a longer circuit + builder.add_h(&[0]); + builder.add_cx(&[0], &[1]); + builder.add_h(&[1]); + builder.add_cx(&[1], &[2]); + builder.add_h(&[2]); + } + let msg = builder.build(); + + // Apply noise with different seeds + let noisy1 = apply_noise(&mut model1, &msg); + let noisy2 = apply_noise(&mut model2, &msg); + + // With different seeds, we expect different noise results + info!("Comparing outputs from different seeds - should be different"); + assert!( + !compare_messages(&noisy1, &noisy2), + "Different seeds should produce different noise patterns" + ); +} diff --git a/crates/pecos-engines/tests/noise_test.rs b/crates/pecos-engines/tests/noise_test.rs index 6f4b95132..56f360540 100644 --- a/crates/pecos-engines/tests/noise_test.rs +++ b/crates/pecos-engines/tests/noise_test.rs @@ -9,6 +9,7 @@ use pecos_engines::byte_message::gate_type::GateType; use pecos_engines::byte_message::{ByteMessage, ByteMessageBuilder}; +use pecos_engines::engines::noise::RngManageable; use pecos_engines::engines::noise::general::GeneralNoiseModel; use pecos_engines::engines::quantum::StateVecEngine; use pecos_engines::{Engine, QuantumSystem}; From 55cf7b5b39c1c7ffb037e5b19bcb5c2a463ce1f0 Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Wed, 7 May 2025 11:13:56 -0600 Subject: [PATCH 3/9] Clean up: remove pop0_prop and simplified crosstalk funcs to just placeholders --- .../src/engines/noise/general.rs | 260 
++---------------- 1 file changed, 16 insertions(+), 244 deletions(-) diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index dab677d76..4a11c8087 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -218,12 +218,6 @@ pub struct GeneralNoiseModel { /// states back to the computational subspace. seepage_prob: f64, - /// Probability that a seepage operation results in |0⟩ state (vs |1⟩) - /// - /// When a qubit returns from a leaked state to the computational subspace, this parameter - /// controls the probability that it ends up in state |0⟩ versus state |1⟩. - pop0_prob: f64, - /// Scaling parameters for RZZ gate error rate - coefficient a /// /// Part of a parameterized model for angle-dependent errors in RZZ gates. @@ -551,7 +545,6 @@ impl GeneralNoiseModel { p2_pauli_model: TwoQubitWeightedSampler::new(&p2_pauli_model), p2_emission_model: TwoQubitWeightedSampler::new(&p2_emission_model), seepage_prob: 0.5, - pop0_prob: 0.5, przz_a: 0.0, przz_b: 1.0, przz_c: 0.0, @@ -630,12 +623,6 @@ impl GeneralNoiseModel { self.seepage_prob = prob; } - /// Set the probability of preparing |0⟩ on seepage - pub fn set_pop0_prob(&mut self, prob: f64) { - Self::validate_probability(prob); - self.pop0_prob = prob; - } - /// Set RZZ parameter scaling for angle dependent error. /// /// The PECOS gate set has a parameterized-angle ZZ gate, RZZ(θ). For implementation @@ -915,7 +902,7 @@ impl GeneralNoiseModel { } else { add_original_gate = false; - let result = self.p1_emission_model.sample_gates(&mut self.rng, qubit); + let result = self.p1_emission_model.sample_gates(&mut self.rng, qubit); if result.has_leakage() { // Handle leakage @@ -1540,36 +1527,6 @@ impl GeneralNoiseModel { } } - /// Set crosstalk parameters - /// - /// # Parameters - /// * `p_crosstalk_meas` - Probability of crosstalk during measurement - /// * `p_crosstalk_prep` - Probability of crosstalk during initialization - /// * `per_gate` - Whether to apply crosstalk for each gate in a sequence - /// - /// # Panics - /// - /// Panics if either probability is less than 0.0 or greater than 1.0. 
- pub fn set_crosstalk_parameters( - &mut self, - p_crosstalk_meas: f64, - p_crosstalk_prep: f64, - per_gate: bool, - ) { - assert!( - (0.0..=1.0).contains(&p_crosstalk_meas), - "p_crosstalk_meas must be between 0 and 1" - ); - assert!( - (0.0..=1.0).contains(&p_crosstalk_prep), - "p_crosstalk_prep must be between 0 and 1" - ); - - self.p_crosstalk_meas = p_crosstalk_meas; - self.p_crosstalk_prep = p_crosstalk_prep; - self.crosstalk_per_gate = per_gate; - } - /// Apply idle qubit noise faults /// /// Models errors that occur during idle periods when qubits are not actively being manipulated: @@ -1622,186 +1579,23 @@ impl GeneralNoiseModel { } /// Create a new method to handle requesting nearby qubits for crosstalk - fn get_nearby_qubits_for_crosstalk(source_qubits: &[usize], num_qubits: usize) -> Vec { + #[allow(dead_code)] + fn get_nearby_qubits_for_crosstalk(_source_qubits: &[usize], _num_qubits: usize) -> Vec { // PLACEHOLDER: This will eventually request information from the ClassicalEngine // via the EngineSystem to get the nearest qubits based on device topology - - // For now, just simulate some nearby qubits - // In the future, this will be replaced with an actual request to the ClassicalEngine - let mut nearby = Vec::new(); - - // Simple placeholder that just adds nearby indices - // (this is just a temporary implementation) - for &q in source_qubits { - // Add "nearby" qubits that aren't in the source set - for offset in 1..=num_qubits { - if q > offset { - let candidate = q - offset; - if !source_qubits.contains(&candidate) && !nearby.contains(&candidate) { - nearby.push(candidate); - } - } - - let candidate = q + offset; - if !source_qubits.contains(&candidate) && !nearby.contains(&candidate) { - nearby.push(candidate); - } - - if nearby.len() >= num_qubits { - break; - } - } - - if nearby.len() >= num_qubits { - break; - } - } - - // Limit to requested number of qubits - nearby.truncate(num_qubits); - nearby + todo!() } // Replace the meas_crosstalk method to use the correct API - fn meas_crosstalk(&mut self, locations: &[usize], builder: &mut ByteMessageBuilder) { - // Get max qubit index from the set of locations to determine total qubits - let num_qubits = locations.iter().max().map_or(0, |&q| q + 1); - - // Get qubits that might be affected by crosstalk - let qubits = Self::get_nearby_qubits_for_crosstalk(locations, num_qubits); - - // Use a consistent result ID for temporary measurement results - let scratch_result_id = 9999; - - for &qubit in &qubits { - // Skip the qubits that are already being measured - if self.is_leaked(qubit) { - continue; - } - - if self.rng.random_float() - < self.p_crosstalk_meas * self.p_crosstalk_meas_rescale * self.scale - { - trace!("Applying measurement crosstalk to qubit {}", qubit); - - if self.is_leaked(qubit) { - // For leaked qubits, there's a chance of unseepage - if self.rng.random_float() < self.seepage_prob * self.leakage_scale * self.scale - { - trace!("Unseepage during measurement crosstalk for qubit {}", qubit); - self.mark_as_unleaked(qubit); - - // Measure the qubit to get a result - builder.add_measurements(&[qubit], &[scratch_result_id]); - - // 50% chance of reset - let reset_prob = 0.5; - if self.rng.random_float() < reset_prob { - // Reset to either |0⟩ or |1⟩ with equal probability - if self.rng.random_float() < 0.5 { - // Reset to |0⟩ - builder.add_prep(&[qubit]); - trace!("Meas crosstalk: qubit {} resets to |0⟩", qubit); - } else { - // Reset to |1⟩ - builder.add_prep(&[qubit]); - builder.add_x(&[qubit]); - trace!("Meas 
crosstalk: qubit {} resets to |1⟩", qubit); - } - } - } - } else if self.rng.random_float() - < self.p_prep_leak_ratio * self.leakage_scale * self.scale - { - // Leak the qubit - self.mark_as_leaked(qubit); - trace!("Meas crosstalk caused leakage of qubit {}", qubit); - } - } - } + #[allow(clippy::unused_self)] + fn meas_crosstalk(&mut self, _locations: &[usize], _builder: &mut ByteMessageBuilder) { + // placeholder } // Replace the prep_crosstalk method to use the correct API - fn prep_crosstalk(&mut self, locations: &[usize], builder: &mut ByteMessageBuilder) { - // Get max qubit index from the set of locations to determine total qubits - let num_qubits = locations.iter().max().map_or(0, |&q| q + 1); - - // Get qubits that might be affected by crosstalk - let qubits = Self::get_nearby_qubits_for_crosstalk(locations, num_qubits); - - for &qubit in &qubits { - // Skip the target qubits themselves - if locations.contains(&qubit) { - continue; - } - - if self.rng.random_float() - < self.p_crosstalk_prep * self.p_crosstalk_prep_rescale * self.scale - { - trace!("Applying initialization crosstalk to qubit {}", qubit); - - if self.is_leaked(qubit) { - // For leaked qubits, there's a chance of unseepage - if self.rng.random_float() < self.seepage_prob * self.leakage_scale * self.scale - { - trace!("Unseepage during prep crosstalk for qubit {}", qubit); - self.mark_as_unleaked(qubit); - - // After unseepage, the qubit is in |0⟩ with probability pop0_prob - if self.rng.random_float() < self.pop0_prob { - // Reset to |0⟩ using Prep gate - builder.add_prep(&[qubit]); - trace!( - "Prep crosstalk: qubit {} resets to |0⟩ after unseepage", - qubit - ); - } else { - // Reset to |1⟩ using Prep followed by X gate - builder.add_prep(&[qubit]); - builder.add_x(&[qubit]); - trace!( - "Prep crosstalk: qubit {} resets to |1⟩ after unseepage", - qubit - ); - } - } - } else { - // For non-leaked qubits, decide on error type - let error_type = self.rng.random_float(); - - if error_type < 0.3 { - // Reset to |0⟩ - builder.add_prep(&[qubit]); - trace!("Prep crosstalk: qubit {} resets to |0⟩", qubit); - } else if error_type < 0.6 { - // Reset to |1⟩ - builder.add_prep(&[qubit]); - builder.add_x(&[qubit]); - trace!("Prep crosstalk: qubit {} resets to |1⟩", qubit); - } else if error_type < 0.8 { - // Apply a random Pauli error - let pauli_type = self.rng.random_float(); - if pauli_type < 0.33 { - builder.add_x(&[qubit]); - trace!("Prep crosstalk: X error on qubit {}", qubit); - } else if pauli_type < 0.67 { - builder.add_y(&[qubit]); - trace!("Prep crosstalk: Y error on qubit {}", qubit); - } else { - builder.add_z(&[qubit]); - trace!("Prep crosstalk: Z error on qubit {}", qubit); - } - } else if self.rng.random_float() - < self.p_prep_leak_ratio * self.leakage_scale * self.scale - { - // Leak the qubit - self.mark_as_leaked(qubit); - trace!("Prep crosstalk: qubit {} leaks", qubit); - } - // Otherwise, leave the qubit unchanged - } - } - } + #[allow(clippy::unused_self)] + fn prep_crosstalk(&mut self, _locations: &[usize], _builder: &mut ByteMessageBuilder) { + // placeholder } /// Calculate the RZZ gate error rate based on the rotation angle @@ -1953,7 +1747,6 @@ pub struct GeneralNoiseModelBuilder { p2_emission_model: Option, p_prep_leak_ratio: Option, seepage_prob: Option, - pop0_prob: Option, seed: Option, scale: Option, memory_scale: Option, @@ -2000,7 +1793,6 @@ impl GeneralNoiseModelBuilder { p2_emission_model: None, p_prep_leak_ratio: None, seepage_prob: None, - pop0_prob: None, seed: None, scale: None, 
memory_scale: None, @@ -2345,21 +2137,6 @@ impl GeneralNoiseModelBuilder { self } - /// Set the probability that a seepage operation results in |0⟩ state (vs |1⟩) - /// - /// # Panics - /// - /// Panics if the probability is not between 0.0 and 1.0 (inclusive). - #[must_use] - pub fn with_pop0_prob(mut self, prob: f64) -> Self { - assert!( - (0.0..=1.0).contains(&prob), - "Pop0 probability must be between 0 and 1" - ); - self.pop0_prob = Some(prob); - self - } - /// Set the probability of crosstalk during measurement operations /// /// # Panics @@ -2450,10 +2227,6 @@ impl GeneralNoiseModelBuilder { model.set_seepage_prob(prob); } - if let Some(prob) = self.pop0_prob { - model.set_pop0_prob(prob); - } - if let Some(prob) = self.p_crosstalk_meas { // Set crosstalk parameters model.p_crosstalk_meas = prob; @@ -2464,13 +2237,6 @@ impl GeneralNoiseModelBuilder { model.p_crosstalk_prep = prob; } - if let Some(per_gate) = self.crosstalk_per_gate { - // Use existing crosstalk settings if they haven't been specified - let meas = self.p_crosstalk_meas.unwrap_or(model.p_crosstalk_meas); - let prep = self.p_crosstalk_prep.unwrap_or(model.p_crosstalk_prep); - model.set_crosstalk_parameters(meas, prep, per_gate); - } - if let Some(scale) = self.scale { model.set_scale(scale); } @@ -2542,6 +2308,12 @@ impl GeneralNoiseModelBuilder { model.set_leak2depolar(use_depolar); } + if let Some(has_crosstalk_per_gate) = self.crosstalk_per_gate { + model.crosstalk_per_gate = has_crosstalk_per_gate; + } else { + model.crosstalk_per_gate = false; + } + model.scale_parameters(); // TODO: Need this Box? Box::new(model) From 7574e8f92e5311fdba1c8c28a48ff79df34cb0fd Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Wed, 7 May 2025 11:36:49 -0600 Subject: [PATCH 4/9] Hopefully fixing PR #134 for TQ gates + allowing multiple SQ gates at once --- .../src/engines/noise/general.rs | 92 ++++++++++++------- 1 file changed, 57 insertions(+), 35 deletions(-) diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index 4a11c8087..7191aac0d 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -878,15 +878,15 @@ impl GeneralNoiseModel { /// /// Panics if sampling from the Pauli model fails or if an invalid Pauli operator is encountered. 
fn apply_sq_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { - // Track whether to add the original gate - let mut add_original_gate = true; - let mut noise = Vec::new(); - - let has_leakage = !self.leaked_qubits.is_empty() - && gate.qubits.iter().any(|&qubit| self.is_leaked(qubit)); + let mut removed_gates = false; + let mut original_gate_qubits: Vec = Vec::new(); for &qubit in &gate.qubits { + // Track whether to add the original gate + let mut add_original_gate = true; + let has_leakage = self.is_leaked(qubit); + if has_leakage { add_original_gate = false; } @@ -924,12 +924,30 @@ impl GeneralNoiseModel { } } } + + // Add the original gate only if there were no leakage errors + if add_original_gate { + original_gate_qubits.push(qubit); + } else { + removed_gates = true; + } } - // Add the original gate only if there were no leakage errors - if add_original_gate { + if removed_gates { + // There are some gates left to add + if !original_gate_qubits.is_empty() { + let new_gate = QuantumGate::new( + gate.gate_type, + original_gate_qubits, + gate.params.clone(), + None, + ); + builder.add_quantum_gate(&new_gate); + } + } else { builder.add_quantum_gate(gate); } + if !noise.is_empty() { builder.add_quantum_gates(&noise); } @@ -962,44 +980,48 @@ impl GeneralNoiseModel { if has_leakage { add_original_gate = false; + } - // Seep leaked qubits if a spontaneous emission event occurs + if self.rng.occurs(p) { if self.rng.occurs(self.p2_emission_ratio) { - for qubit in &gate.qubits { - if self.is_leaked(*qubit) { - if let Some(gates) = self.seep(*qubit) { - noise.extend(gates); + if has_leakage { + // potentially seep qubits + for qubit in &gate.qubits { + if self.is_leaked(*qubit) { + if let Some(gates) = self.seep(*qubit) { + noise.extend(gates); + } } } - } - } - } else if self.rng.occurs(p) { - if self.rng.occurs(self.p2_emission_ratio) { - // Spontaneous emission noise - add_original_gate = false; + } else { + // Spontaneous emission noise + add_original_gate = false; - let result = - self.p2_emission_model - .sample_gates(&mut self.rng, qubits[0], qubits[1]); + let result = self.p2_emission_model.sample_gates( + &mut self.rng, + qubits[0], + qubits[1], + ); - if result.has_leakage() { - for (qubit, leaked) in qubits.iter().zip(result.has_leakages().iter()) { - if *leaked { - if let Some(gate) = self.leak(*qubit) { - noise.push(gate); + if result.has_leakage() { + for (qubit, leaked) in qubits.iter().zip(result.has_leakages().iter()) { + if *leaked { + if let Some(gate) = self.leak(*qubit) { + noise.push(gate); + } } } } - } - if let Some(gates) = result.gates { - noise.extend(gates); - trace!( - "Applied Pauli error to qubits {} and {}", - qubits[0], qubits[1] - ); + if let Some(gates) = result.gates { + noise.extend(gates); + trace!( + "Applied Pauli error to qubits {} and {}", + qubits[0], qubits[1] + ); + } } - } else { + } else if !has_leakage { // Pauli noise let result = self.p2_pauli_model From 7d3329faff8bcabf1de7cde2f2e9f91ee1212f1c Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Thu, 8 May 2025 16:38:51 -0600 Subject: [PATCH 5/9] removed setters, moved scaling to builder, split seepage_prob to p1 and p2 - removed setters to pivot to focus on builder - seepage_prob -> p1_seepage_prob, p2_seepage_prob - p_crosstalk_meas -> p_meas_crosstalk - p_crosstalk_prep -> p_prep_crosstalk - p_crosstalk_meas_rescale -> p_meas_crosstalk_scale - p_crosstalk_prep_rescale -> p_prep_crosstalk_scale - removed scales for GeneralNoiseModel params but left them in builder 
- added average_p1_probability and average_p2_probability so users can enter average probability and get it automatically rescaled to total probability - add p_meas_max as an internal GeneralNoiseModel variable to represent the overall measurement error rate - added with_meas_probability() so users who don't want biased noise can enter just one measurement error rate --- .../src/engines/noise/general.rs | 1927 ++++++++--------- .../pecos-engines/tests/noise_determinism.rs | 31 +- crates/pecos-engines/tests/noise_test.rs | 593 +++-- 3 files changed, 1314 insertions(+), 1237 deletions(-) diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index 7191aac0d..bec1ce114 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -100,20 +100,23 @@ use rand_chacha::ChaCha8Rng; /// - **Memory errors**: Dephasing during idle periods /// - **Leakage errors**: Transitions outside the computational subspace /// - **Emission errors**: Non-unitary errors that can cause leakage -/// -/// The model closely includes scaling parameters that allow for customization of error rates: -/// - Global scaling factor affecting all error probabilities -/// - Channel-specific scaling (`p1_scale`, `p2_scale`, `meas_scale`, etc.) -/// - Parameterized angle-dependent noise scaling for RZZ gates -/// -/// Two key conversion factors are applied during parameter scaling: -/// - Single-qubit gate errors (p1) are scaled by 3/2 -/// - Two-qubit gate errors (p2) are scaled by 5/4 -/// These conversions transform average error rates (typically reported in benchmarks) -/// to total error rates used in the noise model implementation. #[derive(Debug, Clone)] #[allow(clippy::struct_excessive_bools)] pub struct GeneralNoiseModel { + /// Set of gate types that should not have noise applied + /// + /// Gates in this set may be those that are implemented in software rather than + /// with physical operations, so no noise should be applied to them. + noiseless_gates: HashSet, + + /// Whether to replace leakage with depolarizing noise + /// + /// If true, instead of marking qubits as leaked, completely depolarizing noise will be applied. + /// This is useful for studying the effects for comparing the effects of leakage vs. + /// depolarizing noise. + /// TODO: Consider making this more a float and becoming `leakage_scale` + leak2depolar: bool, + /// Probability of applying a fault during preparation (initialization) /// /// This parameter models faults that occur when initializing a qubit to |0⟩. In ion trap @@ -121,23 +124,19 @@ pub struct GeneralNoiseModel { /// state preparation process. p_prep: f64, - /// Probability of flipping a 0 measurement to 1 - /// - /// This asymmetric measurement error models cases when a qubit in state |0⟩ is incorrectly - /// measured as 1. + /// Relative probability that a preparation fault leads to leakage /// - /// In ion trap systems, this may occur due to imperfect state detection or - /// background counts during fluorescence detection. - p_meas_0: f64, + /// Controls what fraction of preparation faults result in leakage out of the computational + /// subspace. In ion trap systems, this could represent population in states other than the + /// qubit states after initialization. Ranges from 0 to 1. 
+ p_prep_leak_ratio: f64, - /// Probability of flipping a 1 measurement to 0 - /// - /// This asymmetric measurement error models cases when a qubit in state |1⟩ is incorrectly - /// measured as 0. + /// Probability of crosstalk during initialization operations /// - /// In ion trap systems, this may occur due to decay during measurement or - /// imperfect detection efficiency. - p_meas_1: f64, + /// Models the probability that an initialization operation on one qubit affects nearby qubits. + /// In ion trap systems, this could represent scattered light during optical pumping affecting + /// neighboring ions. + p_prep_crosstalk: f64, /// Probability of applying a fault after single-qubit gates /// @@ -145,17 +144,8 @@ pub struct GeneralNoiseModel { /// /// In physical systems, this represents coherent control errors, decoherence during gate /// operation, and other forms of noise affecting single-qubit operations. - /// - /// Will be scaled by 3/2 to convert from average to total error rate during parameter scaling. p1: f64, - /// Probability of applying a fault after two-qubit gates - /// - /// Models depolarizing channel + leakage noise for two-qubit gates. - /// - /// Will be scaled by 5/4 to convert from average to total error rate during parameter scaling. - p2: f64, - /// The proportion of single-qubit errors that are emission errors /// /// Controls what fraction of errors on single-qubit gates are emission errors (which can @@ -163,19 +153,13 @@ pub struct GeneralNoiseModel { /// spontaneous emission from excited states during gate operations. Ranges from 0 to 1. p1_emission_ratio: f64, - /// Relative probability that a preparation fault leads to leakage - /// - /// Controls what fraction of preparation faults result in leakage out of the computational - /// subspace. In ion trap systems, this could represent population in states other than the - /// qubit states after initialization. Ranges from 0 to 1. - p_prep_leak_ratio: f64, - - /// The proportion of two-qubit errors that are emission faults + /// Probability of a leaked qubit being seeped (released from leakage) for single-qubit gates if + /// a spontaneous emission event occurs /// - /// Controls what fraction of faults on two-qubit gates are spontaneous emission faults versus - /// standard depolarizing faults. In ion trap systems, this could model decay or transitions to - /// non-computational states during two-qubit operations. Ranges from 0 to 1. - p2_emission_ratio: f64, + /// Models the rate at which qubits that have leaked from the computational subspace + /// spontaneously return. In ion trap systems, this could represent decay from metastable + /// states back to the computational subspace. + p1_seepage_prob: f64, /// Probability model for Pauli faults on single qubit gates /// @@ -193,30 +177,10 @@ pub struct GeneralNoiseModel { /// The distribution is stored as pre-computed, cached sampler instead of the `HashMap` that is the input. p1_emission_model: SingleQubitWeightedSampler, - /// Probability model for Pauli errors on two-qubit gates - /// - /// Specifies the distribution of different two-qubit Pauli errors that can occur. - /// For a uniform depolarizing channel, each of the 15 non-identity two-qubit Pauli - /// operators would have equal probability. - /// - /// The distribution is stored as pre-computed, cached sampler instead of the `HashMap` that is the input. 
- p2_pauli_model: TwoQubitWeightedSampler, - - /// Probability model for spontaneous emission errors on two-qubit gates - /// - /// Specifies the distribution of different emission error types that can occur during - /// two-qubit operations. This includes errors that may cause state transitions outside - /// the computational basis. - /// - /// The distribution is stored as pre-computed, cached sampler instead of the `HashMap` that is the input. - p2_emission_model: TwoQubitWeightedSampler, - - /// Probability of a leaked qubit being seeped (released from leakage) + /// Probability of applying a fault after two-qubit gates /// - /// Models the rate at which qubits that have leaked from the computational subspace - /// spontaneously return. In ion trap systems, this could represent decay from metastable - /// states back to the computational subspace. - seepage_prob: f64, + /// Models depolarizing channel + leakage noise for two-qubit gates. + p2: f64, /// Scaling parameters for RZZ gate error rate - coefficient a /// @@ -240,94 +204,38 @@ pub struct GeneralNoiseModel { /// Typically set to 1.0 for linear scaling. przz_power: f64, - /// Set of qubits that are currently in a leaked state - /// - /// Tracks which qubits have leaked out of the computational subspace and are - /// therefore not affected by computational gates but might still affect measurements. - leaked_qubits: HashSet, - - /// Random number generator for stochastic noise processes - rng: NoiseRng, - - /// Overall scaling factor for error probabilities - /// - /// A global multiplier applied to all error rates. This allows easy adjustment of the - /// overall noise level without changing individual parameters. Typically used to - /// simulate different device qualities or to study the effect of noise strength. - scale: f64, - - /// Scaling factor for memory errors - /// - /// Controls the strength of errors that occur during idle periods or memory operations. - /// In ion trap systems, this could represent heating or dephasing during storage times. - memory_scale: f64, - - /// Scaling factor for initialization errors - /// - /// Multiplier for preparation error probabilities. Allows adjustment of the relative - /// strength of initialization errors compared to other error types. - prep_scale: f64, - - /// Scaling factor for measurement errors - /// - /// Multiplier for measurement error probabilities. Allows adjustment of the relative - /// strength of readout errors compared to other error types. - meas_scale: f64, - - /// Scaling factor for leakage errors - /// - /// Multiplier for leakage-related error probabilities. Controls how likely qubits - /// are to transition outside the computational subspace during various operations. - leakage_scale: f64, - - /// Scaling factor for single-qubit gate errors - /// - /// Multiplier for single-qubit gate error probabilities. Allows adjustment of the - /// relative strength of single-qubit gate errors compared to other error types. - p1_scale: f64, - - /// Scaling factor for two-qubit gate errors + /// The proportion of two-qubit errors that are emission faults /// - /// Multiplier for two-qubit gate error probabilities. Allows adjustment of the relative - /// strength of two-qubit gate errors compared to other error types. In most quantum - /// technologies, two-qubit gates are typically more error-prone than single-qubit gates. - p2_scale: f64, + /// Controls what fraction of faults on two-qubit gates are spontaneous emission faults versus + /// standard depolarizing faults. 
In ion trap systems, this could model decay or transitions to + /// non-computational states during two-qubit operations. Ranges from 0 to 1. + p2_emission_ratio: f64, - /// Scaling factor for spontaneous emission errors + /// Probability of a leaked qubit being seeped (released from leakage) for two-qubit gates if + /// a spontaneous emission event occurs /// - /// Multiplier for spontaneous-emission-related error probabilities. Controls the relative - /// strength of errors that involve transitions outside the standard computational basis. - emission_scale: f64, - - /// Probability of crosstalk during measurement operations - /// - /// Models the probability that a measurement operation on one qubit affects nearby qubits. In - /// ion trap systems, this could represent scattered light during fluorescence detection - /// affecting neighboring ions. - p_crosstalk_meas: f64, + /// Models the rate at which qubits that have leaked from the computational subspace + /// spontaneously return. In ion trap systems, this could represent decay from metastable + /// states back to the computational subspace. + p2_seepage_prob: f64, - /// Probability of crosstalk during initialization operations + /// Probability model for Pauli errors on two-qubit gates /// - /// Models the probability that an initialization operation on one qubit affects nearby qubits. - /// In ion trap systems, this could represent scattered light during optical pumping affecting - /// neighboring ions. - p_crosstalk_prep: f64, - - /// Rescaling factor for measurement crosstalk probability + /// Specifies the distribution of different two-qubit Pauli errors that can occur. + /// For a uniform depolarizing channel, each of the 15 non-identity two-qubit Pauli + /// operators would have equal probability. /// - /// Additional scaling factor specifically for measurement crosstalk probability. - p_crosstalk_meas_rescale: f64, + /// The distribution is stored as pre-computed, cached sampler instead of the `HashMap` that is the input. + p2_pauli_model: TwoQubitWeightedSampler, - /// Rescaling factor for initialization crosstalk probability + /// Probability model for spontaneous emission errors on two-qubit gates /// - /// Additional scaling factor specifically for initialization crosstalk probability. - p_crosstalk_prep_rescale: f64, - - /// Whether to apply crosstalk on a per-gate basis + /// Specifies the distribution of different emission error types that can occur during + /// two-qubit operations. This includes errors that may cause state transitions outside + /// the computational basis. /// - /// If true, crosstalk is applied separately for each target qubit in a multi-qubit - /// operation. If false, crosstalk is applied only once for the entire operation. - crosstalk_per_gate: bool, + /// The distribution is stored as pre-computed, cached sampler instead of the `HashMap` that is the input. + p2_emission_model: TwoQubitWeightedSampler, /// Whether to use coherent dephasing vs incoherent (stochastic) dephasing /// @@ -350,22 +258,52 @@ pub struct GeneralNoiseModel { /// Panics if the factor is not positive (less than or equal to 0.0). coherent_to_incoherent_factor: f64, - /// Set of gate types that should not have noise applied + /// Whether to apply crosstalk on a per-gate basis /// - /// Gates in this set may be those that are implemented in software rather than - /// with physical operations, so no noise should be applied to them. 
- noiseless_gates: HashSet, + /// If true, crosstalk is applied separately for each target qubit in a multi-qubit + /// operation. If false, crosstalk is applied only once for the entire operation. + /// TODO: consider separate per crosstalk channel + crosstalk_per_gate: bool, - /// Whether to replace leakage with depolarizing noise + /// Probability of flipping a 0 measurement to 1 /// - /// If true, instead of marking qubits as leaked, completely depolarizing noise will be applied. - /// This is useful for studying the effects for comparing the effects of leakage vs. - /// depolarizing noise. - leak2depolar: bool, + /// This asymmetric measurement error models cases when a qubit in state |0⟩ is incorrectly + /// measured as 1. + /// + /// In ion trap systems, this may occur due to imperfect state detection or + /// background counts during fluorescence detection. + p_meas_0: f64, - /// Whether the parameters have been scaled already. This is useful to make sure the noise - /// parameters haven't more than once... - parameters_scaled: bool, + /// Probability of flipping a 1 measurement to 0 + /// + /// This asymmetric measurement error models cases when a qubit in state |1⟩ is incorrectly + /// measured as 0. + /// + /// In ion trap systems, this may occur due to decay during measurement or + /// imperfect detection efficiency. + p_meas_1: f64, + + /// Probability of crosstalk during measurement operations + /// + /// Models the probability that a measurement operation on one qubit affects nearby qubits. In + /// ion trap systems, this could represent scattered light during fluorescence detection + /// affecting neighboring ions. + p_meas_crosstalk: f64, + + // --- internally used variables --- // + /// The maximum of `p_meas_0` and `p_meas_1` + /// + /// Used to determine the overall measurement error rate. + p_meas_max: f64, + + /// Set of qubits that are currently in a leaked state + /// + /// Tracks which qubits have leaked out of the computational subspace and are + /// therefore not affected by computational gates but might still affect measurements. + leaked_qubits: HashSet, + + /// Random number generator for stochastic noise processes + rng: NoiseRng, } impl ControlEngine for GeneralNoiseModel { @@ -379,11 +317,6 @@ impl ControlEngine for GeneralNoiseModel { &mut self, input: Self::Input, ) -> Result, QueueError> { - // scale the parameters if it hasn't been scaled already - if !self.parameters_scaled { - self.scale_parameters(); - } - // Apply noise to the gates let noisy_gates = match self.apply_noise_on_start(&input) { Ok(gates) => gates, @@ -452,15 +385,18 @@ impl ProbabilityValidator for GeneralNoiseModel {} impl GeneralNoiseModel { /// Create a new noise model with the specified error parameters /// - /// Creates a `GeneralNoiseModel` with the specified error probabilities: + /// Creates a `GeneralNoiseModel` with the specified error probabilities while using default values + /// for all other parameters. This is a convenience method for cases where you only need to customize + /// the basic error rates. 
+ /// /// * `p_prep` - Preparation (initialization) error probability /// * `p_meas_0` - Probability of measuring 1 when the state is |0⟩ /// * `p_meas_1` - Probability of measuring 0 when the state is |1⟩ /// * `p1` - Single-qubit gate error probability (average error rate) /// * `p2` - Two-qubit gate error probability (average error rate) /// - /// Other parameters are initialized with sensible defaults, including uniform - /// distributions for Pauli errors and emission errors. + /// For more extensive customization, use the builder pattern with `GeneralNoiseModel::builder()`. + /// For default parameters, use `GeneralNoiseModel::default()`. /// /// # Example /// ``` @@ -468,108 +404,16 @@ impl GeneralNoiseModel { /// /// // Create model with specified error probabilities /// let mut model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - /// - /// // Configure additional parameters if needed - /// model.set_prep_leak_ratio(0.3); - /// model.set_przz_power(2.0); - /// - /// // Scale parameters exactly once before using the model - /// model.scale_parameters(); /// ``` #[must_use] pub fn new(p_prep: f64, p_meas_0: f64, p_meas_1: f64, p1: f64, p2: f64) -> Self { - // Validate all probabilities - Self::validate_probability(p_prep); - Self::validate_probability(p_meas_0); - Self::validate_probability(p_meas_1); - Self::validate_probability(p1); - Self::validate_probability(p2); - - // Initialize default models - let mut p1_pauli_model = HashMap::new(); - p1_pauli_model.insert("X".to_string(), 1.0 / 3.0); - p1_pauli_model.insert("Y".to_string(), 1.0 / 3.0); - p1_pauli_model.insert("Z".to_string(), 1.0 / 3.0); - - let mut p1_emission_model = HashMap::new(); - p1_emission_model.insert("X".to_string(), 1.0 / 3.0); - p1_emission_model.insert("Y".to_string(), 1.0 / 3.0); - p1_emission_model.insert("Z".to_string(), 1.0 / 3.0); - - let mut p2_pauli_model = HashMap::new(); - p2_pauli_model.insert("XX".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("XY".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("XZ".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("YX".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("YY".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("YZ".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("ZX".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("ZY".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("ZZ".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("IX".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("IY".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("IZ".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("XI".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("YI".to_string(), 1.0 / 15.0); - p2_pauli_model.insert("ZI".to_string(), 1.0 / 15.0); - - let mut p2_emission_model = HashMap::new(); - p2_emission_model.insert("XX".to_string(), 1.0 / 15.0); - p2_emission_model.insert("XY".to_string(), 1.0 / 15.0); - p2_emission_model.insert("XZ".to_string(), 1.0 / 15.0); - p2_emission_model.insert("YX".to_string(), 1.0 / 15.0); - p2_emission_model.insert("YY".to_string(), 1.0 / 15.0); - p2_emission_model.insert("YZ".to_string(), 1.0 / 15.0); - p2_emission_model.insert("ZX".to_string(), 1.0 / 15.0); - p2_emission_model.insert("ZY".to_string(), 1.0 / 15.0); - p2_emission_model.insert("ZZ".to_string(), 1.0 / 15.0); - p2_emission_model.insert("IX".to_string(), 1.0 / 15.0); - p2_emission_model.insert("IY".to_string(), 1.0 / 15.0); - p2_emission_model.insert("IZ".to_string(), 1.0 / 15.0); - p2_emission_model.insert("XI".to_string(), 1.0 / 15.0); - 
p2_emission_model.insert("YI".to_string(), 1.0 / 15.0); - p2_emission_model.insert("ZI".to_string(), 1.0 / 15.0); - - // Return the populated GeneralNoiseModel - Self { + GeneralNoiseModel { p_prep, - p_meas_0, - p_meas_1, p1, p2, - p1_emission_ratio: 0.5, - p_prep_leak_ratio: 0.5, - p2_emission_ratio: 0.5, - p1_pauli_model: SingleQubitWeightedSampler::new(&p1_pauli_model), - p1_emission_model: SingleQubitWeightedSampler::new(&p1_emission_model), - p2_pauli_model: TwoQubitWeightedSampler::new(&p2_pauli_model), - p2_emission_model: TwoQubitWeightedSampler::new(&p2_emission_model), - seepage_prob: 0.5, - przz_a: 0.0, - przz_b: 1.0, - przz_c: 0.0, - przz_d: 1.0, - przz_power: 1.0, - leaked_qubits: HashSet::new(), - rng: NoiseRng::default(), - scale: 1.0, - memory_scale: 1.0, - prep_scale: 1.0, - meas_scale: 1.0, - leakage_scale: 1.0, - p1_scale: 1.0, - p2_scale: 1.0, - emission_scale: 1.0, - p_crosstalk_meas: 0.0, - p_crosstalk_prep: 0.0, - p_crosstalk_meas_rescale: 1.0, - p_crosstalk_prep_rescale: 1.0, - crosstalk_per_gate: false, - coherent_dephasing: false, - coherent_to_incoherent_factor: 2.0, - noiseless_gates: HashSet::new(), - leak2depolar: false, - parameters_scaled: false, + p_meas_0, + p_meas_1, + ..Default::default() } } @@ -579,81 +423,6 @@ impl GeneralNoiseModel { GeneralNoiseModelBuilder::new() } - /// Set the preparation leakage ratio - pub fn set_prep_leak_ratio(&mut self, ratio: f64) { - Self::validate_probability(ratio); - self.p_prep_leak_ratio = ratio; - } - - /// Set the one-qubit spontaneous emission ratio - pub fn set_p1_emission_ratio(&mut self, ratio: f64) { - Self::validate_probability(ratio); - self.p1_emission_ratio = ratio; - } - - /// Set the two-qubit emission ratio - pub fn set_p2_emission_ratio(&mut self, ratio: f64) { - Self::validate_probability(ratio); - self.p2_emission_ratio = ratio; - } - - /// Set the stochastic Pauli model for single-qubit gates - pub fn set_p1_pauli_model(&mut self, model: &HashMap) { - self.p1_pauli_model = SingleQubitWeightedSampler::new(model); - } - - /// Set the stochastic spontaneous model for single-qubit gates - pub fn set_p1_emission_model(&mut self, model: &HashMap) { - self.p1_emission_model = SingleQubitWeightedSampler::new(model); - } - - /// Set the stochastic Pauli model for two-qubit gates - pub fn set_p2_pauli_model(&mut self, model: &HashMap) { - self.p2_pauli_model = TwoQubitWeightedSampler::new(model); - } - - /// Set the stochastic spontaneous model for two-qubit gates - pub fn set_p2_emission_model(&mut self, model: &HashMap) { - self.p2_emission_model = TwoQubitWeightedSampler::new(model); - } - - /// Set the seepage probability - pub fn set_seepage_prob(&mut self, prob: f64) { - Self::validate_probability(prob); - self.seepage_prob = prob; - } - - /// Set RZZ parameter scaling for angle dependent error. - /// - /// The PECOS gate set has a parameterized-angle ZZ gate, RZZ(θ). For implementation - /// Certain parameters relate to the strength of the asymmetric - /// depolarizing noise. These parameters depend on the angle θ and are normalized so that - /// θ = π/2 gives the 2-qubit fault probability (p2). 
- /// - /// The parameters for asymmetric depolarizing noise are fit parameters that model how the - /// noise changes as the angle θ changes according to these equations: - /// - /// For θ < 0: - /// (`przz_a` × (|`θ|/π)^przz_power` + `przz_b`) × p2 - /// - /// For θ > 0: - /// (`przz_c` × (|`θ|/π)^przz_power` + `przz_d`) × p2 - /// - /// For θ = 0: - /// (`przz_b` + `przz_d`) × 0.5 × p2 - /// - /// # Parameters - /// * `a` - Coefficient for scaling negative angles (`przz_a`) - /// * `b` - Offset for negative angles (`przz_b`) - /// * `c` - Coefficient for scaling positive angles (`przz_c`) - /// * `d` - Offset for positive angles (`przz_d`) - pub fn set_przz_params(&mut self, a: f64, b: f64, c: f64, d: f64) { - self.przz_a = a; - self.przz_b = b; - self.przz_c = c; - self.przz_d = d; - } - /// Get the current error probabilities #[must_use] pub fn probabilities(&self) -> (f64, f64, f64, f64, f64, f64) { @@ -668,7 +437,11 @@ impl GeneralNoiseModel { } /// Apply noise at the start of `QuantumSystem` processing (typically a collection of gates) - fn apply_noise_on_start(&mut self, input: &ByteMessage) -> Result { + /// + /// # Panics + /// + /// Panics if the input `ByteMessage` cannot be parsed as quantum operations. + pub fn apply_noise_on_start(&mut self, input: &ByteMessage) -> Result { let mut builder = NoiseUtils::create_quantum_builder(); let mut err = None; @@ -702,7 +475,7 @@ impl GeneralNoiseModel { // TODO: look closely at prep crosstalk... // Potentially apply crosstalk - if self.p_crosstalk_prep > 0.0 { + if self.p_prep_crosstalk > 0.0 { self.prep_crosstalk(&gate.qubits, &mut builder); } } @@ -757,7 +530,7 @@ impl GeneralNoiseModel { /// /// In physical systems, this represents detection errors, crosstalk, and special /// handling of qubit states outside the computational basis. - fn apply_noise_on_continue_processing( + pub fn apply_noise_on_continue_processing( &mut self, message: ByteMessage, ) -> Result { @@ -789,7 +562,7 @@ impl GeneralNoiseModel { // TODO: Look closely at meas crosstalk... // Now check if we need to apply measurement crosstalk - if !measured_qubits_usize.is_empty() && self.p_crosstalk_meas > 0.0 { + if !measured_qubits_usize.is_empty() && self.p_meas_crosstalk > 0.0 { // Create a new builder for quantum operations to hold crosstalk effects let mut operations_builder = ByteMessage::quantum_operations_builder(); @@ -829,7 +602,7 @@ impl GeneralNoiseModel { /// /// In ion trap systems, this models imperfect optical pumping or errors in the initial /// state preparation process that fails to correctly initialize the qubit. - fn apply_prep_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { + pub fn apply_prep_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { // unleaking qubits - preparation resets leaked qubits to the zero state for &qubit in &gate.qubits { if self.is_leaked(qubit) { @@ -877,7 +650,7 @@ impl GeneralNoiseModel { /// # Panics /// /// Panics if sampling from the Pauli model fails or if an invalid Pauli operator is encountered. - fn apply_sq_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { + pub fn apply_sq_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { let mut noise = Vec::new(); let mut removed_gates = false; let mut original_gate_qubits: Vec = Vec::new(); @@ -896,7 +669,7 @@ impl GeneralNoiseModel { if self.rng.occurs(self.p1_emission_ratio) { // If qubit has leaked and spontaneous emission has occurred... 
seep the qubit if has_leakage { - if let Some(gates) = self.seep(qubit) { + if let Some(gates) = self.seep(qubit, self.p1_seepage_prob) { noise.extend(gates); } } else { @@ -966,7 +739,12 @@ impl GeneralNoiseModel { /// # Panics /// /// Panics if sampling from the Pauli model fails or if an invalid Pauli operator is encountered. - fn apply_tq_faults(&mut self, gate: &QuantumGate, p: f64, builder: &mut ByteMessageBuilder) { + pub fn apply_tq_faults( + &mut self, + gate: &QuantumGate, + p: f64, + builder: &mut ByteMessageBuilder, + ) { let mut noise = Vec::new(); let mut removed_gates = false; let mut original_gate_qubits: Vec = Vec::new(); @@ -988,7 +766,7 @@ impl GeneralNoiseModel { // potentially seep qubits for qubit in &gate.qubits { if self.is_leaked(*qubit) { - if let Some(gates) = self.seep(*qubit) { + if let Some(gates) = self.seep(*qubit, self.p2_seepage_prob) { noise.extend(gates); } } @@ -1068,7 +846,7 @@ impl GeneralNoiseModel { /// 2. Special handling for leaked qubits (ensuring they measure as 1 + measurement noise) /// /// Returns a `ByteMessage` containing the biased measurement results - fn apply_meas_faults( + pub fn apply_meas_faults( &mut self, measured_qubits: &[usize], measurement_results: &[(usize, u32)], @@ -1126,6 +904,54 @@ impl GeneralNoiseModel { results_builder.build() } + /// Apply idle qubit noise faults + /// + /// Models errors that occur during idle periods when qubits are not actively being manipulated: + /// 1. Coherent dephasing: Phase rotation errors that accumulate during idle time + /// 2. Incoherent dephasing: Stochastic Z errors + /// + /// The error rates scale with the idle duration, and are affected by `memory_scale` parameter. + /// In physical systems, this sensitivity to the surrounding magnetic fields, represents + /// heating, T2 decoherence, and other environmental interactions that affect the qubit while + /// it's not being actively controlled. 
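// A minimal sketch, not part of this patch, of the linear incoherent idle-dephasing
// scaling described in the doc comment above. It mirrors the `p_deph = rate * duration`
// form used by `apply_dephasing` later in this file; the function name,
// `idle_dephasing_rate`, and the use of `memory_scale` here are assumptions for
// illustration only.
fn idle_z_error_prob(idle_dephasing_rate: f64, duration: f64, memory_scale: f64) -> f64 {
    // Clamp so the result stays a valid probability even for long idle periods.
    (idle_dephasing_rate * duration * memory_scale).min(1.0)
}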
+ #[allow(clippy::unused_self)] + pub fn apply_idle_faults(&mut self, _gate: &QuantumGate, _builder: &mut ByteMessageBuilder) { + // let duration = gate.idle_duration(); + // + // // Skip if duration is too small + // if duration < f64::EPSILON { + // // Just pass through the gate without noise + // builder.add_quantum_gate(gate); + // return; + // } + // + // // Filter out leaked qubits + // let qubits: Vec = gate + // .qubits + // .iter() + // .filter(|&&q| !self.is_leaked(q)) + // .copied() + // .collect(); + // + // if qubits.is_empty() { + // return; + // } + // + // // Call the existing dephasing method to apply the appropriate noise + // // This will use the same dephasing model as other memory operations + // self.apply_dephasing( + // builder, + // gate, + // duration, + // // For coherent dephasing + // Some(dephasing_rate), + // // For incoherent dephasing + // Some(dephasing_rate), + // // Whether to use coherent dephasing + // self.coherent_dephasing, + // ); + } + /// Mark a qubit as leaked /// /// When a qubit leaks, it moves outside the computational subspace and can no longer be @@ -1186,8 +1012,8 @@ impl GeneralNoiseModel { noise } - fn seep(&mut self, qubit: usize) -> Option> { - if self.rng.occurs(self.seepage_prob) { + fn seep(&mut self, qubit: usize, seepage_prob: f64) -> Option> { + if self.rng.occurs(seepage_prob) { Option::from(self.unleak_random_bit(qubit)) } else { None @@ -1201,167 +1027,6 @@ impl GeneralNoiseModel { // RNG state is intentionally not reset to maintain natural randomness } - /// Scale error probabilities based on scaling factors - /// - /// This method applies all scaling factors to the error probabilities: - /// - Global scale factor - /// - Type-specific scale factors (measurement, preparation, memory, etc.) - /// - Conversion factors from average to total error rates (3/2 for p1, 5/4 for p2) - /// - /// This method should be called exactly once after setting all parameters - /// and before using the noise model for simulation. Calling it multiple times will - /// compound the scaling factors incorrectly. 
- pub fn scale_parameters(&mut self) { - // If parameters have already been scaled, return to avoid double-scaling - if self.parameters_scaled { - return; - } - - // Get overall scale factor - let scale = self.scale; - - // Scale single-qubit gate error probability - self.p1 *= self.p1_scale * scale; - - // Scale two-qubit gate error probability - self.p2 *= self.p2_scale * scale; - - self.p_meas_0 *= self.meas_scale * scale; - self.p_meas_1 *= self.meas_scale * scale; - - // Scale preparation error probability - self.p_prep *= self.prep_scale * scale; - - // Scale preparation leakage ratio - include the global scale factor - self.p_prep_leak_ratio *= self.leakage_scale * scale; - self.p_prep_leak_ratio = self.p_prep_leak_ratio.min(1.0); - - // Apply crosstalk rescaling factors - self.p_crosstalk_meas *= self.p_crosstalk_meas_rescale; - self.p_crosstalk_prep *= self.p_crosstalk_prep_rescale; - - // Then apply the regular scaling to crosstalks - self.p_crosstalk_meas *= self.meas_scale * scale; - self.p_crosstalk_prep *= self.prep_scale * scale; - - // Scale emission ratios - self.p1_emission_ratio *= self.emission_scale * scale; - self.p1_emission_ratio = self.p1_emission_ratio.min(1.0); - - self.p2_emission_ratio *= self.emission_scale * scale; - self.p2_emission_ratio = self.p2_emission_ratio.min(1.0); - - // Rescaling from average error to total error as in the Python implementation - // - // This conversion is necessary because experiments report average error rates, - // but our noise models use total error rates. - // - // For a single-qubit gate with uniform error distribution across 3 Pauli errors, - // the ratio of total error rate to average error rate is 3/2. - // - // For a two-qubit gate with uniform error distribution across 15 Pauli errors, - // the ratio of total error rate to average error rate is 5/4. - self.p1 *= 3.0 / 2.0; - self.p2 *= 5.0 / 4.0; - - // Scale crosstalk probabilities by their respective conversion factors (18/5) - self.p_crosstalk_meas *= 18.0 / 5.0; - self.p_crosstalk_prep *= 18.0 / 5.0; - - self.parameters_scaled = true; - } - - /// Reset all scaling factors to their default values (1.0) - /// - /// Resets all scaling factors to 1.0 to clear previous scaling: - /// - Global scale - /// - Memory, initialization, measurement, and leakage scales - /// - Gate error scales (`p1_scale`, `p2_scale`) - /// - Emission and other specialized scaling factors - /// - /// This method is typically called before applying new scaling factors - /// to avoid compounding effects from multiple scale applications, ensuring - /// that each new scaling operation starts from a clean baseline. 
- pub fn reset_scaling_factors(&mut self) { - self.scale = 1.0; - self.memory_scale = 1.0; - self.prep_scale = 1.0; - self.meas_scale = 1.0; - self.leakage_scale = 1.0; - self.p1_scale = 1.0; - self.p2_scale = 1.0; - self.emission_scale = 1.0; - self.p_crosstalk_meas_rescale = 1.0; - self.p_crosstalk_prep_rescale = 1.0; - } - - /// Set the overall scaling factor - pub fn set_scale(&mut self, scale: f64) { - self.scale = scale; - } - - /// Set the memory scaling factor - pub fn set_memory_scale(&mut self, scale: f64) { - self.memory_scale = scale; - } - - /// Set the initialization scaling factor - pub fn set_prep_scale(&mut self, scale: f64) { - self.prep_scale = scale; - } - - /// Set the measurement scaling factor - pub fn set_meas_scale(&mut self, scale: f64) { - self.meas_scale = scale; - } - - /// Set the leakage scaling factor - pub fn set_leakage_scale(&mut self, scale: f64) { - self.leakage_scale = scale; - } - - /// Set the single-qubit gate scaling factor - pub fn set_p1_scale(&mut self, scale: f64) { - self.p1_scale = scale; - } - - /// Set the two-qubit gate scaling factor - pub fn set_p2_scale(&mut self, scale: f64) { - self.p2_scale = scale; - } - - /// Set the emission scaling factor - pub fn set_emission_scale(&mut self, scale: f64) { - self.emission_scale = scale; - } - - /// Set whether to use coherent dephasing - /// - /// # Parameters - /// * `use_coherent` - If true, use coherent dephasing (RZ gates). If false, use incoherent dephasing (stochastic Z gates). - pub fn set_coherent_dephasing(&mut self, use_coherent: bool) { - self.coherent_dephasing = use_coherent; - } - - /// Set the coherent-to-incoherent conversion factor for dephasing - /// - /// This factor is applied when incoherent dephasing is used. - /// - /// # Parameters - /// * `factor` - The scaling factor used as a fudge factor when going from coherent rates to - /// incoherent rates to attempt to make up for not simulating coherent effects. - /// - /// # Panics - /// - /// Panics if the factor is not positive (less than or equal to 0.0). - pub fn set_coherent_to_incoherent_factor(&mut self, factor: f64) { - assert!( - factor > 0.0, - "Coherent-to-incoherent factor must be positive" - ); - self.coherent_to_incoherent_factor = factor; - } - /// Apply coherent dephasing noise to a gate /// /// This method implements coherent phase rotation (systematic Z-rotation) noise @@ -1482,6 +1147,7 @@ impl GeneralNoiseModel { /// * `coherent_rate` - Rate parameter for coherent dephasing (if applicable) /// * `incoherent_rate` - Rate parameter for incoherent dephasing (if applicable) /// * `use_coherent` - Whether to use coherent dephasing, overrides model's setting + #[allow(dead_code)] fn apply_dephasing( &mut self, builder: &mut ByteMessageBuilder, @@ -1527,77 +1193,26 @@ impl GeneralNoiseModel { qubit, p_deph ); } - } - } - } - - // Apply additional linear incoherent dephasing if rate is provided - if let Some(rate) = incoherent_rate { - let p_deph = rate * duration; // Linear scaling - - // Apply Z errors with probability p_deph - for &qubit in &gate.qubits { - if !self.is_leaked(qubit) && self.rng.occurs(p_deph) { - // Apply Z gate for phase error - builder.add_z(&[qubit]); - trace!( - "Applied linear incoherent dephasing (Z error) to qubit {}", - qubit - ); - } - } - } - } - - /// Apply idle qubit noise faults - /// - /// Models errors that occur during idle periods when qubits are not actively being manipulated: - /// 1. Coherent dephasing: Phase rotation errors that accumulate during idle time - /// 2. 
Incoherent dephasing: Stochastic Z errors - /// - /// The error rates scale with the idle duration, and are affected by `memory_scale` parameter. - /// In physical systems, this sensitivity to the surrounding magnetic fields, represents - /// heating, T2 decoherence, and other environmental interactions that affect the qubit while - /// it's not being actively controlled. - fn apply_idle_faults(&mut self, gate: &QuantumGate, builder: &mut ByteMessageBuilder) { - let duration = gate.idle_duration(); - - // Skip if duration is too small - if duration < f64::EPSILON { - // Just pass through the gate without noise - builder.add_quantum_gate(gate); - return; + } + } } - // Filter out leaked qubits - let qubits: Vec = gate - .qubits - .iter() - .filter(|&&q| !self.is_leaked(q)) - .copied() - .collect(); + // Apply additional linear incoherent dephasing if rate is provided + if let Some(rate) = incoherent_rate { + let p_deph = rate * duration; // Linear scaling - if qubits.is_empty() { - return; + // Apply Z errors with probability p_deph + for &qubit in &gate.qubits { + if !self.is_leaked(qubit) && self.rng.occurs(p_deph) { + // Apply Z gate for phase error + builder.add_z(&[qubit]); + trace!( + "Applied linear incoherent dephasing (Z error) to qubit {}", + qubit + ); + } + } } - - // Apply dephasing errors based on the duration - // Use memory_scale to adjust the dephasing rate - let dephasing_rate = self.memory_scale * self.scale; - - // Call the existing dephasing method to apply the appropriate noise - // This will use the same dephasing model as other memory operations - self.apply_dephasing( - builder, - gate, - duration, - // For coherent dephasing - Some(dephasing_rate), - // For incoherent dephasing - Some(dephasing_rate), - // Whether to use coherent dephasing - self.coherent_dephasing, - ); } /// Create a new method to handle requesting nearby qubits for crosstalk @@ -1624,7 +1239,8 @@ impl GeneralNoiseModel { /// /// with additional support for asymmetric scaling and power-law scaling /// Includes scaling by p2 (two-qubit gate error probability) to match Python implementation - fn rzz_error_rate(&self, angle: f64) -> f64 { + #[must_use] + pub fn rzz_error_rate(&self, angle: f64) -> f64 { // Normalize angle by π - convert to a value in [0, 1] range let theta = angle.abs() / std::f64::consts::PI; @@ -1646,27 +1262,6 @@ impl GeneralNoiseModel { base_rate * self.p2 } - /// Set power parameter for RZZ error scaling - /// - /// # Parameters - /// * `power` - The power to which theta is raised in the RZZ error rate formula - /// - /// # Panics - /// - /// Panics if the power parameter is not positive (less than or equal to 0.0). - pub fn set_przz_power(&mut self, power: f64) { - assert!(power > 0.0, "RZZ power parameter must be positive"); - self.przz_power = power; - } - - /// Set whether to replace leakage with depolarizing noise - /// - /// # Parameters - /// * `use_depolar` - If true, replace leakage with depolarizing errors - pub fn set_leak2depolar(&mut self, use_depolar: bool) { - self.leak2depolar = use_depolar; - } - /// Add a gate type to the set of noiseless gates /// /// Gates in this set will not have noise applied to them. 
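As a reading aid for the now-public `rzz_error_rate`: under the parameters used by the RZZ tests later in this file (b = d = 0), the angle dependence reduces to the special case sketched below. The function name is illustrative and this is not the full formula.

```
// Sketch of the b = d = 0 special case checked by test_rzz_error_rate:
// rate ~ c * theta^power * p2, with theta = |angle| / pi.
fn rzz_rate_b_d_zero(angle: f64, c: f64, power: f64, p2: f64) -> f64 {
    let theta = angle.abs() / std::f64::consts::PI; // normalize the angle by pi
    c * theta.powf(power) * p2 // scaled by the two-qubit error rate
}
```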
@@ -1702,16 +1297,6 @@ impl GeneralNoiseModel { self.noiseless_gates.contains(gate_type) } - /// Set the measurement crosstalk rescale factor - pub fn set_p_crosstalk_meas_rescale(&mut self, scale: f64) { - self.p_crosstalk_meas_rescale = scale; - } - - /// Set the preparation crosstalk rescale factor - pub fn set_p_crosstalk_prep_rescale(&mut self, scale: f64) { - self.p_crosstalk_prep_rescale = scale; - } - /// Accessor for the p1 Pauli distribution #[must_use] pub fn p1_pauli_model(&self) -> &SingleQubitWeightedSampler { @@ -1738,8 +1323,8 @@ impl GeneralNoiseModel { /// Reset the noise model and then set a new seed for the RNG /// - /// This is a convenience method that combines calling `reset_noise_model()` - /// followed by `set_seed()` in a single call. + /// This method rebuilds the noise model with the same parameters but a new seed, + /// using the builder pattern. /// /// # Parameters /// * `seed` - The seed to set for the RNG @@ -1768,7 +1353,8 @@ pub struct GeneralNoiseModelBuilder { p2_pauli_model: Option, p2_emission_model: Option, p_prep_leak_ratio: Option, - seepage_prob: Option, + p1_seepage_prob: Option, + p2_seepage_prob: Option, seed: Option, scale: Option, memory_scale: Option, @@ -1778,10 +1364,10 @@ pub struct GeneralNoiseModelBuilder { p1_scale: Option, p2_scale: Option, emission_scale: Option, - p_crosstalk_meas: Option, - p_crosstalk_prep: Option, - p_crosstalk_meas_rescale: Option, - p_crosstalk_prep_rescale: Option, + p_meas_crosstalk: Option, + p_prep_crosstalk: Option, + p_meas_crosstalk_scale: Option, + p_prep_crosstalk_scale: Option, crosstalk_per_gate: Option, coherent_dephasing: Option, coherent_to_incoherent_factor: Option, @@ -1814,7 +1400,8 @@ impl GeneralNoiseModelBuilder { p2_pauli_model: None, p2_emission_model: None, p_prep_leak_ratio: None, - seepage_prob: None, + p1_seepage_prob: None, + p2_seepage_prob: None, seed: None, scale: None, memory_scale: None, @@ -1824,10 +1411,10 @@ impl GeneralNoiseModelBuilder { p1_scale: None, p2_scale: None, emission_scale: None, - p_crosstalk_meas: None, - p_crosstalk_prep: None, - p_crosstalk_meas_rescale: None, - p_crosstalk_prep_rescale: None, + p_meas_crosstalk: None, + p_prep_crosstalk: None, + p_meas_crosstalk_scale: None, + p_prep_crosstalk_scale: None, crosstalk_per_gate: None, coherent_dephasing: None, coherent_to_incoherent_factor: None, @@ -1838,31 +1425,75 @@ impl GeneralNoiseModelBuilder { } } + /// Validate that a value is a valid probability (between 0 and 1) + fn validate_probability(prob: f64) -> f64 { + assert!( + (0.0..=1.0).contains(&prob), + "Probability must be between 0 and 1, got {prob}" + ); + prob + } + + /// Validate that a value is positive + fn validate_positive(value: f64, name: &str) -> f64 { + assert!(value > 0.0, "{name} must be positive, got {value}"); + value + } + + /// Validate that a value is non-negative + fn validate_non_negative(value: f64, name: &str) -> f64 { + assert!(value >= 0.0, "{name} must be non-negative, got {value}"); + value + } + /// Set the probability of error during preparation #[must_use] pub fn with_prep_probability(mut self, probability: f64) -> Self { - self.p_prep = Some(probability); + self.p_prep = Some(Self::validate_probability(probability)); + self + } + + /// Set the probability of bit flipping the measurement result + #[must_use] + pub fn with_meas_probability(mut self, probability: f64) -> Self { + self.p_meas_0 = Some(Self::validate_probability(probability)); + self.p_meas_1 = Some(Self::validate_probability(probability)); self } /// Set the 
probability of flipping 0 to 1 during measurement #[must_use] pub fn with_meas_0_probability(mut self, probability: f64) -> Self { - self.p_meas_0 = Some(probability); + self.p_meas_0 = Some(Self::validate_probability(probability)); self } /// Set the probability of flipping 1 to 0 during measurement #[must_use] pub fn with_meas_1_probability(mut self, probability: f64) -> Self { - self.p_meas_1 = Some(probability); + self.p_meas_1 = Some(Self::validate_probability(probability)); + self + } + + /// Set the average probability of error after single-qubit gates + /// + /// Rescaling from average error to total error + /// + /// This conversion is necessary because experiments report average error rates, + /// but our noise models use total error rates. + /// + /// For a single-qubit gate with uniform error distribution across 3 Pauli errors, + /// the ratio of total error rate to average error rate is 3/2. + #[must_use] + pub fn with_average_p1_probability(mut self, probability: f64) -> Self { + self.p1 = Some(Self::validate_probability(probability * 3.0 / 2.0)); self } /// Set the probability of error after single-qubit gates #[must_use] pub fn with_p1_probability(mut self, probability: f64) -> Self { - self.p1 = Some(probability); + self.p1 = Some(Self::validate_probability(probability)); self } @@ -1874,10 +1505,25 @@ impl GeneralNoiseModelBuilder { self.with_p1_probability(probability) } + /// Set the probability of error after two-qubit gates + /// + /// Rescaling from average error to total error + /// + /// This conversion is necessary because experiments report average error rates, + /// but our noise models use total error rates. + /// + /// For a two-qubit gate with uniform error distribution across 15 Pauli errors, + /// the ratio of total error rate to average error rate is 5/4. + #[must_use] + pub fn with_average_p2_probability(mut self, probability: f64) -> Self { + self.p2 = Some(Self::validate_probability(probability * 5.0 / 4.0)); + self + } + /// Set the probability of error after two-qubit gates #[must_use] pub fn with_p2_probability(mut self, probability: f64) -> Self { - self.p2 = Some(probability); + self.p2 = Some(Self::validate_probability(probability)); self } @@ -1906,7 +1552,7 @@ impl GeneralNoiseModelBuilder { /// Set the preparation leakage ratio #[must_use] pub fn with_prep_leak_ratio(mut self, ratio: f64) -> Self { - self.p_prep_leak_ratio = Some(ratio); + self.p_prep_leak_ratio = Some(Self::validate_probability(ratio)); self } @@ -1917,56 +1563,83 @@ impl GeneralNoiseModelBuilder { self } - /// Set the overall scaling factor + /// Set the overall scaling factor for error probabilities + /// + /// A global multiplier applied to all error rates. This allows easy adjustment of the + /// overall noise level without changing individual parameters. Typically used to + /// simulate different device qualities or to study the effect of noise strength. #[must_use] pub fn with_scale(mut self, scale: f64) -> Self { self.scale = Some(scale); self } - /// Set the memory scaling factor + /// Set the scaling factor for memory errors + /// + /// Controls the strength of errors that occur during idle periods or memory operations. + /// In ion trap systems, this could represent heating or dephasing during storage times. 
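A brief usage sketch of the two new `with_average_*` setters: experiments report average error rates while the model stores totals, so the builder applies the 3/2 and 5/4 conversions up front (the numeric comments are just that arithmetic).

```
let model = GeneralNoiseModel::builder()
    .with_average_p1_probability(1e-3) // stored as a total rate of 1.5e-3
    .with_average_p2_probability(1e-2) // stored as a total rate of 1.25e-2
    .build();
```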
#[must_use] pub fn with_memory_scale(mut self, scale: f64) -> Self { self.memory_scale = Some(scale); self } - /// Set the initialization scaling factor + /// Set the scaling factor for initialization errors + /// + /// Multiplier for preparation error probabilities. Allows adjustment of the relative + /// strength of initialization errors compared to other error types. #[must_use] pub fn with_prep_scale(mut self, scale: f64) -> Self { self.prep_scale = Some(scale); self } - /// Set the measurement scaling factor + /// Set the scaling factor for measurement faults + /// + /// Multiplier for measurement error probabilities. Allows adjustment of the relative + /// strength of readout errors compared to other error types. #[must_use] pub fn with_meas_scale(mut self, scale: f64) -> Self { self.meas_scale = Some(scale); self } - /// Set the leakage scaling factor + /// Set the scaling factor for leakage errors + /// + /// Multiplier for leakage-related error probabilities. Controls how likely qubits + /// are to transition outside the computational subspace during various operations. #[must_use] pub fn with_leakage_scale(mut self, scale: f64) -> Self { self.leakage_scale = Some(scale); self } - /// Set the single-qubit gate scaling factor + /// Set the scaling factor for single-qubit gate errors + /// + /// Multiplier for single-qubit gate error probabilities. Allows adjustment of the + /// relative strength of single-qubit gate errors compared to other error types. #[must_use] pub fn with_p1_scale(mut self, scale: f64) -> Self { self.p1_scale = Some(scale); self } - /// Set the two-qubit gate scaling factor + /// Set the scaling factor for two-qubit gate errors + /// + /// Multiplier for two-qubit gate error probabilities. Allows adjustment of the relative + /// strength of two-qubit gate errors compared to other error types. In most quantum + /// technologies, two-qubit gates are typically more error-prone than single-qubit gates. #[must_use] pub fn with_p2_scale(mut self, scale: f64) -> Self { self.p2_scale = Some(scale); self } - /// Set the emission scaling factor + /// Set the scaling factor for spontaneous emission errors + /// + /// Multiplier for spontaneous-emission-related error probabilities. Controls the relative + /// strength of errors that involve transitions outside the standard computational basis. + /// TODO: consider replacing with leak2depolar #[must_use] pub fn with_emission_scale(mut self, scale: f64) -> Self { self.emission_scale = Some(scale); @@ -1984,17 +1657,12 @@ impl GeneralNoiseModelBuilder { /// /// # Parameters /// * `factor` - The conversion factor between coherent and incoherent dephasing rates - /// - /// # Panics - /// - /// Panics if the factor is not positive (less than or equal to 0.0). #[must_use] pub fn with_coherent_to_incoherent_factor(mut self, factor: f64) -> Self { - assert!( - factor > 0.0, - "Coherent-to-incoherent factor must be positive" - ); - self.coherent_to_incoherent_factor = Some(factor); + self.coherent_to_incoherent_factor = Some(Self::validate_positive( + factor, + "Coherent-to-incoherent factor", + )); self } @@ -2032,14 +1700,9 @@ impl GeneralNoiseModelBuilder { /// /// # Parameters /// * `power` - The power to which theta is raised in the RZZ error rate formula - /// - /// # Panics - /// - /// Panics if the power parameter is not positive (less than or equal to 0.0). 
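A sketch of how `with_emission_scale` interacts with the global scale: ratio-type parameters are clamped to 1.0 when the model is built, so large factors cannot push them out of range (mirrors `test_emission_ratio_scaling` further down).

```
// Default emission ratios of 0.5 scaled by 3.0 * 4.0 would be 6.0,
// but build() caps them at 1.0.
let model = GeneralNoiseModel::builder()
    .with_scale(3.0)
    .with_emission_scale(4.0)
    .build();
```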
#[must_use] pub fn with_przz_power(mut self, power: f64) -> Self { - assert!(power > 0.0, "RZZ power parameter must be positive"); - self.przz_power = Some(power); + self.przz_power = Some(Self::validate_positive(power, "RZZ power parameter")); self } @@ -2064,39 +1727,27 @@ impl GeneralNoiseModelBuilder { self } - /// Set the measurement crosstalk rescale factor - /// - /// # Parameters - /// * `scale` - The measurement crosstalk rescale factor + /// Set the scaling factor for measurement crosstalk probability /// - /// # Panics - /// - /// Panics if the scale is negative (less than 0.0). + /// Additional scaling factor specifically for measurement crosstalk probability. #[must_use] - pub fn with_p_crosstalk_meas_rescale(mut self, scale: f64) -> Self { - assert!( - scale >= 0.0, - "Measurement crosstalk rescale factor must be non-negative" - ); - self.p_crosstalk_meas_rescale = Some(scale); + pub fn with_p_meas_crosstalk_scale(mut self, scale: f64) -> Self { + self.p_meas_crosstalk_scale = Some(Self::validate_non_negative( + scale, + "Measurement crosstalk rescale factor", + )); self } - /// Set the preparation crosstalk rescale factor - /// - /// # Parameters - /// * `scale` - The preparation crosstalk rescale factor - /// - /// # Panics + /// Set the scaling factor for initialization crosstalk probability /// - /// Panics if the scale is negative (less than 0.0). + /// Additional scaling factor specifically for initialization crosstalk probability. #[must_use] - pub fn with_p_crosstalk_prep_rescale(mut self, scale: f64) -> Self { - assert!( - scale >= 0.0, - "Preparation crosstalk rescale factor must be non-negative" - ); - self.p_crosstalk_prep_rescale = Some(scale); + pub fn with_p_prep_crosstalk_scale(mut self, scale: f64) -> Self { + self.p_prep_crosstalk_scale = Some(Self::validate_non_negative( + scale, + "Preparation crosstalk rescale factor", + )); self } @@ -2115,77 +1766,52 @@ impl GeneralNoiseModelBuilder { } /// Set the emission ratio for single-qubit gate errors - /// - /// # Panics - /// - /// Panics if the ratio is not between 0.0 and 1.0 (inclusive). #[must_use] pub fn with_p1_emission_ratio(mut self, ratio: f64) -> Self { - assert!( - (0.0..=1.0).contains(&ratio), - "Emission ratio must be between 0 and 1" - ); - self.p1_emission_ratio = Some(ratio); + self.p1_emission_ratio = Some(Self::validate_probability(ratio)); self } /// Set the two-qubit emission ratio - /// - /// # Panics - /// - /// Panics if the ratio is not between 0.0 and 1.0 (inclusive). #[must_use] pub fn with_p2_emission_ratio(mut self, ratio: f64) -> Self { - assert!( - (0.0..=1.0).contains(&ratio), - "Emission ratio must be between 0 and 1" - ); - self.p2_emission_ratio = Some(ratio); + self.p2_emission_ratio = Some(Self::validate_probability(ratio)); + self + } + + /// Set the probability of a leaked qubit being seeped (released from leakage) + #[must_use] + pub fn with_p1_seepage_prob(mut self, prob: f64) -> Self { + self.p1_seepage_prob = Some(Self::validate_probability(prob)); + self + } + + /// Set the probability of a leaked qubit being seeped (released from leakage) + #[must_use] + pub fn with_p2_seepage_prob(mut self, prob: f64) -> Self { + self.p2_seepage_prob = Some(Self::validate_probability(prob)); self } /// Set the probability of a leaked qubit being seeped (released from leakage) - /// - /// # Panics - /// - /// Panics if the probability is not between 0.0 and 1.0 (inclusive). 
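A short sketch of the per-arity seepage knobs this patch introduces; `with_seepage_prob`, kept just below, still sets both values at once.

```
let model = GeneralNoiseModel::builder()
    .with_p1_seepage_prob(0.2) // used when seeping during single-qubit gate faults
    .with_p2_seepage_prob(0.4) // used when seeping during two-qubit gate faults
    .build();
```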
#[must_use] pub fn with_seepage_prob(mut self, prob: f64) -> Self { - assert!( - (0.0..=1.0).contains(&prob), - "Seepage probability must be between 0 and 1" - ); - self.seepage_prob = Some(prob); + self.p1_seepage_prob = Some(Self::validate_probability(prob)); + self.p2_seepage_prob = Some(Self::validate_probability(prob)); self } /// Set the probability of crosstalk during measurement operations - /// - /// # Panics - /// - /// Panics if the probability is not between 0.0 and 1.0 (inclusive). #[must_use] pub fn with_p_crosstalk_meas(mut self, prob: f64) -> Self { - assert!( - (0.0..=1.0).contains(&prob), - "Measurement crosstalk probability must be between 0 and 1" - ); - self.p_crosstalk_meas = Some(prob); + self.p_meas_crosstalk = Some(Self::validate_probability(prob)); self } /// Set the probability of crosstalk during initialization operations - /// - /// # Panics - /// - /// Panics if the probability is not between 0.0 and 1.0 (inclusive). #[must_use] pub fn with_p_crosstalk_prep(mut self, prob: f64) -> Self { - assert!( - (0.0..=1.0).contains(&prob), - "Preparation crosstalk probability must be between 0 and 1" - ); - self.p_crosstalk_prep = Some(prob); + self.p_prep_crosstalk = Some(Self::validate_probability(prob)); self } @@ -2196,25 +1822,99 @@ impl GeneralNoiseModelBuilder { self } + /// Scale error probabilities based on scaling factors + /// + /// This method applies all scaling factors to the error probabilities: + /// - Global scale factor + /// - Type-specific scale factors (measurement, preparation, memory, etc.) + /// - Conversion factors from average to total error rates (3/2 for p1, 5/4 for p2) + /// + /// This method should be called exactly once after setting all parameters + /// and before using the noise model for simulation. Calling it multiple times will + /// compound the scaling factors incorrectly. 
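And one for the renamed crosstalk parameters: the base probabilities keep `with_p_crosstalk_meas` / `with_p_crosstalk_prep`, while the former rescale factors are now set through `with_p_meas_crosstalk_scale` / `with_p_prep_crosstalk_scale` and folded in by `scale_parameters` below.

```
let model = GeneralNoiseModel::builder()
    .with_p_crosstalk_meas(0.001)     // base measurement-crosstalk probability
    .with_p_meas_crosstalk_scale(0.5) // crosstalk-specific rescale factor
    .with_meas_scale(2.0)             // also applied to the crosstalk term
    .build();
```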
+ pub fn scale_parameters(&mut self, model: &mut GeneralNoiseModel) { + let scale = self.scale.unwrap_or(1.0); + // let memory_scale = self.memory_scale.unwrap_or(1.0); + let prep_scale = self.prep_scale.unwrap_or(1.0); + let meas_scale = self.meas_scale.unwrap_or(1.0); + let leakage_scale = self.leakage_scale.unwrap_or(1.0); + let p1_scale = self.p1_scale.unwrap_or(1.0); + let p2_scale = self.p2_scale.unwrap_or(1.0); + let emission_scale = self.emission_scale.unwrap_or(1.0); + let p_meas_crosstalk_scale = self.p_meas_crosstalk_scale.unwrap_or(1.0); + let p_prep_crosstalk_scale = self.p_prep_crosstalk_scale.unwrap_or(1.0); + + // Apply dephasing errors based on the duration + // Use memory_scale to adjust the dephasing rate + // model.dephasing_rate *= self.memory_scale * self.scale; + + // Scale single-qubit gate error probability + model.p1 *= p1_scale * scale; + + // Scale two-qubit gate error probability + model.p2 *= p2_scale * scale; + + model.p_meas_0 *= meas_scale * scale; + model.p_meas_1 *= meas_scale * scale; + + // Scale preparation error probability + model.p_prep *= prep_scale * scale; + + // Scale preparation leakage ratio - include the global scale factor + model.p_prep_leak_ratio *= leakage_scale * scale; + model.p_prep_leak_ratio = model.p_prep_leak_ratio.min(1.0); + + // Apply crosstalk rescaling factors + model.p_meas_crosstalk *= p_meas_crosstalk_scale; + model.p_prep_crosstalk *= p_prep_crosstalk_scale; + + // Then apply the regular scaling to crosstalks + model.p_meas_crosstalk *= meas_scale * scale; + model.p_prep_crosstalk *= prep_scale * scale; + + // Scale emission ratios + model.p1_emission_ratio *= emission_scale * scale; + model.p1_emission_ratio = model.p1_emission_ratio.min(1.0); + + model.p2_emission_ratio *= emission_scale * scale; + model.p2_emission_ratio = model.p2_emission_ratio.min(1.0); + } + /// Build the general noise model /// + /// TODO: Consider another build with noiseless default + /// /// # Returns /// A boxed noise model /// /// # Panics /// Panics if any probabilities are not set or are not between 0 and 1. 
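Because scaling now happens exactly once inside `build()`, the net single-qubit rate is simply the average-to-total conversion times `p1_scale` times the global `scale`; a test-style sketch (the `probabilities()` accessor and its tuple order follow `test_parameter_scaling` below):

```
// 0.01 average -> 0.015 total -> * 3.0 * 2.0 = 0.09
let model = GeneralNoiseModel::builder()
    .with_average_p1_probability(0.01)
    .with_p1_scale(3.0)
    .with_scale(2.0)
    .build();
let noise = model.as_any().downcast_ref::<GeneralNoiseModel>().unwrap();
let (_, _, _, p1, _, _) = noise.probabilities();
assert!((p1 - 0.09).abs() < 1e-9);
```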
#[must_use] - pub fn build(self) -> Box { - let mut model = GeneralNoiseModel::new( - self.p_prep.unwrap_or(0.01), - self.p_meas_0.unwrap_or(0.01), - self.p_meas_1.unwrap_or(0.01), - self.p1.unwrap_or(0.01), - self.p2.unwrap_or(0.01), - ); + pub fn build(mut self) -> Box { + // Start with the default noise model as a base + let mut model = GeneralNoiseModel::default(); - if let Some(seed) = self.seed { - let _ = model.set_seed(seed); + // Apply all parameters that were explicitly set + if let Some(p_prep) = self.p_prep { + model.p_prep = p_prep; + } + + if let Some(p_meas_0) = self.p_meas_0 { + model.p_meas_0 = p_meas_0; + } + + if let Some(p_meas_1) = self.p_meas_1 { + model.p_meas_1 = p_meas_1; + } + + model.p_meas_max = model.p_meas_0.max(model.p_meas_1); + + if let Some(p1) = self.p1 { + model.p1 = p1; + } + + if let Some(p2) = self.p2 { + model.p2 = p2; } if let Some(ratio) = self.p1_emission_ratio { @@ -2222,123 +1922,239 @@ impl GeneralNoiseModelBuilder { } if let Some(ratio) = self.p2_emission_ratio { - model.set_p2_emission_ratio(ratio); + model.p2_emission_ratio = ratio; } - if let Some(model_map) = self.p1_pauli_model { + if let Some(model_map) = self.p1_pauli_model.clone() { model.p1_pauli_model = model_map; } - if let Some(model_map) = self.p1_emission_model { + if let Some(model_map) = self.p1_emission_model.clone() { model.p1_emission_model = model_map; } - if let Some(model_map) = self.p2_pauli_model { + if let Some(model_map) = self.p2_pauli_model.clone() { model.p2_pauli_model = model_map; } - if let Some(model_map) = self.p2_emission_model { + if let Some(model_map) = self.p2_emission_model.clone() { model.p2_emission_model = model_map; } if let Some(ratio) = self.p_prep_leak_ratio { - model.set_prep_leak_ratio(ratio); - } - - if let Some(prob) = self.seepage_prob { - model.set_seepage_prob(prob); + model.p_prep_leak_ratio = ratio; } - if let Some(prob) = self.p_crosstalk_meas { - // Set crosstalk parameters - model.p_crosstalk_meas = prob; + if let Some(prob) = self.p1_seepage_prob { + model.p1_seepage_prob = prob; } - if let Some(prob) = self.p_crosstalk_prep { - // Set crosstalk parameters - model.p_crosstalk_prep = prob; + if let Some(prob) = self.p2_seepage_prob { + model.p2_seepage_prob = prob; } - if let Some(scale) = self.scale { - model.set_scale(scale); + if let Some(seed) = self.seed { + // Use the with_seed constructor for NoiseRng + model.rng = NoiseRng::with_seed(seed); } - if let Some(scale) = self.memory_scale { - model.set_memory_scale(scale); + if let Some(coherent) = self.coherent_dephasing { + model.coherent_dephasing = coherent; } - if let Some(scale) = self.prep_scale { - model.set_prep_scale(scale); + if let Some(factor) = self.coherent_to_incoherent_factor { + model.coherent_to_incoherent_factor = factor; } - if let Some(scale) = self.meas_scale { - model.set_meas_scale(scale); + if let Some(przz_params) = self.przz_params { + model.przz_a = przz_params.0; + model.przz_b = przz_params.1; + model.przz_c = przz_params.2; + model.przz_d = przz_params.3; } - if let Some(scale) = self.leakage_scale { - model.set_leakage_scale(scale); + if let Some(power) = self.przz_power { + model.przz_power = power; } - if let Some(scale) = self.p1_scale { - model.set_p1_scale(scale); + if let Some(gates) = self.noiseless_gates.clone() { + for gate in gates { + model.add_noiseless_gate(gate); + } } - if let Some(scale) = self.p2_scale { - model.set_p2_scale(scale); + if let Some(leak2depolar) = self.leak2depolar { + model.leak2depolar = leak2depolar; } - if let 
Some(scale) = self.emission_scale { - model.set_emission_scale(scale); + if let Some(has_crosstalk_per_gate) = self.crosstalk_per_gate { + model.crosstalk_per_gate = has_crosstalk_per_gate; } - if let Some(scale) = self.p_crosstalk_meas_rescale { - model.set_p_crosstalk_meas_rescale(scale); + if let Some(prob) = self.p_meas_crosstalk { + model.p_meas_crosstalk = prob; } - if let Some(scale) = self.p_crosstalk_prep_rescale { - model.set_p_crosstalk_prep_rescale(scale); + if let Some(prob) = self.p_prep_crosstalk { + model.p_prep_crosstalk = prob; } - if let Some(coherent) = self.coherent_dephasing { - model.set_coherent_dephasing(coherent); - } + self.scale_parameters(&mut model); + Box::new(model) + } - if let Some(factor) = self.coherent_to_incoherent_factor { - model.set_coherent_to_incoherent_factor(factor); + /// Create a new builder from an existing model's configuration + /// + /// This method is useful for creating a new model that is identical to an existing one + /// except for a few changed parameters. + /// + /// # Arguments + /// * `model` - The existing model to copy parameters from + /// + /// # Returns + /// A builder with parameters copied from the existing model + #[must_use] + pub fn from_model(model: &GeneralNoiseModel) -> Self { + Self { + p_prep: Some(model.p_prep), + p_meas_0: Some(model.p_meas_0), + p_meas_1: Some(model.p_meas_1), + p1: Some(model.p1), + p2: Some(model.p2), + p1_emission_ratio: Some(model.p1_emission_ratio), + p2_emission_ratio: Some(model.p2_emission_ratio), + p1_pauli_model: Some(model.p1_pauli_model.clone()), + p1_emission_model: Some(model.p1_emission_model.clone()), + p2_pauli_model: Some(model.p2_pauli_model.clone()), + p2_emission_model: Some(model.p2_emission_model.clone()), + p_prep_leak_ratio: Some(model.p_prep_leak_ratio), + p1_seepage_prob: Some(model.p1_seepage_prob), + p2_seepage_prob: Some(model.p2_seepage_prob), + seed: None, // Don't copy the seed + scale: None, + memory_scale: None, + prep_scale: None, + meas_scale: None, + leakage_scale: None, + p1_scale: None, + p2_scale: None, + emission_scale: None, + p_meas_crosstalk: Some(model.p_meas_crosstalk), + p_prep_crosstalk: Some(model.p_prep_crosstalk), + p_meas_crosstalk_scale: None, + p_prep_crosstalk_scale: None, + crosstalk_per_gate: Some(model.crosstalk_per_gate), + coherent_dephasing: Some(model.coherent_dephasing), + coherent_to_incoherent_factor: Some(model.coherent_to_incoherent_factor), + przz_params: Some((model.przz_a, model.przz_b, model.przz_c, model.przz_d)), + przz_power: Some(model.przz_power), + noiseless_gates: Some(model.noiseless_gates.clone()), + leak2depolar: Some(model.leak2depolar), } + } +} - if let Some(przz_params) = self.przz_params { - model.set_przz_params(przz_params.0, przz_params.1, przz_params.2, przz_params.3); - } else { - model.set_przz_params(0.0, 1.0, 0.0, 1.0); - } +impl Default for GeneralNoiseModel { + /// Create a new noise model with default error parameters + /// + /// Creates a `GeneralNoiseModel` with sensible default error probabilities: + /// * `p_prep` - Preparation (initialization) error probability: 0.01 + /// * `p_meas_0` - Probability of measuring 1 when the state is |0⟩: 0.01 + /// * `p_meas_1` - Probability of measuring 0 when the state is |1⟩: 0.01 + /// * `p1` - Single-qubit gate error probability (average error rate): 0.001 + /// * `p2` - Two-qubit gate error probability (average error rate): 0.01 + /// + /// Other parameters are initialized with sensible defaults, including uniform + /// distributions for Pauli errors and 
emission errors. + /// + /// # Example + /// ``` + /// use pecos_engines::engines::noise::GeneralNoiseModel; + /// + /// // Create model with default error probabilities + /// let mut model = GeneralNoiseModel::default(); + /// ``` + fn default() -> Self { + // Initialize default models + let mut p1_pauli_model = HashMap::new(); + p1_pauli_model.insert("X".to_string(), 1.0 / 3.0); + p1_pauli_model.insert("Y".to_string(), 1.0 / 3.0); + p1_pauli_model.insert("Z".to_string(), 1.0 / 3.0); - if let Some(power) = self.przz_power { - model.set_przz_power(power); - } + let mut p1_emission_model = HashMap::new(); + p1_emission_model.insert("X".to_string(), 1.0 / 3.0); + p1_emission_model.insert("Y".to_string(), 1.0 / 3.0); + p1_emission_model.insert("Z".to_string(), 1.0 / 3.0); - if let Some(gates) = self.noiseless_gates { - for gate in gates { - model.add_noiseless_gate(gate); - } - } else { - // If no noiseless gates specified, ensure RZ is still a noiseless gate - model.add_noiseless_gate(GateType::RZ); - } + let mut p2_pauli_model = HashMap::new(); + p2_pauli_model.insert("XX".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("XY".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("XZ".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("YX".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("YY".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("YZ".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("ZX".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("ZY".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("ZZ".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("IX".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("IY".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("IZ".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("XI".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("YI".to_string(), 1.0 / 15.0); + p2_pauli_model.insert("ZI".to_string(), 1.0 / 15.0); - if let Some(use_depolar) = self.leak2depolar { - model.set_leak2depolar(use_depolar); - } + let mut p2_emission_model = HashMap::new(); + p2_emission_model.insert("XX".to_string(), 1.0 / 15.0); + p2_emission_model.insert("XY".to_string(), 1.0 / 15.0); + p2_emission_model.insert("XZ".to_string(), 1.0 / 15.0); + p2_emission_model.insert("YX".to_string(), 1.0 / 15.0); + p2_emission_model.insert("YY".to_string(), 1.0 / 15.0); + p2_emission_model.insert("YZ".to_string(), 1.0 / 15.0); + p2_emission_model.insert("ZX".to_string(), 1.0 / 15.0); + p2_emission_model.insert("ZY".to_string(), 1.0 / 15.0); + p2_emission_model.insert("ZZ".to_string(), 1.0 / 15.0); + p2_emission_model.insert("IX".to_string(), 1.0 / 15.0); + p2_emission_model.insert("IY".to_string(), 1.0 / 15.0); + p2_emission_model.insert("IZ".to_string(), 1.0 / 15.0); + p2_emission_model.insert("XI".to_string(), 1.0 / 15.0); + p2_emission_model.insert("YI".to_string(), 1.0 / 15.0); + p2_emission_model.insert("ZI".to_string(), 1.0 / 15.0); - if let Some(has_crosstalk_per_gate) = self.crosstalk_per_gate { - model.crosstalk_per_gate = has_crosstalk_per_gate; - } else { - model.crosstalk_per_gate = false; - } + let p_meas_0: f64 = 0.01; // 1% probability of measuring 1 when state is |0⟩ + let p_meas_1: f64 = 0.01; // 1% probability of measuring 0 when state is |1⟩ - model.scale_parameters(); - // TODO: Need this Box? 
- Box::new(model) + // Default error probabilities + Self { + p_prep: 0.01, + p_meas_0, + p_meas_1, + p1: 0.001, + p2: 0.01, + p1_emission_ratio: 0.5, + p_prep_leak_ratio: 0.5, + p2_emission_ratio: 0.5, + p1_pauli_model: SingleQubitWeightedSampler::new(&p1_pauli_model), + p1_emission_model: SingleQubitWeightedSampler::new(&p1_emission_model), + p2_pauli_model: TwoQubitWeightedSampler::new(&p2_pauli_model), + p2_emission_model: TwoQubitWeightedSampler::new(&p2_emission_model), + p1_seepage_prob: 0.5, + p2_seepage_prob: 0.5, + przz_a: 0.0, + przz_b: 1.0, + przz_c: 0.0, + przz_d: 1.0, + przz_power: 1.0, + leaked_qubits: HashSet::new(), + rng: NoiseRng::default(), + p_meas_crosstalk: 0.0, + p_prep_crosstalk: 0.0, + crosstalk_per_gate: false, + coherent_dephasing: false, + coherent_to_incoherent_factor: 2.0, + noiseless_gates: HashSet::new(), + p_meas_max: p_meas_0.max(p_meas_1), + leak2depolar: false, + } } } @@ -2347,7 +2163,54 @@ mod tests { use super::*; use crate::byte_message::ByteMessageBuilder; use crate::byte_message::gate_type::{GateType, QuantumGate}; - use rand::SeedableRng; + + #[test] + fn test_default() { + // Create a noise model with the default settings + let model = GeneralNoiseModel::default(); + + // Check the default values + assert!( + (model.p_prep - 0.01).abs() < f64::EPSILON, + "Default p_prep should be 0.01" + ); + assert!( + (model.p_meas_0 - 0.01).abs() < f64::EPSILON, + "Default p_meas_0 should be 0.01" + ); + assert!( + (model.p_meas_1 - 0.01).abs() < f64::EPSILON, + "Default p_meas_1 should be 0.01" + ); + assert!( + (model.p1 - 0.001).abs() < f64::EPSILON, + "Default p1 should be 0.001" + ); + assert!( + (model.p2 - 0.01).abs() < f64::EPSILON, + "Default p2 should be 0.01" + ); + assert!( + (model.p1_emission_ratio - 0.5).abs() < f64::EPSILON, + "Default p1_emission_ratio should be 0.5" + ); + assert!( + (model.p_prep_leak_ratio - 0.5).abs() < f64::EPSILON, + "Default p_prep_leak_ratio should be 0.5" + ); + assert!( + (model.p2_emission_ratio - 0.5).abs() < f64::EPSILON, + "Default p2_emission_ratio should be 0.5" + ); + assert!( + (model.p1_seepage_prob - 0.5).abs() < f64::EPSILON, + "Default seepage_prob should be 0.5" + ); + assert!( + (model.p2_seepage_prob - 0.5).abs() < f64::EPSILON, + "Default seepage_prob should be 0.5" + ); + } #[test] fn test_builder() { @@ -2356,8 +2219,8 @@ mod tests { .with_prep_probability(0.1) .with_meas_0_probability(0.2) .with_meas_1_probability(0.3) - .with_p1_probability(0.4) - .with_p2_probability(0.5) + .with_average_p1_probability(0.4) + .with_average_p2_probability(0.5) .with_prep_leak_ratio(0.6) .build(); @@ -2406,6 +2269,23 @@ mod tests { ); assert!((p_prep_leak_ratio - 0.6).abs() < f64::EPSILON); + + // Test the builder with no parameters (should use defaults) + let default_noise = GeneralNoiseModel::builder().build(); + let default_ref = default_noise + .as_any() + .downcast_ref::() + .unwrap(); + + // Verify a few key default values + assert!( + (default_ref.p1 - 0.001).abs() < 1e-6, + "Default p1 should be 0.001" + ); + assert!( + (default_ref.p2 - 0.01).abs() < 1e-6, + "Default p2 should be 0.01" + ); } #[test] @@ -2474,8 +2354,15 @@ mod tests { use crate::byte_message::{ByteMessageBuilder, GateType, QuantumGate}; // Create a noise model with 100% prep error probability and 100% leakage ratio - let mut noise = GeneralNoiseModel::new(1.0, 0.0, 0.0, 0.0, 0.0); - noise.set_prep_leak_ratio(1.0); + // using the builder pattern + let mut model = GeneralNoiseModel::builder() + .with_prep_probability(1.0) + 
.with_prep_leak_ratio(1.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Create a quantum gate operation (Prep on qubit 0) let gate = QuantumGate { @@ -2497,8 +2384,14 @@ mod tests { assert!(noise.is_leaked(0), "Qubit 0 should be marked as leaked"); // Now, create a noise model with 100% prep error probability but 0% leakage ratio - let mut noise = GeneralNoiseModel::new(1.0, 0.0, 0.0, 0.0, 0.0); - noise.set_prep_leak_ratio(0.0); + let mut model = GeneralNoiseModel::builder() + .with_prep_probability(1.0) + .with_prep_leak_ratio(0.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Create a new builder let mut builder = ByteMessageBuilder::new(); @@ -2537,7 +2430,17 @@ mod tests { use crate::byte_message::ByteMessageBuilder; // Create a noise model with no spontaneous errors - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.0); + let mut model = GeneralNoiseModel::builder() + .with_prep_probability(0.0) + .with_meas_0_probability(0.0) + .with_meas_1_probability(0.0) + .with_p1_probability(0.0) + .with_p2_probability(0.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Manually mark qubit 0 as leaked noise.mark_as_leaked(0); @@ -2569,19 +2472,24 @@ mod tests { #[test] fn test_parameter_scaling() { - // Test that scaling factors are applied correctly - let mut noise = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.01, 0.01); - - // Set scaling factors - noise.set_scale(2.0); // Double everything - noise.set_p1_scale(3.0); // Triple p1 (in addition to doubling) - noise.set_p2_scale(4.0); // Quadruple p2 (in addition to doubling) - noise.set_prep_scale(5.0); // 5x prep (in addition to doubling) - noise.set_meas_scale(6.0); // 6x meas (in addition to doubling) - noise.set_leakage_scale(0.25); // 7x leakage - - // Apply scaling - noise.scale_parameters(); // Apply scaling + // Test that scaling factors are applied correctly - use builder pattern + let mut model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.01) + .with_average_p2_probability(0.01) + .with_scale(2.0) + .with_p1_scale(3.0) + .with_p2_scale(4.0) + .with_prep_scale(5.0) + .with_meas_scale(6.0) + .with_leakage_scale(0.25) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Get values after scaling let (p_prep, p_meas_0, p_meas_1, p1, p2, p_prep_leak_ratio) = noise.probabilities(); @@ -2593,8 +2501,7 @@ mod tests { let expected_p2 = 0.01 * 4.0 * 2.0 * (5.0 / 4.0); // Base * p2_scale * overall scale * avg->total // Initial value in constructor is 0.5 - // and we scale it by leakage_scale (7.0) and overall scale (2.0) - // This would be 7.0, but capped to 1.0 since it's a probability + // and we scale it by leakage_scale (0.25) and overall scale (2.0) let expected_leak_ratio = 0.5 * 0.25 * 2.0; // Base * leakage_scale * overall scale, capped at 1.0 println!( @@ -2644,8 +2551,8 @@ mod tests { .with_prep_probability(0.01) .with_meas_0_probability(0.01) .with_meas_1_probability(0.01) - .with_single_qubit_probability(0.01) - .with_two_qubit_probability(0.01) + .with_average_p1_probability(0.01) + .with_average_p2_probability(0.01) .with_prep_leak_ratio(0.01) .with_scale(2.0) .with_p1_scale(3.0) @@ -2713,113 +2620,119 @@ mod tests { #[test] fn test_emission_ratio_scaling() { - // Test that emission ratios are properly scaled and capped at 1.0 - let mut noise = 
GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.01, 0.01); - - // Set emission ratio to 0.5 (default) - assert!((noise.p1_emission_ratio - 0.5).abs() < 1e-6); - assert!((noise.p2_emission_ratio - 0.5).abs() < 1e-6); - - // Set scaling factors that would push ratios above 1.0 - noise.set_scale(3.0); - noise.set_emission_scale(4.0); - - // Apply scaling - noise.scale_parameters(); + // Test that emission ratios are properly scaled and capped at a maximum of 1.0 + // Default emission ratios are 0.5 + let mut model = GeneralNoiseModel::builder() + .with_scale(3.0) + .with_emission_scale(4.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); - // Check that p1_emission_ratio is properly scaled and capped + // Verify both ratios are 0.5 after scaling + // When scaled: 0.5 * 3.0 (scale) * 4.0 (emission_scale) = 6.0 + // But capped at 1.0 assert!( (noise.p1_emission_ratio - 1.0).abs() < 1e-6, - "p1_emission_ratio should be capped at 1.0, but was {}", - noise.p1_emission_ratio + "p1_emission_ratio should be 1.0 after scaling/capping" ); - - // Check that p2_emission_ratio is properly scaled and capped assert!( (noise.p2_emission_ratio - 1.0).abs() < 1e-6, - "p2_emission_ratio should be capped at 1.0, but was {}", - noise.p2_emission_ratio + "p2_emission_ratio should be 1.0 after scaling/capping" ); // Now test with values that won't exceed the cap - let mut noise = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.01, 0.01); - noise.p1_emission_ratio = 0.1; - noise.p2_emission_ratio = 0.1; - - noise.set_scale(2.0); - noise.set_emission_scale(3.0); - - // Apply scaling - noise.scale_parameters(); + let mut model = GeneralNoiseModel::builder() + .with_p1_emission_ratio(0.1) + .with_p2_emission_ratio(0.1) + .with_scale(2.0) + .with_emission_scale(3.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Expected values: 0.1 * 3.0 (emission) * 2.0 (overall) = 0.6 assert!((noise.p1_emission_ratio - 0.6).abs() < 1e-6); assert!((noise.p2_emission_ratio - 0.6).abs() < 1e-6); } - #[test] - fn test_coherent_dephasing() { - // Create a circuit builder - let mut builder = ByteMessageBuilder::new(); - let _ = builder.for_quantum_operations(); - - // Create a noise model with coherent dephasing - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.0); - noise.set_coherent_dephasing(true); - - // Create an idle gate - let gate = QuantumGate { - gate_type: GateType::Idle, - qubits: vec![0], - params: vec![1.0], // 1 second duration - result_id: None, - noiseless: false, - }; - - // Apply idle faults - should use coherent dephasing (RZ gates) - noise.apply_idle_faults(&gate, &mut builder); - - // Get the message and verify it contains RZ gates - let message = builder.build(); - let gates = message.parse_quantum_operations().unwrap(); - - // At least one gate should be an RZ gate - assert!(!gates.is_empty(), "Should have at least one gate"); - assert!( - gates.iter().any(|g| g.gate_type == GateType::RZ), - "Should contain at least one RZ gate" - ); - - // Now test with incoherent dephasing - let mut builder = ByteMessageBuilder::new(); - let _ = builder.for_quantum_operations(); - - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.0); - noise.set_coherent_dephasing(false); - - // Force the RNG to produce deterministic outcomes - let rng = ChaCha8Rng::seed_from_u64(42); - noise.set_rng(rng).unwrap(); - - // Apply idle faults with incoherent dephasing - noise.apply_idle_faults(&gate, &mut builder); - - // The message may contain Z gates or be empty 
depending on random outcomes - let message = builder.build(); - let _gates = message.parse_quantum_operations().unwrap(); - - // We can't assert specific outcomes due to randomness, but the code should run without errors - } + // #[test] + // fn test_coherent_dephasing() { + // // Create a circuit builder + // let mut builder = ByteMessageBuilder::new(); + // let _ = builder.for_quantum_operations(); + // + // // Create a noise model with coherent dephasing + // let mut model = GeneralNoiseModel::builder() + // .with_coherent_dephasing(true) + // .build(); + // let noise = model + // .as_any_mut() + // .downcast_mut::() + // .unwrap(); + // + // // Create an idle gate + // let gate = QuantumGate { + // gate_type: GateType::Idle, + // qubits: vec![0], + // params: vec![1.0], // 1 second duration + // result_id: None, + // noiseless: false, + // }; + // + // // Apply idle faults - should use coherent dephasing (RZ gates) + // noise.apply_idle_faults(&gate, &mut builder); + // + // // Get the message and verify it contains RZ gates + // let message = builder.build(); + // let gates = message.parse_quantum_operations().unwrap(); + // + // // At least one gate should be an RZ gate + // assert!(!gates.is_empty(), "Should have at least one gate"); + // assert!( + // gates.iter().any(|g| g.gate_type == GateType::RZ), + // "Should contain at least one RZ gate" + // ); + // + // // Now test with incoherent dephasing + // let mut builder = ByteMessageBuilder::new(); + // let _ = builder.for_quantum_operations(); + // + // let mut model = GeneralNoiseModel::builder() + // .with_coherent_dephasing(false) + // .with_seed(42) + // .build(); + // let noise = model + // .as_any_mut() + // .downcast_mut::() + // .unwrap(); + // + // // Apply idle faults with incoherent dephasing + // noise.apply_idle_faults(&gate, &mut builder); + // + // // The message may contain Z gates or be empty depending on random outcomes + // let message = builder.build(); + // let _gates = message.parse_quantum_operations().unwrap(); + // + // // We can't assert specific outcomes due to randomness, but the code should run without errors + // } #[test] #[allow(clippy::unreadable_literal)] fn test_rzz_error_rate() { - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.1); - noise.set_przz_params(0.1, 0.0, 0.25, 0.0); - noise.set_przz_power(1.0); - - // Apply scaling factors - noise.scale_parameters(); + let mut model = GeneralNoiseModel::builder() + .with_average_p2_probability(0.1) + .with_przz_params(0.1, 0.0, 0.25, 0.0) + .with_przz_power(1.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Test negative angle let neg_theta = -std::f64::consts::PI / 2.0; @@ -2840,7 +2753,16 @@ mod tests { ); // Test quadratic scaling - noise.set_przz_power(2.0); + let mut model = GeneralNoiseModel::builder() + .with_average_p2_probability(0.1) + .with_przz_params(0.1, 0.0, 0.25, 0.0) + .with_przz_power(2.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); + let error_quad = noise.rzz_error_rate(pos_theta); let expected_quad = 0.0078125; assert!( @@ -2852,8 +2774,14 @@ mod tests { #[test] fn test_noiseless_gates() { // Create a noise model and mark RZ as a noiseless gate - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 1.0, 0.0); - noise.add_noiseless_gate(GateType::RZ); + let mut model = GeneralNoiseModel::builder() + .with_p1_probability(0.5) // Use a moderate valid probability + .with_noiseless_gate(GateType::RZ) + .build(); + let noise = model + .as_any_mut() 
+ .downcast_mut::() + .unwrap(); // Create a builder to capture gates let mut builder = ByteMessageBuilder::new(); @@ -2910,8 +2838,11 @@ mod tests { #[test] fn test_leak2depolar() { // Create a noise model with leak2depolar set to true - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.0); - noise.set_leak2depolar(true); + let mut model = GeneralNoiseModel::builder().with_leak2depolar(true).build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Create a builder let mut builder = ByteMessageBuilder::new(); @@ -2927,7 +2858,13 @@ mod tests { ); // Reset and try with leak2depolar=false - noise.set_leak2depolar(false); + let mut model = GeneralNoiseModel::builder() + .with_leak2depolar(false) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Clear the builder let mut builder = ByteMessageBuilder::new(); @@ -2945,30 +2882,49 @@ mod tests { #[test] fn test_rzz_error_rate_debug() { - let mut noise = GeneralNoiseModel::new(0.0, 0.0, 0.0, 0.0, 0.1); - noise.set_przz_params(0.1, 0.0, 0.25, 0.0); - // p2 is already set to 0.1 in the constructor + let mut model = GeneralNoiseModel::builder() + .with_average_p2_probability(0.1) + .with_przz_params(0.1, 0.0, 0.25, 0.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); // Check unscaled przz error rate let theta = std::f64::consts::PI / 4.0; let norm_theta = theta / std::f64::consts::PI; let error_unscaled = noise.rzz_error_rate(theta); let c = 0.25; - let expected_unscaled = c * norm_theta * 0.1; // Multiply by p2 (0.1) + + // After build(), parameters are scaled: p2 is scaled by 5/4 + let p2_scaled = 0.1 * (5.0 / 4.0); + let expected_unscaled = c * norm_theta * p2_scaled; // 0.0078125 + assert!( (error_unscaled - expected_unscaled).abs() < 1e-6, "Expected {expected_unscaled}, got {error_unscaled}" ); // Check scaled przz error rate - noise.set_scale(2.0); - noise.scale_parameters(); + let mut model = GeneralNoiseModel::builder() + .with_average_p2_probability(0.1) + .with_przz_params(0.1, 0.0, 0.25, 0.0) + .with_scale(2.0) + .build(); + let noise = model + .as_any_mut() + .downcast_mut::() + .unwrap(); + let error_scaled = noise.rzz_error_rate(theta); - // After scaling, p2 is scaled by: + + // After build() with scale 2.0, p2 is scaled by: // - scale (2.0) - // - p2_scale (defaults to 1.0) // - 5/4 conversion factor (from average to total error) - let expected_scaled = c * norm_theta * 0.1 * 2.0 * 1.0 * (5.0 / 4.0); + let p2_scaled = 0.1 * 2.0 * (5.0 / 4.0); + let expected_scaled = c * norm_theta * p2_scaled; // 0.015625 + assert!( (error_scaled - expected_scaled).abs() < 1e-6, "Expected {expected_scaled}, got {error_scaled}" @@ -2981,18 +2937,42 @@ mod tests { // Define epsilon for approximate float comparisons const EPSILON: f64 = 0.005; // Increased tolerance for sampler discretization - let mut model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.1, 0.2); - - // Test p1_pauli_model setter + // Create all our custom models first let mut custom_p1_pauli = HashMap::new(); custom_p1_pauli.insert("X".to_string(), 0.7); custom_p1_pauli.insert("Y".to_string(), 0.2); custom_p1_pauli.insert("Z".to_string(), 0.1); - model.set_p1_pauli_model(&custom_p1_pauli); + let mut custom_p1_emission = HashMap::new(); + custom_p1_emission.insert("X".to_string(), 0.4); + custom_p1_emission.insert("Y".to_string(), 0.6); + + let mut custom_p2_pauli = HashMap::new(); + custom_p2_pauli.insert("XX".to_string(), 0.5); + custom_p2_pauli.insert("YY".to_string(), 0.3); + 
custom_p2_pauli.insert("ZZ".to_string(), 0.2); + + let mut custom_p2_emission = HashMap::new(); + custom_p2_emission.insert("XX".to_string(), 0.25); + custom_p2_emission.insert("YY".to_string(), 0.75); + + // Create a noise model with custom Pauli and emission models using the builder + let model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_p1_probability(0.1) + .with_p2_probability(0.2) + .with_p1_pauli_model(&custom_p1_pauli) + .with_p1_emission_model(&custom_p1_emission) + .with_p2_pauli_model(&custom_p2_pauli) + .with_p2_emission_model(&custom_p2_emission) + .build(); + + let noise = model.as_any().downcast_ref::().unwrap(); // Get the distribution to verify using the direct accessor pattern - let p1_pauli_dist = model.p1_pauli_model().get_weighted_map(); + let p1_pauli_dist = noise.p1_pauli_model().get_weighted_map(); // Check that the distribution contains the right keys and approximate values assert!( @@ -3021,15 +3001,8 @@ mod tests { "Expected Z value to be close to 0.1" ); - // Test p1_emission_model setter - let mut custom_p1_emission = HashMap::new(); - custom_p1_emission.insert("X".to_string(), 0.4); - custom_p1_emission.insert("Y".to_string(), 0.6); - - model.set_p1_emission_model(&custom_p1_emission); - - // Verify p1_emission_model was updated correctly - let p1_emission_dist = model.p1_emission_model().get_weighted_map(); + // Verify p1_emission_model was set correctly + let p1_emission_dist = noise.p1_emission_model().get_weighted_map(); assert!( p1_emission_dist.contains_key("X"), "Distribution should contain X" @@ -3048,31 +3021,8 @@ mod tests { "Expected Y value to be close to 0.6" ); - // Verify p1_pauli_model was NOT changed by setting p1_emission_model - let p1_pauli_dist = model.p1_pauli_model().get_weighted_map(); - assert!( - (p1_pauli_dist["X"] - 0.7).abs() < EPSILON, - "Expected X value to be close to 0.7" - ); - assert!( - (p1_pauli_dist["Y"] - 0.2).abs() < EPSILON, - "Expected Y value to be close to 0.2" - ); - assert!( - (p1_pauli_dist["Z"] - 0.1).abs() < EPSILON, - "Expected Z value to be close to 0.1" - ); - - // Test p2_pauli_model setter - let mut custom_p2_pauli = HashMap::new(); - custom_p2_pauli.insert("XX".to_string(), 0.5); - custom_p2_pauli.insert("YY".to_string(), 0.3); - custom_p2_pauli.insert("ZZ".to_string(), 0.2); - - model.set_p2_pauli_model(&custom_p2_pauli); - - // Verify p2_pauli_model was updated correctly - let p2_pauli_dist = model.p2_pauli_model().get_weighted_map(); + // Verify p2_pauli_model was set correctly + let p2_pauli_dist = noise.p2_pauli_model().get_weighted_map(); assert!( p2_pauli_dist.contains_key("XX"), "Distribution should contain XX" @@ -3099,15 +3049,8 @@ mod tests { "Expected ZZ value to be close to 0.2" ); - // Test p2_emission_model setter - let mut custom_p2_emission = HashMap::new(); - custom_p2_emission.insert("XX".to_string(), 0.25); - custom_p2_emission.insert("YY".to_string(), 0.75); - - model.set_p2_emission_model(&custom_p2_emission); - - // Verify p2_emission_model was updated correctly - let p2_emission_dist = model.p2_emission_model().get_weighted_map(); + // Verify p2_emission_model was set correctly + let p2_emission_dist = noise.p2_emission_model().get_weighted_map(); assert!( p2_emission_dist.contains_key("XX"), "Distribution should contain XX" @@ -3125,35 +3068,5 @@ mod tests { (p2_emission_dist["YY"] - 0.75).abs() < EPSILON, "Expected YY value to be close to 0.75" ); - - // Verify p2_pauli_model was NOT 
changed by setting p2_emission_model - let p2_pauli_dist = model.p2_pauli_model().get_weighted_map(); - assert!( - (p2_pauli_dist["XX"] - 0.5).abs() < EPSILON, - "Expected XX value to be close to 0.5" - ); - assert!( - (p2_pauli_dist["YY"] - 0.3).abs() < EPSILON, - "Expected YY value to be close to 0.3" - ); - assert!( - (p2_pauli_dist["ZZ"] - 0.2).abs() < EPSILON, - "Expected ZZ value to be close to 0.2" - ); - - // Verify p1 models were not affected by p2 model changes - let p1_pauli_dist = model.p1_pauli_model().get_weighted_map(); - assert!( - (p1_pauli_dist["X"] - 0.7).abs() < EPSILON, - "Expected X value to be close to 0.7" - ); - assert!( - (p1_pauli_dist["Y"] - 0.2).abs() < EPSILON, - "Expected Y value to be close to 0.2" - ); - assert!( - (p1_pauli_dist["Z"] - 0.1).abs() < EPSILON, - "Expected Z value to be close to 0.1" - ); } } diff --git a/crates/pecos-engines/tests/noise_determinism.rs b/crates/pecos-engines/tests/noise_determinism.rs index cb0a90f9f..cfea793bc 100644 --- a/crates/pecos-engines/tests/noise_determinism.rs +++ b/crates/pecos-engines/tests/noise_determinism.rs @@ -23,17 +23,14 @@ fn reset_model_with_seed( fn create_noise_model() -> Box { info!("Creating noise model with moderate error rates"); - // Create a noise model with moderate error rates - let mut model = GeneralNoiseModel::new(0.1, 0.1, 0.1, 0.1, 0.1); + // Create a noise model with moderate error rates using the builder pattern // Set single-qubit error rates with uniform distribution let mut single_qubit_weights = HashMap::new(); single_qubit_weights.insert("X".to_string(), 0.25); single_qubit_weights.insert("Y".to_string(), 0.25); single_qubit_weights.insert("Z".to_string(), 0.25); single_qubit_weights.insert("L".to_string(), 0.25); - info!("Setting single-qubit Pauli model"); - model.set_p1_pauli_model(&single_qubit_weights); // Set two-qubit error rates with uniform distribution let mut two_qubit_weights = HashMap::new(); @@ -42,24 +39,26 @@ fn create_noise_model() -> Box { two_qubit_weights.insert("ZZ".to_string(), 0.2); two_qubit_weights.insert("XL".to_string(), 0.2); two_qubit_weights.insert("LX".to_string(), 0.2); - info!("Setting two-qubit Pauli model"); - model.set_p2_pauli_model(&two_qubit_weights); - // Set emission ratios to ensure errors are introduced - info!("Setting emission ratios"); - model.set_p1_emission_ratio(0.5); - model.set_p2_emission_ratio(0.5); - model.set_prep_leak_ratio(0.5); - - // Scale parameters before using the model - info!("Scaling parameters"); - model.scale_parameters(); + // Use builder to construct the model with all parameters set + let mut model = GeneralNoiseModel::builder() + .with_prep_probability(0.1) + .with_meas_0_probability(0.1) + .with_meas_1_probability(0.1) + .with_p1_probability(0.1) + .with_p2_probability(0.1) + .with_p1_pauli_model(&single_qubit_weights) + .with_p2_pauli_model(&two_qubit_weights) + .with_p1_emission_ratio(0.5) + .with_p2_emission_ratio(0.5) + .with_prep_leak_ratio(0.5) + .build(); // Reset the model to ensure clean state info!("Resetting model"); model.reset().unwrap(); - Box::new(model) + model } fn apply_noise(model: &mut Box, msg: &ByteMessage) -> ByteMessage { diff --git a/crates/pecos-engines/tests/noise_test.rs b/crates/pecos-engines/tests/noise_test.rs index 56f360540..7444fa53d 100644 --- a/crates/pecos-engines/tests/noise_test.rs +++ b/crates/pecos-engines/tests/noise_test.rs @@ -88,31 +88,30 @@ fn count_results( fn test_single_qubit_gate_noise_distributions() { const NUM_SHOTS: usize = 10000; - // Create noise model with 
high error rates - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - - // Disable emission errors first, before scaling - but don't explicitly set Pauli models - noise_model.set_p1_emission_ratio(0.0); - - // Print p1 and emission ratio before scaling + // Create noise model with high error rates using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) // Disable emission errors + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); + + // Print p1 and emission ratio after scaling (the builder applies scaling) println!( - "Before scaling: p1={}, p2={}", + "After building: p1={}, p2={}", noise_model.probabilities().3, noise_model.probabilities().4 ); - // Now scale parameters - noise_model.scale_parameters(); - - // Print p1 and emission ratio after scaling - println!( - "After scaling: p1={}, p2={}", - noise_model.probabilities().3, - noise_model.probabilities().4 - ); - - noise_model.set_seed(42).expect("Failed to set seed"); - // Test Pauli noise channel with uniform distribution // Define a mapping of gate name to expected error rates let gates_to_test = [ @@ -140,7 +139,7 @@ fn test_single_qubit_gate_noise_distributions() { let circ = builder.build(); println!("Testing {desc}..."); - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); // Expected bit pattern after applying gate to |0⟩ let expected_bit = if expected_zeros { "0" } else { "1" }; @@ -177,11 +176,21 @@ fn test_single_qubit_gate_noise_distributions() { fn test_rotation_gate_with_different_angles() { const NUM_SHOTS: usize = 2000; - // Create noise model with high error rates for clearer results - let mut noise_model = GeneralNoiseModel::new(0.05, 0.05, 0.05, 0.1, 0.2); - - // Ensure RZ is not marked as a software gate for this test - noise_model.remove_noiseless_gate(GateType::RZ); + // Create noise model with high error rates for clearer results using the builder pattern + // Explicitly avoid marking RZ as a noiseless gate for this test + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.05) + .with_meas_0_probability(0.05) + .with_meas_1_probability(0.05) + .with_average_p1_probability(0.1) + .with_average_p2_probability(0.2) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Test rotation gates with different angles let angles_to_test = [ @@ -215,7 +224,7 @@ fn test_rotation_gate_with_different_angles() { println!("Failed to parse circuit operations"); } - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); println!("Counts: {counts:?}"); // For RX(0), expect mostly |0⟩ @@ -289,7 +298,7 @@ fn test_rotation_gate_with_different_angles() { println!("Failed to parse X gate circuit operations"); } - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); println!("X gate test counts: {counts:?}"); // Circuit should produce mostly |1⟩ states @@ -314,8 +323,20 @@ fn test_rotation_gate_with_different_angles() { fn 
test_two_qubit_gate_noise_distributions() { const NUM_SHOTS: usize = 2000; - // Create noise model with high error rates for clearer results - let noise_model = GeneralNoiseModel::new(0.05, 0.05, 0.05, 0.1, 0.2); + // Create noise model with high error rates for clearer results using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.05) + .with_meas_0_probability(0.05) + .with_meas_1_probability(0.05) + .with_average_p1_probability(0.1) + .with_average_p2_probability(0.2) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Test CNOT gate with different input states @@ -327,7 +348,7 @@ fn test_two_qubit_gate_noise_distributions() { builder.add_measurements(&[0, 1], &[0, 1]); let circ = builder.build(); - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 2); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 2); // Expect mostly |00⟩ outcomes with some errors let count_00 = *counts.get("00").unwrap_or(&0); @@ -357,7 +378,7 @@ fn test_two_qubit_gate_noise_distributions() { builder.add_measurements(&[0, 1], &[0, 1]); let circ = builder.build(); - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 2); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 2); // Expect mostly |11⟩ outcomes with some errors let count_11 = *counts.get("11").unwrap_or(&0); @@ -387,7 +408,7 @@ fn test_two_qubit_gate_noise_distributions() { builder.add_measurements(&[0, 1], &[0, 1]); let circ = builder.build(); - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 2); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 2); // Expect mostly |01⟩ outcomes with some errors let count_01 = *counts.get("01").unwrap_or(&0); @@ -418,7 +439,7 @@ fn test_two_qubit_gate_noise_distributions() { builder.add_measurements(&[0, 1], &[0, 1]); let circ = builder.build(); - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 2); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 2); // Expect mostly |10⟩ outcomes with some errors let count_10 = *counts.get("10").unwrap_or(&0); @@ -444,11 +465,23 @@ fn test_two_qubit_gate_noise_distributions() { fn test_rzz_angle_dependent_error_model() { const NUM_SHOTS: usize = 2000; - // Create noise model with RZZ angle-dependent error parameters - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - noise_model.set_przz_params(0.05, 0.0, 0.1, 0.0); // a=0.05, b=0, c=0.1, d=0 - noise_model.set_przz_power(1.0); // Linear scaling with angle - noise_model.set_seed(42).expect("Failed to set seed"); + // Create noise model with RZZ angle-dependent error parameters using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.05) + .with_average_p2_probability(0.1) + .with_przz_params(0.05, 0.0, 0.1, 0.0) // a=0.05, b=0, c=0.1, d=0 + .with_przz_power(1.0) // Linear scaling with angle + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Test RZZ gates with different rotation angles let angles_to_test = [ @@ -483,7 +516,7 @@ fn test_rzz_angle_dependent_error_model() { let circ = builder.build(); // Run with noise model and count results - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 2); + let counts = 
count_results(noise_model, &circ, NUM_SHOTS, 2); // For RZZ(θ), calculate expected error rate based on our parameters // Error model: przz_a/c * (|angle|/π)^przz_power + przz_b/d @@ -522,12 +555,23 @@ fn test_rzz_angle_dependent_error_model() { fn test_leakage_model() { const NUM_SHOTS: usize = 2000; - // Create noise model with significant leakage - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - // There's no direct setter for p1_emission_ratio, so we'll use available parameters - noise_model.set_p2_emission_ratio(0.8); // High emission ratio for obvious effect - noise_model.set_prep_leak_ratio(0.5); // 50% of prep errors lead to leakage - noise_model.set_seed(42).expect("Failed to set seed"); + // Create noise model with significant leakage using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.05) + .with_average_p2_probability(0.1) + .with_p2_emission_ratio(0.8) // High emission ratio for obvious effect + .with_prep_leak_ratio(0.5) // 50% of prep errors lead to leakage + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Test leaked qubit behavior with measurement let mut builder = ByteMessageBuilder::new(); @@ -543,7 +587,7 @@ fn test_leakage_model() { let circ = builder.build(); // Run with noise model and count results - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); // In our model, leaked qubits should consistently measure as 1 // So we expect to see a bias toward 1 in the results @@ -560,11 +604,22 @@ fn test_leakage_model() { fn test_software_gates_not_affected_by_noise() { const NUM_SHOTS: usize = 2000; - // Create noise model with high error rates - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.3, 0.3); - - noise_model.add_noiseless_gate(GateType::RZ); - noise_model.set_seed(42).expect("Failed to set seed"); + // Create noise model with high error rates using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.3) + .with_average_p2_probability(0.3) + .with_seed(42) + .with_noiseless_gate(GateType::RZ) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Create two similar circuits: one with RZ (software gate) and one with hardware gate @@ -585,8 +640,8 @@ fn test_software_gates_not_affected_by_noise() { let circ_hardware = builder2.build(); // Run both circuits with noise model - let counts_rz = count_results(&noise_model, &circ_rz, NUM_SHOTS, 1); - let counts_hardware = count_results(&noise_model, &circ_hardware, NUM_SHOTS, 1); + let counts_rz = count_results(noise_model, &circ_rz, NUM_SHOTS, 1); + let counts_hardware = count_results(noise_model, &circ_hardware, NUM_SHOTS, 1); // RZ should be nearly perfect (no noise) let rz_count_0 = *counts_rz.get("0").unwrap_or(&0); @@ -594,7 +649,7 @@ fn test_software_gates_not_affected_by_noise() { // Hardware sequence should show significant noise let hw_count_1 = *counts_hardware.get("1").unwrap_or(&0); - let hw_percentage_1 = (hw_count_1 as f64) / (NUM_SHOTS as f64) * 100.0; + let hw_percentage_1 = (hw_count_1 as f64 
/ NUM_SHOTS as f64) * 100.0; assert!( rz_percentage_0 > 95.0, @@ -611,15 +666,39 @@ fn test_software_gates_not_affected_by_noise() { fn test_coherent_vs_incoherent_dephasing() { const NUM_SHOTS: usize = 2000; - // Create two noise models with different dephasing types - let mut coherent_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - coherent_model.set_coherent_dephasing(true); - coherent_model.set_seed(42).expect("Failed to set seed"); - - let mut incoherent_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - incoherent_model.set_coherent_dephasing(false); - incoherent_model.set_coherent_to_incoherent_factor(2.0); - incoherent_model.set_seed(42).expect("Failed to set seed"); + // Create two noise models with different dephasing types using the builder pattern + let coherent_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.05) + .with_average_p2_probability(0.1) + .with_coherent_dephasing(true) + .with_seed(42) + .build(); + + // Get the coherent model as a GeneralNoiseModel reference + let coherent_model = coherent_model + .as_any() + .downcast_ref::() + .unwrap(); + + let incoherent_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.05) + .with_average_p2_probability(0.1) + .with_coherent_dephasing(false) + .with_coherent_to_incoherent_factor(2.0) + .with_seed(42) + .build(); + + // Get the incoherent model as a GeneralNoiseModel reference + let incoherent_model = incoherent_model + .as_any() + .downcast_ref::() + .unwrap(); // Create a dephasing test circuit: // 1. Prepare |+⟩ state with H @@ -644,8 +723,8 @@ fn test_coherent_vs_incoherent_dephasing() { let circ = builder.build(); // Run with both noise models - let coherent_counts = count_results(&coherent_model, &circ, NUM_SHOTS, 1); - let incoherent_counts = count_results(&incoherent_model, &circ, NUM_SHOTS, 1); + let coherent_counts = count_results(coherent_model, &circ, NUM_SHOTS, 1); + let incoherent_counts = count_results(incoherent_model, &circ, NUM_SHOTS, 1); // Calculate bias toward 0 in both cases let coherent_0 = *coherent_counts.get("0").unwrap_or(&0); @@ -676,13 +755,25 @@ fn test_parameter_scaling_impact() { let mut results = Vec::new(); for scale in scale_factors { - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.05, 0.1); - noise_model.set_scale(scale); // Apply overall scaling - noise_model.scale_parameters(); // Apply the scaling - noise_model.set_seed(42).expect("Failed to set seed"); + // Create a noise model with the given scale factor using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.05) + .with_average_p2_probability(0.1) + .with_scale(scale) // Apply overall scaling + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); // Run with this noise model - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); // After X gate, we expect to measure |1⟩, so count 0s as errors let error_count = *counts.get("0").unwrap_or(&0); @@ -705,8 +796,9 @@ fn test_parameter_scaling_impact() { // higher scales can actually lead to 
lower error rates due to normalization effects. // Simply check that error rates change with different scales. for i in 1..results.len() { - assert!( - results[i].1 != results[i - 1].1, + assert_ne!( + results[i].1, + results[i - 1].1, "Scale {} should result in different error rate compared to scale {}, but got similar values: {:.1}% vs {:.1}%", results[i].0, results[i - 1].0, @@ -721,10 +813,21 @@ fn test_debug_x_gate_noise() { const NUM_SHOTS: usize = 10000; const MARGIN: f64 = 5.0; // 5% margin - // Create a simple noise model with high error rate but no emission errors - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - noise_model.set_p1_emission_ratio(0.0); - noise_model.scale_parameters(); + // Create a simple noise model with high error rate but no emission errors using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); println!( "Debug test: p1 after scaling = {}", @@ -739,7 +842,7 @@ fn test_debug_x_gate_noise() { let circ = builder.build(); // Run many shots and collect statistics - let counts = count_results(&noise_model, &circ, NUM_SHOTS, 1); + let counts = count_results(noise_model, &circ, NUM_SHOTS, 1); // Calculate percentages let count_0 = *counts.get("0").unwrap_or(&0); @@ -770,10 +873,21 @@ fn test_debug_x_gate_noise() { fn test_seed_effect() { const NUM_SHOTS: usize = 5000; - // Create a simple noise model with high error rate but no emission errors - let mut noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - noise_model.set_p1_emission_ratio(0.0); - noise_model.scale_parameters(); + // Create a simple noise model with high error rate but no emission errors using the builder pattern + let noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model = noise_model + .as_any() + .downcast_ref::() + .unwrap(); println!("Model p1 = {}", noise_model.probabilities().3); @@ -833,31 +947,39 @@ fn test_seed_effect() { "\nRunning with the approach from the failing test_single_qubit_gate_noise_distributions:" ); - // Create a new noise model like in the failing test - let mut complex_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - - // Disable emission errors first, before scaling - complex_model.set_p1_emission_ratio(0.0); - complex_model.set_p1_pauli_model( - &[ - ("X".to_string(), 1.0 / 3.0), - ("Y".to_string(), 1.0 / 3.0), - ("Z".to_string(), 1.0 / 3.0), - ] - .into_iter() - .collect(), - ); - complex_model.set_p1_emission_model( - &[("X".to_string(), 0.5), ("Y".to_string(), 0.5)] - .into_iter() - .collect(), - ); + // Create a new noise model using the builder pattern + let pauli_model: HashMap = [ + ("X".to_string(), 1.0 / 3.0), + ("Y".to_string(), 1.0 / 3.0), + ("Z".to_string(), 1.0 / 3.0), + ] + .into_iter() + .collect(); - complex_model.scale_parameters(); - complex_model.set_seed(42).expect("Failed to set seed"); + let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + 
.into_iter() + .collect(); + + let complex_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .with_p1_pauli_model(&pauli_model) + .with_p1_emission_model(&emission_model) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let complex_model = complex_model + .as_any() + .downcast_ref::() + .unwrap(); // Run the circuit - let complex_counts = count_results(&complex_model, &circ, NUM_SHOTS, 1); + let complex_counts = count_results(complex_model, &circ, NUM_SHOTS, 1); // Calculate percentages let complex_zero_count = *complex_counts.get("0").unwrap_or(&0); @@ -873,10 +995,22 @@ fn test_combined_comparison() { const NUM_SHOTS: usize = 5000; println!("=== TESTING SIMPLER MODEL ==="); - // Create a simple noise model with high error rate but no emission errors - let mut simple_noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - simple_noise_model.set_p1_emission_ratio(0.0); - simple_noise_model.scale_parameters(); + // Create a simple noise model with high error rate but no emission errors using the builder pattern + let simple_noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let simple_noise_model = simple_noise_model + .as_any() + .downcast_ref::() + .unwrap(); println!( "Simple model: p1 after scaling = {}", @@ -891,7 +1025,7 @@ fn test_combined_comparison() { let circ = builder.build(); // Run tests with simple model - let simple_counts = count_results(&simple_noise_model, &circ, NUM_SHOTS, 1); + let simple_counts = count_results(simple_noise_model, &circ, NUM_SHOTS, 1); // Calculate percentages let simple_count_0 = *simple_counts.get("0").unwrap_or(&0); @@ -904,54 +1038,48 @@ fn test_combined_comparison() { println!(" |1> measurements: {simple_count_1} ({simple_percent_1}%)"); println!("\n=== TESTING COMPLEX MODEL ==="); - // Create noise model with extremely high error rates to diagnose if errors are being applied - let mut complex_noise_model = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.9); - - // Disable emission errors first, before scaling - complex_noise_model.set_p1_emission_ratio(0.0); // p1_emission_ratio = 0, so no leakage errors - complex_noise_model.set_p1_pauli_model( - &[ - ("X".to_string(), 1.0 / 3.0), - ("Y".to_string(), 1.0 / 3.0), - ("Z".to_string(), 1.0 / 3.0), - ] - .into_iter() - .collect(), - ); - complex_noise_model.set_p1_emission_model( - &[ - // We still need to provide a valid emission model that sums to 1.0, - // even though emission ratio is 0 so it won't be used - ("X".to_string(), 0.5), - ("Y".to_string(), 0.5), - ] - .into_iter() - .collect(), - ); - - // Print p1 and emission ratio before scaling - println!( - "Complex model before scaling: p1={}, p1_emission_ratio={}", - complex_noise_model.probabilities().3, - complex_noise_model.probabilities().5 - ); - - // Now scale parameters - complex_noise_model.scale_parameters(); + // Create complex noise model with the builder + // Define Pauli and emission models + let pauli_model: HashMap = [ + ("X".to_string(), 1.0 / 3.0), + ("Y".to_string(), 1.0 / 3.0), + ("Z".to_string(), 1.0 / 3.0), + ] + 
.into_iter() + .collect(); - // Print p1 and emission ratio after scaling + let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + .into_iter() + .collect(); + + // Create the model with the builder + let complex_noise_model = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.8) + .with_p1_emission_ratio(0.0) // No leakage errors + .with_p1_pauli_model(&pauli_model) + .with_p1_emission_model(&emission_model) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let complex_noise_model = complex_noise_model + .as_any() + .downcast_ref::() + .unwrap(); + + // Print p1 and emission ratio println!( - "Complex model after scaling: p1={}, p1_emission_ratio={}", + "Complex model: p1={}, p1_emission_ratio={}", complex_noise_model.probabilities().3, complex_noise_model.probabilities().5 ); - complex_noise_model - .set_seed(42) - .expect("Failed to set seed"); - // Run tests with complex model - let complex_counts = count_results(&complex_noise_model, &circ, NUM_SHOTS, 1); + let complex_counts = count_results(complex_noise_model, &circ, NUM_SHOTS, 1); // Calculate percentages let complex_count_0 = *complex_counts.get("0").unwrap_or(&0); @@ -991,10 +1119,22 @@ fn test_pauli_model_effect() { const NUM_SHOTS: usize = 5000; println!("=== Test with default Pauli model ==="); - let mut noise_model1 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - noise_model1.set_p1_emission_ratio(0.0); - noise_model1.scale_parameters(); - noise_model1.set_seed(42).expect("Failed to set seed"); + // Create a noise model with default Pauli model using the builder pattern + let noise_model1 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model1 = noise_model1 + .as_any() + .downcast_ref::() + .unwrap(); // Create a circuit with just an X gate and measurement let mut builder = ByteMessageBuilder::new(); @@ -1003,7 +1143,7 @@ fn test_pauli_model_effect() { builder.add_measurements(&[0], &[0]); let circ = builder.build(); - let counts1 = count_results(&noise_model1, &circ, NUM_SHOTS, 1); + let counts1 = count_results(noise_model1, &circ, NUM_SHOTS, 1); // Calculate percentages let default_zero_count = *counts1.get("0").unwrap_or(&0); @@ -1014,10 +1154,7 @@ fn test_pauli_model_effect() { println!("Default model: {default_zero_percent}% |0>, {default_one_percent}% |1>"); println!("\n=== Test with explicitly set Pauli model ==="); - let mut noise_model2 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - noise_model2.set_p1_emission_ratio(0.0); - - // Explicitly set the Pauli model (even though it's the same as default) + // Create X-biased model with builder pattern let x_biased_model: HashMap = [ ("X".to_string(), 0.8), ("Y".to_string(), 0.1), @@ -1025,19 +1162,30 @@ fn test_pauli_model_effect() { ] .into_iter() .collect(); - noise_model2.set_p1_pauli_model(&x_biased_model); - - // Set emission model (even though emission ratio is 0) - noise_model2.set_p1_emission_model( - &[("X".to_string(), 0.5), ("Y".to_string(), 0.5)] - .into_iter() - .collect(), - ); - - noise_model2.scale_parameters(); - noise_model2.set_seed(42).expect("Failed to set 
seed"); - let counts2 = count_results(&noise_model2, &circ, NUM_SHOTS, 1); + let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + .into_iter() + .collect(); + + let noise_model2 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .with_p1_pauli_model(&x_biased_model) + .with_p1_emission_model(&emission_model) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model2 = noise_model2 + .as_any() + .downcast_ref::() + .unwrap(); + + let counts2 = count_results(noise_model2, &circ, NUM_SHOTS, 1); // Calculate percentages let explicit_zero_count = *counts2.get("0").unwrap_or(&0); @@ -1047,10 +1195,8 @@ fn test_pauli_model_effect() { println!("Explicit model: {explicit_zero_percent}% |0>, {explicit_one_percent}% |1>"); - println!("\n=== Test with p1_pauli_model set first, then emission ratio ==="); - let mut noise_model3 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - - // First set Pauli model + println!("\n=== Test with Z-biased Pauli model ==="); + // Create Z-biased model with builder pattern let z_biased_model: HashMap = [ ("X".to_string(), 0.1), ("Y".to_string(), 0.1), @@ -1058,22 +1204,26 @@ fn test_pauli_model_effect() { ] .into_iter() .collect(); - noise_model3.set_p1_pauli_model(&z_biased_model); - - // Then set emission ratio to 0 - noise_model3.set_p1_emission_ratio(0.0); - - // Set emission model - noise_model3.set_p1_emission_model( - &[("X".to_string(), 0.5), ("Y".to_string(), 0.5)] - .into_iter() - .collect(), - ); - - noise_model3.scale_parameters(); - noise_model3.set_seed(42).expect("Failed to set seed"); - let counts3 = count_results(&noise_model3, &circ, NUM_SHOTS, 1); + let noise_model3 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) + .with_p1_pauli_model(&z_biased_model) + .with_p1_emission_model(&emission_model) + .with_seed(42) + .build(); + + // Get the model as a GeneralNoiseModel reference + let noise_model3 = noise_model3 + .as_any() + .downcast_ref::() + .unwrap(); + + let counts3 = count_results(noise_model3, &circ, NUM_SHOTS, 1); // Calculate percentages let ordered_zero_count = *counts3.get("0").unwrap_or(&0); @@ -1081,9 +1231,7 @@ fn test_pauli_model_effect() { let ordered_zero_percent = (ordered_zero_count as f64 / NUM_SHOTS as f64) * 100.0; let ordered_one_percent = (ordered_one_count as f64 / NUM_SHOTS as f64) * 100.0; - println!( - "Model with Pauli model first: {ordered_zero_percent}% |0>, {ordered_one_percent}% |1>" - ); + println!("Z-biased model: {ordered_zero_percent}% |0>, {ordered_one_percent}% |1>"); } #[test] @@ -1100,13 +1248,20 @@ fn test_pauli_model_behavior() { let circ = builder.build(); // ====== Model 1: Default model (equal distribution of X, Y, Z errors) ====== - let mut model1 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - model1.set_p1_emission_ratio(0.0); // Turn off emission errors - model1.scale_parameters(); - model1.set_seed(42).expect("Failed to set seed"); + let model1 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + 
.with_p1_emission_ratio(0.0) // Turn off emission errors + .with_seed(42) + .build(); + + let model1 = model1.as_any().downcast_ref::().unwrap(); println!("Running with default Pauli model (uniform distribution)"); - let default_counts = count_results(&model1, &circ, NUM_SHOTS, 1); + let default_counts = count_results(model1, &circ, NUM_SHOTS, 1); // Calculate percentages let default_zero_count = *default_counts.get("0").unwrap_or(&0); @@ -1117,10 +1272,6 @@ fn test_pauli_model_behavior() { println!(" Default model: {default_zero_percent}% |0>, {default_one_percent}% |1>"); // ====== Model 2: X-biased model (mostly X errors) ====== - let mut model2 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - model2.set_p1_emission_ratio(0.0); // Turn off emission errors - - // Set X-biased Pauli error model let x_biased_model: HashMap = [ ("X".to_string(), 0.8), ("Y".to_string(), 0.1), @@ -1128,13 +1279,22 @@ fn test_pauli_model_behavior() { ] .into_iter() .collect(); - model2.set_p1_pauli_model(&x_biased_model); - model2.scale_parameters(); - model2.set_seed(42).expect("Failed to set seed"); + let model2 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) // Turn off emission errors + .with_p1_pauli_model(&x_biased_model) + .with_seed(42) + .build(); + + let model2 = model2.as_any().downcast_ref::().unwrap(); println!("Running with X-biased Pauli model (80% X, 10% Y, 10% Z)"); - let xbiased_counts = count_results(&model2, &circ, NUM_SHOTS, 1); + let xbiased_counts = count_results(model2, &circ, NUM_SHOTS, 1); // Calculate percentages let xbiased_zero_count = *xbiased_counts.get("0").unwrap_or(&0); @@ -1145,10 +1305,6 @@ fn test_pauli_model_behavior() { println!(" X-biased model: {xbiased_zero_percent}% |0>, {xbiased_one_percent}% |1>"); // ====== Model 3: Z-biased model (mostly Z errors) ====== - let mut model3 = GeneralNoiseModel::new(0.01, 0.01, 0.01, 0.5, 0.1); - model3.set_p1_emission_ratio(0.0); // Turn off emission errors - - // Set Z-biased Pauli error model let z_biased_model: HashMap = [ ("X".to_string(), 0.1), ("Y".to_string(), 0.1), @@ -1156,13 +1312,22 @@ fn test_pauli_model_behavior() { ] .into_iter() .collect(); - model3.set_p1_pauli_model(&z_biased_model); - model3.scale_parameters(); - model3.set_seed(42).expect("Failed to set seed"); + let model3 = GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.01) + .with_meas_1_probability(0.01) + .with_average_p1_probability(0.5) + .with_average_p2_probability(0.1) + .with_p1_emission_ratio(0.0) // Turn off emission errors + .with_p1_pauli_model(&z_biased_model) + .with_seed(42) + .build(); + + let model3 = model3.as_any().downcast_ref::().unwrap(); println!("Running with Z-biased Pauli model (10% X, 10% Y, 80% Z)"); - let zbiased_counts = count_results(&model3, &circ, NUM_SHOTS, 1); + let zbiased_counts = count_results(model3, &circ, NUM_SHOTS, 1); // Calculate percentages let zbiased_zero_count = *zbiased_counts.get("0").unwrap_or(&0); From 77c5563289d7daca99802dfcd0543056e289af66 Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Fri, 9 May 2025 12:18:12 -0600 Subject: [PATCH 6/9] Switched WeightedSampler and related code to use BTreeMap. 
Added LLVM 14 errors --- .gitignore | 2 + README.md | 12 + crates/pecos-cli/src/main.rs | 221 +++++++- crates/pecos-engines/QIR_RUNTIME.md | 24 +- crates/pecos-engines/build.rs | 210 ++++++- .../src/engines/monte_carlo/engine.rs | 49 +- .../src/engines/noise/general.rs | 28 +- .../pecos-engines/src/engines/noise/utils.rs | 14 +- .../src/engines/noise/weighted_sampler.rs | 136 +++-- .../pecos-engines/src/engines/qir/compiler.rs | 92 +++- crates/pecos-engines/tests/bell_state_test.rs | 24 +- .../pecos-engines/tests/noise_determinism.rs | 516 +++++++++++++++++- crates/pecos-engines/tests/noise_test.rs | 24 +- .../tests/qir_bell_state_test.rs | 65 ++- crates/pecos/src/prelude.rs | 5 + 15 files changed, 1290 insertions(+), 132 deletions(-) diff --git a/.gitignore b/.gitignore index 79b090a9d..e079e1171 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ +**/.*/settings.local.json + # Ignore helper text in root *.txt diff --git a/README.md b/README.md index f4c1ecae8..ac609b7eb 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,7 @@ calls to Wasm VMs, conditional branching, and more. - Fast Simulation: Leverages a fast stabilizer simulation algorithm. - Multi-language extensions: Core functionalities implemented via Rust for performance and safety. Additional add-ons and extension support in C/C++ via Cython. +- QIR Support: Execute Quantum Intermediate Representation programs (requires LLVM version 14 with the 'llc' tool). ## Getting Started @@ -97,6 +98,17 @@ To use PECOS in your Rust project, add the following to your `Cargo.toml`: pecos = "0.x.x" # Replace with the latest version ``` +#### Optional Dependencies + +- **LLVM version 14**: Required for QIR (Quantum Intermediate Representation) support + - Linux: `sudo apt install llvm-14` + - macOS: `brew install llvm@14` + - Windows: Download LLVM 14.x installer from [LLVM releases](https://releases.llvm.org/download.html#14.0.0) + + **Note**: Only LLVM version 14.x is compatible. LLVM 15 or later versions will not work with PECOS's QIR implementation. + + If LLVM 14 is not installed, PECOS will still function normally but QIR-related features will be disabled. + ## Development Setup If you are interested in editing or developing the code in this project, see this diff --git a/crates/pecos-cli/src/main.rs b/crates/pecos-cli/src/main.rs index 26d7e54e9..b1c87fd24 100644 --- a/crates/pecos-cli/src/main.rs +++ b/crates/pecos-cli/src/main.rs @@ -29,7 +29,30 @@ struct CompileArgs { program: String, } -#[derive(Args)] +#[derive(PartialEq, Eq, Clone, Debug, Default)] +enum NoiseModelType { + /// Simple depolarizing noise model with uniform error probabilities + #[default] + Depolarizing, + /// General noise model with configurable error probabilities + General, +} + +impl std::str::FromStr for NoiseModelType { + type Err = String; + + fn from_str(s: &str) -> Result { + match s.to_lowercase().as_str() { + "depolarizing" | "dep" => Ok(NoiseModelType::Depolarizing), + "general" | "gen" => Ok(NoiseModelType::General), + _ => Err(format!( + "Unknown noise model type: {s}. 
Valid options are 'depolarizing' (dep) or 'general' (gen)" + )), + } + } +} + +#[derive(Args, Debug)] struct RunArgs { /// Path to the quantum program (LLVM IR or JSON) program: String, @@ -42,40 +65,169 @@ struct RunArgs { #[arg(short, long, default_value_t = 1)] workers: usize, - /// Depolarizing noise probability (between 0 and 1) + /// Type of noise model to use (depolarizing or general) + #[arg(long = "model", value_parser, default_value = "depolarizing")] + noise_model: NoiseModelType, + + /// Noise probability (between 0 and 1) + /// For depolarizing model: uniform error probability + /// For general model: comma-separated probabilities in order: + /// `prep,meas_0,meas_1,single_qubit,two_qubit` + /// Example: --noise 0.01,0.02,0.02,0.05,0.1 #[arg(short = 'p', long = "noise", value_parser = parse_noise_probability)] - noise_probability: Option, + noise_probability: Option, /// Seed for random number generation (for reproducible results) #[arg(short = 'd', long)] seed: Option, } -fn parse_noise_probability(arg: &str) -> Result { - let prob: f64 = arg - .parse() - .map_err(|_| "Must be a valid floating point number")?; - if !(0.0..=1.0).contains(&prob) { - return Err("Noise probability must be between 0 and 1".into()); +fn parse_noise_probability(arg: &str) -> Result { + // Check if it's a comma-separated list + if arg.contains(',') { + // Split by comma and parse each value + let probs: Result, _> = arg + .split(',') + .map(|s| { + s.trim().parse::().map_err(|_| { + format!( + "Invalid probability value '{s}': must be a valid floating point number" + ) + }) + }) + .collect(); + + // Check if all values are valid probabilities + let probs = probs?; + for prob in &probs { + if !(0.0..=1.0).contains(prob) { + return Err(format!("Noise probability {prob} must be between 0 and 1")); + } + } + + // For general noise model, we expect 5 probabilities + if probs.len() != 5 && probs.len() != 1 { + return Err(format!( + "Expected either 1 probability for depolarizing model or 5 probabilities for general model, got {}", + probs.len() + )); + } + + // Return the original string since it's valid + Ok(arg.to_string()) + } else { + // Single probability value + let prob: f64 = arg + .parse() + .map_err(|_| "Must be a valid floating point number")?; + + if !(0.0..=1.0).contains(&prob) { + return Err("Noise probability must be between 0 and 1".into()); + } + + Ok(arg.to_string()) } - Ok(prob) } fn run_program(args: &RunArgs) -> Result<(), Box> { let program_path = get_program_path(&args.program)?; - let prob = args.noise_probability.unwrap_or(0.0); - let classical_engine = setup_engine(&program_path, Some(args.shots.div_ceil(args.workers)))?; - let results = MonteCarloEngine::run_with_classical_engine( - classical_engine, - prob, - args.shots, - args.workers, - args.seed, - )?; + // Process based on the selected noise model + match args.noise_model { + NoiseModelType::Depolarizing => { + // Single noise probability for depolarizing model + let prob = if let Some(noise_str) = &args.noise_probability { + // If it contains commas, take the first value + if noise_str.contains(',') { + noise_str + .split(',') + .next() + .unwrap() + .trim() + .parse::() + .unwrap_or(0.0) + } else { + noise_str.parse::().unwrap_or(0.0) + } + } else { + 0.0 + }; + + // Create a depolarizing noise model + let mut noise_model = DepolarizingNoiseModel::new_uniform(prob); + + // If a seed is provided, set it on the noise model + if let Some(s) = args.seed { + let noise_seed = derive_seed(s, "noise_model"); + 
noise_model.set_seed(noise_seed)?; + } + + // Use the generic approach with noise model + let results = MonteCarloEngine::run_with_noise_model( + classical_engine, + Box::new(noise_model), + args.shots, + args.workers, + args.seed, + )?; + + results.print(); + } + NoiseModelType::General => { + // For general model, we need to parse the comma-separated probabilities + let (prep, meas_0, meas_1, single_qubit, two_qubit) = + if let Some(noise_str) = &args.noise_probability { + if noise_str.contains(',') { + // Parse the comma-separated values + let probs: Vec = noise_str + .split(',') + .map(|s| s.trim().parse::().unwrap_or(0.0)) + .collect(); + + // We should already have validated the length in the parser + if probs.len() == 5 { + (probs[0], probs[1], probs[2], probs[3], probs[4]) + } else { + // Use the first value for all if only one value is provided + let p = probs[0]; + (p, p, p, p, p) + } + } else { + // Single probability value - use for all parameters + let p = noise_str.parse::().unwrap_or(0.0); + (p, p, p, p, p) + } + } else { + // Default: no noise + (0.0, 0.0, 0.0, 0.0, 0.0) + }; - results.print(); + // Create the general noise model + let mut noise_model = + GeneralNoiseModel::new(prep, meas_0, meas_1, single_qubit, two_qubit); + + // If a seed is provided, set it on the noise model + if let Some(s) = args.seed { + let noise_seed = derive_seed(s, "noise_model"); + // We can now silence the non-deterministic warning since we've fixed that issue + noise_model.reset_with_seed(noise_seed).map_err(|e| { + Box::::from(format!("Failed to set noise model seed: {e}")) + })?; + } + + // Use the generic function with the general noise model + let results = MonteCarloEngine::run_with_noise_model( + classical_engine, + Box::new(noise_model), + args.shots, + args.workers, + args.seed, + )?; + + results.print(); + } + } Ok(()) } @@ -128,6 +280,7 @@ mod tests { assert_eq!(args.seed, Some(42)); assert_eq!(args.shots, 100); assert_eq!(args.workers, 2); + assert_eq!(args.noise_model, NoiseModelType::Depolarizing); // Default } Commands::Compile(_) => panic!("Expected Run command"), } @@ -142,6 +295,34 @@ mod tests { assert_eq!(args.seed, None); assert_eq!(args.shots, 100); assert_eq!(args.workers, 2); + assert_eq!(args.noise_model, NoiseModelType::Depolarizing); // Default + } + Commands::Compile(_) => panic!("Expected Run command"), + } + } + + #[test] + fn verify_cli_general_noise_model() { + let cmd = Cli::parse_from([ + "pecos", + "run", + "program.json", + "--model", + "general", + "-p", + "0.01,0.02,0.03,0.04,0.05", + "-d", + "42", + ]); + + match cmd.command { + Commands::Run(args) => { + assert_eq!(args.seed, Some(42)); + assert_eq!(args.noise_model, NoiseModelType::General); + assert_eq!( + args.noise_probability, + Some("0.01,0.02,0.03,0.04,0.05".to_string()) + ); } Commands::Compile(_) => panic!("Expected Run command"), } diff --git a/crates/pecos-engines/QIR_RUNTIME.md b/crates/pecos-engines/QIR_RUNTIME.md index 50a9c396c..852174085 100644 --- a/crates/pecos-engines/QIR_RUNTIME.md +++ b/crates/pecos-engines/QIR_RUNTIME.md @@ -2,14 +2,32 @@ The QIR (Quantum Intermediate Representation) compiler in PECOS uses a Rust runtime library to implement quantum operations. This library is automatically built by the `build.rs` script in the `pecos-engines` crate. 
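For orientation, the "LLVM 14 only" gate described in the requirements below amounts to probing `llc` and accepting solely a 14.x major version. A minimal sketch of such a check, using only the standard library; the helper name and parsing details are illustrative and not the project's API (the real logic lives in `build.rs`):

```rust
use std::process::Command;

/// Illustrative only: returns true when `llc --version` reports LLVM 14.x.
fn llc_is_llvm_14() -> bool {
    let Ok(output) = Command::new("llc").arg("--version").output() else {
        return false; // `llc` not found on PATH
    };
    let text = String::from_utf8_lossy(&output.stdout);
    // Typical output contains a version token such as "14.0.6".
    text.split_whitespace()
        .find(|tok| tok.contains('.') && tok.chars().next().is_some_and(|c| c.is_ascii_digit()))
        .and_then(|ver| ver.split('.').next())
        .and_then(|major| major.parse::<u32>().ok())
        == Some(14)
}
```

On Windows the patch looks for `clang` instead of `llc`, so a real check would branch on the platform as the build script does.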
+## Requirements + +To use QIR functionality, you need: + +- **LLVM version 14 specifically**: + - On Linux: Install using your package manager (e.g., `sudo apt install llvm-14`) + - On macOS: Install using Homebrew (`brew install llvm@14`) + - On Windows: Download and install LLVM 14.x from the [LLVM website](https://releases.llvm.org/download.html#14.0.0) + +- **Required tools**: + - Linux/macOS: The `llc` compiler tool must be in your PATH + - Windows: The `clang` compiler must be in your PATH + +**Note**: PECOS requires LLVM version 14.x specifically, not newer versions. LLVM 15 or later versions are not compatible with PECOS's QIR implementation. + +If LLVM 14 is not installed or the required tools aren't found, QIR functionality will be disabled but the rest of PECOS will continue to work normally. + ## How It Works The `build.rs` script: 1. Runs automatically when building the `pecos-engines` crate -2. Checks if the QIR runtime library needs to be rebuilt -3. Builds the library only if necessary (if source files have changed) -4. Places the built library in both `target/debug` and `target/release` directories +2. Checks for LLVM 14+ dependencies +3. Checks if the QIR runtime library needs to be rebuilt +4. Builds the library only if necessary (if source files have changed) +5. Places the built library in both `target/debug` and `target/release` directories When the QIR compiler runs, it looks for the pre-built library in these locations. If the library is not found, the compiler will attempt to build it by running `cargo build -p pecos-engines` before raising an error. diff --git a/crates/pecos-engines/build.rs b/crates/pecos-engines/build.rs index 17b7eca60..cdb52b006 100644 --- a/crates/pecos-engines/build.rs +++ b/crates/pecos-engines/build.rs @@ -8,16 +8,168 @@ use std::process::Command; /// This script automatically builds the QIR runtime library that is used by the QIR compiler. /// The library is built only when necessary (when source files have changed). 
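The "built only when necessary" behaviour described above reduces to comparing modification times of the sources against the prebuilt library. A stripped-down sketch of that freshness check (standard library only; the function name is illustrative, the full version with extra safeguards appears in `needs_rebuild` further down):

```rust
use std::fs;
use std::path::Path;

/// Illustrative sketch: rebuild when `source` is newer than `target`,
/// or when either timestamp cannot be read (be conservative).
fn source_newer_than(source: &Path, target: &Path) -> bool {
    match (
        fs::metadata(source).and_then(|m| m.modified()),
        fs::metadata(target).and_then(|m| m.modified()),
    ) {
        (Ok(src), Ok(tgt)) => src > tgt,
        _ => true,
    }
}
```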
fn main() { - // Tell Cargo to rerun this script if any of these files change + // Use a more surgical approach to rebuild triggers + // Only track the specific files and environment variables we care about + + // Only track build.rs itself - this is the most critical + println!("cargo:rerun-if-changed=build.rs"); + + // Track QIR source files for file in QIR_SOURCE_FILES { println!("cargo:rerun-if-changed={file}"); } - // Build the QIR runtime library - if let Err(e) = build_qir_runtime() { - eprintln!("Warning: Failed to build QIR runtime library: {e}"); - eprintln!("QIR compilation will be slower as it will build the runtime on-demand."); + // Track only pecos-core/Cargo.toml for major version changes + println!("cargo:rerun-if-changed=../pecos-core/Cargo.toml"); + + // Only track environment variables specifically for LLVM paths + // Intentionally NOT tracking PATH as it changes too often + println!("cargo:rerun-if-env-changed=PECOS_LLVM_PATH"); + println!("cargo:rerun-if-env-changed=LLVM_HOME"); + + // Check for LLVM dependencies first + match check_llvm_dependencies() { + Ok(version) => { + println!("Found LLVM version {version}"); + // Build the QIR runtime library + if let Err(e) = build_qir_runtime() { + eprintln!("Warning: Failed to build QIR runtime library: {e}"); + eprintln!("QIR compilation will be slower as it will build the runtime on-demand."); + } + } + Err(e) => { + println!("cargo:warning=LLVM dependency check failed: {e}"); + eprintln!("Warning: {e}"); + eprintln!( + "QIR functionality will be unavailable. Install LLVM version 14 (specifically 'llc' tool) to enable QIR support." + ); + eprintln!("QIR tests will be skipped, but other tests will continue to run."); + } + } +} + +/// Check for required LLVM dependencies +/// Returns the LLVM version if found and meets requirements +fn check_llvm_dependencies() -> Result { + // Use a simple caching mechanism to avoid checking repeatedly + const CACHE_FILE: &str = "target/qir_runtime_build/llvm_version_cache.txt"; + + // First, try to read from the cache + if let Ok(cached_version) = fs::read_to_string(CACHE_FILE) { + let cached_version = cached_version.trim(); + + // Only return the cached version if it's valid (version 14.x) + if cached_version.starts_with("14.") || cached_version == "14" { + println!("Using cached LLVM version: {cached_version}"); + return Ok(cached_version.to_string()); + } + } + + // If no cache or invalid version, check normally + let tool_path = find_tool_in_path()?; + let version = check_llvm_version(&tool_path)?; + + // Cache the result for next time + if let Some(parent) = std::path::Path::new(CACHE_FILE).parent() { + let _ = fs::create_dir_all(parent); + } + let _ = fs::write(CACHE_FILE, &version); + + Ok(version) +} + +/// Find LLVM tool in the system path +fn find_tool_in_path() -> Result { + // Set the tool name based on platform + #[cfg(not(target_os = "windows"))] + let tool_name = "llc"; + #[cfg(target_os = "windows")] + let tool_name = "clang"; + + // Create executable name with extension if needed + let executable_name = if cfg!(windows) { + format!("{tool_name}.exe") + } else { + tool_name.to_string() + }; + + // Define standard search locations + let env_vars = ["PECOS_LLVM_PATH", "LLVM_HOME"]; + + // Try environment variables first + for env_var in &env_vars { + if let Ok(llvm_path) = env::var(env_var) { + let tool_path = PathBuf::from(llvm_path).join("bin").join(&executable_name); + if tool_path.exists() { + return Ok(tool_path); + } + } + } + + // Try to find in PATH directly + if 
let Ok(path_var) = env::var("PATH") { + let separator = if cfg!(windows) { ';' } else { ':' }; + for path_entry in path_var.split(separator) { + let full_path = Path::new(path_entry).join(&executable_name); + if full_path.exists() { + return Ok(full_path); + } + } + } + + // If we get here, the tool wasn't found + Err(format!( + "Required LLVM tool '{tool_name}' not found. Please install LLVM version 14 to enable QIR functionality." + )) +} + +/// Check LLVM version and verify it meets specific version requirements (LLVM 14.x only) +fn check_llvm_version(tool_path: &Path) -> Result { + // Get the version output + let output = Command::new(tool_path) + .arg("--version") + .output() + .map_err(|e| format!("Failed to check LLVM version: {e}"))?; + + if !output.status.success() { + return Err("Failed to get LLVM version. Tool returned non-zero status.".to_string()); + } + + let version_output = String::from_utf8_lossy(&output.stdout); + let first_line = version_output + .lines() + .next() + .ok_or_else(|| "Empty LLVM version output".to_string())?; + + // Extract version number - first look for X.Y.Z format + let version = first_line + .split_whitespace() + .find(|&part| part.contains('.') && part.chars().any(|c| c.is_ascii_digit())) + // If no X.Y.Z format found, look for just numbers + .or_else(|| { + first_line + .split_whitespace() + .find(|&part| part.chars().all(|c| c.is_ascii_digit())) + }) + .ok_or_else(|| format!("Could not parse version from: {first_line}"))?; + + // Extract major version and check requirements + let major_version = version + .split('.') + .next() + .ok_or_else(|| format!("Malformed LLVM version: {version}"))?; + + let major = major_version + .parse::() + .map_err(|_| format!("Failed to parse LLVM major version: {major_version}"))?; + + if major != 14 { + return Err(format!( + "LLVM version {version} is not compatible. PECOS requires LLVM version 14.x specifically for QIR functionality." 
+ )); } + + Ok(version.to_string()) } // Source files that trigger rebuilds when changed @@ -275,14 +427,56 @@ fn run_cargo_build(build_dir: &Path) -> Result { fn needs_rebuild(manifest_dir: &Path, lib_path: &Path) -> bool { // If the library doesn't exist, we need to build it if !lib_path.exists() { + println!( + "QIR runtime library not found at {}, rebuilding", + lib_path.display() + ); + return true; + } + + // Check library size - if it's suspiciously small, rebuild + if let Ok(metadata) = fs::metadata(lib_path) { + if metadata.len() < 1000 { + // Arbitrary small size check + println!( + "QIR runtime library at {} appears to be too small ({}b), rebuilding", + lib_path.display(), + metadata.len() + ); + return true; + } + } else { + println!("Could not read metadata for QIR runtime library, rebuilding"); return true; } // Get the modification time of the library let Ok(lib_modified) = fs::metadata(lib_path).and_then(|m| m.modified()) else { - return true; // If we can't get the modification time, rebuild to be safe + println!("Could not determine modification time of QIR runtime library, rebuilding"); + return true; }; + // Only check if build.rs has changed - the most critical file + if let Ok(metadata) = fs::metadata(manifest_dir.join("build.rs")) { + if let Ok(modified) = metadata.modified() { + if modified > lib_modified { + println!("build.rs is newer than library, rebuilding"); + return true; + } + } + } + + // Check pecos-core version but only Cargo.toml + let core_cargo_path = manifest_dir.parent().unwrap().join("pecos-core/Cargo.toml"); + if let Ok(metadata) = fs::metadata(&core_cargo_path) { + if let Ok(modified) = metadata.modified() { + if modified > lib_modified { + println!("pecos-core Cargo.toml is newer than library, rebuilding"); + return true; + } + } + } + // Check if any source files are newer than the library for file in QIR_SOURCE_FILES { let file_path = manifest_dir.join(file); @@ -293,6 +487,10 @@ fn needs_rebuild(manifest_dir: &Path, lib_path: &Path) -> bool { return true; } } + } else { + // If a source file is missing, that's a problem and we should rebuild + println!("Source file {file_path:?} not found, rebuilding"); + return true; } } diff --git a/crates/pecos-engines/src/engines/monte_carlo/engine.rs b/crates/pecos-engines/src/engines/monte_carlo/engine.rs index de9813039..4df073106 100644 --- a/crates/pecos-engines/src/engines/monte_carlo/engine.rs +++ b/crates/pecos-engines/src/engines/monte_carlo/engine.rs @@ -18,6 +18,7 @@ use crate::engines::quantum::{QuantumEngine, StateVecEngine}; use crate::engines::{ClassicalEngine, ControlEngine, Engine, EngineStage, HybridEngine}; use crate::errors::QueueError; use log::{debug, info}; +use pecos_core::rng::RngManageable; use pecos_core::rng::rng_manageable::derive_seed; use rand::{RngCore, SeedableRng}; use rand_chacha::ChaCha8Rng; @@ -392,14 +393,15 @@ impl MonteCarloEngine { engine.run(num_shots, num_workers) } - /// Static method to run a simulation with a classical engine and depolarizing noise. + /// Static method to run a simulation with a classical engine and any noise model. /// - /// This is a convenience method that sets up a `MonteCarloEngine` with a state vector - /// quantum engine and a depolarizing noise model with the specified probability. + /// This is a generic method that sets up a `MonteCarloEngine` with a state vector + /// quantum engine and any provided noise model. This is a more flexible approach + /// than the specialized methods for specific noise models. 
/// /// # Parameters /// - `classical_engine`: The classical engine to use. - /// - `p`: The probability parameter for the depolarizing noise model. + /// - `noise_model`: The noise model to apply during simulation. /// - `num_shots`: The total number of circuit executions to perform. /// - `num_workers`: The number of worker threads to use for parallel execution. /// - `seed`: Optional seed for deterministic behavior. @@ -409,30 +411,13 @@ impl MonteCarloEngine { /// /// # Errors /// Returns a `QueueError` if any part of the simulation fails. - pub fn run_with_classical_engine( + pub fn run_with_noise_model( classical_engine: Box, - p: f64, + noise_model: Box, num_shots: usize, num_workers: usize, seed: Option, ) -> Result { - use crate::engines::noise::depolarizing::DepolarizingNoiseModelBuilder; - - // Create a noise model with the specified probability - let noise_model = if let Some(s) = seed { - // If a seed is provided, create a noise model with the seed - let noise_seed = derive_seed(s, "noise_model"); - DepolarizingNoiseModelBuilder::new() - .with_uniform_probability(p) - .with_seed(noise_seed) - .build() - } else { - // Otherwise, create a noise model without a specific seed - Box::new(crate::engines::noise::DepolarizingNoiseModel::new_uniform( - p, - )) - }; - // Create a quantum engine with the same number of qubits as the classical engine let num_qubits = classical_engine.num_qubits(); let quantum_engine = Box::new(StateVecEngine::new(num_qubits)); @@ -481,7 +466,23 @@ impl MonteCarloEngine { })?; let classical_engine = Box::new(ExternalClassicalEngine::new()); - Self::run_with_classical_engine(classical_engine, p, num_shots, num_workers, seed) + + // Create a depolarizing noise model with the parsed probability + let mut noise_model = crate::engines::noise::DepolarizingNoiseModel::new_uniform(p); + + // If a seed is provided, set it on the noise model + if let Some(s) = seed { + let noise_seed = pecos_core::rng::rng_manageable::derive_seed(s, "noise_model"); + noise_model.set_seed(noise_seed)?; + } + + Self::run_with_noise_model( + classical_engine, + Box::new(noise_model), + num_shots, + num_workers, + seed, + ) } } diff --git a/crates/pecos-engines/src/engines/noise/general.rs b/crates/pecos-engines/src/engines/noise/general.rs index bec1ce114..d25b9930e 100644 --- a/crates/pecos-engines/src/engines/noise/general.rs +++ b/crates/pecos-engines/src/engines/noise/general.rs @@ -75,7 +75,7 @@ #![allow(clippy::too_many_lines)] use std::any::Any; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::collections::HashSet; use crate::byte_message::{ByteMessage, ByteMessageBuilder, QuantumGate, gate_type::GateType}; @@ -1537,14 +1537,14 @@ impl GeneralNoiseModelBuilder { /// Set the Pauli error model for single-qubit gates #[must_use] - pub fn with_p1_pauli_model(mut self, model: &HashMap) -> Self { + pub fn with_p1_pauli_model(mut self, model: &BTreeMap) -> Self { self.p1_pauli_model = Some(SingleQubitWeightedSampler::new(model)); self } /// Set the emission error model for single-qubit gates #[must_use] - pub fn with_p1_emission_model(mut self, model: &HashMap) -> Self { + pub fn with_p1_emission_model(mut self, model: &BTreeMap) -> Self { self.p1_emission_model = Some(SingleQubitWeightedSampler::new(model)); self } @@ -1753,14 +1753,14 @@ impl GeneralNoiseModelBuilder { /// Set the probability model for two-qubit Pauli errors #[must_use] - pub fn with_p2_pauli_model(mut self, model: &HashMap) -> Self { + pub fn with_p2_pauli_model(mut self, model: &BTreeMap) 
-> Self { self.p2_pauli_model = Some(TwoQubitWeightedSampler::new(model)); self } /// Set the probability model for two-qubit emission errors #[must_use] - pub fn with_p2_emission_model(mut self, model: &HashMap) -> Self { + pub fn with_p2_emission_model(mut self, model: &BTreeMap) -> Self { self.p2_emission_model = Some(TwoQubitWeightedSampler::new(model)); self } @@ -2076,17 +2076,17 @@ impl Default for GeneralNoiseModel { /// ``` fn default() -> Self { // Initialize default models - let mut p1_pauli_model = HashMap::new(); + let mut p1_pauli_model = BTreeMap::new(); p1_pauli_model.insert("X".to_string(), 1.0 / 3.0); p1_pauli_model.insert("Y".to_string(), 1.0 / 3.0); p1_pauli_model.insert("Z".to_string(), 1.0 / 3.0); - let mut p1_emission_model = HashMap::new(); + let mut p1_emission_model = BTreeMap::new(); p1_emission_model.insert("X".to_string(), 1.0 / 3.0); p1_emission_model.insert("Y".to_string(), 1.0 / 3.0); p1_emission_model.insert("Z".to_string(), 1.0 / 3.0); - let mut p2_pauli_model = HashMap::new(); + let mut p2_pauli_model = BTreeMap::new(); p2_pauli_model.insert("XX".to_string(), 1.0 / 15.0); p2_pauli_model.insert("XY".to_string(), 1.0 / 15.0); p2_pauli_model.insert("XZ".to_string(), 1.0 / 15.0); @@ -2103,7 +2103,7 @@ impl Default for GeneralNoiseModel { p2_pauli_model.insert("YI".to_string(), 1.0 / 15.0); p2_pauli_model.insert("ZI".to_string(), 1.0 / 15.0); - let mut p2_emission_model = HashMap::new(); + let mut p2_emission_model = BTreeMap::new(); p2_emission_model.insert("XX".to_string(), 1.0 / 15.0); p2_emission_model.insert("XY".to_string(), 1.0 / 15.0); p2_emission_model.insert("XZ".to_string(), 1.0 / 15.0); @@ -2933,26 +2933,26 @@ mod tests { #[test] fn test_pauli_and_emission_model_setters() { - use std::collections::HashMap; + use std::collections::BTreeMap; // Define epsilon for approximate float comparisons const EPSILON: f64 = 0.005; // Increased tolerance for sampler discretization // Create all our custom models first - let mut custom_p1_pauli = HashMap::new(); + let mut custom_p1_pauli = BTreeMap::new(); custom_p1_pauli.insert("X".to_string(), 0.7); custom_p1_pauli.insert("Y".to_string(), 0.2); custom_p1_pauli.insert("Z".to_string(), 0.1); - let mut custom_p1_emission = HashMap::new(); + let mut custom_p1_emission = BTreeMap::new(); custom_p1_emission.insert("X".to_string(), 0.4); custom_p1_emission.insert("Y".to_string(), 0.6); - let mut custom_p2_pauli = HashMap::new(); + let mut custom_p2_pauli = BTreeMap::new(); custom_p2_pauli.insert("XX".to_string(), 0.5); custom_p2_pauli.insert("YY".to_string(), 0.3); custom_p2_pauli.insert("ZZ".to_string(), 0.2); - let mut custom_p2_emission = HashMap::new(); + let mut custom_p2_emission = BTreeMap::new(); custom_p2_emission.insert("XX".to_string(), 0.25); custom_p2_emission.insert("YY".to_string(), 0.75); diff --git a/crates/pecos-engines/src/engines/noise/utils.rs b/crates/pecos-engines/src/engines/noise/utils.rs index 9a89471c8..e9ec670fb 100644 --- a/crates/pecos-engines/src/engines/noise/utils.rs +++ b/crates/pecos-engines/src/engines/noise/utils.rs @@ -377,7 +377,7 @@ mod tests { use crate::engines::noise::noise_rng::NoiseRng; use crate::engines::noise::weighted_sampler::SingleQubitWeightedSampler; use rand_chacha::ChaCha8Rng; - use std::collections::HashMap; + use std::collections::BTreeMap; use std::panic::{AssertUnwindSafe, catch_unwind}; #[test] @@ -450,7 +450,7 @@ mod tests { // Test with a valid model // Note: Weights must sum to exactly 1.0 to pass the strict normalization check - let valid_model: HashMap = 
[ + let valid_model: BTreeMap = [ ("X".to_string(), 0.5), ("Y".to_string(), 0.3), ("Z".to_string(), 0.2), @@ -510,14 +510,14 @@ mod tests { assert!(result.is_err(), "Should panic for invalid Pauli operator"); // Test that empty model causes the sampler constructor to panic - let empty_model: HashMap = HashMap::new(); + let empty_model: BTreeMap = BTreeMap::new(); let result = catch_unwind(AssertUnwindSafe(|| { let _ = SingleQubitWeightedSampler::new(&empty_model); })); assert!(result.is_err(), "Should panic for empty model"); // Test that model with invalid keys causes the sampler constructor to panic - let invalid_keys: HashMap = + let invalid_keys: BTreeMap = [("X".to_string(), 0.5), ("INVALID".to_string(), 0.5)] .iter() .cloned() @@ -546,7 +546,7 @@ mod tests { // Test with a valid model including leakage // Note: Weights must sum to exactly 1.0 to pass the strict normalization check - let valid_model: HashMap = [ + let valid_model: BTreeMap = [ ("X".to_string(), 0.4), ("Y".to_string(), 0.3), ("Z".to_string(), 0.2), @@ -618,7 +618,7 @@ mod tests { assert_eq!(x_count + y_count + z_count + leakage_count, SAMPLE_SIZE); // Test error cases with catch_unwind - let empty_model: HashMap = HashMap::new(); + let empty_model: BTreeMap = BTreeMap::new(); let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { // This should trigger an "empty model" panic let _ = SingleQubitWeightedSampler::new(&empty_model); @@ -626,7 +626,7 @@ mod tests { assert!(result.is_err(), "Empty model should cause panic"); // Test invalid operation - let invalid_model: HashMap = [ + let invalid_model: BTreeMap = [ ("X".to_string(), 0.3), ("INVALID".to_string(), 0.7), // Not a valid Pauli or L ] diff --git a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs index 45feb3fd5..cc58f283c 100644 --- a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs +++ b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs @@ -10,7 +10,7 @@ // or implied. See the License for the specific language governing permissions and limitations under // the License. -use std::collections::HashMap; +use std::collections::BTreeMap; use crate::byte_message::QuantumGate; use crate::engines::noise::noise_rng::NoiseRng; @@ -23,14 +23,17 @@ const NORMALIZATION_TOLERANCE: f64 = 1e-5; const FLOAT_EPSILON: f64 = 1e-10; /// A sampler that selects keys with probability proportional to their weights +/// +/// Uses `BTreeMap` for deterministic key ordering, ensuring consistent behavior +/// when using the same seed across multiple runs or threads. 
#[derive(Debug, Clone)] -pub struct WeightedSampler { +pub struct WeightedSampler { keys: Vec, distribution: WeightedIndex, - weighted_map: HashMap, + weighted_map: BTreeMap, } -impl WeightedSampler { +impl WeightedSampler { /// Create a new weighted sampler from a map of keys to weights /// /// The weights are normalized to sum to 1.0 with a default tolerance of 1e-10 @@ -41,7 +44,7 @@ impl WeightedSampler { /// - If the total weight deviates from 1.0 by more than the tolerance /// - If the weighted index distribution cannot be created #[must_use] - pub fn new(weighted_map: &HashMap) -> Self { + pub fn new(weighted_map: &BTreeMap) -> Self { Self::new_with_tolerance(weighted_map, NORMALIZATION_TOLERANCE) } @@ -52,12 +55,14 @@ impl WeightedSampler { /// - If the total weight is not positive /// - If the total weight deviates from 1.0 by more than the tolerance #[must_use] - pub fn new_with_tolerance(weighted_map: &HashMap, tolerance: f64) -> Self { + pub fn new_with_tolerance(weighted_map: &BTreeMap, tolerance: f64) -> Self { let (normalized_weighted_map, normalized_weights) = Self::validate_and_normalize(weighted_map, tolerance); + // BTreeMap already provides deterministic ordering of keys let keys: Vec = weighted_map.keys().cloned().collect(); + // Create the distribution using deterministically ordered weights let distribution = WeightedIndex::new(&normalized_weights) .expect("WeightedSampler: failed to create weighted distribution"); @@ -69,11 +74,11 @@ impl WeightedSampler { } /// Validates that the weights are positive and approximately sum to 1.0 - /// Returns a normalized `HashMap` and a Vec of normalized weights for creating the distribution + /// Returns a normalized `BTreeMap` and a Vec of normalized weights for creating the distribution fn validate_and_normalize( - weighted_map: &HashMap, + weighted_map: &BTreeMap, tolerance: f64, - ) -> (HashMap, Vec) { + ) -> (BTreeMap, Vec) { assert!( !weighted_map.is_empty(), "WeightedSampler: weighted_map cannot be empty" @@ -100,8 +105,8 @@ impl WeightedSampler { weighted_map.values().copied().collect() }; - // Create normalized HashMap - let mut normalized_map = HashMap::with_capacity(weighted_map.len()); + // Create normalized BTreeMap + let mut normalized_map = BTreeMap::new(); for (key, &value) in weighted_map { normalized_map.insert( key.clone(), @@ -129,7 +134,7 @@ impl WeightedSampler { /// Get a reference to the normalized weighted map #[must_use] - pub fn get_weighted_map(&self) -> &HashMap { + pub fn get_weighted_map(&self) -> &BTreeMap { &self.weighted_map } } @@ -162,7 +167,7 @@ impl SingleQubitWeightedSampler { /// - If the total weight is not positive /// - If the total weight deviates from 1.0 by more than the tolerance #[must_use] - pub fn new(weighted_map: &HashMap) -> Self { + pub fn new(weighted_map: &BTreeMap) -> Self { Self::validate_pauli_leakage_keys(weighted_map); Self { @@ -170,7 +175,7 @@ impl SingleQubitWeightedSampler { } } - fn validate_pauli_leakage_keys(weighted_map: &HashMap) { + fn validate_pauli_leakage_keys(weighted_map: &BTreeMap) { for key in weighted_map.keys() { let key_str = key.as_ref(); match key_str { @@ -184,7 +189,7 @@ impl SingleQubitWeightedSampler { /// Get a reference to the normalized weighted map #[must_use] - pub fn get_weighted_map(&self) -> &HashMap { + pub fn get_weighted_map(&self) -> &BTreeMap { self.sampler.get_weighted_map() } @@ -245,7 +250,7 @@ impl TwoQubitWeightedSampler { /// - If the total weight is not positive /// - If the total weight deviates from 1.0 by more than the 
tolerance #[must_use] - pub fn new(weighted_map: &HashMap) -> Self { + pub fn new(weighted_map: &BTreeMap) -> Self { Self::validate_two_qubit_keys(weighted_map); Self { @@ -253,7 +258,7 @@ impl TwoQubitWeightedSampler { } } - fn validate_two_qubit_keys(weighted_map: &HashMap) { + fn validate_two_qubit_keys(weighted_map: &BTreeMap) { for key in weighted_map.keys() { let key_str: &str = key.as_ref(); @@ -285,7 +290,7 @@ impl TwoQubitWeightedSampler { /// Get a reference to the normalized weighted map #[must_use] - pub fn get_weighted_map(&self) -> &HashMap { + pub fn get_weighted_map(&self) -> &BTreeMap { self.sampler.get_weighted_map() } @@ -349,14 +354,83 @@ mod tests { use super::*; use crate::engines::noise::noise_rng::NoiseRng; use rand_chacha::ChaCha8Rng; - use std::collections::HashMap; const SAMPLE_SIZE: usize = 100; + #[test] + fn test_different_sampler_instances_same_results() { + // Create two weighted samplers with the same weights + let mut weights1 = BTreeMap::new(); + weights1.insert("A".to_string(), 0.3); + weights1.insert("B".to_string(), 0.7); + + // Make a separate instance with the same data + let mut weights2 = BTreeMap::new(); + weights2.insert("A".to_string(), 0.3); + weights2.insert("B".to_string(), 0.7); + + let sampler1 = WeightedSampler::new(&weights1); + let sampler2 = WeightedSampler::new(&weights2); + + // Use the same seed for both RNGs + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Sample from both samplers + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler1.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler2.sample(&mut rng2)) + .collect(); + + // Results should be identical with same seed + assert_eq!( + results1, results2, + "Different sampler instances with same weights should produce identical results with same seed" + ); + } + + #[test] + fn test_deterministic_ordering_with_shuffled_keys() { + // Create two weighted samplers with the same weights but different insertion order + let mut weights1 = BTreeMap::new(); + weights1.insert("A".to_string(), 0.3); + weights1.insert("B".to_string(), 0.2); + weights1.insert("C".to_string(), 0.5); + + // Insert in different order + let mut weights2 = BTreeMap::new(); + weights2.insert("C".to_string(), 0.5); + weights2.insert("A".to_string(), 0.3); + weights2.insert("B".to_string(), 0.2); + + let sampler1 = WeightedSampler::new(&weights1); + let sampler2 = WeightedSampler::new(&weights2); + + // Use the same seed for both RNGs + let mut rng1 = NoiseRng::::with_seed(42); + let mut rng2 = NoiseRng::::with_seed(42); + + // Sample from both samplers + let results1: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler1.sample(&mut rng1)) + .collect(); + let results2: Vec = (0..SAMPLE_SIZE) + .map(|_| sampler2.sample(&mut rng2)) + .collect(); + + // Results should be identical despite different insertion order + assert_eq!( + results1, results2, + "Samplers with differently ordered but equivalent maps should produce identical results" + ); + } + #[test] fn test_deterministic_sampling_basic() { // Test basic deterministic sampling with same seed - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 0.3); weights.insert("B".to_string(), 0.7); @@ -384,7 +458,7 @@ mod tests { #[test] fn test_deterministic_sampling_multiple_seeds() { // Test deterministic sampling with multiple different seeds - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 
0.3); weights.insert("B".to_string(), 0.7); @@ -414,7 +488,7 @@ mod tests { #[test] fn test_deterministic_sampling_different_seeds() { // Test that different seeds produce different sequences - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 0.3); weights.insert("B".to_string(), 0.7); @@ -444,7 +518,7 @@ mod tests { #[test] fn test_deterministic_sampling_single_qubit() { // Test deterministic sampling with single qubit sampler - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("X".to_string(), 0.25); weights.insert("Y".to_string(), 0.25); weights.insert("Z".to_string(), 0.25); @@ -484,7 +558,7 @@ mod tests { #[test] fn test_deterministic_sampling_two_qubit() { // Test deterministic sampling with two qubit sampler - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("XX".to_string(), 0.2); weights.insert("YY".to_string(), 0.2); weights.insert("ZZ".to_string(), 0.2); @@ -534,7 +608,7 @@ mod tests { #[test] fn test_deterministic_sampling_reset() { // Test that resetting the RNG and using the same seed produces the same sequence - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 0.3); weights.insert("B".to_string(), 0.7); @@ -559,7 +633,7 @@ mod tests { #[test] fn test_deterministic_sampling_consecutive() { // Test that consecutive samples from the same RNG are deterministic - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 0.3); weights.insert("B".to_string(), 0.7); @@ -583,11 +657,11 @@ mod tests { #[test] fn test_deterministic_sampling_interleaved() { // Test that interleaved sampling from different samplers is deterministic - let mut weights1 = HashMap::new(); + let mut weights1 = BTreeMap::new(); weights1.insert("A".to_string(), 0.3); weights1.insert("B".to_string(), 0.7); - let mut weights2 = HashMap::new(); + let mut weights2 = BTreeMap::new(); weights2.insert("X".to_string(), 0.4); weights2.insert("Y".to_string(), 0.6); @@ -631,7 +705,7 @@ mod tests { #[test] fn test_deterministic_sampling_edge_cases() { // Test edge cases for sampling - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("A".to_string(), 1.0); // Single outcome with probability 1.0 let sampler = WeightedSampler::new(&weights); @@ -659,7 +733,7 @@ mod tests { #[test] fn test_deterministic_sampling_single_qubit_edge_cases() { // Test edge cases for single qubit sampling - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("L".to_string(), 1.0); // Always leak let sampler = SingleQubitWeightedSampler::new(&weights); @@ -687,7 +761,7 @@ mod tests { #[test] fn test_deterministic_sampling_two_qubit_edge_cases() { // Test edge cases for two qubit sampling - let mut weights = HashMap::new(); + let mut weights = BTreeMap::new(); weights.insert("LL".to_string(), 1.0); // Always leak both qubits let sampler = TwoQubitWeightedSampler::new(&weights); diff --git a/crates/pecos-engines/src/engines/qir/compiler.rs b/crates/pecos-engines/src/engines/qir/compiler.rs index 0e5431a8e..9849cd6eb 100644 --- a/crates/pecos-engines/src/engines/qir/compiler.rs +++ b/crates/pecos-engines/src/engines/qir/compiler.rs @@ -388,6 +388,71 @@ impl QirCompiler { None } + /// Check LLVM version and verify it meets specific version requirements (LLVM 14.x only) + fn check_llvm_version(tool_path: &Path) -> Result { + // Get the version output + let output 
= Command::new(tool_path) + .arg("--version") + .output() + .map_err(|e| format!("Failed to check LLVM version: {e}"))?; + + if !output.status.success() { + return Err("Failed to get LLVM version. Tool returned non-zero status.".to_string()); + } + + let version_output = String::from_utf8_lossy(&output.stdout); + + // Parse the version from output + let version = if let Some(version_str) = version_output.lines().next() { + // Different LLVM tools might have different version output formats + // Try to handle both "LLVM version X.Y.Z" and "clang version X.Y.Z" formats + + // Split by whitespace and look for version number pattern + let parts: Vec<&str> = version_str.split_whitespace().collect(); + let mut version_part = None; + + // Try to find something that looks like a version (contains dots and digits) + for &part in &parts { + if part.contains('.') && part.chars().any(|c| c.is_ascii_digit()) { + version_part = Some(part); + break; + } + } + + // If we didn't find anything with dots, look for just digits + if version_part.is_none() { + for &part in &parts { + if part.chars().all(|c| c.is_ascii_digit()) { + version_part = Some(part); + break; + } + } + } + + version_part.ok_or_else(|| format!("Could not parse version from: {version_str}"))? + } else { + return Err("Empty LLVM version output".to_string()); + }; + + // Extract major version and check requirements + let major_version = version + .split('.') + .next() + .ok_or_else(|| format!("Malformed LLVM version: {version}"))?; + + let major = major_version + .parse::() + .map_err(|_| format!("Failed to parse LLVM major version: {major_version}"))?; + + if major != 14 { + return Err(format!( + "LLVM version {version} is not compatible. PECOS requires LLVM version 14.x specifically for QIR functionality." + )); + } + + Ok(version.to_string()) + } + /// Compile QIR file to object file using LLVM tools /// /// On Windows, this uses clang directly with the dllexport attribute added to the main function. @@ -411,12 +476,22 @@ impl QirCompiler { let clang = Self::find_llvm_tool("clang").ok_or_else(|| { Self::log_error( QirError::CompilationFailed( - "clang not found in system. Please install LLVM tools.".to_string(), + "clang not found in system. LLVM version 14 is required for QIR functionality. \ + Please install LLVM version 14 and ensure 'clang' is in your PATH.".to_string(), ), thread_id, ) })?; + // Verify LLVM version + let version_result = Self::check_llvm_version(&clang); + if let Err(version_err) = version_result { + return Err(Self::log_error( + QirError::CompilationFailed(version_err), + thread_id, + )); + } + debug!( "QIR Compiler: [Thread {}] Using clang at {:?} on Windows", thread_id, clang @@ -435,11 +510,24 @@ impl QirCompiler { { let llc_path = Self::find_llvm_tool("llc").ok_or_else(|| { Self::log_error( - QirError::CompilationFailed("Could not find llc tool".to_string()), + QirError::CompilationFailed( + "Could not find 'llc' tool. LLVM version 14 is required for QIR functionality. \ + Please install LLVM version 14 using your package manager (e.g. 'sudo apt install llvm-14' on Ubuntu, \ + 'brew install llvm@14' on macOS). 
After installation, ensure 'llc' is in your PATH.".to_string() + ), thread_id, ) })?; + // Verify LLVM version + let version_result = Self::check_llvm_version(&llc_path); + if let Err(version_err) = version_result { + return Err(Self::log_error( + QirError::CompilationFailed(version_err), + thread_id, + )); + } + let result = Command::new(llc_path) .args(["-filetype=obj", "-o"]) .arg(object_file) diff --git a/crates/pecos-engines/tests/bell_state_test.rs b/crates/pecos-engines/tests/bell_state_test.rs index 2d4ef34bb..efbde229d 100644 --- a/crates/pecos-engines/tests/bell_state_test.rs +++ b/crates/pecos-engines/tests/bell_state_test.rs @@ -1,3 +1,4 @@ +use pecos_core::rng::RngManageable; use pecos_engines::engines::MonteCarloEngine; use pecos_engines::engines::classical::setup_engine; use std::collections::HashMap; @@ -12,9 +13,15 @@ fn test_bell_state_noiseless() { // Run the Bell state example with 100 shots and 2 workers let classical_engine = setup_engine(&bell_file, None).unwrap(); - let results = MonteCarloEngine::run_with_classical_engine( + + // Create a noiseless model + let noise_model = + Box::new(pecos_engines::engines::noise::DepolarizingNoiseModel::new_uniform(0.0)); + + // Use the generic approach + let results = MonteCarloEngine::run_with_noise_model( classical_engine, - 0.0, // No noise + noise_model, 100, 2, None, // No specific seed @@ -57,9 +64,18 @@ fn test_bell_state_with_noise() { // Run the Bell state example with high noise probability for more reliable testing let classical_engine = setup_engine(&bell_file, None).unwrap(); - let results = MonteCarloEngine::run_with_classical_engine( + + // Create a noise model with 30% depolarizing noise + let mut noise_model = + pecos_engines::engines::noise::DepolarizingNoiseModel::new_uniform(0.3); + + // Set the seed + noise_model.set_seed(seed).unwrap(); + + // Use the generic approach + let results = MonteCarloEngine::run_with_noise_model( classical_engine, - 0.3, // 30% noise - higher to ensure we get some noise effects + Box::new(noise_model), 100, // 100 shots is enough for this simple test 2, Some(seed), // Use the current iteration as seed diff --git a/crates/pecos-engines/tests/noise_determinism.rs b/crates/pecos-engines/tests/noise_determinism.rs index cfea793bc..7f966649f 100644 --- a/crates/pecos-engines/tests/noise_determinism.rs +++ b/crates/pecos-engines/tests/noise_determinism.rs @@ -1,10 +1,24 @@ +// This test file contains numeric conversions that are safe in our context but trigger Clippy warnings. +// The following safety considerations apply: +// 1. u32 to i32 casts: Measurement results in quantum simulations are always small non-negative values. +// 2. i32 to u64 casts: Loop indices are always non-negative, so no sign information is actually lost. +// 3. usize to u32 casts: We're using small loop counts (e.g., 0..100) that are guaranteed to fit in u32. +// 4. Type conversions and small f64 multiplications: These maintain sufficient precision for our tests. +// +// Given these constraints and the nature of these tests, we can safely allow the warnings. 
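The rationale above justifies plain `as` casts for these tests; for reference, a checked alternative that avoids the lints altogether is a saturating conversion (hypothetical helper, not part of the test file):

/// Saturating u32 -> i32 conversion: never wraps, never panics.
fn to_i32_saturating(v: u32) -> i32 {
    i32::try_from(v).unwrap_or(i32::MAX)
}

fn main() {
    assert_eq!(to_i32_saturating(1), 1);
    assert_eq!(to_i32_saturating(u32::MAX), i32::MAX);
}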
+#![allow(clippy::cast_possible_wrap)] +#![allow(clippy::cast_sign_loss)] +#![allow(clippy::cast_possible_truncation)] + use log::info; use pecos_engines::{ + Engine, QuantumSystem, byte_message::ByteMessage, engines::ControlEngine, engines::noise::{NoiseModel, general::GeneralNoiseModel}, + engines::quantum::{QuantumEngine, StateVecEngine}, }; -use std::collections::HashMap; +use std::collections::BTreeMap; /// Reset a noise model and set its seed in one operation /// @@ -26,14 +40,14 @@ fn create_noise_model() -> Box { // Create a noise model with moderate error rates using the builder pattern // Set single-qubit error rates with uniform distribution - let mut single_qubit_weights = HashMap::new(); + let mut single_qubit_weights = BTreeMap::new(); single_qubit_weights.insert("X".to_string(), 0.25); single_qubit_weights.insert("Y".to_string(), 0.25); single_qubit_weights.insert("Z".to_string(), 0.25); single_qubit_weights.insert("L".to_string(), 0.25); // Set two-qubit error rates with uniform distribution - let mut two_qubit_weights = HashMap::new(); + let mut two_qubit_weights = BTreeMap::new(); two_qubit_weights.insert("XX".to_string(), 0.2); two_qubit_weights.insert("YY".to_string(), 0.2); two_qubit_weights.insert("ZZ".to_string(), 0.2); @@ -287,3 +301,499 @@ fn test_different_seeds_produce_different_results() { "Different seeds should produce different noise patterns" ); } + +/// Runs a complete quantum simulation including the actual measurement outcomes +/// +/// This function: +/// 1. Creates a `QuantumSystem` with the provided noise model and quantum engine +/// 2. Sets the seed for the system +/// 3. Runs the circuit and collects the actual measurement outcomes +/// 4. Returns the measurement results as a `BTreeMap` of result IDs to values +fn run_complete_simulation( + noise_model: &mut Box, + quantum_engine: Box, + circuit: &ByteMessage, + seed: u64, +) -> BTreeMap { + // Create a quantum system with the noise model and quantum engine + let mut system = QuantumSystem::new(noise_model.clone(), quantum_engine); + + // Set the seed for deterministic behavior + system.set_seed(seed).expect("Failed to set seed"); + + // Reset the system to ensure clean state + system.reset().expect("Failed to reset system"); + + // Run the circuit through the system + let output = system + .process(circuit.clone()) + .expect("Failed to process circuit"); + + // Extract the measurement results + let measurements = output + .measurement_results_as_vec() + .expect("Failed to extract measurements"); + + // Convert u32 values to i32 for the HashMap, handling potential overflow + measurements + .into_iter() + .map(|(k, v)| { + // Safe conversion from u32 to i32, handling potential overflow + let value = if v > i32::MAX as u32 { + i32::MAX + } else { + v as i32 + }; + (k, value) + }) + .collect() +} + +#[test] +fn test_complete_measurement_determinism() { + let seed = 42; + info!("Testing complete measurement determinism with end-to-end simulation"); + + // Create two identical noise models + let mut model1 = create_noise_model(); + let mut model2 = create_noise_model(); + + // Set the same seed for both models + reset_model_with_seed(&mut model1, seed).unwrap(); + reset_model_with_seed(&mut model2, seed).unwrap(); + + // Create a circuit with superposition and entanglement to test measurement + let mut builder = ByteMessage::quantum_operations_builder(); + // Create a Bell state + builder.add_h(&[0]); + builder.add_cx(&[0], &[1]); + // Add measurements for both qubits + builder.add_measurements(&[0, 1], 
&[0, 1]); + let circuit = builder.build(); + + // Create two identical quantum engines + let engine1 = Box::new(StateVecEngine::new(2)); + let engine2 = Box::new(StateVecEngine::new(2)); + + // Run complete simulations with both models + info!("Running first complete simulation"); + let results1 = run_complete_simulation(&mut model1, engine1, &circuit, seed); + + info!("Running second complete simulation with identical seed"); + let results2 = run_complete_simulation(&mut model2, engine2, &circuit, seed); + + // The measurement results should be identical + info!("Comparing measurement results between runs"); + assert_eq!( + results1, results2, + "Measurement results should be identical with the same seed" + ); + + // Now run with a different seed + info!("Running third simulation with different seed"); + let mut model3 = create_noise_model(); + reset_model_with_seed(&mut model3, seed + 1).unwrap(); + let engine3 = Box::new(StateVecEngine::new(2)); + let results3 = run_complete_simulation(&mut model3, engine3, &circuit, seed + 1); + + // These should be different (most of the time) + // Note: There's a small probability they could be the same by chance, + // so we don't strictly assert, but log the comparison + if results1 == results3 { + info!("NOTE: Results with different seeds happened to be identical (small probability)"); + } else { + info!("Results with different seeds are different, as expected"); + } +} + +#[test] +fn test_deterministic_measurement() { + // This test verifies that using the same seed produces the same measurement results + let seed = 42; + println!("Testing deterministic measurement with seed {seed}"); + + // Create a noise model with significant measurement error + let mut model = Box::new( + GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.2) + .with_meas_1_probability(0.2) + .with_average_p1_probability(0.1) + .with_average_p2_probability(0.1) + .build(), + ); + + // Create a circuit that puts a qubit in superposition and measures it + let mut builder = ByteMessage::quantum_operations_builder(); + builder.add_h(&[0]); // Put qubit 0 in superposition + builder.add_measurements(&[0], &[0]); // Measure qubit 0 + let circuit = builder.build(); + + println!("Running first measurement with seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine1 = Box::new(StateVecEngine::new(1)); + let result1 = run_complete_simulation(&mut model, engine1, &circuit, seed); + let value1 = result1.get(&0).copied().unwrap_or(0); + + println!("First measurement result: {value1}"); + + println!("Running second measurement with same seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine2 = Box::new(StateVecEngine::new(1)); + let result2 = run_complete_simulation(&mut model, engine2, &circuit, seed); + let value2 = result2.get(&0).copied().unwrap_or(0); + + println!("Second measurement result: {value2}"); + + // The results should be identical with the same seed + assert_eq!( + value1, value2, + "Measurement results should be identical with the same seed" + ); + + // Now try with a different seed + let different_seed = seed + 1000; + println!("Running measurement with different seed {different_seed}"); + reset_model_with_seed(&mut model, different_seed).unwrap(); + let engine3 = Box::new(StateVecEngine::new(1)); + let result3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed); + let value3 = result3.get(&0).copied().unwrap_or(0); + + println!("Different seed result: {value3}"); + + 
// IMPROVEMENT 1: Assert that different seeds produce different results + // (with a caveat for the small probability that they might be the same by chance) + if value1 == value3 { + println!( + "NOTE: Same measurement result with different seeds. This can happen with low probability." + ); + + // Try one more seed to reduce the probability of false positives + let another_seed = seed + 2000; + reset_model_with_seed(&mut model, another_seed).unwrap(); + let engine4 = Box::new(StateVecEngine::new(1)); + let result4 = run_complete_simulation(&mut model, engine4, &circuit, another_seed); + let value4 = result4.get(&0).copied().unwrap_or(0); + + // With a second different seed, the probability of getting the same result again is even lower + if value1 == value4 { + println!( + "NOTE: Still same measurement result with a third seed. Very unlikely but possible." + ); + } else { + // Different results with the new seed, so we can assert determinism + println!("Different seed produced different result: {value4}"); + assert_ne!( + value1, value4, + "Different seeds should usually produce different measurement results" + ); + } + } else { + // Different results as expected + assert_ne!( + value1, value3, + "Different seeds should usually produce different measurement results" + ); + } + + // Now run multiple measurements with increasing seeds to test we get a mix of results + let mut zeros = 0; + let mut ones = 0; + let num_tests = 20; + + println!("Running {num_tests} measurements with different seeds"); + for i in 0..num_tests { + // Convert the loop variable to u64 safely (always positive in this context) + let test_seed = seed + i as u64; // Safe since i is always non-negative in this loop + reset_model_with_seed(&mut model, test_seed).unwrap(); + let engine = Box::new(StateVecEngine::new(1)); + let result = run_complete_simulation(&mut model, engine, &circuit, test_seed); + let value = result.get(&0).copied().unwrap_or(0); + + if value == 0 { + zeros += 1; + } else { + ones += 1; + } + } + + println!("Got {zeros} zeros and {ones} ones with different seeds"); + + // With enough different seeds, we should get some variation + // The probability of getting all zeros or all ones with 20 measurements and a roughly + // 50/50 chance for each is approximately 2^(-19), which is extremely unlikely + if zeros == 0 || ones == 0 { + println!( + "NOTE: Got only {} measurements. This is highly unusual but technically possible.", + if zeros == 0 { "ones" } else { "zeros" } + ); + } else { + println!("Got a mixture of results with different seeds, as expected"); + } +} + +/// IMPROVEMENT 2: Comprehensive end-to-end test combining all noise types +#[test] +fn test_comprehensive_noise_determinism() { + println!("Testing comprehensive noise determinism (all noise types)"); + + // Create a noise model with all types of noise + let mut model = Box::new( + GeneralNoiseModel::builder() + // Preparation errors + .with_prep_probability(0.05) + .with_prep_leak_ratio(0.2) + // Measurement errors + .with_meas_0_probability(0.1) + .with_meas_1_probability(0.15) + // Gate errors + .with_average_p1_probability(0.2) + .with_average_p2_probability(0.1) + // Leakage and emission errors + .with_p1_emission_ratio(0.3) + .with_p2_emission_ratio(0.3) + .build(), + ); + + // Create a complex circuit with all types of operations: + // 1. Preparation (implicit at start) + // 2. Various single-qubit gates + // 3. Two-qubit gates + // 4. Parameterized gates + // 5. 
Measurements + let mut builder = ByteMessage::quantum_operations_builder(); + + // Use 3 qubits + // Apply a variety of single and two-qubit gates + builder.add_h(&[0]); // Apply Hadamard to qubit 0 + builder.add_rz(0.5, &[1]); // Apply RZ to qubit 1 + builder.add_cx(&[0], &[1]); // Apply CNOT from qubit 0 to qubit 1 + builder.add_h(&[2]); // Apply Hadamard to qubit 2 + builder.add_cx(&[1], &[2]); // Apply CNOT from qubit 1 to qubit 2 + + // RX and RY gates can be implemented using H-RZ-H and other combinations + builder.add_h(&[0]); // Start of RX implementation + builder.add_rz(0.25, &[0]); + builder.add_h(&[0]); // End of RX implementation + + builder.add_h(&[1]); // Start of RY approximation + builder.add_z(&[1]); + builder.add_rz(0.33, &[1]); + builder.add_z(&[1]); + builder.add_h(&[1]); // End of RY approximation + + builder.add_x(&[2]); // Apply X to qubit 2 + builder.add_y(&[0]); // Apply Y to qubit 0 + builder.add_z(&[1]); // Apply Z to qubit 1 + builder.add_rzz(0.75, &[0], &[2]); // Apply RZZ to qubits 0 and 2 + builder.add_cx(&[2], &[0]); // Apply CNOT from qubit 2 to qubit 0 + + // Add measurements for all qubits + builder.add_measurements(&[0, 1, 2], &[0, 1, 2]); + + let circuit = builder.build(); + + // Run the circuit with a fixed seed + let seed = 9876; + println!("Running first simulation with seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine1 = Box::new(StateVecEngine::new(3)); + let results1 = run_complete_simulation(&mut model, engine1, &circuit, seed); + + // Sort and print results for readability + let mut results1_vec: Vec<(usize, i32)> = results1.iter().map(|(&k, &v)| (k, v)).collect(); + results1_vec.sort_by_key(|&(k, _)| k); + println!("First run results: {results1_vec:?}"); + + // Run again with the same seed - should get identical results + println!("Running second simulation with the same seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine2 = Box::new(StateVecEngine::new(3)); + let results2 = run_complete_simulation(&mut model, engine2, &circuit, seed); + + // Sort and print results for readability + let mut results2_vec: Vec<(usize, i32)> = results2.iter().map(|(&k, &v)| (k, v)).collect(); + results2_vec.sort_by_key(|&(k, _)| k); + println!("Second run results: {results2_vec:?}"); + + // The results should be identical with the same seed + assert_eq!( + results1, results2, + "Measurement results should be identical with the same seed in comprehensive test" + ); + + // Run again with a different seed - should get different results + let different_seed = seed + 1000; + println!("Running third simulation with different seed {different_seed}"); + reset_model_with_seed(&mut model, different_seed).unwrap(); + let engine3 = Box::new(StateVecEngine::new(3)); + let results3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed); + + // Sort and print results for readability + let mut results3_vec: Vec<(usize, i32)> = results3.iter().map(|(&k, &v)| (k, v)).collect(); + results3_vec.sort_by_key(|&(k, _)| k); + println!("Different seed results: {results3_vec:?}"); + + // The results should be different (high probability) + // If they happen to be identical, try yet another seed + if results1 == results3 { + println!( + "NOTE: Same measurement results with different seeds. This can happen with low probability." 
+ ); + + let another_seed = seed + 2000; + println!("Trying yet another seed: {another_seed}"); + reset_model_with_seed(&mut model, another_seed).unwrap(); + let engine4 = Box::new(StateVecEngine::new(3)); + let results4 = run_complete_simulation(&mut model, engine4, &circuit, another_seed); + + // The probability of getting identical results again is extremely low + if results1 == results4 { + println!( + "NOTE: Still same results with a third seed. Extremely unlikely but technically possible." + ); + } else { + println!("Different seed produced different results as expected"); + assert_ne!( + results1, results4, + "Different seeds should produce different results in comprehensive test" + ); + } + } else { + println!("Different seed produced different results as expected"); + assert_ne!( + results1, results3, + "Different seeds should produce different results in comprehensive test" + ); + } +} + +/// IMPROVEMENT 3: Test long-running determinism with a large circuit +#[test] +fn test_long_running_determinism() { + println!("Testing long-running determinism with many operations"); + + // Create a noise model with moderate error rates + let mut model = Box::new( + GeneralNoiseModel::builder() + .with_prep_probability(0.01) + .with_meas_0_probability(0.02) + .with_meas_1_probability(0.02) + .with_average_p1_probability(0.1) + .with_average_p2_probability(0.05) + .build(), + ); + + // Create a circuit with a very large number of operations + let mut builder = ByteMessage::quantum_operations_builder(); + + // First create a GHZ state across 5 qubits + builder.add_h(&[0]); + builder.add_cx(&[0], &[1]); + builder.add_cx(&[0], &[2]); + builder.add_cx(&[0], &[3]); + builder.add_cx(&[0], &[4]); + + // Now apply a repeated pattern of gates to create a long sequence + // This gives the RNG many opportunities to diverge if there are issues + println!("Building a circuit with 500+ operations..."); + // We're using a small, positive loop count where usize will fit in both u32 and f64 without precision loss + for i in 0..100 { + // 100 repetitions of 5+ operations = 500+ operations total + // Rotate each qubit differently based on iteration + builder.add_rz(0.01 * f64::from(i as u32), &[0]); + + // Implement RX using H-RZ-H + builder.add_h(&[1]); + builder.add_rz(0.02 * f64::from(i as u32), &[1]); + builder.add_h(&[1]); + + // Implement RY using H-Z-RZ-Z-H + builder.add_h(&[2]); + builder.add_z(&[2]); + builder.add_rz(0.03 * f64::from(i as u32), &[2]); + builder.add_z(&[2]); + builder.add_h(&[2]); + + builder.add_rz(0.04 * f64::from(i as u32), &[3]); + + // Another RX implementation + builder.add_h(&[4]); + builder.add_rz(0.05 * f64::from(i as u32), &[4]); + builder.add_h(&[4]); + + // Add entangling operations that change with iteration + let q1 = i % 5; + let q2 = (i + 1) % 5; + builder.add_cx(&[q1], &[q2]); + } + + // Add measurements for all qubits + builder.add_measurements(&[0, 1, 2, 3, 4], &[0, 1, 2, 3, 4]); + + let circuit = builder.build(); + + // Run the circuit twice with the same seed + let seed = 54321; + println!("Running first long simulation with seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine1 = Box::new(StateVecEngine::new(5)); + let results1 = run_complete_simulation(&mut model, engine1, &circuit, seed); + + println!("Running second long simulation with the same seed {seed}"); + reset_model_with_seed(&mut model, seed).unwrap(); + let engine2 = Box::new(StateVecEngine::new(5)); + let results2 = run_complete_simulation(&mut model, engine2, &circuit, seed); 
+ + // Sort and print a summary of the results + let mut results1_vec: Vec<(usize, i32)> = results1.iter().map(|(&k, &v)| (k, v)).collect(); + results1_vec.sort_by_key(|&(k, _)| k); + println!("First run results: {results1_vec:?}"); + + let mut results2_vec: Vec<(usize, i32)> = results2.iter().map(|(&k, &v)| (k, v)).collect(); + results2_vec.sort_by_key(|&(k, _)| k); + println!("Second run results: {results2_vec:?}"); + + // Results should be identical despite the long sequence of operations + assert_eq!( + results1, results2, + "Results should be identical with the same seed even with a very long circuit" + ); + + // Run with a different seed + let different_seed = seed + 1000; + println!("Running with a different seed {different_seed}"); + reset_model_with_seed(&mut model, different_seed).unwrap(); + let engine3 = Box::new(StateVecEngine::new(5)); + let results3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed); + + // Results should be different (with high probability) + if results1 == results3 { + println!("NOTE: Same results with different seeds. This is very unlikely but possible."); + + // Try one more seed + let another_seed = seed + 2000; + println!("Trying yet another seed: {another_seed}"); + reset_model_with_seed(&mut model, another_seed).unwrap(); + let engine4 = Box::new(StateVecEngine::new(5)); + let results4 = run_complete_simulation(&mut model, engine4, &circuit, another_seed); + + if results1 == results4 { + println!("NOTE: Still same results with a third seed. Extremely unlikely."); + } else { + println!("Different seed produced different results as expected"); + assert_ne!( + results1, results4, + "Different seeds should produce different results" + ); + } + } else { + println!("Different seed produced different results as expected"); + assert_ne!( + results1, results3, + "Different seeds should produce different results" + ); + } + + println!("Long-running determinism test passed successfully!"); +} diff --git a/crates/pecos-engines/tests/noise_test.rs b/crates/pecos-engines/tests/noise_test.rs index 7444fa53d..d85809827 100644 --- a/crates/pecos-engines/tests/noise_test.rs +++ b/crates/pecos-engines/tests/noise_test.rs @@ -13,7 +13,7 @@ use pecos_engines::engines::noise::RngManageable; use pecos_engines::engines::noise::general::GeneralNoiseModel; use pecos_engines::engines::quantum::StateVecEngine; use pecos_engines::{Engine, QuantumSystem}; -use std::collections::HashMap; +use std::collections::BTreeMap; use std::f64::consts::PI; // Helper function to count measurement results from multiple shots @@ -22,12 +22,12 @@ fn count_results( circ: &ByteMessage, num_shots: usize, num_qubits: usize, -) -> HashMap { +) -> BTreeMap { let quantum = Box::new(StateVecEngine::new(num_qubits)); let mut system = QuantumSystem::new(Box::new(noise_model.clone()), quantum); system.set_seed(42).expect("Failed to set seed"); - let mut counts = HashMap::new(); + let mut counts = BTreeMap::new(); // Debug info println!("*** Start debugging count_results ***"); @@ -948,7 +948,7 @@ fn test_seed_effect() { ); // Create a new noise model using the builder pattern - let pauli_model: HashMap = [ + let pauli_model: BTreeMap = [ ("X".to_string(), 1.0 / 3.0), ("Y".to_string(), 1.0 / 3.0), ("Z".to_string(), 1.0 / 3.0), @@ -956,7 +956,7 @@ fn test_seed_effect() { .into_iter() .collect(); - let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + let emission_model: BTreeMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] .into_iter() .collect(); @@ 
-1040,7 +1040,7 @@ fn test_combined_comparison() { println!("\n=== TESTING COMPLEX MODEL ==="); // Create complex noise model with the builder // Define Pauli and emission models - let pauli_model: HashMap = [ + let pauli_model: BTreeMap = [ ("X".to_string(), 1.0 / 3.0), ("Y".to_string(), 1.0 / 3.0), ("Z".to_string(), 1.0 / 3.0), @@ -1048,7 +1048,7 @@ fn test_combined_comparison() { .into_iter() .collect(); - let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + let emission_model: BTreeMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] .into_iter() .collect(); @@ -1155,7 +1155,7 @@ fn test_pauli_model_effect() { println!("\n=== Test with explicitly set Pauli model ==="); // Create X-biased model with builder pattern - let x_biased_model: HashMap = [ + let x_biased_model: BTreeMap = [ ("X".to_string(), 0.8), ("Y".to_string(), 0.1), ("Z".to_string(), 0.1), @@ -1163,7 +1163,7 @@ fn test_pauli_model_effect() { .into_iter() .collect(); - let emission_model: HashMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] + let emission_model: BTreeMap = [("X".to_string(), 0.5), ("Y".to_string(), 0.5)] .into_iter() .collect(); @@ -1197,7 +1197,7 @@ fn test_pauli_model_effect() { println!("\n=== Test with Z-biased Pauli model ==="); // Create Z-biased model with builder pattern - let z_biased_model: HashMap = [ + let z_biased_model: BTreeMap = [ ("X".to_string(), 0.1), ("Y".to_string(), 0.1), ("Z".to_string(), 0.8), @@ -1272,7 +1272,7 @@ fn test_pauli_model_behavior() { println!(" Default model: {default_zero_percent}% |0>, {default_one_percent}% |1>"); // ====== Model 2: X-biased model (mostly X errors) ====== - let x_biased_model: HashMap = [ + let x_biased_model: BTreeMap = [ ("X".to_string(), 0.8), ("Y".to_string(), 0.1), ("Z".to_string(), 0.1), @@ -1305,7 +1305,7 @@ fn test_pauli_model_behavior() { println!(" X-biased model: {xbiased_zero_percent}% |0>, {xbiased_one_percent}% |1>"); // ====== Model 3: Z-biased model (mostly Z errors) ====== - let z_biased_model: HashMap = [ + let z_biased_model: BTreeMap = [ ("X".to_string(), 0.1), ("Y".to_string(), 0.1), ("Z".to_string(), 0.8), diff --git a/crates/pecos-engines/tests/qir_bell_state_test.rs b/crates/pecos-engines/tests/qir_bell_state_test.rs index 13db1552b..209f45a84 100644 --- a/crates/pecos-engines/tests/qir_bell_state_test.rs +++ b/crates/pecos-engines/tests/qir_bell_state_test.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use std::path::PathBuf; +use pecos_core::rng::RngManageable; use pecos_engines::engines::MonteCarloEngine; use pecos_engines::engines::qir::QirEngine; @@ -11,20 +12,60 @@ fn get_qir_program_path() -> PathBuf { workspace_dir.join("examples/qir/bell.ll") } +/// Check if LLVM llc tool version 14 is available +fn is_llc_available() -> bool { + if cfg!(windows) { + std::env::var("PATH") + .map(|paths| { + paths + .split(';') + .any(|dir| std::path::Path::new(dir).join("llc.exe").exists()) + }) + .unwrap_or(false) + } else { + std::env::var("PATH") + .map(|paths| { + paths + .split(':') + .any(|dir| std::path::Path::new(dir).join("llc").exists()) + }) + .unwrap_or(false) + } +} + +/// Skip the test with appropriate message if LLVM is not available +fn skip_if_llc_missing(test_name: &str) -> bool { + if !is_llc_available() { + println!("Skipping {test_name}: LLVM 'llc' tool not found"); + println!("To enable QIR tests, install LLVM version 14 (e.g., 'sudo apt install llvm-14')"); + return true; + } + false +} + #[test] fn test_qir_bell_state_noiseless() { + // Skip if LLVM is not available + if 
skip_if_llc_missing("test_qir_bell_state_noiseless") { + return; + } + // Create a QIR engine directly with the file path let qir_engine = QirEngine::new(get_qir_program_path()); + // Create a noiseless model + let noise_model = + Box::new(pecos_engines::engines::noise::DepolarizingNoiseModel::new_uniform(0.0)); + // Run the Bell state example with 100 shots and 2 workers - let results = MonteCarloEngine::run_with_classical_engine( + let results = MonteCarloEngine::run_with_noise_model( Box::new(qir_engine), - 0.0, // No noise + noise_model, 100, 2, None, // No specific seed ) - .unwrap(); + .expect("QIR execution should succeed as we already checked for LLVM availability"); // Count occurrences of each result let mut counts: HashMap = HashMap::new(); @@ -52,6 +93,11 @@ fn test_qir_bell_state_noiseless() { #[allow(clippy::missing_panics_doc)] #[allow(clippy::cast_precision_loss)] pub fn test_qir_bell_state_with_noise() { + // Skip if LLVM is not available + if skip_if_llc_missing("test_qir_bell_state_with_noise") { + return; + } + // Try a few seeds for seed in 1..=3 { println!("Testing with seed: {seed}"); @@ -62,15 +108,22 @@ pub fn test_qir_bell_state_with_noise() { // Create QirEngine let qir_engine = QirEngine::new(get_qir_program_path()); + // Create a noise model with the specified probability + let mut noise_model = + pecos_engines::engines::noise::DepolarizingNoiseModel::new_uniform(noise_probability); + + // Set the seed on the noise model + noise_model.set_seed(seed).unwrap(); + // Run with the MonteCarloEngine directly, specifying the number of shots - let results = MonteCarloEngine::run_with_classical_engine( + let results = MonteCarloEngine::run_with_noise_model( Box::new(qir_engine), - noise_probability, + Box::new(noise_model), shots, 2, // Number of workers Some(seed), ) - .unwrap(); + .expect("QIR execution should succeed as we already checked for LLVM availability"); // Count results let mut counts: HashMap = HashMap::new(); diff --git a/crates/pecos/src/prelude.rs b/crates/pecos/src/prelude.rs index 61b878d71..dabcfef6e 100644 --- a/crates/pecos/src/prelude.rs +++ b/crates/pecos/src/prelude.rs @@ -20,6 +20,11 @@ pub use pecos_engines::{ QirEngine, QuantumEngine, QuantumSystem, QueueError, ShotResult, ShotResults, }; +// Re-exporting noise models +pub use pecos_core::rng::RngManageable; +pub use pecos_core::rng::rng_manageable::derive_seed; +pub use pecos_engines::engines::noise::general::GeneralNoiseModel; + // Re-exporting specific implementations that aren't at the crate root pub use pecos_engines::engines::{ classical::{ProgramType, detect_program_type, get_program_path, setup_engine}, From 0f1e3e455622fce670efeeed34267007a72cca20 Mon Sep 17 00:00:00 2001 From: Ciaran Ryan-Anderson Date: Fri, 9 May 2025 13:33:28 -0600 Subject: [PATCH 7/9] code simplification --- crates/pecos-cli/src/main.rs | 229 ++++++------ crates/pecos-engines/build.rs | 351 +++++++++++------- .../src/engines/monte_carlo/engine.rs | 163 +++----- .../pecos-engines/src/engines/noise/utils.rs | 88 ++--- .../src/engines/noise/weighted_sampler.rs | 124 +++---- .../pecos-engines/src/engines/qir/compiler.rs | 141 +++---- .../pecos-engines/tests/noise_determinism.rs | 91 +++-- 7 files changed, 592 insertions(+), 595 deletions(-) diff --git a/crates/pecos-cli/src/main.rs b/crates/pecos-cli/src/main.rs index b1c87fd24..1dd20ed76 100644 --- a/crates/pecos-cli/src/main.rs +++ b/crates/pecos-cli/src/main.rs @@ -29,12 +29,22 @@ struct CompileArgs { program: String, } +/// Type of quantum noise model to use for 
simulation #[derive(PartialEq, Eq, Clone, Debug, Default)] enum NoiseModelType { /// Simple depolarizing noise model with uniform error probabilities + /// + /// This model applies the same error probability to all operations #[default] Depolarizing, /// General noise model with configurable error probabilities + /// + /// This model allows setting different error probabilities for: + /// - state preparation + /// - measurement of |0⟩ state + /// - measurement of |1⟩ state + /// - single-qubit gates + /// - two-qubit gates General, } @@ -82,152 +92,139 @@ struct RunArgs { seed: Option, } +/// Parse noise probability specification from command line argument +/// +/// For a depolarizing model, a single probability is expected: "0.01" +/// For a general model, five probabilities are expected: "0.01,0.02,0.02,0.05,0.1" +/// representing [prep, `meas_0`, `meas_1`, `single_qubit`, `two_qubit`] fn parse_noise_probability(arg: &str) -> Result { - // Check if it's a comma-separated list - if arg.contains(',') { - // Split by comma and parse each value - let probs: Result, _> = arg - .split(',') - .map(|s| { - s.trim().parse::().map_err(|_| { - format!( - "Invalid probability value '{s}': must be a valid floating point number" - ) - }) - }) - .collect(); - - // Check if all values are valid probabilities - let probs = probs?; - for prob in &probs { - if !(0.0..=1.0).contains(prob) { - return Err(format!("Noise probability {prob} must be between 0 and 1")); - } - } + // Split string into values (either a single value or comma-separated list) + let values: Vec<&str> = if arg.contains(',') { + arg.split(',').collect() + } else { + vec![arg] + }; + + // Check number of values + if values.len() != 1 && values.len() != 5 { + return Err(format!( + "Expected 1 or 5 probabilities, got {}", + values.len() + )); + } + + // Validate each probability value + for s in &values { + // Parse and validate numeric value + let prob = s + .trim() + .parse::() + .map_err(|_| format!("Invalid value '{s}': not a valid number"))?; - // For general noise model, we expect 5 probabilities - if probs.len() != 5 && probs.len() != 1 { - return Err(format!( - "Expected either 1 probability for depolarizing model or 5 probabilities for general model, got {}", - probs.len() - )); + // Check value range + if !(0.0..=1.0).contains(&prob) { + return Err(format!("Probability {prob} must be between 0 and 1")); } + } + + Ok(arg.to_string()) +} - // Return the original string since it's valid - Ok(arg.to_string()) +/// Extract probability values from noise specification string +/// +/// Handles both single value and comma-separated formats, with safe defaults +fn parse_noise_values(noise_str_opt: Option<&String>) -> Vec { + // Default to 0.0 if no string provided + let Some(noise_str) = noise_str_opt else { + return vec![0.0]; + }; + + // Parse either comma-separated or single value + if noise_str.contains(',') { + noise_str + .split(',') + .map(|s| s.trim().parse::().unwrap_or(0.0)) + .collect() } else { - // Single probability value - let prob: f64 = arg - .parse() - .map_err(|_| "Must be a valid floating point number")?; + vec![noise_str.parse::().unwrap_or(0.0)] + } +} - if !(0.0..=1.0).contains(&prob) { - return Err("Noise probability must be between 0 and 1".into()); - } +/// Parse a single probability value for depolarizing noise model +/// +/// Takes the first probability value if multiple are provided +fn parse_depolarizing_noise_probability(noise_str_opt: Option<&String>) -> f64 { + parse_noise_values(noise_str_opt)[0] // Always 
has at least one value +} + +/// Parse five probability values for general noise model +/// +/// Returns a tuple of five probabilities: (prep, `meas_0`, `meas_1`, `single_qubit`, `two_qubit`) +/// If a single value is provided, it's used for all five parameters +fn parse_general_noise_probabilities(noise_str_opt: Option<&String>) -> (f64, f64, f64, f64, f64) { + let probs = parse_noise_values(noise_str_opt); - Ok(arg.to_string()) + if probs.len() == 5 { + (probs[0], probs[1], probs[2], probs[3], probs[4]) + } else { + // Use the first value for all parameters + let p = probs[0]; + (p, p, p, p, p) } } +/// Run a quantum program with the specified arguments +/// +/// This function sets up the appropriate engines and noise models based on +/// the command line arguments, then runs the specified program and outputs +/// the results. fn run_program(args: &RunArgs) -> Result<(), Box> { let program_path = get_program_path(&args.program)?; let classical_engine = setup_engine(&program_path, Some(args.shots.div_ceil(args.workers)))?; - // Process based on the selected noise model - match args.noise_model { + // Create the appropriate noise model based on user selection + let noise_model: Box = match args.noise_model { NoiseModelType::Depolarizing => { - // Single noise probability for depolarizing model - let prob = if let Some(noise_str) = &args.noise_probability { - // If it contains commas, take the first value - if noise_str.contains(',') { - noise_str - .split(',') - .next() - .unwrap() - .trim() - .parse::() - .unwrap_or(0.0) - } else { - noise_str.parse::().unwrap_or(0.0) - } - } else { - 0.0 - }; - - // Create a depolarizing noise model - let mut noise_model = DepolarizingNoiseModel::new_uniform(prob); + // Create a depolarizing noise model with single probability + let prob = parse_depolarizing_noise_probability(args.noise_probability.as_ref()); + let mut model = DepolarizingNoiseModel::new_uniform(prob); - // If a seed is provided, set it on the noise model + // Set seed if provided if let Some(s) = args.seed { let noise_seed = derive_seed(s, "noise_model"); - noise_model.set_seed(noise_seed)?; + model.set_seed(noise_seed)?; } - // Use the generic approach with noise model - let results = MonteCarloEngine::run_with_noise_model( - classical_engine, - Box::new(noise_model), - args.shots, - args.workers, - args.seed, - )?; - - results.print(); + Box::new(model) } NoiseModelType::General => { - // For general model, we need to parse the comma-separated probabilities + // Create a general noise model with five probabilities let (prep, meas_0, meas_1, single_qubit, two_qubit) = - if let Some(noise_str) = &args.noise_probability { - if noise_str.contains(',') { - // Parse the comma-separated values - let probs: Vec = noise_str - .split(',') - .map(|s| s.trim().parse::().unwrap_or(0.0)) - .collect(); - - // We should already have validated the length in the parser - if probs.len() == 5 { - (probs[0], probs[1], probs[2], probs[3], probs[4]) - } else { - // Use the first value for all if only one value is provided - let p = probs[0]; - (p, p, p, p, p) - } - } else { - // Single probability value - use for all parameters - let p = noise_str.parse::().unwrap_or(0.0); - (p, p, p, p, p) - } - } else { - // Default: no noise - (0.0, 0.0, 0.0, 0.0, 0.0) - }; - - // Create the general noise model - let mut noise_model = - GeneralNoiseModel::new(prep, meas_0, meas_1, single_qubit, two_qubit); - - // If a seed is provided, set it on the noise model + 
parse_general_noise_probabilities(args.noise_probability.as_ref()); + let mut model = GeneralNoiseModel::new(prep, meas_0, meas_1, single_qubit, two_qubit); + + // Set seed if provided if let Some(s) = args.seed { let noise_seed = derive_seed(s, "noise_model"); - // We can now silence the non-deterministic warning since we've fixed that issue - noise_model.reset_with_seed(noise_seed).map_err(|e| { + model.reset_with_seed(noise_seed).map_err(|e| { Box::::from(format!("Failed to set noise model seed: {e}")) })?; } - // Use the generic function with the general noise model - let results = MonteCarloEngine::run_with_noise_model( - classical_engine, - Box::new(noise_model), - args.shots, - args.workers, - args.seed, - )?; - - results.print(); + Box::new(model) } - } + }; + + // Use the generic approach with the selected noise model + let results = MonteCarloEngine::run_with_noise_model( + classical_engine, + noise_model, + args.shots, + args.workers, + args.seed, + )?; + + results.print(); Ok(()) } diff --git a/crates/pecos-engines/build.rs b/crates/pecos-engines/build.rs index cdb52b006..0f4230c9a 100644 --- a/crates/pecos-engines/build.rs +++ b/crates/pecos-engines/build.rs @@ -3,29 +3,42 @@ use std::fs; use std::path::{Path, PathBuf}; use std::process::Command; -/// Build script for the pecos-engines crate -/// -/// This script automatically builds the QIR runtime library that is used by the QIR compiler. -/// The library is built only when necessary (when source files have changed). -fn main() { - // Use a more surgical approach to rebuild triggers - // Only track the specific files and environment variables we care about +//------------------------------------------------------------------------------ +// Configuration Constants +//------------------------------------------------------------------------------ - // Only track build.rs itself - this is the most critical - println!("cargo:rerun-if-changed=build.rs"); +// Source files that trigger rebuilds when changed +const QIR_SOURCE_FILES: [&str; 5] = [ + "src/engines/qir/runtime.rs", + "src/engines/qir/common.rs", + "src/engines/qir/state.rs", + "src/core/result_id.rs", + "src/byte_message/quantum_cmd.rs", +]; - // Track QIR source files - for file in QIR_SOURCE_FILES { - println!("cargo:rerun-if-changed={file}"); - } +// LLVM version required by PECOS +const REQUIRED_LLVM_VERSION: u32 = 14; - // Track only pecos-core/Cargo.toml for major version changes - println!("cargo:rerun-if-changed=../pecos-core/Cargo.toml"); +// LLVM version cache location +const LLVM_CACHE_FILE: &str = "target/qir_runtime_build/llvm_version_cache.txt"; - // Only track environment variables specifically for LLVM paths - // Intentionally NOT tracking PATH as it changes too often - println!("cargo:rerun-if-env-changed=PECOS_LLVM_PATH"); - println!("cargo:rerun-if-env-changed=LLVM_HOME"); +// Environment variables to check for LLVM path +const LLVM_ENV_VARS: [&str; 2] = ["PECOS_LLVM_PATH", "LLVM_HOME"]; + +/// Build script for the pecos-engines crate +/// +/// This script automatically builds the QIR runtime library that is used by the QIR compiler. +/// The library is built only when necessary (when source files have changed or the build +/// environment has been modified). 
+/// +/// # Key behaviors: +/// - Builds the QIR runtime library as a static library (.a or .lib) +/// - Checks for LLVM dependencies (specifically version 14) +/// - Optimizes build performance by selectively tracking files that trigger rebuilds +/// - Provides clear error messages when dependencies are missing +fn main() { + // Configure rebuild triggers - only track specific files and environment variables + configure_rebuild_triggers(); // Check for LLVM dependencies first match check_llvm_dependencies() { @@ -41,63 +54,88 @@ fn main() { println!("cargo:warning=LLVM dependency check failed: {e}"); eprintln!("Warning: {e}"); eprintln!( - "QIR functionality will be unavailable. Install LLVM version 14 (specifically 'llc' tool) to enable QIR support." + "QIR functionality will be unavailable. Install LLVM version {REQUIRED_LLVM_VERSION} (specifically 'llc' tool) to enable QIR support." ); eprintln!("QIR tests will be skipped, but other tests will continue to run."); } } } -/// Check for required LLVM dependencies -/// Returns the LLVM version if found and meets requirements -fn check_llvm_dependencies() -> Result { - // Use a simple caching mechanism to avoid checking repeatedly - const CACHE_FILE: &str = "target/qir_runtime_build/llvm_version_cache.txt"; +/// Configure which files and environment variables should trigger rebuilds +fn configure_rebuild_triggers() { + // Track build.rs itself - this is the most critical + println!("cargo:rerun-if-changed=build.rs"); - // First, try to read from the cache - if let Ok(cached_version) = fs::read_to_string(CACHE_FILE) { - let cached_version = cached_version.trim(); + // Track QIR source files + for file in QIR_SOURCE_FILES { + println!("cargo:rerun-if-changed={file}"); + } - // Only return the cached version if it's valid (version 14.x) - if cached_version.starts_with("14.") || cached_version == "14" { + // Track only pecos-core/Cargo.toml for major version changes + println!("cargo:rerun-if-changed=../pecos-core/Cargo.toml"); + + // Track environment variables specifically for LLVM paths + // Intentionally NOT tracking PATH as it changes too often + for env_var in LLVM_ENV_VARS { + println!("cargo:rerun-if-env-changed={env_var}"); + } +} + +/// Check for required LLVM dependencies (must be version 14.x) +/// +/// Tries to use a cached version first, then searches for the tool and verifies its version. 
+/// +/// # Returns +/// - `Ok(String)` - The LLVM version string if found and compatible +/// - `Err(String)` - A descriptive error message if the dependency check fails +fn check_llvm_dependencies() -> Result { + // Try to get cached version first + if let Ok(cached_version) = fs::read_to_string(LLVM_CACHE_FILE) { + let cached_version = cached_version.trim(); + if cached_version.starts_with(&format!("{REQUIRED_LLVM_VERSION}.")) + || cached_version == REQUIRED_LLVM_VERSION.to_string() + { println!("Using cached LLVM version: {cached_version}"); return Ok(cached_version.to_string()); } } - // If no cache or invalid version, check normally + // Find the tool and check its version let tool_path = find_tool_in_path()?; let version = check_llvm_version(&tool_path)?; // Cache the result for next time - if let Some(parent) = std::path::Path::new(CACHE_FILE).parent() { + if let Some(parent) = Path::new(LLVM_CACHE_FILE).parent() { let _ = fs::create_dir_all(parent); } - let _ = fs::write(CACHE_FILE, &version); + let _ = fs::write(LLVM_CACHE_FILE, &version); Ok(version) } -/// Find LLVM tool in the system path +/// Find LLVM tool (llc on Unix, clang on Windows) in the system +/// +/// Searches in environment variables and system PATH +/// +/// # Returns +/// - `Ok(PathBuf)` - Path to the found tool +/// - `Err(String)` - Error message if tool not found fn find_tool_in_path() -> Result { - // Set the tool name based on platform + // Determine the tool name based on platform #[cfg(not(target_os = "windows"))] let tool_name = "llc"; #[cfg(target_os = "windows")] let tool_name = "clang"; - // Create executable name with extension if needed + // Add .exe extension on Windows let executable_name = if cfg!(windows) { format!("{tool_name}.exe") } else { tool_name.to_string() }; - // Define standard search locations - let env_vars = ["PECOS_LLVM_PATH", "LLVM_HOME"]; - // Try environment variables first - for env_var in &env_vars { + for env_var in LLVM_ENV_VARS { if let Ok(llvm_path) = env::var(env_var) { let tool_path = PathBuf::from(llvm_path).join("bin").join(&executable_name); if tool_path.exists() { @@ -106,7 +144,7 @@ fn find_tool_in_path() -> Result { } } - // Try to find in PATH directly + // Try system PATH if let Ok(path_var) = env::var("PATH") { let separator = if cfg!(windows) { ';' } else { ':' }; for path_entry in path_var.split(separator) { @@ -117,15 +155,21 @@ fn find_tool_in_path() -> Result { } } - // If we get here, the tool wasn't found Err(format!( - "Required LLVM tool '{tool_name}' not found. Please install LLVM version 14 to enable QIR functionality." + "Required LLVM tool '{tool_name}' not found. Please install LLVM version {REQUIRED_LLVM_VERSION}." )) } -/// Check LLVM version and verify it meets specific version requirements (LLVM 14.x only) +/// Check LLVM version and verify it's compatible with PECOS requirements +/// +/// # Arguments +/// * `tool_path` - Path to the LLVM tool executable +/// +/// # Returns +/// - `Ok(String)` - The version string if compatible +/// - `Err(String)` - Error message if version check fails or incompatible fn check_llvm_version(tool_path: &Path) -> Result { - // Get the version output + // Run the version command let output = Command::new(tool_path) .arg("--version") .output() @@ -135,17 +179,19 @@ fn check_llvm_version(tool_path: &Path) -> Result { return Err("Failed to get LLVM version. 
Tool returned non-zero status.".to_string()); } - let version_output = String::from_utf8_lossy(&output.stdout); - let first_line = version_output + // Parse the output to find version number + let version_text = String::from_utf8_lossy(&output.stdout); + let first_line = version_text .lines() .next() .ok_or_else(|| "Empty LLVM version output".to_string())?; - // Extract version number - first look for X.Y.Z format + // Extract version string using two different patterns let version = first_line .split_whitespace() + // Look for X.Y.Z format with digits .find(|&part| part.contains('.') && part.chars().any(|c| c.is_ascii_digit())) - // If no X.Y.Z format found, look for just numbers + // Or just a plain number .or_else(|| { first_line .split_whitespace() @@ -153,35 +199,26 @@ fn check_llvm_version(tool_path: &Path) -> Result { }) .ok_or_else(|| format!("Could not parse version from: {first_line}"))?; - // Extract major version and check requirements - let major_version = version + // Extract major version and verify compatibility + let major = version .split('.') .next() + .and_then(|v| v.parse::().ok()) .ok_or_else(|| format!("Malformed LLVM version: {version}"))?; - let major = major_version - .parse::() - .map_err(|_| format!("Failed to parse LLVM major version: {major_version}"))?; - - if major != 14 { + if major != REQUIRED_LLVM_VERSION { return Err(format!( - "LLVM version {version} is not compatible. PECOS requires LLVM version 14.x specifically for QIR functionality." + "LLVM version {version} not compatible. PECOS requires version {REQUIRED_LLVM_VERSION}.x." )); } Ok(version.to_string()) } -// Source files that trigger rebuilds when changed -const QIR_SOURCE_FILES: [&str; 5] = [ - "src/engines/qir/runtime.rs", - "src/engines/qir/common.rs", - "src/engines/qir/state.rs", - "src/core/result_id.rs", - "src/byte_message/quantum_cmd.rs", -]; - -// File paths to copy or modify +/// File paths used during the QIR runtime build process +/// +/// Contains source and destination paths for all files that need to be +/// copied or modified during the QIR runtime library build process struct FilePaths { common: (PathBuf, PathBuf), state: (PathBuf, PathBuf), @@ -193,6 +230,20 @@ struct FilePaths { lib_rs: PathBuf, } +/// Build the QIR runtime library +/// +/// This function: +/// 1. Creates a temporary build directory +/// 2. Copies and modifies necessary source files +/// 3. Sets up a minimal Cargo project +/// 4. Builds the static library +/// 5. Copies the result to the target directories +/// +/// The build is skipped if the library already exists and is up-to-date. 
+/// +/// # Returns +/// - `Ok(())` - Build successful or skipped (up-to-date) +/// - `Err(String)` - Error message if build fails fn build_qir_runtime() -> Result<(), String> { println!("Building QIR runtime library..."); @@ -211,7 +262,7 @@ fn build_qir_runtime() -> Result<(), String> { let debug_lib_path = workspace_dir.join(format!("target/debug/{lib_filename}")); let release_lib_path = workspace_dir.join(format!("target/release/{lib_filename}")); - // Check if we need to rebuild + // Skip build if libraries exist and are up-to-date if !needs_rebuild(&manifest_dir, &debug_lib_path) && !needs_rebuild(&manifest_dir, &release_lib_path) { @@ -225,10 +276,8 @@ fn build_qir_runtime() -> Result<(), String> { fs::create_dir_all(build_dir.join("src/byte_message")) .map_err(|e| format!("Failed to create source directories: {e}"))?; - // Set up file paths + // Set up file paths and create temporary project let paths = setup_file_paths(&manifest_dir, &build_dir); - - // Setup temporary project setup_temp_project(workspace_dir, &paths)?; // Build the library @@ -259,6 +308,17 @@ fn build_qir_runtime() -> Result<(), String> { Ok(()) } +/// Set up file paths for the QIR runtime build +/// +/// Creates a `FilePaths` struct with source and destination paths for all files +/// that need to be copied or modified during the build process. +/// +/// # Arguments +/// * `manifest_dir` - Path to the crate's manifest directory +/// * `build_dir` - Path to the temporary build directory +/// +/// # Returns +/// A `FilePaths` struct with all required paths fn setup_file_paths(manifest_dir: &Path, build_dir: &Path) -> FilePaths { FilePaths { common: ( @@ -287,6 +347,17 @@ fn setup_file_paths(manifest_dir: &Path, build_dir: &Path) -> FilePaths { } } +/// Set up a temporary Cargo project for building the QIR runtime +/// +/// Creates a standalone Cargo project with all necessary source files. +/// +/// # Arguments +/// * `workspace_dir` - Path to the workspace root directory +/// * `paths` - `FilePaths` struct with all source and destination paths +/// +/// # Returns +/// - `Ok(())` - Setup successful +/// - `Err(String)` - Error message if setup fails fn setup_temp_project(workspace_dir: &Path, paths: &FilePaths) -> Result<(), String> { // Create Cargo.toml let cargo_toml_content = format!( @@ -312,11 +383,17 @@ members = ["."] fs::write(&paths.cargo_toml, cargo_toml_content) .map_err(|e| format!("Failed to write Cargo.toml: {e}"))?; - // Copy common.rs + // Perform file operations one by one + + // 1. Copy common.rs fs::copy(&paths.common.0, &paths.common.1) .map_err(|e| format!("Failed to copy common.rs: {e}"))?; - // Copy and modify state.rs + // 2. Copy result_id.rs + fs::copy(&paths.result_id.0, &paths.result_id.1) + .map_err(|e| format!("Failed to copy result_id.rs: {e}"))?; + + // 3. Modify state.rs: update imports let state_content = fs::read_to_string(&paths.state.0).map_err(|e| format!("Failed to read state.rs: {e}"))?; let modified_state = @@ -324,11 +401,7 @@ members = ["."] fs::write(&paths.state.1, modified_state) .map_err(|e| format!("Failed to write state.rs: {e}"))?; - // Copy result_id.rs - fs::copy(&paths.result_id.0, &paths.result_id.1) - .map_err(|e| format!("Failed to copy result_id.rs: {e}"))?; - - // Copy and modify quantum_cmd.rs + // 4. 
Modify quantum_cmd.rs: update imports let quantum_cmd_content = fs::read_to_string(&paths.quantum_cmd.0) .map_err(|e| format!("Failed to read quantum_cmd.rs: {e}"))?; let modified_quantum_cmd = quantum_cmd_content.replace( @@ -338,18 +411,18 @@ members = ["."] fs::write(&paths.quantum_cmd.1, modified_quantum_cmd) .map_err(|e| format!("Failed to write quantum_cmd.rs: {e}"))?; - // Create byte_message.rs + // 5. Create byte_message.rs module file fs::write( &paths.byte_message, "pub mod quantum_cmd;\npub use quantum_cmd::QuantumCmd;\n", ) .map_err(|e| format!("Failed to write byte_message.rs: {e}"))?; - // Read and modify runtime.rs + // 6. Create lib.rs with modified runtime content let runtime_content = fs::read_to_string(&paths.runtime.0) .map_err(|e| format!("Failed to read runtime.rs: {e}"))?; - // More careful replacements to ensure imports are correct + // Update imports let modified_runtime = runtime_content .replace("use crate::engines::qir::common::", "use crate::common::") .replace("use crate::engines::qir::state::", "use crate::state::") @@ -359,13 +432,15 @@ members = ["."] ) .replace("use crate::core::result_id::", "use crate::result_id::"); - // Add module declarations and write lib.rs + // Add module declarations let module_declarations = "pub mod byte_message;\npub mod result_id;\npub mod common;\npub mod state;\n\n"; - // Ensure MEASUREMENT_RESULTS is property initialized and used - let fixed_runtime = format!("{module_declarations}{modified_runtime}"); - fs::write(&paths.lib_rs, fixed_runtime).map_err(|e| format!("Failed to write lib.rs: {e}"))?; + fs::write( + &paths.lib_rs, + format!("{module_declarations}{modified_runtime}"), + ) + .map_err(|e| format!("Failed to write lib.rs: {e}"))?; // On Windows, create a DEF file for exports if cfg!(windows) { @@ -401,6 +476,15 @@ members = ["."] Ok(()) } +/// Run 'cargo build --release' in the temporary project directory +/// +/// # Arguments +/// * `build_dir` - Path to the temporary build directory +/// +/// # Returns +/// - `Ok(true)` - Build successful +/// - `Ok(false)` - Build failed but not due to a system error +/// - `Err(String)` - Error message if command execution fails fn run_cargo_build(build_dir: &Path) -> Result { let output = Command::new("cargo") .arg("build") @@ -410,8 +494,8 @@ fn run_cargo_build(build_dir: &Path) -> Result { .map_err(|e| format!("Failed to execute cargo: {e}"))?; if !output.status.success() { + // On Windows, show detailed output where CI issues are more common if cfg!(windows) { - // Only show detailed output on Windows where CI issues are more common let stdout = String::from_utf8_lossy(&output.stdout); let stderr = String::from_utf8_lossy(&output.stderr); println!("Cargo build failed: {}", output.status); @@ -424,8 +508,17 @@ fn run_cargo_build(build_dir: &Path) -> Result { Ok(true) } +/// Check if the QIR runtime library needs to be rebuilt +/// +/// # Returns +/// * `true` if any of these conditions are met: +/// - Library doesn't exist or is too small +/// - build.rs is newer than the library +/// - pecos-core/Cargo.toml is newer than the library +/// - Any source file is newer than the library +/// * `false` if library is up-to-date fn needs_rebuild(manifest_dir: &Path, lib_path: &Path) -> bool { - // If the library doesn't exist, we need to build it + // Check if library exists and has reasonable size if !lib_path.exists() { println!( "QIR runtime library not found at {}, rebuilding", @@ -434,65 +527,63 @@ fn needs_rebuild(manifest_dir: &Path, lib_path: &Path) -> bool { return true; } 
- // Check library size - if it's suspiciously small, rebuild - if let Ok(metadata) = fs::metadata(lib_path) { - if metadata.len() < 1000 { - // Arbitrary small size check - println!( - "QIR runtime library at {} appears to be too small ({}b), rebuilding", - lib_path.display(), - metadata.len() - ); - return true; - } - } else { + // Get library metadata + let Ok(lib_metadata) = fs::metadata(lib_path) else { println!("Could not read metadata for QIR runtime library, rebuilding"); return true; + }; + + // Check if library is suspiciously small + if lib_metadata.len() < 1000 { + println!( + "QIR runtime library too small ({}b), rebuilding", + lib_metadata.len() + ); + return true; } - // Get the modification time of the library - let Ok(lib_modified) = fs::metadata(lib_path).and_then(|m| m.modified()) else { - println!("Could not determine modification time of QIR runtime library, rebuilding"); + // Get library modification time + let Ok(lib_modified) = lib_metadata.modified() else { + println!("Could not determine library modification time, rebuilding"); return true; }; - // Only check if build.rs has changed - the most critical file - if let Ok(metadata) = fs::metadata(manifest_dir.join("build.rs")) { - if let Ok(modified) = metadata.modified() { - if modified > lib_modified { - println!("build.rs is newer than library, rebuilding"); - return true; - } + // Check if any critical file is newer than the library + let check_file = |path: &Path, desc: &str| -> bool { + if !path.exists() { + println!("{desc} not found, rebuilding"); + return true; } - } - // Check pecos-core version but only Cargo.toml - let core_cargo_path = manifest_dir.parent().unwrap().join("pecos-core/Cargo.toml"); - if let Ok(metadata) = fs::metadata(&core_cargo_path) { - if let Ok(modified) = metadata.modified() { - if modified > lib_modified { - println!("pecos-core Cargo.toml is newer than library, rebuilding"); - return true; + match fs::metadata(path).and_then(|meta| meta.modified()) { + Ok(time) if time > lib_modified => { + println!("{desc} is newer than library, rebuilding"); + true + } + Err(_) => { + println!("Cannot check time of {desc}, rebuilding"); + true } + _ => false, } + }; + + // Check build script and core dependency + if check_file(&manifest_dir.join("build.rs"), "build.rs") + || check_file( + &manifest_dir.parent().unwrap().join("pecos-core/Cargo.toml"), + "pecos-core Cargo.toml", + ) + { + return true; } - // Check if any source files are newer than the library + // Check source files for file in QIR_SOURCE_FILES { - let file_path = manifest_dir.join(file); - if let Ok(metadata) = fs::metadata(&file_path) { - if let Ok(modified) = metadata.modified() { - if modified > lib_modified { - println!("Source file {file_path:?} is newer than library, rebuilding"); - return true; - } - } - } else { - // If a source file is missing, that's a problem and we should rebuild - println!("Source file {file_path:?} not found, rebuilding"); + if check_file(&manifest_dir.join(file), &format!("Source file {file}")) { return true; } } - false + false // Library is up-to-date } diff --git a/crates/pecos-engines/src/engines/monte_carlo/engine.rs b/crates/pecos-engines/src/engines/monte_carlo/engine.rs index 4df073106..6a97f8603 100644 --- a/crates/pecos-engines/src/engines/monte_carlo/engine.rs +++ b/crates/pecos-engines/src/engines/monte_carlo/engine.rs @@ -17,7 +17,7 @@ use crate::engines::noise::NoiseModel; use crate::engines::quantum::{QuantumEngine, StateVecEngine}; use crate::engines::{ClassicalEngine, 
ControlEngine, Engine, EngineStage, HybridEngine}; use crate::errors::QueueError; -use log::{debug, info}; +use log::debug; use pecos_core::rng::RngManageable; use pecos_core::rng::rng_manageable::derive_seed; use rand::{RngCore, SeedableRng}; @@ -201,13 +201,8 @@ impl MonteCarloEngine { /// # Errors /// Returns a `QueueError` if setting the seed fails for any component pub fn set_seed(&mut self, seed: u64) -> Result<(), QueueError> { - // Set the seed for the internal RNG self.rng = ChaCha8Rng::seed_from_u64(seed); - - // Set the seed for the hybrid engine template - self.hybrid_engine_template.set_seed(seed)?; - - Ok(()) + self.hybrid_engine_template.set_seed(seed) } /// Run a Monte Carlo simulation with the specified number of shots and worker threads. @@ -230,40 +225,22 @@ impl MonteCarloEngine { /// - If `num_shots` is zero. /// - If `num_workers` is zero. pub fn run(&mut self, num_shots: usize, num_workers: usize) -> Result { - assert!((num_shots != 0), "num_shots cannot be zero"); + assert!(num_shots > 0, "num_shots cannot be zero"); + assert!(num_workers > 0, "num_workers cannot be zero"); - assert!((num_workers != 0), "num_workers cannot be zero"); + debug!("Running Monte Carlo simulation: {num_shots} shots, {num_workers} workers"); - debug!( - "Running Monte Carlo simulation with {} shots on {} workers", - num_shots, num_workers - ); - - // Create a vector to hold the results with worker ID and shot index information - // (worker_idx, shot_idx, result) + // Shared results collection let results_vec = Arc::new(Mutex::new( Vec::<(usize, usize, ShotResult)>::with_capacity(num_shots), )); - // Calculate work distribution (shots per worker) + // Determine shots per worker and generate deterministic seeds let shots_per_worker = distribute_shots(num_shots, num_workers); - - // Seed management: derive seeds for each worker deterministically from the base seed let base_seed = self.rng.next_u64(); - let worker_seeds: Vec = (0..num_workers) - .map(|idx| { - let context = format!("worker_{idx}"); - derive_seed(base_seed, &context) - }) - .collect(); - info!( - "Distributing {} shots across {} workers", - num_shots, num_workers - ); - - // Run the shots in parallel - let _ = (0..num_workers) + // Run shots in parallel across workers + (0..num_workers) .into_par_iter() .map(|worker_idx| { let shots_this_worker = shots_per_worker[worker_idx]; @@ -271,47 +248,43 @@ impl MonteCarloEngine { return Ok(()); } - // Create a copy of the template engine and set its seed + // Create worker engine with derived seed let mut engine = self.hybrid_engine_template.clone(); - let worker_seed = worker_seeds[worker_idx]; + let worker_seed = derive_seed(base_seed, &format!("worker_{worker_idx}")); - // Set seed for this worker's engine if let Err(e) = engine.set_seed(worker_seed) { return Err(QueueError::OperationError(format!( "Failed to set seed for worker {worker_idx}: {e}" ))); } - // Run assigned shots + // Process all shots for this worker debug!( - "Worker {} running {} shots with seed {}", - worker_idx, shots_this_worker, worker_seed + "Worker {worker_idx} running {shots_this_worker} shots with seed {worker_seed}" ); + for shot_idx in 0..shots_this_worker { - // Reset the engine state before each shot engine.reset()?; - let shot_result = engine.run_shot()?; - // Store the result with the worker index and shot index for deterministic ordering - let mut results = results_vec.lock().unwrap(); - results.push((worker_idx, shot_idx, shot_result)); + // Store with worker/shot indices for deterministic ordering + 
results_vec + .lock() + .unwrap() + .push((worker_idx, shot_idx, shot_result)); } Ok(()) }) .collect::, QueueError>>()?; - // Sort the results by worker ID and then by shot index within each worker - // This ensures a completely deterministic ordering regardless of execution timing + // Ensure deterministic ordering of results let mut results = results_vec.lock().unwrap(); results.sort_by(|(w1, s1, _), (w2, s2, _)| w1.cmp(w2).then(s1.cmp(s2))); - // Extract just the shot results in the sorted order + // Convert to final results format let shot_results: Vec = results.iter().map(|(_, _, shot)| shot.clone()).collect(); - - // Convert the results to a ShotResults object let combined_results = ShotResults::from_measurements(&shot_results); debug!("Monte Carlo simulation completed successfully"); @@ -379,17 +352,14 @@ impl MonteCarloEngine { num_workers: usize, seed: Option, ) -> Result { - // Create a Monte Carlo engine with the provided hybrid engine let mut engine = MonteCarloEngineBuilder::new() .with_hybrid_engine(hybrid_engine) .build(); - // Set the seed if provided if let Some(s) = seed { engine.set_seed(s)?; } - // Run the simulation engine.run(num_shots, num_workers) } @@ -418,18 +388,15 @@ impl MonteCarloEngine { num_workers: usize, seed: Option, ) -> Result { - // Create a quantum engine with the same number of qubits as the classical engine - let num_qubits = classical_engine.num_qubits(); - let quantum_engine = Box::new(StateVecEngine::new(num_qubits)); - - // Create a hybrid engine with the provided components + // Create a hybrid engine with the state vector quantum engine + let quantum_engine = Box::new(StateVecEngine::new(classical_engine.num_qubits())); let mut hybrid_engine = HybridEngineBuilder::new() .with_classical_engine(classical_engine) .with_quantum_engine(quantum_engine) .with_noise_model(noise_model) .build(); - // If a seed is provided, explicitly set it on the hybrid engine + // Set seed if provided if let Some(s) = seed { hybrid_engine.set_seed(s)?; } @@ -459,25 +426,21 @@ impl MonteCarloEngine { num_workers: usize, seed: Option, ) -> Result { - // Parse the configuration string and create the engine - // For now, we'll treat it as a simple noise probability + // Parse the configuration string as a noise probability let p = config.parse::().map_err(|e| { QueueError::OperationError(format!("Failed to parse config string as float: {e}")) })?; - let classical_engine = Box::new(ExternalClassicalEngine::new()); - - // Create a depolarizing noise model with the parsed probability + // Create and seed a depolarizing noise model let mut noise_model = crate::engines::noise::DepolarizingNoiseModel::new_uniform(p); - // If a seed is provided, set it on the noise model if let Some(s) = seed { - let noise_seed = pecos_core::rng::rng_manageable::derive_seed(s, "noise_model"); - noise_model.set_seed(noise_seed)?; + noise_model.set_seed(derive_seed(s, "noise_model"))?; } + // Run simulation with external classical engine Self::run_with_noise_model( - classical_engine, + Box::new(ExternalClassicalEngine::new()), Box::new(noise_model), num_shots, num_workers, @@ -495,28 +458,24 @@ impl Clone for MonteCarloEngine { } } -/// Utility function to distribute shots across workers -/// -/// This function calculates how many shots each worker should execute -/// based on the total number of shots and workers. 
-/// -/// # Arguments -/// * `num_shots` - The total number of shots to distribute -/// * `num_workers` - The number of workers available +/// Distributes shots evenly across workers with any remainder going to initial workers /// /// # Returns -/// A vector where each element is the number of shots for a worker +/// A vector containing the number of shots for each worker fn distribute_shots(num_shots: usize, num_workers: usize) -> Vec { - let mut shots_per_worker = vec![num_shots / num_workers; num_workers]; + let base = num_shots / num_workers; let remainder = num_shots % num_workers; - // Distribute the remainder shots among the first few workers - shots_per_worker + // Create vector with base shots per worker + let mut result = vec![base; num_workers]; + + // Add remainder shots to first 'remainder' workers + result .iter_mut() .take(remainder) .for_each(|shots| *shots += 1); - shots_per_worker + result } /// An external classical engine implementation used for testing and examples. @@ -551,27 +510,9 @@ impl Engine for ExternalClassicalEngine { type Output = ShotResult; fn process(&mut self, _input: Self::Input) -> Result { - // Generate a ByteMessage with a simple circuit + // For this stub implementation, just generate commands and return results let _message = self.generate_commands()?; - - // Process it somehow (in a real engine, this would run the quantum simulation) - // For this stub, we'll just return the stored results - let mut shot_result = ShotResult::default(); - - // Convert the HashMap to HashMap - let measurements: HashMap = self - .results - .iter() - .map(|(k, v)| { - // For a test utility, simply clamp values that are out of bounds - let value = u32::try_from(*v).unwrap_or(0); - (k.clone(), value) - }) - .collect(); - - shot_result.measurements = measurements; - - Ok(shot_result) + self.get_results() } fn reset(&mut self) -> Result<(), QueueError> { @@ -601,21 +542,15 @@ impl ClassicalEngine for ExternalClassicalEngine { } fn get_results(&self) -> Result { - // Create a ShotResult with the stored results - let mut shot_result = ShotResult::default(); - - // Convert the HashMap to HashMap - let measurements: HashMap = self - .results - .iter() - .map(|(k, v)| { - // For a test utility, simply clamp values that are out of bounds - let value = u32::try_from(*v).unwrap_or(0); - (k.clone(), value) - }) - .collect(); - - shot_result.measurements = measurements; + // Create ShotResult with converted measurements + let shot_result = ShotResult { + measurements: self + .results + .iter() + .map(|(k, v)| (k.clone(), u32::try_from(*v).unwrap_or(0))) + .collect(), + ..ShotResult::default() + }; Ok(shot_result) } diff --git a/crates/pecos-engines/src/engines/noise/utils.rs b/crates/pecos-engines/src/engines/noise/utils.rs index e9ec670fb..96bc8a7e0 100644 --- a/crates/pecos-engines/src/engines/noise/utils.rs +++ b/crates/pecos-engines/src/engines/noise/utils.rs @@ -166,11 +166,14 @@ impl NoiseUtils { /// * `gate` - The gate to add /// /// # Panics - /// Panics if `gate.result_id` is `None` when processing a measurement gate. 
+ /// Panics if: + /// - `gate.result_id` is `None` when processing a measurement gate + /// - The gate type is invalid or has insufficient parameters/qubits for the operation pub fn add_gate_to_builder(builder: &mut ByteMessageBuilder, gate: &QuantumGate) { use crate::byte_message::GateType; match gate.gate_type { + // Single-qubit gates that operate directly on qubit lists GateType::X => { builder.add_x(&gate.qubits); } @@ -183,52 +186,48 @@ impl NoiseUtils { GateType::H => { builder.add_h(&gate.qubits); } - GateType::CX => { - if gate.qubits.len() >= 2 { - builder.add_cx(&[gate.qubits[0]], &[gate.qubits[1]]); - } + GateType::Prep => { + builder.add_prep(&gate.qubits); } - GateType::RZZ => { - if gate.qubits.len() >= 2 && !gate.params.is_empty() { - builder.add_rzz(gate.params[0], &[gate.qubits[0]], &[gate.qubits[1]]); - } + + // Two-qubit gates that need qubit validation + GateType::CX if gate.qubits.len() >= 2 => { + builder.add_cx(&[gate.qubits[0]], &[gate.qubits[1]]); } - GateType::SZZ => { - if gate.qubits.len() >= 2 { - builder.add_szz(&[gate.qubits[0]], &[gate.qubits[1]]); - } + GateType::SZZ if gate.qubits.len() >= 2 => { + builder.add_szz(&[gate.qubits[0]], &[gate.qubits[1]]); } - GateType::SZZdg => { - if gate.qubits.len() >= 2 { - builder.add_szzdg(&[gate.qubits[0]], &[gate.qubits[1]]); - } + GateType::SZZdg if gate.qubits.len() >= 2 => { + builder.add_szzdg(&[gate.qubits[0]], &[gate.qubits[1]]); } - GateType::RZ => { - if !gate.params.is_empty() { - builder.add_rz(gate.params[0], &gate.qubits); - } + + // Gates with parameters that need validation + GateType::RZ if !gate.params.is_empty() => { + builder.add_rz(gate.params[0], &gate.qubits); } - GateType::R1XY => { - if gate.params.len() >= 2 { - builder.add_r1xy(gate.params[0], gate.params[1], &gate.qubits); - } + GateType::RZZ if gate.qubits.len() >= 2 && !gate.params.is_empty() => { + builder.add_rzz(gate.params[0], &[gate.qubits[0]], &[gate.qubits[1]]); } - GateType::Measure => { - if !gate.qubits.is_empty() && gate.result_id.is_some() { - builder.add_measurements(&gate.qubits, &[gate.result_id.unwrap()]); - } + GateType::R1XY if gate.params.len() >= 2 => { + builder.add_r1xy(gate.params[0], gate.params[1], &gate.qubits); } - GateType::Prep => { - builder.add_prep(&gate.qubits); + + // Measurement gates need both qubits and result IDs + GateType::Measure if !gate.qubits.is_empty() && gate.result_id.is_some() => { + builder.add_measurements(&gate.qubits, &[gate.result_id.unwrap()]); } - GateType::Idle => { - // Handle Idle gates - let mut idle_qubits = Vec::with_capacity(gate.qubits.len()); - for &q in &gate.qubits { - idle_qubits.push(q); - } - builder.add_idle(gate.params[0], &idle_qubits); + + // Idle gates need special handling for qubit lists + GateType::Idle if !gate.params.is_empty() => { + // Use gate params for idle time + builder.add_idle(gate.params[0], &gate.qubits); } + + // Invalid cases (not enough qubits, missing parameters, etc.) 
+ _ => panic!( + "Invalid gate type {:?} or insufficient parameters/qubits", + gate.gate_type + ), } } @@ -241,11 +240,7 @@ impl NoiseUtils { /// true if the message contains measurement results, false otherwise #[must_use] pub fn has_measurements(message: &ByteMessage) -> bool { - if let Ok(measurements) = message.parse_measurements() { - !measurements.is_empty() - } else { - false - } + message.parse_measurements().is_ok_and(|m| !m.is_empty()) } /// Creates a new `ByteMessageBuilder` for quantum operations @@ -269,9 +264,9 @@ impl NoiseUtils { #[must_use] pub fn create_gate_message(gates: &[QuantumGate]) -> ByteMessage { let mut builder = Self::create_quantum_builder(); - for gate in gates { - Self::add_gate_to_builder(&mut builder, gate); - } + gates + .iter() + .for_each(|gate| Self::add_gate_to_builder(&mut builder, gate)); builder.build() } @@ -341,7 +336,6 @@ impl NoiseUtils { /// # Errors /// Returns an error if the pauli string is not one of "X", "Y", or "Z" pub fn create_pauli_gate(pauli: &str, qubit: usize) -> Result { - // QuantumGate::try_from_pauli(pauli, qubit) match pauli { "X" => Ok(QuantumGate::x(qubit)), "Y" => Ok(QuantumGate::y(qubit)), diff --git a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs index cc58f283c..df9cb248c 100644 --- a/crates/pecos-engines/src/engines/noise/weighted_sampler.rs +++ b/crates/pecos-engines/src/engines/noise/weighted_sampler.rs @@ -97,25 +97,25 @@ impl WeightedSampler "WeightedSampler: total weight {total_weight} deviates from 1.0 by more than tolerance {tolerance}" ); - let normalized_weights = if (total_weight - 1.0).abs() > FLOAT_EPSILON { - // Within tolerance but not exactly 1.0 - normalize + // Determine if we need to normalize (only normalize if not already very close to 1.0) + let needs_normalization = (total_weight - 1.0).abs() > FLOAT_EPSILON; + + // Collect normalized weights for the distribution + let normalized_weights: Vec = if needs_normalization { weighted_map.values().map(|&w| w / total_weight).collect() } else { - // Already exactly 1.0 (within floating point precision) weighted_map.values().copied().collect() }; // Create normalized BTreeMap let mut normalized_map = BTreeMap::new(); for (key, &value) in weighted_map { - normalized_map.insert( - key.clone(), - if (total_weight - 1.0).abs() < FLOAT_EPSILON { - value - } else { - value / total_weight - }, - ); + let normalized_value = if needs_normalization { + value / total_weight + } else { + value + }; + normalized_map.insert(key.clone(), normalized_value); } (normalized_map, normalized_weights) @@ -140,6 +140,7 @@ impl WeightedSampler } /// Create a Pauli gate based on the Pauli operator character +/// Returns None for identity ('I') operations fn create_pauli_gate(op: char, qubit: usize) -> Option { match op { 'X' => Some(QuantumGate::x(qubit)), @@ -176,14 +177,13 @@ impl SingleQubitWeightedSampler { } fn validate_pauli_leakage_keys(weighted_map: &BTreeMap) { + const VALID_KEYS: [&str; 4] = ["X", "Y", "Z", "L"]; + for key in weighted_map.keys() { - let key_str = key.as_ref(); - match key_str { - "X" | "Y" | "Z" | "L" => {} // Valid keys - _ => panic!( - "SingleQubitWeightedSampler: invalid key '{key_str}' - must be one of \"X\", \"Y\", \"Z\", or \"L\"" - ), - } + assert!( + VALID_KEYS.contains(&key.as_str()), + "SingleQubitWeightedSampler: invalid key '{key}' - must be one of X, Y, Z, or L" + ); } } @@ -207,26 +207,27 @@ impl SingleQubitWeightedSampler { pub fn sample_gates(&self, rng: &mut 
NoiseRng, qubit: usize) -> SingleQubitNoiseResult { let key = self.sample_keys(rng); - match key.as_str() { - "X" => SingleQubitNoiseResult { - gate: Some(QuantumGate::x(qubit)), - qubit_leaked: false, - }, - "Y" => SingleQubitNoiseResult { - gate: Some(QuantumGate::y(qubit)), - qubit_leaked: false, - }, - "Z" => SingleQubitNoiseResult { - gate: Some(QuantumGate::z(qubit)), - qubit_leaked: false, - }, - "L" => SingleQubitNoiseResult { + // Check for leakage first + if key == "L" { + return SingleQubitNoiseResult { gate: None, qubit_leaked: true, - }, + }; + } + + // For Pauli gates, create appropriate gate + let gate = match key.as_str() { + "X" => QuantumGate::x(qubit), + "Y" => QuantumGate::y(qubit), + "Z" => QuantumGate::z(qubit), _ => panic!( "SingleQubitWeightedSampler: invalid key '{key}' - must be one of \"X\", \"Y\", \"Z\", or \"L\"" ), + }; + + SingleQubitNoiseResult { + gate: Some(gate), + qubit_leaked: false, } } } @@ -259,30 +260,28 @@ impl TwoQubitWeightedSampler { } fn validate_two_qubit_keys(weighted_map: &BTreeMap) { - for key in weighted_map.keys() { - let key_str: &str = key.as_ref(); + const VALID_CHARS: [char; 5] = ['X', 'Y', 'Z', 'I', 'L']; - // Key should be exactly 2 characters long + for key in weighted_map.keys() { + // Key must be exactly 2 characters long assert_eq!( - key_str.len(), + key.len(), 2, - "TwoQubitWeightedSampler: invalid key '{key_str}' - must be exactly 2 characters" + "TwoQubitWeightedSampler: invalid key '{key}' - must be exactly 2 characters" ); - // Each character should be one of the valid operators - let chars: Vec = key_str.chars().collect(); - for &c in &chars { - match c { - 'X' | 'Y' | 'Z' | 'I' | 'L' => {} // Valid characters - _ => panic!( - "TwoQubitWeightedSampler: invalid character '{c}' in key '{key_str}' - each character must be one of \"X\", \"Y\", \"Z\", \"I\", or \"L\"" - ), - } + // Check each character is valid + for c in key.chars() { + assert!( + VALID_CHARS.contains(&c), + "TwoQubitWeightedSampler: invalid character '{c}' in key '{key}' - must be one of X, Y, Z, I, or L" + ); } - // Special case: "II" is not allowed (it would represent no operation) + // Special case: "II" is not allowed assert_ne!( - key_str, "II", + key.as_str(), + "II", "TwoQubitWeightedSampler: key 'II' is not allowed as it represents no operation" ); } @@ -311,41 +310,40 @@ impl TwoQubitWeightedSampler { qubit0: usize, qubit1: usize, ) -> TwoQubitNoiseResult { + // Sample a key and extract the characters let key_str = self.sample_keys(rng); - - // Extract the two characters from the key let chars: Vec = key_str.chars().collect(); - let op0 = chars[0]; - let op1 = chars[1]; - // Check for leakage - let qubit0_leaked = op0 == 'L'; - let qubit1_leaked = op1 == 'L'; + // Determine leakage status + let qubit0_leaked = chars[0] == 'L'; + let qubit1_leaked = chars[1] == 'L'; - // If both qubits leaked, return early + // If both qubits leaked, no gates needed if qubit0_leaked && qubit1_leaked { return TwoQubitNoiseResult::with_leakage(true, true, None); } - // Build gates based on the operations + // Build gates for non-leaked qubits only let mut gates = Vec::new(); - // Add gates for non-leaked qubits with non-identity operations + // Convert the first operation if not leaked if !qubit0_leaked { - if let Some(gate) = create_pauli_gate(op0, qubit0) { + if let Some(gate) = create_pauli_gate(chars[0], qubit0) { gates.push(gate); } } + // Convert the second operation if not leaked if !qubit1_leaked { - if let Some(gate) = create_pauli_gate(op1, qubit1) { + if let 
Some(gate) = create_pauli_gate(chars[1], qubit1) { gates.push(gate); } } - let gates = if gates.is_empty() { None } else { Some(gates) }; + // Only return gates if we have some + let gates_option = if gates.is_empty() { None } else { Some(gates) }; - TwoQubitNoiseResult::with_leakage(qubit0_leaked, qubit1_leaked, gates) + TwoQubitNoiseResult::with_leakage(qubit0_leaked, qubit1_leaked, gates_option) } } diff --git a/crates/pecos-engines/src/engines/qir/compiler.rs b/crates/pecos-engines/src/engines/qir/compiler.rs index 9849cd6eb..f3b7535a9 100644 --- a/crates/pecos-engines/src/engines/qir/compiler.rs +++ b/crates/pecos-engines/src/engines/qir/compiler.rs @@ -44,8 +44,10 @@ impl QirCompiler { thread_id: &str, ) -> Result { result.map_err(|e| { - let error_msg = format!("{error_msg}: {e}"); - Self::log_error(QirError::CompilationFailed(error_msg), thread_id) + Self::log_error( + QirError::CompilationFailed(format!("{error_msg}: {e}")), + thread_id, + ) }) } @@ -57,12 +59,11 @@ impl QirCompiler { ) -> Result<(), QueueError> { if !output.status.success() { let stderr = String::from_utf8_lossy(&output.stderr); - let error_msg = format!( - "{command_name} failed with status: {} and error: {stderr}", - output.status - ); return Err(Self::log_error( - QirError::CompilationFailed(error_msg), + QirError::CompilationFailed(format!( + "{command_name} failed with status: {} and error: {stderr}", + output.status + )), thread_id, )); } @@ -84,10 +85,9 @@ impl QirCompiler { /// Helper function to ensure a path's parent directory exists fn ensure_parent_dir_exists(path: &Path, thread_id: &str) -> Result<(), QueueError> { - if let Some(parent) = path.parent() { - Self::ensure_directory_exists(parent, thread_id)?; - } - Ok(()) + path.parent().map_or(Ok(()), |parent| { + Self::ensure_directory_exists(parent, thread_id) + }) } /// Compile a QIR program to a dynamically loadable library @@ -249,8 +249,6 @@ impl QirCompiler { .unwrap_or_default() .as_secs(); - let lib_name = format!("{file_stem_str}_{timestamp}"); - // Determine file paths let object_file = output_dir.join(format!("{file_stem_str}.o")); @@ -262,7 +260,8 @@ impl QirCompiler { #[cfg(target_os = "windows")] let lib_extension = "dll"; - let library_file = output_dir.join(format!("lib{lib_name}.{lib_extension}")); + let library_file = + output_dir.join(format!("lib{file_stem_str}_{timestamp}.{lib_extension}")); debug!("QIR Compiler: [Thread {}] Compilation paths:", thread_id); debug!( @@ -284,38 +283,31 @@ impl QirCompiler { /// Helper function to find an LLVM tool in the system /// /// Search order: - /// 1. `LLVM_HOME` environment variable (points to LLVM installation) - /// 2. `PECOS_LLVM_PATH` environment variable (specific override for this project) + /// 1. `PECOS_LLVM_PATH` environment variable (specific override for this project) + /// 2. `LLVM_HOME` environment variable (points to LLVM installation) /// 3. System PATH /// 4. 
Standard installation directories fn find_llvm_tool(tool_name: &str) -> Option { let thread_id = get_thread_id(); - // Check environment variables first - if let Some(path) = Self::find_tool_from_env(tool_name) { - debug!( - "QIR Compiler: [Thread {}] Found {} from environment variable: {:?}", - thread_id, tool_name, path - ); - return Some(path); - } - - // Then check PATH - if let Some(path) = Self::find_tool_from_path(tool_name) { - debug!( - "QIR Compiler: [Thread {}] Found {} in PATH: {:?}", - thread_id, tool_name, path - ); - return Some(path); - } - - // Finally check standard installation directories - if let Some(path) = Self::find_tool_from_standard_locations(tool_name) { - debug!( - "QIR Compiler: [Thread {}] Found {} in standard location: {:?}", - thread_id, tool_name, path - ); - return Some(path); + // Use a simpler approach - try each method in sequence + let search_methods = [ + ("environment variable", Self::find_tool_from_env(tool_name)), + ("PATH", Self::find_tool_from_path(tool_name)), + ( + "standard location", + Self::find_tool_from_standard_locations(tool_name), + ), + ]; + + for (source, maybe_path) in search_methods { + if let Some(path) = maybe_path { + debug!( + "QIR Compiler: [Thread {}] Found {} from {}: {:?}", + thread_id, tool_name, source, path + ); + return Some(path); + } } debug!( @@ -327,26 +319,17 @@ impl QirCompiler { /// Find tool from environment variables fn find_tool_from_env(tool_name: &str) -> Option { - // Check PECOS_LLVM_PATH first (project-specific override) - if let Ok(llvm_path) = env::var("PECOS_LLVM_PATH") { - let tool_path = PathBuf::from(llvm_path) - .join("bin") - .join(executable_name(tool_name)); - if tool_path.exists() { - return Some(tool_path); - } - } - - // Then check LLVM_HOME - if let Ok(llvm_home) = env::var("LLVM_HOME") { - let tool_path = PathBuf::from(llvm_home) - .join("bin") - .join(executable_name(tool_name)); - if tool_path.exists() { - return Some(tool_path); + // Check environment variables in order of precedence + for env_var in ["PECOS_LLVM_PATH", "LLVM_HOME"] { + if let Ok(path) = env::var(env_var) { + let tool_path = PathBuf::from(path) + .join("bin") + .join(executable_name(tool_name)); + if tool_path.exists() { + return Some(tool_path); + } } } - None } @@ -358,34 +341,24 @@ impl QirCompiler { #[cfg(not(target_os = "windows"))] let command = "which"; - if let Ok(output) = Command::new(command).arg(tool_name).output() { - if output.status.success() { - if let Ok(path_str) = String::from_utf8(output.stdout) { - if let Some(path_line) = path_str.lines().next() { - let path = PathBuf::from(path_line.trim()); - if path.exists() { - return Some(path); - } - } - } - } - } - - None + Command::new(command) + .arg(tool_name) + .output() + .ok() + .filter(|output| output.status.success()) + .and_then(|output| String::from_utf8(output.stdout).ok()) + .and_then(|path_str| path_str.lines().next().map(|s| s.trim().to_string())) + .map(PathBuf::from) + .filter(|path| path.exists()) } /// Find tool from standard installation locations fn find_tool_from_standard_locations(tool_name: &str) -> Option { let exec_name = executable_name(tool_name); - - for base_path in standard_llvm_paths() { - let tool_path = base_path.join(&exec_name); - if tool_path.exists() { - return Some(tool_path); - } - } - - None + standard_llvm_paths() + .into_iter() + .map(|base| base.join(&exec_name)) + .find(|path| path.exists()) } /// Check LLVM version and verify it meets specific version requirements (LLVM 14.x only) @@ -1146,13 +1119,13 @@ 
__declspec(dllexport) void __quantum__rt__result_record_output(int result) {} } // Try each fallback tool - for fallback in fallbacks { + for &fallback in fallbacks { if let Some(path) = Self::find_llvm_tool(fallback) { debug!( "QIR Compiler: [Thread {}] Using fallback tool {} instead of {} at {:?}", thread_id, fallback, primary_tool, path ); - return Some((path, (*fallback).to_string())); + return Some((path, fallback.to_string())); } } diff --git a/crates/pecos-engines/tests/noise_determinism.rs b/crates/pecos-engines/tests/noise_determinism.rs index 7f966649f..147a1d4be 100644 --- a/crates/pecos-engines/tests/noise_determinism.rs +++ b/crates/pecos-engines/tests/noise_determinism.rs @@ -93,10 +93,18 @@ fn apply_noise(model: &mut Box, msg: &ByteMessage) -> ByteMessag } } +/// Compare two `ByteMessage`s by parsing their quantum operations +/// +/// This function extracts and compares the quantum operations from two messages +/// to determine if they represent the same quantum circuit. fn compare_messages(msg1: &ByteMessage, msg2: &ByteMessage) -> bool { let ops1 = msg1.parse_quantum_operations().unwrap_or_default(); let ops2 = msg2.parse_quantum_operations().unwrap_or_default(); + + // For determinism tests, we just need to know if they're equal ops1 == ops2 + // Note: If additional debug info is needed when messages don't match, + // we could expand this function to return details about the differences } #[test] @@ -410,7 +418,7 @@ fn test_complete_measurement_determinism() { fn test_deterministic_measurement() { // This test verifies that using the same seed produces the same measurement results let seed = 42; - println!("Testing deterministic measurement with seed {seed}"); + info!("Testing deterministic measurement with seed {seed}"); // Create a noise model with significant measurement error let mut model = Box::new( @@ -429,21 +437,21 @@ fn test_deterministic_measurement() { builder.add_measurements(&[0], &[0]); // Measure qubit 0 let circuit = builder.build(); - println!("Running first measurement with seed {seed}"); + info!("Running first measurement with seed {seed}"); reset_model_with_seed(&mut model, seed).unwrap(); let engine1 = Box::new(StateVecEngine::new(1)); let result1 = run_complete_simulation(&mut model, engine1, &circuit, seed); let value1 = result1.get(&0).copied().unwrap_or(0); - println!("First measurement result: {value1}"); + info!("First measurement result: {value1}"); - println!("Running second measurement with same seed {seed}"); + info!("Running second measurement with same seed {seed}"); reset_model_with_seed(&mut model, seed).unwrap(); let engine2 = Box::new(StateVecEngine::new(1)); let result2 = run_complete_simulation(&mut model, engine2, &circuit, seed); let value2 = result2.get(&0).copied().unwrap_or(0); - println!("Second measurement result: {value2}"); + info!("Second measurement result: {value2}"); // The results should be identical with the same seed assert_eq!( @@ -453,18 +461,18 @@ fn test_deterministic_measurement() { // Now try with a different seed let different_seed = seed + 1000; - println!("Running measurement with different seed {different_seed}"); + info!("Running measurement with different seed {different_seed}"); reset_model_with_seed(&mut model, different_seed).unwrap(); let engine3 = Box::new(StateVecEngine::new(1)); let result3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed); let value3 = result3.get(&0).copied().unwrap_or(0); - println!("Different seed result: {value3}"); + info!("Different seed result: 
{value3}"); // IMPROVEMENT 1: Assert that different seeds produce different results // (with a caveat for the small probability that they might be the same by chance) if value1 == value3 { - println!( + info!( "NOTE: Same measurement result with different seeds. This can happen with low probability." ); @@ -477,12 +485,12 @@ fn test_deterministic_measurement() { // With a second different seed, the probability of getting the same result again is even lower if value1 == value4 { - println!( + info!( "NOTE: Still same measurement result with a third seed. Very unlikely but possible." ); } else { // Different results with the new seed, so we can assert determinism - println!("Different seed produced different result: {value4}"); + info!("Different seed produced different result: {value4}"); assert_ne!( value1, value4, "Different seeds should usually produce different measurement results" @@ -501,10 +509,11 @@ fn test_deterministic_measurement() { let mut ones = 0; let num_tests = 20; - println!("Running {num_tests} measurements with different seeds"); + info!("Running {num_tests} measurements with different seeds"); for i in 0..num_tests { - // Convert the loop variable to u64 safely (always positive in this context) - let test_seed = seed + i as u64; // Safe since i is always non-negative in this loop + // Use a different deterministic seed for each test iteration derived from the base seed + // Converting i to u64 is safe since we're only using small non-negative loop values + let test_seed = seed + i as u64; reset_model_with_seed(&mut model, test_seed).unwrap(); let engine = Box::new(StateVecEngine::new(1)); let result = run_complete_simulation(&mut model, engine, &circuit, test_seed); @@ -517,25 +526,25 @@ fn test_deterministic_measurement() { } } - println!("Got {zeros} zeros and {ones} ones with different seeds"); + info!("Got {zeros} zeros and {ones} ones with different seeds"); // With enough different seeds, we should get some variation // The probability of getting all zeros or all ones with 20 measurements and a roughly // 50/50 chance for each is approximately 2^(-19), which is extremely unlikely if zeros == 0 || ones == 0 { - println!( + info!( "NOTE: Got only {} measurements. 
This is highly unusual but technically possible.", if zeros == 0 { "ones" } else { "zeros" } ); } else { - println!("Got a mixture of results with different seeds, as expected"); + info!("Got a mixture of results with different seeds, as expected"); } } /// IMPROVEMENT 2: Comprehensive end-to-end test combining all noise types #[test] fn test_comprehensive_noise_determinism() { - println!("Testing comprehensive noise determinism (all noise types)"); + info!("Testing comprehensive noise determinism (all noise types)"); // Create a noise model with all types of noise let mut model = Box::new( @@ -595,7 +604,7 @@ fn test_comprehensive_noise_determinism() { // Run the circuit with a fixed seed let seed = 9876; - println!("Running first simulation with seed {seed}"); + info!("Running first simulation with seed {seed}"); reset_model_with_seed(&mut model, seed).unwrap(); let engine1 = Box::new(StateVecEngine::new(3)); let results1 = run_complete_simulation(&mut model, engine1, &circuit, seed); @@ -603,10 +612,10 @@ fn test_comprehensive_noise_determinism() { // Sort and print results for readability let mut results1_vec: Vec<(usize, i32)> = results1.iter().map(|(&k, &v)| (k, v)).collect(); results1_vec.sort_by_key(|&(k, _)| k); - println!("First run results: {results1_vec:?}"); + info!("First run results: {results1_vec:?}"); // Run again with the same seed - should get identical results - println!("Running second simulation with the same seed {seed}"); + info!("Running second simulation with the same seed {seed}"); reset_model_with_seed(&mut model, seed).unwrap(); let engine2 = Box::new(StateVecEngine::new(3)); let results2 = run_complete_simulation(&mut model, engine2, &circuit, seed); @@ -614,7 +623,7 @@ fn test_comprehensive_noise_determinism() { // Sort and print results for readability let mut results2_vec: Vec<(usize, i32)> = results2.iter().map(|(&k, &v)| (k, v)).collect(); results2_vec.sort_by_key(|&(k, _)| k); - println!("Second run results: {results2_vec:?}"); + info!("Second run results: {results2_vec:?}"); // The results should be identical with the same seed assert_eq!( @@ -624,7 +633,7 @@ fn test_comprehensive_noise_determinism() { // Run again with a different seed - should get different results let different_seed = seed + 1000; - println!("Running third simulation with different seed {different_seed}"); + info!("Running third simulation with different seed {different_seed}"); reset_model_with_seed(&mut model, different_seed).unwrap(); let engine3 = Box::new(StateVecEngine::new(3)); let results3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed); @@ -632,35 +641,35 @@ fn test_comprehensive_noise_determinism() { // Sort and print results for readability let mut results3_vec: Vec<(usize, i32)> = results3.iter().map(|(&k, &v)| (k, v)).collect(); results3_vec.sort_by_key(|&(k, _)| k); - println!("Different seed results: {results3_vec:?}"); + info!("Different seed results: {results3_vec:?}"); // The results should be different (high probability) // If they happen to be identical, try yet another seed if results1 == results3 { - println!( + info!( "NOTE: Same measurement results with different seeds. This can happen with low probability." 
         );
 
         let another_seed = seed + 2000;
-        println!("Trying yet another seed: {another_seed}");
+        info!("Trying yet another seed: {another_seed}");
         reset_model_with_seed(&mut model, another_seed).unwrap();
         let engine4 = Box::new(StateVecEngine::new(3));
         let results4 = run_complete_simulation(&mut model, engine4, &circuit, another_seed);
 
         // The probability of getting identical results again is extremely low
         if results1 == results4 {
-            println!(
+            info!(
                 "NOTE: Still same results with a third seed. Extremely unlikely but technically possible."
             );
         } else {
-            println!("Different seed produced different results as expected");
+            info!("Different seed produced different results as expected");
             assert_ne!(
                 results1, results4,
                 "Different seeds should produce different results in comprehensive test"
             );
         }
     } else {
-        println!("Different seed produced different results as expected");
+        info!("Different seed produced different results as expected");
         assert_ne!(
             results1, results3,
             "Different seeds should produce different results in comprehensive test"
@@ -671,7 +680,7 @@ fn test_comprehensive_noise_determinism() {
 /// IMPROVEMENT 3: Test long-running determinism with a large circuit
 #[test]
 fn test_long_running_determinism() {
-    println!("Testing long-running determinism with many operations");
+    info!("Testing long-running determinism with many operations");
 
     // Create a noise model with moderate error rates
     let mut model = Box::new(
@@ -696,7 +705,7 @@ fn test_long_running_determinism() {
 
     // Now apply a repeated pattern of gates to create a long sequence
    // This gives the RNG many opportunities to diverge if there are issues
-    println!("Building a circuit with 500+ operations...");
+    info!("Building a circuit with 500+ operations...");
     // We're using a small, positive loop count where usize will fit in both u32 and f64 without precision loss
     for i in 0..100 {
         // 100 repetitions of 5+ operations = 500+ operations total
@@ -735,12 +744,12 @@ fn test_long_running_determinism() {
 
     // Run the circuit twice with the same seed
     let seed = 54321;
-    println!("Running first long simulation with seed {seed}");
+    info!("Running first long simulation with seed {seed}");
     reset_model_with_seed(&mut model, seed).unwrap();
     let engine1 = Box::new(StateVecEngine::new(5));
     let results1 = run_complete_simulation(&mut model, engine1, &circuit, seed);
 
-    println!("Running second long simulation with the same seed {seed}");
+    info!("Running second long simulation with the same seed {seed}");
     reset_model_with_seed(&mut model, seed).unwrap();
     let engine2 = Box::new(StateVecEngine::new(5));
     let results2 = run_complete_simulation(&mut model, engine2, &circuit, seed);
@@ -748,11 +757,11 @@ fn test_long_running_determinism() {
     // Sort and print a summary of the results
     let mut results1_vec: Vec<(usize, i32)> = results1.iter().map(|(&k, &v)| (k, v)).collect();
     results1_vec.sort_by_key(|&(k, _)| k);
-    println!("First run results: {results1_vec:?}");
+    info!("First run results: {results1_vec:?}");
 
     let mut results2_vec: Vec<(usize, i32)> = results2.iter().map(|(&k, &v)| (k, v)).collect();
     results2_vec.sort_by_key(|&(k, _)| k);
-    println!("Second run results: {results2_vec:?}");
+    info!("Second run results: {results2_vec:?}");
 
     // Results should be identical despite the long sequence of operations
     assert_eq!(
@@ -762,38 +771,38 @@ fn test_long_running_determinism() {
 
     // Run with a different seed
     let different_seed = seed + 1000;
-    println!("Running with a different seed {different_seed}");
+    info!("Running with a different seed {different_seed}");
     reset_model_with_seed(&mut model, different_seed).unwrap();
     let engine3 = Box::new(StateVecEngine::new(5));
     let results3 = run_complete_simulation(&mut model, engine3, &circuit, different_seed);
 
     // Results should be different (with high probability)
     if results1 == results3 {
-        println!("NOTE: Same results with different seeds. This is very unlikely but possible.");
+        info!("NOTE: Same results with different seeds. This is very unlikely but possible.");
 
         // Try one more seed
         let another_seed = seed + 2000;
-        println!("Trying yet another seed: {another_seed}");
+        info!("Trying yet another seed: {another_seed}");
         reset_model_with_seed(&mut model, another_seed).unwrap();
         let engine4 = Box::new(StateVecEngine::new(5));
         let results4 = run_complete_simulation(&mut model, engine4, &circuit, another_seed);
 
         if results1 == results4 {
-            println!("NOTE: Still same results with a third seed. Extremely unlikely.");
+            info!("NOTE: Still same results with a third seed. Extremely unlikely.");
         } else {
-            println!("Different seed produced different results as expected");
+            info!("Different seed produced different results as expected");
             assert_ne!(
                 results1, results4,
                 "Different seeds should produce different results"
             );
         }
     } else {
-        println!("Different seed produced different results as expected");
+        info!("Different seed produced different results as expected");
         assert_ne!(
             results1, results3,
             "Different seeds should produce different results"
         );
     }
 
-    println!("Long-running determinism test passed successfully!");
+    info!("Long-running determinism test passed successfully!");
 }

From 7bec04ec6bdf6cbd261ac7e55b99fe4a242bcfd2 Mon Sep 17 00:00:00 2001
From: Ciaran Ryan-Anderson
Date: Tue, 13 May 2025 14:37:32 -0600
Subject: [PATCH 8/9] Fix MacOS LLVM 14 workflow issues

---
 .github/workflows/rust-test.yml | 7 ++++---
 .gitignore                      | 1 +
 2 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml
index 0c116440e..3e663740c 100644
--- a/.github/workflows/rust-test.yml
+++ b/.github/workflows/rust-test.yml
@@ -117,11 +117,12 @@ jobs:
       - name: Install LLVM Tools (macOS)
         if: matrix.os == 'macos-latest'
         run: |
-          brew install llvm
-          echo "$(brew --prefix llvm)/bin" >> $GITHUB_PATH
+          brew install llvm@14
+          echo "$(brew --prefix llvm@14)/bin" >> $GITHUB_PATH
           # Make sure it's available in the current step too
-          export PATH="$(brew --prefix llvm)/bin:$PATH"
+          export PATH="$(brew --prefix llvm@14)/bin:$PATH"
           which llc
+          llc --version
 
       - name: Install LLVM Tools (Windows)
         if: matrix.os == 'windows-latest'
diff --git a/.gitignore b/.gitignore
index e079e1171..62515fb8d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
+tmp/
 **/.*/settings.local.json
 
 # Ignore helper text in root

From 2b404d34c178129c5d3b979a0fd3f912cf6d2662 Mon Sep 17 00:00:00 2001
From: Ciaran Ryan-Anderson
Date: Tue, 13 May 2025 14:42:09 -0600
Subject: [PATCH 9/9] Fix Ubuntu's LLVM 14 workflow issue...
---
 .github/workflows/rust-test.yml | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/rust-test.yml b/.github/workflows/rust-test.yml
index 3e663740c..742dbc1a6 100644
--- a/.github/workflows/rust-test.yml
+++ b/.github/workflows/rust-test.yml
@@ -110,7 +110,16 @@ jobs:
         if: matrix.os == 'ubuntu-latest'
         run: |
           sudo apt-get update
-          sudo apt-get install -y llvm clang
+          # Add LLVM 14 repository
+          wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | sudo apt-key add -
+          sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-14 main"
+          sudo apt-get update
+          # Install LLVM 14 specifically
+          sudo apt-get install -y llvm-14 clang-14
+          # Create symlinks for llc and clang
+          sudo update-alternatives --install /usr/bin/llc llc /usr/bin/llc-14 100
+          sudo update-alternatives --install /usr/bin/clang clang /usr/bin/clang-14 100
+          # Verify installation
           which llc
           llc --version