From 0a10c7d50933a854c3d0aae02274d99b2fcba3d3 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Tue, 21 Apr 2026 10:51:28 -0700 Subject: [PATCH 1/2] Copy sparse simulator into QDK This change removes the dependency on the qir-runner repo by moving the `quantum_sparse_sim` directly into the QDK. This will make it easier to implement QDK-specific features in the sparse simulator, allowing it to evolve more tightly with the QDK use cases and other QDK hosted simulators. To enable this, some common types, specifically `NearlyZero` trait and `IndexMap` struct, are relocated/updated to avoid duplicate implementations. --- Cargo.lock | 21 +- Cargo.toml | 2 +- .../compiler/qsc_data_structures/Cargo.toml | 1 + .../compiler/qsc_data_structures/src/lib.rs | 3 +- .../compiler/qsc_eval/src/intrinsic/utils.rs | 20 +- source/index_map/Cargo.toml | 15 + source/index_map/src/lib.rs | 341 +++++ source/simulators/Cargo.toml | 3 +- source/simulators/src/lib.rs | 7 +- source/simulators/src/quantum_sparse_sim.rs | 1326 +++++++++++++++++ .../src/quantum_sparse_sim/matrix_testing.rs | 648 ++++++++ .../src/quantum_sparse_sim/nearly_zero.rs | 24 + .../src/quantum_sparse_sim/tests.rs | 723 +++++++++ 13 files changed, 3096 insertions(+), 38 deletions(-) create mode 100644 source/index_map/Cargo.toml create mode 100644 source/index_map/src/lib.rs create mode 100644 source/simulators/src/quantum_sparse_sim.rs create mode 100644 source/simulators/src/quantum_sparse_sim/matrix_testing.rs create mode 100644 source/simulators/src/quantum_sparse_sim/nearly_zero.rs create mode 100644 source/simulators/src/quantum_sparse_sim/tests.rs diff --git a/Cargo.lock b/Cargo.lock index 71b05e0509..54475b9100 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1159,6 +1159,10 @@ version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "964de6e86d545b246d84badc0fef527924ace5134f30641c203ef52ba83f58d5" +[[package]] +name = "index_map" +version = "0.0.0" + [[package]] name = 
"indexmap" version = "2.13.0" @@ -2086,13 +2090,14 @@ dependencies = [ "expect-test", "futures", "iai-callgrind", + "index_map", "nalgebra", + "ndarray", "noisy_simulator", "num-bigint", "num-complex", "num-traits", "paulimer", - "quantum-sparse-sim", "rand 0.8.5", "regex-lite", "rustc-hash", @@ -2204,6 +2209,7 @@ version = "0.0.0" dependencies = [ "bitflags 2.11.0", "expect-test", + "index_map", "miette", "rustc-hash", "serde", @@ -2541,19 +2547,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "quantum-sparse-sim" -version = "0.9.4" -source = "git+https://github.com/qir-alliance/qir-runner?rev=2c130b0e4397c8d82e483b2d18fc3e241a5fa31c#2c130b0e4397c8d82e483b2d18fc3e241a5fa31c" -dependencies = [ - "ndarray", - "num-bigint", - "num-complex", - "num-traits", - "rand 0.8.5", - "rustc-hash", -] - [[package]] name = "quick-error" version = "1.2.3" diff --git a/Cargo.toml b/Cargo.toml index 1514ee61d4..9cf7e50e97 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,6 +24,7 @@ members = [ "source/compiler/qsc_project", "source/compiler/qsc_rir", "source/fuzz", + "source/index_map", "source/language_service", "source/simulators", "source/pip", @@ -82,7 +83,6 @@ wasm-bindgen-futures = "0.4" rand = "0.8" serde_json = "1.0" pyo3 = "0.28" -quantum-sparse-sim = { git = "https://github.com/qir-alliance/qir-runner", rev = "2c130b0e4397c8d82e483b2d18fc3e241a5fa31c" } async-trait = "0.1" tokio = { version = "1.50", features = ["macros", "rt"] } rayon = "1.11.0" diff --git a/source/compiler/qsc_data_structures/Cargo.toml b/source/compiler/qsc_data_structures/Cargo.toml index 1d117ec103..b4ebc58489 100644 --- a/source/compiler/qsc_data_structures/Cargo.toml +++ b/source/compiler/qsc_data_structures/Cargo.toml @@ -9,6 +9,7 @@ license.workspace = true repository.workspace = true [dependencies] +index_map = { path = "../../index_map" } miette = { workspace = true } serde = { workspace = true } bitflags = { workspace = true } diff --git a/source/compiler/qsc_data_structures/src/lib.rs 
b/source/compiler/qsc_data_structures/src/lib.rs index 31828a484a..033ee408c7 100644 --- a/source/compiler/qsc_data_structures/src/lib.rs +++ b/source/compiler/qsc_data_structures/src/lib.rs @@ -5,10 +5,11 @@ pub mod attrs; pub mod display; pub mod error; pub mod functors; -pub mod index_map; pub mod language_features; pub mod line_column; pub mod namespaces; pub mod source; pub mod span; pub mod target; + +pub use index_map; diff --git a/source/compiler/qsc_eval/src/intrinsic/utils.rs b/source/compiler/qsc_eval/src/intrinsic/utils.rs index ed1f38c4d9..26001ad410 100644 --- a/source/compiler/qsc_eval/src/intrinsic/utils.rs +++ b/source/compiler/qsc_eval/src/intrinsic/utils.rs @@ -6,6 +6,7 @@ use std::collections::hash_map::Entry; use num_bigint::BigUint; use num_complex::{Complex, Complex64}; use num_traits::Zero; +use qdk_simulators::NearlyZero; use rustc_hash::{FxHashMap, FxHashSet}; /// Given a state and a set of qubits, split the state into two parts: the qubits to dump and the remaining qubits. 
@@ -152,25 +153,6 @@ fn normalize_and_reorder( } } -trait NearlyZero { - fn is_nearly_zero(&self) -> bool; -} - -impl NearlyZero for f64 { - fn is_nearly_zero(&self) -> bool { - self.abs() <= 1e-10 - } -} - -impl NearlyZero for Complex -where - T: NearlyZero, -{ - fn is_nearly_zero(&self) -> bool { - self.re.is_nearly_zero() && self.im.is_nearly_zero() - } -} - pub(crate) fn state_to_matrix( state: Vec<(BigUint, Complex64)>, qubit_count: usize, diff --git a/source/index_map/Cargo.toml b/source/index_map/Cargo.toml new file mode 100644 index 0000000000..d1b953dcd9 --- /dev/null +++ b/source/index_map/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "index_map" + +version.workspace = true +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true + +[lints] +workspace = true + +[lib] +doctest = false diff --git a/source/index_map/src/lib.rs b/source/index_map/src/lib.rs new file mode 100644 index 0000000000..3a81134f53 --- /dev/null +++ b/source/index_map/src/lib.rs @@ -0,0 +1,341 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+ +use std::{ + fmt::{self, Debug, Formatter}, + iter::Enumerate, + marker::PhantomData, + ops::{Index, IndexMut}, + option::Option, + slice, vec, +}; + +pub struct IndexMap { + _keys: PhantomData, + values: Vec>, +} + +impl IndexMap +where + K: Into, + V: Default, +{ + pub fn get_mut_or_default(&mut self, key: K) -> &mut V { + let index: usize = key.into(); + if index >= self.values.len() { + self.values.resize_with(index + 1, Option::default); + } + self.values + .get_mut(index) + .expect("IndexMap::get_mut_or_default: index out of bounds") + .get_or_insert_with(Default::default) + } +} + +impl IndexMap { + #[must_use] + pub fn new() -> Self { + Self::default() + } + + #[must_use] + pub fn with_capacity(capacity: usize) -> Self { + Self { + _keys: PhantomData, + values: Vec::with_capacity(capacity), + } + } + + #[must_use] + pub fn is_empty(&self) -> bool { + self.values.iter().all(Option::is_none) + } + + // `Iter` does implement `Iterator`, but it has an additional bound on `K`. + #[allow(clippy::iter_not_returning_iterator)] + #[must_use] + pub fn iter(&self) -> Iter<'_, K, V> { + Iter { + _keys: PhantomData, + base: self.values.iter().enumerate(), + } + } + + // `Iter` does implement `Iterator`, but it has an additional bound on `K`. 
+ #[allow(clippy::iter_not_returning_iterator)] + pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { + IterMut { + _keys: PhantomData, + base: self.values.iter_mut().enumerate(), + } + } + + pub fn drain(&mut self) -> Drain<'_, K, V> { + Drain { + _keys: PhantomData, + base: self.values.drain(..).enumerate(), + } + } + + #[must_use] + pub fn values(&self) -> Values<'_, V> { + Values { + base: self.values.iter(), + } + } + + pub fn values_mut(&mut self) -> ValuesMut<'_, V> { + ValuesMut { + base: self.values.iter_mut(), + } + } + + pub fn retain(&mut self, mut f: F) + where + F: FnMut(K, &V) -> bool, + K: From, + { + for (k, v) in self.values.iter_mut().enumerate() { + let remove = if let Some(value) = v { + !f(K::from(k), value) + } else { + false + }; + if remove { + *v = None; + } + } + } + + pub fn clear(&mut self) { + self.values.clear(); + } +} + +impl, V> IndexMap { + pub fn insert(&mut self, key: K, value: V) { + let index = key.into(); + if index >= self.values.len() { + self.values.resize_with(index + 1, || None); + } + self.values[index] = Some(value); + } + + pub fn contains_key(&self, key: K) -> bool { + let index: usize = key.into(); + self.values.get(index).is_some_and(Option::is_some) + } + + pub fn get(&self, key: K) -> Option<&V> { + let index: usize = key.into(); + self.values.get(index).and_then(Option::as_ref) + } + + pub fn get_mut(&mut self, key: K) -> Option<&mut V> { + let index: usize = key.into(); + self.values.get_mut(index).and_then(Option::as_mut) + } + + pub fn remove(&mut self, key: K) { + let index: usize = key.into(); + if index < self.values.len() { + self.values[index] = None; + } + } +} + +impl Clone for IndexMap { + fn clone(&self) -> Self { + Self { + _keys: PhantomData, + values: self.values.clone(), + } + } +} + +impl Debug for IndexMap { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + f.debug_struct("IndexMap") + .field( + "values", + &self + .values + .iter() + .enumerate() + .filter_map(|(k, v)| v.as_ref().map(|val| 
format!("{k:?}: {val:?}"))) + .collect::>(), + ) + .finish() + } +} + +impl Default for IndexMap { + fn default() -> Self { + Self { + _keys: PhantomData, + values: Vec::default(), + } + } +} + +impl, V> IntoIterator for IndexMap { + type Item = (K, V); + + type IntoIter = IntoIter; + + fn into_iter(self) -> Self::IntoIter { + IntoIter { + _keys: PhantomData, + base: self.values.into_iter().enumerate(), + } + } +} + +impl<'a, K: From, V> IntoIterator for &'a IndexMap { + type Item = (K, &'a V); + + type IntoIter = Iter<'a, K, V>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl, V> FromIterator<(K, V)> for IndexMap { + fn from_iter>(iter: T) -> Self { + let iter = iter.into_iter(); + let mut map = Self::new(); + let (lo, hi) = iter.size_hint(); + map.values.reserve(hi.unwrap_or(lo)); + for (key, value) in iter { + map.insert(key, value); + } + map + } +} + +pub struct Iter<'a, K, V> { + _keys: PhantomData, + base: Enumerate>>, +} + +impl<'a, K: From, V> Iterator for Iter<'a, K, V> { + type Item = (K, &'a V); + + fn next(&mut self) -> Option { + loop { + if let (index, Some(value)) = self.base.next()? { + break Some((index.into(), value)); + } + } + } +} + +pub struct IterMut<'a, K, V> { + _keys: PhantomData, + base: Enumerate>>, +} + +impl, V> DoubleEndedIterator for Iter<'_, K, V> { + fn next_back(&mut self) -> Option { + loop { + if let (index, Some(value)) = self.base.next_back()? { + break Some((index.into(), value)); + } + } + } +} + +impl<'a, K: From, V> Iterator for IterMut<'a, K, V> { + type Item = (K, &'a mut V); + + fn next(&mut self) -> Option { + loop { + if let (index, Some(value)) = self.base.next()? { + break Some((index.into(), value)); + } + } + } +} + +pub struct IntoIter { + _keys: PhantomData, + base: Enumerate>>, +} + +impl, V> Iterator for IntoIter { + type Item = (K, V); + + fn next(&mut self) -> Option { + loop { + if let (index, Some(value)) = self.base.next()? 
{ + break Some((index.into(), value)); + } + } + } +} + +pub struct Drain<'a, K, V> { + _keys: PhantomData, + base: Enumerate>>, +} + +impl, V> Iterator for Drain<'_, K, V> { + type Item = (K, V); + + fn next(&mut self) -> Option { + loop { + if let (index, Some(value)) = self.base.next()? { + break Some((index.into(), value)); + } + } + } +} + +pub struct Values<'a, V> { + base: slice::Iter<'a, Option>, +} + +impl<'a, V> Iterator for Values<'a, V> { + type Item = &'a V; + + fn next(&mut self) -> Option { + loop { + if let Some(value) = self.base.next()? { + break Some(value); + } + } + } +} + +pub struct ValuesMut<'a, V> { + base: slice::IterMut<'a, Option>, +} + +impl<'a, V> Iterator for ValuesMut<'a, V> { + type Item = &'a mut V; + + fn next(&mut self) -> Option { + loop { + if let Some(value) = self.base.next()? { + break Some(value); + } + } + } +} + +impl Index for IndexMap { + type Output = usize; + + fn index(&self, index: usize) -> &Self::Output { + self.get(index) + .expect("IndexMap::index: index out of bounds") + } +} + +impl IndexMut for IndexMap { + fn index_mut(&mut self, index: usize) -> &mut Self::Output { + self.get_mut(index) + .expect("IndexMap::index_mut: index out of bounds") + } +} diff --git a/source/simulators/Cargo.toml b/source/simulators/Cargo.toml index 4f50b7a531..9a03b6653e 100644 --- a/source/simulators/Cargo.toml +++ b/source/simulators/Cargo.toml @@ -9,18 +9,19 @@ license.workspace = true repository.workspace = true [dependencies] -quantum-sparse-sim = { workspace = true } bytemuck = { workspace = true } futures = { workspace = true } wgpu = { workspace = true } rand = { workspace = true } nalgebra = { workspace = true } +ndarray = { workspace = true } num-bigint = { workspace = true } num-complex = { workspace = true } num-traits = { workspace = true } regex-lite = { workspace = true } rustc-hash = { workspace = true } paulimer = { path = "../paulimer" } +index_map = { path = "../index_map" } noisy_simulator = { path = 
"../noisy_simulator" } [dev-dependencies] diff --git a/source/simulators/src/lib.rs b/source/simulators/src/lib.rs index b5b2d2f8ca..bb68461f36 100644 --- a/source/simulators/src/lib.rs +++ b/source/simulators/src/lib.rs @@ -2,12 +2,15 @@ // Licensed under the MIT License. pub mod bytecode; -pub mod noise_config; -pub use quantum_sparse_sim::QuantumSim; pub mod cpu_full_state_simulator; mod gpu_full_state_simulator; +pub mod noise_config; +pub mod quantum_sparse_sim; pub mod stabilizer_simulator; + pub use gpu_full_state_simulator::*; +pub use quantum_sparse_sim::QuantumSim; +pub use quantum_sparse_sim::nearly_zero::NearlyZero; /// A qubit ID. pub type QubitID = usize; diff --git a/source/simulators/src/quantum_sparse_sim.rs b/source/simulators/src/quantum_sparse_sim.rs new file mode 100644 index 0000000000..5789ed4b71 --- /dev/null +++ b/source/simulators/src/quantum_sparse_sim.rs @@ -0,0 +1,1326 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +//! # Sparse State Quantum Simulator +//! This libary implements sparse state simulation, based on the design from +//! Leveraging state sparsity for more efficient quantum simulations. + +pub mod nearly_zero; + +#[cfg(test)] +mod matrix_testing; +#[cfg(test)] +mod tests; + +use index_map::IndexMap; +use ndarray::{Array2, s}; +use nearly_zero::NearlyZero; +use num_bigint::BigUint; +use num_complex::Complex64; +use num_traits::{One, ToPrimitive, Zero}; +use rand::{Rng, SeedableRng, rngs::StdRng}; +use rustc_hash::{FxHashMap, FxHashSet}; +use std::{cell::RefCell, f64::consts::FRAC_1_SQRT_2, fmt::Write}; + +type SparseState = Vec<(BigUint, Complex64)>; +type SparseStateMap = FxHashMap; + +const QUEUE_LIMIT: usize = 10_000; +const DEFAULT_INITIAL_SIZE: usize = 50; + +/// The `QuantumSim` struct contains the necessary state for tracking the simulation. Each instance of a +/// `QuantumSim` represents an independant simulation. 
+pub struct QuantumSim { + /// The structure that describes the current quantum state. + pub(crate) state: SparseState, + + /// The mapping from qubit identifiers to internal state locations. + pub(crate) id_map: IndexMap, + + /// The random number generator used for probabilistic operations. + rng: RefCell, + + /// The bitmap that tracks whether a given qubit has an pending H operation queued on it. + h_flag: BigUint, + + /// The map for tracking queued Pauli-X rotations by a given angle for a given qubit. + rx_queue: IndexMap, + + /// The map for tracking queued Pauli-Y rotations by a given angle for a given qubit. + ry_queue: IndexMap, + + /// The list of queued gate operations. + op_queue: Vec<(Vec, usize, OpCode)>, +} + +/// Operations that support generic queuing. +#[derive(Debug, Copy, Clone)] +pub(crate) enum OpCode { + X, + Y, + Z, + S, + Sadj, + T, + Tadj, + Rz(f64), +} + +/// Levels for flushing of queued gates. +#[derive(Debug, Copy, Clone)] +pub(crate) enum FlushLevel { + H, + HRx, + HRxRy, +} + +impl Default for QuantumSim { + fn default() -> Self { + Self::new(None) + } +} + +/// Provides the common set of functionality across all quantum simulation types. +impl QuantumSim { + /// Creates a new sparse state quantum simulator object with empty initial state (no qubits allocated, no operations buffered). + #[must_use] + pub fn new(rng: Option) -> Self { + let initial_state = vec![(BigUint::zero(), Complex64::one())]; + + QuantumSim { + state: initial_state, + id_map: IndexMap::with_capacity(DEFAULT_INITIAL_SIZE), + rng: RefCell::new(rng.unwrap_or_else(StdRng::from_entropy)), + h_flag: BigUint::zero(), + rx_queue: IndexMap::with_capacity(DEFAULT_INITIAL_SIZE), + ry_queue: IndexMap::with_capacity(DEFAULT_INITIAL_SIZE), + op_queue: Vec::with_capacity(DEFAULT_INITIAL_SIZE), + } + } + + /// Sets the seed for the random number generator used for probabilistic operations. 
+ pub fn set_rng_seed(&mut self, seed: u64) { + self.rng.replace(StdRng::seed_from_u64(seed)); + } + + pub fn take_rng(&mut self) -> StdRng { + self.rng.replace(StdRng::from_entropy()) + } + + /// Returns a sorted copy of the current sparse state as a vector of pairs of indices and complex numbers, along with + /// the total number of currently allocated qubits to help in interpreting the sparse state. + #[allow(clippy::missing_panics_doc)] // reason="Panics can only occur if the keys are not present in the map, which should not happen." + #[must_use] + pub fn get_state(&mut self) -> (Vec<(BigUint, Complex64)>, usize) { + // Swap all the entries in the state to be ordered by qubit identifier. This makes + // interpreting the state easier for external consumers that don't have access to the id map. + let sorted_keys: Vec = self.id_map.iter().map(|(k, _)| k).collect(); + self.flush_queue(&sorted_keys, FlushLevel::HRxRy); + + sorted_keys.iter().enumerate().for_each(|(index, &key)| { + if index != self.id_map[key] { + self.swap_qubit_state(self.id_map[key], index); + if let Some((swapped_key, _)) = + self.id_map.iter().find(|&(_, &value)| value == index) + { + *(self + .id_map + .get_mut(swapped_key) + .expect("key should be present in map")) = self.id_map[key]; + } + *(self + .id_map + .get_mut(key) + .expect("key should be present in map")) = index; + } + }); + + let mut state = self.state.clone(); + state.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + (state, sorted_keys.len()) + } + + /// Allocates a fresh qubit, returning its identifier. Note that this will use the lowest available + /// identifier, and may result in qubits being allocated "in the middle" of an existing register + /// if those identifiers are available. + #[must_use] + pub fn allocate(&mut self) -> usize { + // Add the new entry into the FxHashMap at the first available sequential ID and first available + // sequential location. 
+ let sorted_keys: Vec = self.id_map.iter().map(|(k, _)| k).collect(); + let mut sorted_vals: Vec<&usize> = self.id_map.values().collect(); + sorted_vals.sort_unstable(); + let new_key = sorted_keys + .iter() + .enumerate() + .take_while(|(index, key)| index == *key) + .last() + .map_or(0_usize, |(_, &key)| key + 1); + let new_val = sorted_vals + .iter() + .enumerate() + .take_while(|(index, val)| index == **val) + .last() + .map_or(0_usize, |(_, &&val)| val + 1); + self.id_map.insert(new_key, new_val); + + // Return the new ID that was used. + new_key + } + + /// Releases the given qubit, collapsing its state in the process. After release that identifier is + /// no longer valid for use in other functions and will cause an error if used. + /// # Panics + /// + /// The function will panic if the given id does not correpsond to an allocated qubit. + pub fn release(&mut self, id: usize) { + if self.id_map.iter().count() == 1 { + // This is a release of the last qubit. + // When no qubits are allocated, we can reset the sparse state to a clean ground, so + // any accumulated phase dissappears. + // There is no need to apply any pending operations, as we are about throw away the state + // anyway. + self.op_queue = Vec::with_capacity(DEFAULT_INITIAL_SIZE); + self.h_flag = BigUint::zero(); + self.rx_queue = IndexMap::with_capacity(DEFAULT_INITIAL_SIZE); + self.ry_queue = IndexMap::with_capacity(DEFAULT_INITIAL_SIZE); + self.state = vec![(BigUint::zero(), Complex64::one())]; + } else { + // Measure and collapse the state for this qubit. This will also apply any queued operations. + let res = self.measure(id); + let loc = self.id_map[id]; + + // If the result of measurement was true then we must set the bit for this qubit in every key + // to zero to "reset" the qubit. 
+ if res { + self.state.iter_mut().for_each(|(k, _)| { + if k.bit(loc as u64) { + k.set_bit(loc as u64, false); + } + }); + } + } + + // Remove the qubit from the ID map now that any operations on it are complete. + self.id_map.remove(id); + } + + /// Prints the current state vector to standard output with integer labels for the states, skipping any + /// states with zero amplitude. + #[allow(clippy::missing_panics_doc)] // reason="Panics can only occur if the keys are not present in the map, which should not happen." + #[must_use] + pub fn dump(&mut self) -> String { + // Swap all the entries in the state to be ordered by qubit identifier. This makes + // interpreting the state easier for external consumers that don't have access to the id map. + let mut sorted_keys: Vec = self.id_map.iter().map(|(k, _)| k).collect(); + self.flush_queue(&sorted_keys, FlushLevel::HRxRy); + + sorted_keys.sort_unstable(); + sorted_keys.iter().enumerate().for_each(|(index, &key)| { + if index != self.id_map[key] { + self.swap_qubit_state(self.id_map[key], index); + if let Some((swapped_key, _)) = + self.id_map.iter().find(|&(_, &value)| value == index) + { + *(self + .id_map + .get_mut(swapped_key) + .expect("key should be present in map")) = self.id_map[key]; + } + *(self + .id_map + .get_mut(key) + .expect("key should be present in map")) = index; + } + }); + + self.dump_impl(false) + } + + /// Utility function that performs the actual output of state (and optionally map) to screen. Can + /// be called internally from other functions to aid in debugging and does not perform any modification + /// of the internal structures. 
+ fn dump_impl(&mut self, print_id_map: bool) -> String { + #[cfg(windows)] + const LINE_ENDING: &[u8] = b"\r\n"; + #[cfg(not(windows))] + const LINE_ENDING: &[u8] = b"\n"; + + let mut output = String::new(); + let nl = String::from_utf8(LINE_ENDING.to_vec()).expect("Failed to create newline string"); + if print_id_map { + output + .write_str(&format!("MAP: {:?}", self.id_map)) + .expect("Failed to write output"); + output.write_str(&nl).expect("Failed to write output"); + } + output + .write_str("STATE: [ ") + .expect("Failed to write output"); + + self.state.sort_unstable_by(|a, b| a.0.cmp(&b.0)); + for (key, value) in &self.state { + output + .write_str(&format!("|{key}\u{27e9}: {value}, ")) + .expect("Failed to write output"); + } + output.write_str("]").expect("Failed to write output"); + output.write_str(&nl).expect("Failed to write output"); + output + } + + /// Checks the probability of parity measurement in the computational basis for the given set of + /// qubits. + /// # Panics + /// + /// This function will panic if the given ids do not all correspond to allocated qubits. + /// This function will panic if there are duplicate ids in the given list. + #[must_use] + pub fn joint_probability(&mut self, ids: &[usize]) -> f64 { + // Flush the queue only if there are pending H, Rx, or Ry operations on this qubit. + // Other queued operations will be applied by `check_joint_probability` below. + self.maybe_flush_queue(ids, FlushLevel::HRxRy); + + Self::check_for_duplicates(ids); + let locs: Vec = ids + .iter() + .map(|id| { + *self + .id_map + .get(*id) + .unwrap_or_else(|| panic!("Unable to find qubit with id {id}")) + }) + .collect(); + + self.check_joint_probability(&locs) + } + + /// Checks the internal state of the given qubit and returns true only if the given qubit is in exactly the |0⟩ state. 
+ pub fn qubit_is_zero(&mut self, id: usize) -> bool { + self.joint_probability(&[id]).is_nearly_zero() + } + + /// Measures the qubit with the given id, collapsing the state based on the measured result. + /// # Panics + /// + /// This funciton will panic if the given identifier does not correspond to an allocated qubit. + #[must_use] + pub fn measure(&mut self, id: usize) -> bool { + // We only need to flush the queue here if there are pending H, Rx, or Ry operations. + // Any operations in `self.op_queue` will get applied when `check_joint_probability` + // iterates through the state vector. + self.maybe_flush_queue(&[id], FlushLevel::HRxRy); + + let loc = *self + .id_map + .get(id) + .unwrap_or_else(|| panic!("Unable to find qubit with id {id}")); + let random_sample = self.rng.borrow_mut().r#gen::(); + let prob = self.check_joint_probability(&[loc]); + let res = random_sample < prob; + self.collapse(loc, res, prob); + res + } + + /// Forces the collapse of the qubit with the given id to the specified value, + /// returning the total probability of that value before the collapse. + /// Note that this is not a physical operation, but can be useful for testing and debugging. + /// If the probability of the given value is zero then this function will not perform any + /// collapse since that would result in an invalid state, and it will still return the + /// zero probability as a signal to the caller. + /// # Panics + /// + /// This function will panic if the given identifier does not correspond to an allocated qubit. + pub fn force_collapse(&mut self, val: bool, id: usize) -> f64 { + // Like with measure, flush the queue here if there are pending H, Rx, or Ry operations. + // Any operations in `self.op_queue` will get applied when `check_joint_probability` + // iterates through the state vector. 
+ self.maybe_flush_queue(&[id], FlushLevel::HRxRy); + + let loc = *self + .id_map + .get(id) + .unwrap_or_else(|| panic!("Unable to find qubit with id {id}")); + let prob = self.check_joint_probability(&[loc]); + // Only perform the collapse if the resulting probability is greater than zero, otherwise it would + // collapse to non-existant states and fail normalization computation with a divide by zero. + // Since `check_joint_probability` sums the probability of measuring `true`, we must check that + // either val is true and the probability is not zero or that val is false and + // the 1.0 minus the probability is not zero. + if (val && !prob.is_nearly_zero()) || (!val && !(1.0 - prob).is_nearly_zero()) { + self.collapse(loc, val, prob); + } + if val { prob } else { 1.0 - prob } + } + + /// Performs a joint measurement to get the parity of the given qubits, collapsing the state + /// based on the measured result. + /// # Panics + /// + /// This function will panic if any of the given identifiers do not correspond to an allocated qubit. + /// This function will panic if any of the given identifiers are duplicates. + #[must_use] + pub fn joint_measure(&mut self, ids: &[usize]) -> bool { + // Flush the queue only if there are pending H, Rx, or Ry operations on this qubit. + // Other queued operations will be applied by `check_joint_probability` below. + self.maybe_flush_queue(ids, FlushLevel::HRxRy); + + Self::check_for_duplicates(ids); + let locs: Vec = ids + .iter() + .map(|id| { + *self + .id_map + .get(*id) + .unwrap_or_else(|| panic!("Unable to find qubit with id {id}")) + }) + .collect(); + + let random_sample = self.rng.borrow_mut().r#gen::(); + let prob = self.check_joint_probability(&locs); + let res = random_sample < prob; + self.joint_collapse(&locs, res, prob); + res + } + + /// Utility to get the sum of all probabilies where an odd number of the bits at the given locations + /// are set. 
This corresponds to the probability of jointly measuring those qubits in the computational + /// basis. + fn check_joint_probability(&mut self, locs: &[usize]) -> f64 { + let mask = locs.iter().fold(BigUint::zero(), |accum, loc| { + accum | (BigUint::one() << loc) + }); + + let ops = self.take_ops(); + + self.state.iter_mut().fold(0.0_f64, |accum, (index, val)| { + apply_ops(&ops, index, val); + if (&*index & &mask).count_ones() & 1 > 0 { + accum + val.norm_sqr() + } else { + accum + } + }) + } + + /// Takes ownership of the queued operations and returns them, clearing the `op_queue`. + /// This also resolves and checks the qubits for each operation, mapping them to their locations. + fn take_ops(&mut self) -> Vec<(Vec, u64, OpCode)> { + let ops = self + .op_queue + .iter() + .map(|(ctls, target, op)| { + let (target, ctls) = self.resolve_and_check_qubits(*target, ctls); + (ctls, target, *op) + }) + .collect::>(); + self.op_queue.clear(); + ops + } + + /// Utility to collapse the probability at the given location based on the boolean value. This means + /// that if the given value is 'true' then all keys in the sparse state where the given location + /// has a zero bit will be reduced to zero and removed. Then the sparse state is normalized. + fn collapse(&mut self, loc: usize, val: bool, scaling_denominator: f64) { + self.joint_collapse(&[loc], val, scaling_denominator); + } + + /// Utility to collapse the joint probability of a particular set of locations in the sparse state. + /// The entries that do not correspond to the given boolean value are removed, and then the whole + /// state is normalized. + fn joint_collapse(&mut self, locs: &[usize], val: bool, scaling_denominator: f64) { + let mask = locs.iter().fold(BigUint::zero(), |accum, loc| { + accum | (BigUint::one() << loc) + }); + + // Normalize the new state using the accumulated scaling. 
+ let scaling = 1.0 + / (if val { + scaling_denominator + } else { + 1.0 - scaling_denominator + }) + .sqrt(); + self.state = self + .state + .drain(..) + .filter_map(|(k, v)| { + if (&k & &mask).count_ones() & 1 == u64::from(val) { + Some((k, v * scaling)) + } else { + None + } + }) + .collect(); + } + + /// Swaps the mapped ids for the given qubits. + /// # Panics + /// This function will panic if either of the given identifiers do not correspond to an allocated qubit. + pub fn swap_qubit_ids(&mut self, qubit1: usize, qubit2: usize) { + self.flush_ops(); + + // Must also swap any queued operations. + let (h_val1, h_val2) = ( + self.h_flag.bit(qubit1 as u64), + self.h_flag.bit(qubit2 as u64), + ); + self.h_flag.set_bit(qubit1 as u64, h_val2); + self.h_flag.set_bit(qubit2 as u64, h_val1); + + let x_angle1 = self.rx_queue.get(qubit1).copied(); + let x_angle2 = self.rx_queue.get(qubit2).copied(); + if let Some(angle) = x_angle1 { + self.rx_queue.insert(qubit2, angle); + } else { + self.rx_queue.remove(qubit2); + } + if let Some(angle) = x_angle2 { + self.rx_queue.insert(qubit1, angle); + } else { + self.rx_queue.remove(qubit1); + } + + let y_angle1 = self.ry_queue.get(qubit1).copied(); + let y_angle2 = self.ry_queue.get(qubit2).copied(); + if let Some(ry_val) = y_angle1 { + self.ry_queue.insert(qubit2, ry_val); + } else { + self.ry_queue.remove(qubit2); + } + if let Some(ry_val) = y_angle2 { + self.ry_queue.insert(qubit1, ry_val); + } else { + self.ry_queue.remove(qubit1); + } + + let qubit1_mapped = *self + .id_map + .get(qubit1) + .unwrap_or_else(|| panic!("Unable to find qubit with id {qubit1}")); + let qubit2_mapped = *self + .id_map + .get(qubit2) + .unwrap_or_else(|| panic!("Unable to find qubit with id {qubit2}")); + self.id_map[qubit1] = qubit2_mapped; + self.id_map[qubit2] = qubit1_mapped; + } + + /// Swaps the states of two qubits throughout the sparse state map. 
+ pub(crate) fn swap_qubit_state(&mut self, qubit1: usize, qubit2: usize) { + if qubit1 == qubit2 { + return; + } + + self.flush_queue(&[qubit1, qubit2], FlushLevel::HRxRy); + + let (q1, q2) = (qubit1 as u64, qubit2 as u64); + + // Swap entries in the sparse state to correspond to swapping of two qubits' locations. + self.state.iter_mut().for_each(|(k, _)| { + if k.bit(q1) != k.bit(q2) { + let mut new_k = k.clone(); + new_k.set_bit(q1, !k.bit(q1)); + new_k.set_bit(q2, !k.bit(q2)); + *k = new_k; + } + }); + } + + pub(crate) fn check_for_duplicates(ids: &[usize]) { + let mut unique = FxHashSet::default(); + for id in ids { + assert!( + unique.insert(id), + "Duplicate qubit id '{id}' found in application." + ); + } + } + + /// Verifies that the given target and list of controls does not contain any duplicate entries, and returns + /// those values mapped to internal identifiers and converted to `u64`. + fn resolve_and_check_qubits(&self, target: usize, ctls: &[usize]) -> (u64, Vec) { + let mut ids = ctls.to_owned(); + ids.push(target); + Self::check_for_duplicates(&ids); + + let target = *self + .id_map + .get(target) + .unwrap_or_else(|| panic!("Unable to find qubit with id {target}")) + as u64; + + let ctls: Vec = ctls + .iter() + .map(|c| { + *self + .id_map + .get(*c) + .unwrap_or_else(|| panic!("Unable to find qubit with id {c}")) + as u64 + }) + .collect(); + + (target, ctls) + } + + fn enqueue_op(&mut self, target: usize, ctls: Vec, op: OpCode) { + if self.op_queue.len() == QUEUE_LIMIT { + self.flush_ops(); + } + self.op_queue.push((ctls, target, op)); + } + + fn has_queued_hrxy(&self, target: usize) -> bool { + self.h_flag.bit(target as u64) + || self.rx_queue.contains_key(target) + || self.ry_queue.contains_key(target) + } + + fn maybe_flush_queue(&mut self, qubits: &[usize], level: FlushLevel) { + if qubits.iter().any(|q| self.has_queued_hrxy(*q)) { + self.flush_queue(qubits, level); + } + } + + pub(crate) fn flush_queue(&mut self, qubits: &[usize], level: 
FlushLevel) { + for target in qubits { + if self.h_flag.bit(*target as u64) { + self.apply_mch(&[], *target); + self.h_flag.set_bit(*target as u64, false); + } + match level { + FlushLevel::H => (), + FlushLevel::HRx => self.flush_rx(*target), + FlushLevel::HRxRy => { + self.flush_rx(*target); + self.flush_ry(*target); + } + } + } + // Always call flush ops afterward to make sure no pending operations remain. If any of the above + // already applied operations, this will be a no-op since the queue will be empty. + self.flush_ops(); + } + + fn flush_ops(&mut self) { + if !self.op_queue.is_empty() { + let ops = self.take_ops(); + self.state.iter_mut().for_each(|(index, value)| { + apply_ops(&ops, index, value); + }); + } + } + + fn flush_rx(&mut self, target: usize) { + if let Some(theta) = self.rx_queue.get(target) { + self.mcrotation(&[], *theta, target, false); + self.rx_queue.remove(target); + } + } + + fn flush_ry(&mut self, target: usize) { + if let Some(theta) = self.ry_queue.get(target) { + self.mcrotation(&[], *theta, target, true); + self.ry_queue.remove(target); + } + } + + /// Performs the Pauli-X transformation on a single state. + fn x_transform((index, _val): (&mut BigUint, &mut Complex64), target: u64) { + index.set_bit(target, !index.bit(target)); + } + + /// Single qubit X gate. + pub fn x(&mut self, target: usize) { + if let Some(entry) = self.ry_queue.get_mut(target) { + // XY = -YX, so switch the sign on any queued Ry rotations. + *entry *= -1.0; + } + if self.h_flag.bit(target as u64) { + // XH = HZ, so execute a Z transformation if there is an H queued. + self.enqueue_op(target, Vec::new(), OpCode::Z); + } else { + self.enqueue_op(target, Vec::new(), OpCode::X); + } + } + + /// Multi-controlled X gate. 
+ pub fn mcx(&mut self, ctls: &[usize], target: usize) { + if ctls.is_empty() { + self.x(target); + return; + } + + if self.ry_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRxRy); + } + + if ctls.len() > 1 { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + } else if self.ry_queue.contains_key(ctls[0]) + || self.rx_queue.contains_key(ctls[0]) + || (self.h_flag.bit(ctls[0] as u64) && !self.h_flag.bit(target as u64)) + { + self.flush_queue(ctls, FlushLevel::HRxRy); + } + + if self.h_flag.bit(target as u64) { + if ctls.len() == 1 && self.h_flag.bit(ctls[0] as u64) { + // An H on both target and single control means we can perform a CNOT with the control + // and target switched. + self.enqueue_op(ctls[0], vec![target], OpCode::X); + } else { + // XH = HZ, so perform a multi-controlled Z here. + self.enqueue_op(target, ctls.into(), OpCode::Z); + } + } else { + self.enqueue_op(target, ctls.into(), OpCode::X); + } + } + + /// Performs the Pauli-Y transformation on a single state. + fn y_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + index.set_bit(target, !index.bit(target)); + *val *= if index.bit(target) { + Complex64::i() + } else { + -Complex64::i() + }; + } + + /// Single qubit Y gate. + pub fn y(&mut self, target: usize) { + if let Some(entry) = self.rx_queue.get_mut(target) { + // XY = -YX, so flip the sign on any queued Rx rotation. + *entry *= -1.0; + } + + self.enqueue_op(target, Vec::new(), OpCode::Y); + } + + /// Multi-controlled Y gate. + #[allow(clippy::missing_panics_doc)] // reason="Panics can only occur if ctrls are empty, which is handled at the top of the function." 
+ pub fn mcy(&mut self, ctls: &[usize], target: usize) { + if ctls.is_empty() { + self.y(target); + return; + } + + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + + if self.rx_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRx); + } + + if self.h_flag.bit(target as u64) { + // HY = -YH, so add a phase to one of the controls. + let (target, ctls) = ctls + .split_first() + .expect("Controls list cannot be empty here."); + self.enqueue_op(*target, ctls.into(), OpCode::Z); + } + + self.enqueue_op(target, ctls.into(), OpCode::Y); + } + + /// Performs a phase transformation (a rotation in the computational basis) on a single state. + fn phase_transform( + phase: Complex64, + (index, val): (&mut BigUint, &mut Complex64), + target: u64, + ) { + *val *= if index.bit(target) { + phase + } else { + Complex64::one() + }; + } + + /// Multi-controlled phase rotation ("G" gate). + pub fn mcphase(&mut self, ctls: &[usize], phase: Complex64, target: usize) { + self.flush_queue(ctls, FlushLevel::HRxRy); + self.flush_queue(&[target], FlushLevel::HRxRy); + + let (target, ctls) = self.resolve_and_check_qubits(target, ctls); + + self.state.iter_mut().for_each(|(index, value)| { + if ctls.iter().all(|c| index.bit(*c)) { + Self::phase_transform(phase, (index, value), target); + } + }); + } + + /// Performs the Pauli-Z transformation on a single state. + fn z_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + Self::phase_transform(-Complex64::one(), (index, val), target); + } + + /// Single qubit Z gate. + pub fn z(&mut self, target: usize) { + if let Some(entry) = self.ry_queue.get_mut(target) { + // ZY = -YZ, so flip the sign on any queued Ry rotations. + *entry *= -1.0; + } + + if let Some(entry) = self.rx_queue.get_mut(target) { + // ZX = -XZ, so flip the sign on any queued Rx rotations. + *entry *= -1.0; + } + + if self.h_flag.bit(target as u64) { + // HZ = XH, so execute an X if an H is queued. 
+ self.enqueue_op(target, Vec::new(), OpCode::X); + } else { + self.enqueue_op(target, Vec::new(), OpCode::Z); + } + } + + /// Multi-controlled Z gate. + pub fn mcz(&mut self, ctls: &[usize], target: usize) { + if ctls.is_empty() { + self.z(target); + return; + } + + // Count up the instances of queued H and Rx/Ry on controls and target, treating rotations as 2. + let count = ctls.iter().fold(0, |accum, c| { + accum + + i32::from(self.h_flag.bit(*c as u64)) + + if self.rx_queue.contains_key(*c) || self.ry_queue.contains_key(*c) { + 2 + } else { + 0 + } + }) + i32::from(self.h_flag.bit(target as u64)) + + if self.rx_queue.contains_key(target) || self.ry_queue.contains_key(target) { + 2 + } else { + 0 + }; + + if count == 1 { + // Only when count is exactly one can we optimize, meaning there is exactly one H on either + // the target or one control. Create a new controls list and target where the target is whichever + // qubit has the H queued. + let (ctls, target): (Vec, usize) = + if let Some(h_ctl) = ctls.iter().find(|c| self.h_flag.bit(**c as u64)) { + // The H is queued on one control, so create a new controls list that swaps that control for the original target. + ( + ctls.iter() + .map(|c| if c == h_ctl { target } else { *c }) + .collect(), + *h_ctl, + ) + } else { + // The H is queued on the target, so use the original values. + (ctls.to_owned(), target) + }; + // With a single H queued, treat the multi-controlled Z as a multi-controlled X. + self.enqueue_op(target, ctls, OpCode::X); + } else { + self.flush_queue(ctls, FlushLevel::HRxRy); + self.flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::Z); + } + } + + /// Performs the S transformation on a single state. + fn s_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + Self::phase_transform(Complex64::i(), (index, val), target); + } + + /// Single qubit S gate. 
+ pub fn s(&mut self, target: usize) { + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, Vec::new(), OpCode::S); + } + + /// Multi-controlled S gate. + pub fn mcs(&mut self, ctls: &[usize], target: usize) { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::S); + } + + /// Performs the adjoint S transformation on a single state. + fn sadj_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + Self::phase_transform(-Complex64::i(), (index, val), target); + } + + /// Single qubit Adjoint S Gate. + pub fn sadj(&mut self, target: usize) { + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, Vec::new(), OpCode::Sadj); + } + + /// Multi-controlled Adjoint S gate. + pub fn mcsadj(&mut self, ctls: &[usize], target: usize) { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::Sadj); + } + + /// Performs the T transformation on a single state. + fn t_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + Self::phase_transform( + Complex64::new(FRAC_1_SQRT_2, FRAC_1_SQRT_2), + (index, val), + target, + ); + } + + /// Single qubit T gate. + pub fn t(&mut self, target: usize) { + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, Vec::new(), OpCode::T); + } + + /// Multi-controlled T gate. + pub fn mct(&mut self, ctls: &[usize], target: usize) { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::T); + } + + /// Performs the adjoint T transformation to a single state. 
+ fn tadj_transform((index, val): (&mut BigUint, &mut Complex64), target: u64) { + Self::phase_transform( + Complex64::new(FRAC_1_SQRT_2, -FRAC_1_SQRT_2), + (index, val), + target, + ); + } + + /// Single qubit Adjoint T gate. + pub fn tadj(&mut self, target: usize) { + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, Vec::new(), OpCode::Tadj); + } + + /// Multi-controlled Adjoint T gate. + pub fn mctadj(&mut self, ctls: &[usize], target: usize) { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::Tadj); + } + + /// Performs the Rz transformation with the given angle to a single state. + fn rz_transform((index, val): (&mut BigUint, &mut Complex64), theta: f64, target: u64) { + *val *= Complex64::exp(Complex64::new( + 0.0, + theta / 2.0 * if index.bit(target) { 1.0 } else { -1.0 }, + )); + } + + /// Single qubit Rz gate. + pub fn rz(&mut self, theta: f64, target: usize) { + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, Vec::new(), OpCode::Rz(theta)); + } + + /// Multi-controlled Rz gate. + pub fn mcrz(&mut self, ctls: &[usize], theta: f64, target: usize) { + self.maybe_flush_queue(ctls, FlushLevel::HRxRy); + self.maybe_flush_queue(&[target], FlushLevel::HRxRy); + self.enqueue_op(target, ctls.into(), OpCode::Rz(theta)); + } + + /// Single qubit H gate. + pub fn h(&mut self, target: usize) { + if let Some(entry) = self.ry_queue.get_mut(target) { + // YH = -HY, so flip the sign on any queued Ry rotations. + *entry *= -1.0; + } + + if self.rx_queue.contains_key(target) { + // Can't commute well with queued Rx, so flush those ops. + self.flush_queue(&[target], FlushLevel::HRx); + } + + self.h_flag + .set_bit(target as u64, !self.h_flag.bit(target as u64)); + } + + /// Multi-controlled H gate. 
+ pub fn mch(&mut self, ctls: &[usize], target: usize) { + self.flush_queue(ctls, FlushLevel::HRxRy); + if self.ry_queue.contains_key(target) || self.rx_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRxRy); + } + + self.apply_mch(ctls, target); + } + + /// Apply the full state transformation corresponding to the multi-controlled H gate. Note that + /// this can increase the size of the state vector by introducing new non-zero states + /// or decrease the size by bringing some states to zero. + fn apply_mch(&mut self, ctls: &[usize], target: usize) { + let (target, ctls) = self.resolve_and_check_qubits(target, ctls); + + // This operation requires reading other entries in the state vector while modifying one, so convert it into a state map + // to support lookups. Apply any pending operations in the process. + let ops = self.take_ops(); + let mapped_state: SparseStateMap = self + .state + .drain(..) + .map(|(mut index, mut val)| { + apply_ops(&ops, &mut index, &mut val); + (index, val) + }) + .collect(); + + let mut flipped = BigUint::zero(); + flipped.set_bit(target, true); + + self.state.extend(mapped_state.iter().fold( + SparseState::default(), + |mut accum, (index, value)| { + if ctls.iter().all(|c| index.bit(*c)) { + let flipped_index = index ^ &flipped; + if !mapped_state.contains_key(&flipped_index) { + // The state vector does not have an entry for the state where the target is flipped + // and all other qubits are the same, meaning there is no superposition for this state. + // Create the additional state calculating the resulting superposition. 
+ let mut zero_bit_index = index.clone(); + zero_bit_index.set_bit(target, false); + accum.push((zero_bit_index, value * std::f64::consts::FRAC_1_SQRT_2)); + + let mut one_bit_index = index.clone(); + one_bit_index.set_bit(target, true); + accum.push(( + one_bit_index, + value + * std::f64::consts::FRAC_1_SQRT_2 + * (if index.bit(target) { -1.0 } else { 1.0 }), + )); + } else if !index.bit(target) { + // The state vector already has a superposition for this state, so calculate the resulting + // updates using the value from the flipped state. Note we only want to perform this for one + // of the states to avoid duplication, so we pick the Zero state by checking the target bit + // in the index is not set. + let flipped_value = &mapped_state[&flipped_index]; + + let new_val = (value + flipped_value) as Complex64; + if !new_val.is_nearly_zero() { + accum.push((index.clone(), new_val * std::f64::consts::FRAC_1_SQRT_2)); + } + + let new_val = (value - flipped_value) as Complex64; + if !new_val.is_nearly_zero() { + accum.push(( + index | &flipped, + new_val * std::f64::consts::FRAC_1_SQRT_2, + )); + } + } + } else { + accum.push((index.clone(), *value)); + } + accum + }, + )); + } + + /// Performs a rotation in the non-computational basis, which cannot be done in-place. This + /// corresponds to an Rx or Ry depending on the requested sign flip, and notably can increase or + /// decrease the size of the state vector. + fn mcrotation(&mut self, ctls: &[usize], theta: f64, target: usize, sign_flip: bool) { + // Calculate the matrix entries for the rotation by the given angle, respecting the sign flip. + let m00 = Complex64::new(f64::cos(theta / 2.0), 0.0); + let m01 = Complex64::new(0.0, f64::sin(theta / -2.0)) + * if sign_flip { + -Complex64::i() + } else { + Complex64::one() + }; + + if m00.is_nearly_zero() { + // This is just a Pauli rotation. 
+ if sign_flip { + self.mcy(ctls, target); + } else { + self.mcx(ctls, target); + } + // Rx/Ry are different from X/Y by a global phase of -i, so apply that here when indicated by m01, + // for mathematical correctness. + let (_, ctls) = self.resolve_and_check_qubits(target, ctls); + let factor = m01 + * if sign_flip { + Complex64::i() + } else { + Complex64::one() + }; + if factor != Complex64::one() { + let ops = self.take_ops(); + self.state.iter_mut().for_each(|(index, value)| { + apply_ops(&ops, index, value); + if ctls.iter().all(|c| index.bit(*c)) { + *value *= factor; + } + }); + } + } else if m01.is_nearly_zero() { + // This is just identity, so we can effectively no-op, and just add a phase of -1 as indicated by m00. + // Here, m00 + 1 == 0 is used to check if m00 == -1. + if (m00 + Complex64::one()).is_nearly_zero() { + let ops = self.take_ops(); + let (_, ctls) = self.resolve_and_check_qubits(target, ctls); + self.state.iter_mut().for_each(|(index, value)| { + apply_ops(&ops, index, value); + if ctls.iter().all(|c| index.bit(*c)) { + *value *= -Complex64::one(); + } + }); + } + } else { + let (target, ctls) = self.resolve_and_check_qubits(target, ctls); + let m10 = m01 * if sign_flip { -1.0 } else { 1.0 }; + let mut flipped = BigUint::zero(); + flipped.set_bit(target, true); + + // This operation requires reading other entries in the state vector while modifying one, so convert it into a state map + // to support lookups. Apply any pending operations in the process. + let ops = self.take_ops(); + let mapped_state: SparseStateMap = self + .state + .drain(..) 
+ .map(|(mut index, mut val)| { + apply_ops(&ops, &mut index, &mut val); + (index, val) + }) + .collect(); + + self.state.extend(mapped_state.iter().fold( + SparseState::default(), + |mut accum, (index, value)| { + if ctls.iter().all(|c| index.bit(*c)) { + let flipped_index = index ^ &flipped; + if !mapped_state.contains_key(&flipped_index) { + // The state vector doesn't have an entry for the flipped target bit, so there + // isn't a superposition. Calculate the superposition using the matrix entries. + if index.bit(target) { + accum.push((flipped_index, value * m01)); + accum.push((index.clone(), value * m00)); + } else { + accum.push((index.clone(), value * m00)); + accum.push((flipped_index, value * m10)); + } + } else if !index.bit(target) { + // There is already a superposition of the target for this state, so calculate the new + // entries using the values from the flipped state. Note we only want to do this for one of + // the states, so we pick the Zero state by checking the target bit in the index is not set. + let flipped_val = mapped_state[&flipped_index]; + + let new_val = (value * m00 + flipped_val * m01) as Complex64; + if !new_val.is_nearly_zero() { + accum.push((index.clone(), new_val)); + } + + let new_val = (value * m10 + flipped_val * m00) as Complex64; + if !new_val.is_nearly_zero() { + accum.push((flipped_index, new_val)); + } + } + } else { + accum.push((index.clone(), *value)); + } + accum + }, + )); + } + } + + /// Single qubit Rx gate. + pub fn rx(&mut self, theta: f64, target: usize) { + if self.h_flag.bit(target as u64) || self.ry_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRxRy); + } + if let Some(entry) = self.rx_queue.get_mut(target) { + *entry += theta; + if entry.is_nearly_zero() { + self.rx_queue.remove(target); + } + } else { + self.rx_queue.insert(target, theta); + } + } + + /// Multi-controlled Rx gate. 
+ pub fn mcrx(&mut self, ctls: &[usize], theta: f64, target: usize) { + self.flush_queue(ctls, FlushLevel::HRxRy); + + if self.ry_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRxRy); + } else if self.h_flag.bit(target as u64) { + self.flush_queue(&[target], FlushLevel::H); + } + + self.mcrotation(ctls, theta, target, false); + } + + /// Single qubit Ry gate. + pub fn ry(&mut self, theta: f64, target: usize) { + if let Some(entry) = self.ry_queue.get_mut(target) { + *entry += theta; + if entry.is_nearly_zero() { + self.ry_queue.remove(target); + } + } else { + self.ry_queue.insert(target, theta); + } + } + + /// Multi-controlled Ry gate. + pub fn mcry(&mut self, ctls: &[usize], theta: f64, target: usize) { + self.flush_queue(ctls, FlushLevel::HRxRy); + + if self.rx_queue.contains_key(target) { + self.flush_queue(&[target], FlushLevel::HRx); + } else if self.h_flag.bit(target as u64) { + self.flush_queue(&[target], FlushLevel::H); + } + + self.mcrotation(ctls, theta, target, true); + } + + /// Applies the given unitary to the given targets, extending the unitary to accommodate controls if any. + /// # Panics + /// + /// This function will panic if given ids in either targets or optional controls that do not correspond to allocated + /// qubits, or if there is a duplicate id in targets or controls. + /// This function will panic if the given unitary matrix does not match the number of targets provided. + /// This function will panic if the given unitary is not square. + /// This function will panic if the total number of targets and controls is too large for a `u32`. + pub fn apply( + &mut self, + unitary: &Array2, + targets: &[usize], + controls: Option<&[usize]>, + ) { + let mut targets = targets.to_vec(); + let mut unitary = unitary.clone(); + + assert!( + unitary.ncols() == unitary.nrows(), + "Application given non-square matrix." 
+ ); + + assert!( + unitary.ncols() == 1_usize << targets.len(), + "Matrix size must be {}, got {}.", + 1_usize << targets.len(), + unitary.ncols() + ); + + if let Some(ctrls) = controls { + // Add controls in order as targets. + ctrls + .iter() + .enumerate() + .for_each(|(index, &element)| targets.insert(index, element)); + + // Extend the provided unitary by inserting it into an identity matrix. + unitary = controlled( + &unitary, + ctrls + .len() + .try_into() + .expect("controls length should fit in u32"), + ); + } + Self::check_for_duplicates(&targets); + + self.flush_queue(&targets, FlushLevel::HRxRy); + + targets + .iter() + .rev() + .enumerate() + .for_each(|(target_loc, target)| { + let loc = *self + .id_map + .get(*target) + .unwrap_or_else(|| panic!("Unable to find qubit with id {target}")); + let swap_id = self + .id_map + .iter() + .find(|&(_, &value)| value == target_loc) + .expect("qubit id map should contain a mapping for every allocated qubit") + .0; + self.swap_qubit_state(loc, target_loc); + self.id_map[swap_id] = loc; + self.id_map[*target] = target_loc; + }); + + let op_size = unitary.nrows(); + // Applying the unitary to the state vector requires looking up other entries in the state, + // so convert into a hash map while iterating through the state vector. We drain that map + // at the end to convert it back into a vector. + self.state = self + .state + .drain(..) 
+ .fold(SparseStateMap::default(), |mut accum, (index, val)| { + let i = &index / op_size; + let l = (&index % op_size) + .to_usize() + .expect("Cannot operate on more than 64 qubits at a time."); + for j in (0..op_size).filter(|j| !unitary.row(*j)[l].is_nearly_zero()) { + let loc = (&i * op_size) + j; + if let Some(entry) = accum.get_mut(&loc) { + *entry += unitary.row(j)[l] * val; + } else { + accum.insert((&i * op_size) + j, unitary.row(j)[l] * val); + } + if accum + .get(&loc) + .map_or_else(|| false, |entry| (*entry).is_nearly_zero()) + { + accum.remove(&loc); + } + } + accum + }) + .drain() + .collect(); + assert!( + !self.state.is_empty(), + "State vector should never be empty." + ); + } +} + +/// Given a list of operations, applies them sequentially to the given state vector index and value in-place. +fn apply_ops( + ops: &[(Vec, u64, OpCode)], + index: &mut BigUint, + amplitude: &mut num_complex::Complex, +) { + for (ctls, target, op) in ops { + if ctls.iter().all(|c| index.bit(*c)) { + match op { + OpCode::X => QuantumSim::x_transform((index, amplitude), *target), + OpCode::Y => QuantumSim::y_transform((index, amplitude), *target), + OpCode::Z => QuantumSim::z_transform((index, amplitude), *target), + OpCode::S => QuantumSim::s_transform((index, amplitude), *target), + OpCode::Sadj => QuantumSim::sadj_transform((index, amplitude), *target), + OpCode::T => QuantumSim::t_transform((index, amplitude), *target), + OpCode::Tadj => QuantumSim::tadj_transform((index, amplitude), *target), + OpCode::Rz(theta) => { + QuantumSim::rz_transform((index, amplitude), *theta, *target); + } + } + } + } +} + +/// Extends the given unitary matrix into a matrix corresponding to the same unitary with a given number of controls +/// by inserting it into an identity matrix. 
+#[must_use] +pub fn controlled(u: &Array2, num_ctrls: u32) -> Array2 { + let mut controlled_u = Array2::eye(u.nrows() * 2_usize.pow(num_ctrls)); + let dim_rows = controlled_u.nrows() - u.nrows(); + let dim_cols = controlled_u.ncols() - u.ncols(); + controlled_u.slice_mut(s![dim_rows.., dim_cols..]).assign(u); + controlled_u +} diff --git a/source/simulators/src/quantum_sparse_sim/matrix_testing.rs b/source/simulators/src/quantum_sparse_sim/matrix_testing.rs new file mode 100644 index 0000000000..017da04a46 --- /dev/null +++ b/source/simulators/src/quantum_sparse_sim/matrix_testing.rs @@ -0,0 +1,648 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use core::f64::consts::FRAC_1_SQRT_2; +use ndarray::{Array2, array}; +use num_complex::Complex64; +use num_traits::One; +use num_traits::Zero; + +use super::{QuantumSim, controlled, nearly_zero::NearlyZero}; +use core::f64::consts::PI; + +/// Returns a unitary matrix representing the `X` operation. +#[must_use] +pub fn x() -> Array2 { + array![ + [Complex64::zero(), Complex64::one()], + [Complex64::one(), Complex64::zero()] + ] +} + +/// Returns a unitary matrix representing the `Y` operation. +#[must_use] +pub fn y() -> Array2 { + array![ + [Complex64::zero(), -Complex64::i()], + [Complex64::i(), Complex64::zero()] + ] +} + +/// Returns a unitary matrix representing the `Z` operation. +#[must_use] +pub fn z() -> Array2 { + array![ + [Complex64::one(), Complex64::zero()], + [Complex64::zero(), -Complex64::one()] + ] +} + +/// Returns a unitary matrix representing the single-qubit Hadamard transformation. +#[must_use] +pub fn h() -> Array2 { + array![ + [Complex64::one(), Complex64::one()], + [Complex64::one(), -Complex64::one()] + ] * FRAC_1_SQRT_2 +} + +/// Returns a unitary matrix representing the `T` operation. 
+#[must_use] +pub fn t() -> Array2 { + array![ + [Complex64::one(), Complex64::zero()], + [ + Complex64::zero(), + Complex64::new(FRAC_1_SQRT_2, FRAC_1_SQRT_2) + ] + ] +} + +/// Returns a unitary matrix representing the `S` operation. +#[must_use] +pub fn s() -> Array2 { + array![ + [Complex64::one(), Complex64::zero()], + [Complex64::zero(), Complex64::i()] + ] +} + +/// Returns a unitary matrix representing the `Rx` operation with the given angle. +#[must_use] +pub fn rx(theta: f64) -> Array2 { + let cos_theta = f64::cos(theta / 2.0); + let sin_theta = f64::sin(theta / 2.0); + array![ + [ + Complex64::new(cos_theta, 0.0), + Complex64::new(0.0, -sin_theta) + ], + [ + Complex64::new(0.0, -sin_theta), + Complex64::new(cos_theta, 0.0) + ] + ] +} + +/// Returns a unitary matrix representing the `Ry` operation with the given angle. +#[must_use] +pub fn ry(theta: f64) -> Array2 { + let cos_theta = f64::cos(theta / 2.0); + let sin_theta = f64::sin(theta / 2.0); + array![ + [ + Complex64::new(cos_theta, 0.0), + Complex64::new(-sin_theta, 0.0) + ], + [ + Complex64::new(sin_theta, 0.0), + Complex64::new(cos_theta, 0.0) + ] + ] +} + +/// Returns a unitary matrix representing the `Rz` operation with the given angle. +#[must_use] +pub fn rz(theta: f64) -> Array2 { + let exp_theta = Complex64::exp(Complex64::new(0.0, theta / 2.0)); + let neg_exp_theta = Complex64::exp(Complex64::new(0.0, -theta / 2.0)); + array![ + [neg_exp_theta, Complex64::zero()], + [Complex64::zero(), exp_theta] + ] +} + +/// Returns a unitary matrix representing the `G` or `GlobalPhase` operation with the given angle. +#[must_use] +pub fn g(theta: f64) -> Array2 { + let neg_exp_theta = Complex64::exp(Complex64::new(0.0, -theta / 2.0)); + array![ + [Complex64::one(), Complex64::zero()], + [Complex64::zero(), neg_exp_theta] + ] +} + +/// Returns a unitary matrix representing the `SWAP` operation. 
+#[must_use] +pub fn swap() -> Array2 { + array![ + [ + Complex64::one(), + Complex64::zero(), + Complex64::zero(), + Complex64::zero() + ], + [ + Complex64::zero(), + Complex64::zero(), + Complex64::one(), + Complex64::zero() + ], + [ + Complex64::zero(), + Complex64::one(), + Complex64::zero(), + Complex64::zero() + ], + [ + Complex64::zero(), + Complex64::zero(), + Complex64::zero(), + Complex64::one() + ] + ] +} + +/// Transforms the given matrix into it's adjoint using the transpose of the complex conjugate. +#[must_use] +pub fn adjoint(u: &Array2) -> Array2 { + u.t().map(Complex64::conj) +} + +fn is_self_adjoint(arr: &Array2) -> bool { + arr == adjoint(arr) +} + +fn are_equal_to_precision(actual: Array2, expected: Array2) -> bool { + // If we use assert_eq here, we'll get bitten by finite precision. + // We also can't use LAPACK, since that greatly complicates bindings, + // so we do an ad hoc implementation here. + (actual - expected).map(|x| x.norm()).sum() <= 1e-10 +} + +#[test] +fn h_is_self_adjoint() { + assert!(is_self_adjoint(&h())); +} + +#[test] +fn x_is_self_adjoint() { + assert!(is_self_adjoint(&x())); +} + +#[test] +fn y_is_self_adjoint() { + assert!(is_self_adjoint(&y())); +} + +#[test] +fn z_is_self_adjoint() { + assert!(is_self_adjoint(&z())); +} + +#[test] +fn swap_is_self_adjoint() { + assert!(is_self_adjoint(&swap())); +} + +#[test] +fn s_squares_to_z() { + assert_eq!(s().dot(&s()), z()); +} + +#[test] +fn t_squares_to_s() { + assert!(are_equal_to_precision(t().dot(&t()), s())); +} + +#[test] +fn rx_pi_is_x() { + assert!(are_equal_to_precision(Complex64::i() * rx(PI), x())); +} + +#[test] +fn ry_pi_is_y() { + assert!(are_equal_to_precision(Complex64::i() * ry(PI), y())); +} + +#[test] +fn rz_pi_is_z() { + assert!(are_equal_to_precision(Complex64::i() * rz(PI), z())); +} + +#[test] +fn gate_multiplication() { + assert!(are_equal_to_precision(x().dot(&y()), Complex64::i() * z())); +} + +#[test] +fn controlled_extension() { + fn cnot() -> 
Array2 { + array![ + [ + Complex64::one(), + Complex64::zero(), + Complex64::zero(), + Complex64::zero() + ], + [ + Complex64::zero(), + Complex64::one(), + Complex64::zero(), + Complex64::zero() + ], + [ + Complex64::zero(), + Complex64::zero(), + Complex64::zero(), + Complex64::one() + ], + [ + Complex64::zero(), + Complex64::zero(), + Complex64::one(), + Complex64::zero() + ] + ] + } + assert!(are_equal_to_precision(controlled(&x(), 1), cnot())); + assert!(are_equal_to_precision( + controlled(&x(), 2), + controlled(&cnot(), 1) + )); + assert_eq!(controlled(&x(), 3).nrows(), 2_usize.pow(4)); +} + +/// Utility for testing operation equivalence. +fn assert_operation_equal_referenced(mut op: F1, mut reference: F2, count: usize) +where + F1: FnMut(&mut QuantumSim, &[usize]), + F2: FnMut(&mut QuantumSim, &[usize]), +{ + let mut sim = QuantumSim::default(); + + // Allocate the controls we use to verify behavior. + // Allocate the requested number of targets, entangling the control with them. + let mut ctls = vec![]; + let mut qs = vec![]; + for _ in 0..count { + let ctl = sim.allocate(); + let q = sim.allocate(); + sim.h(ctl); + sim.mcx(&[ctl], q); + qs.push(q); + ctls.push(ctl); + } + + op(&mut sim, &qs); + reference(&mut sim, &qs); + + // Undo the entanglement. + for (q, ctl) in qs.iter().zip(&ctls) { + sim.mcx(&[*ctl], *q); + sim.h(*ctl); + } + + println!("{}", sim.dump()); + + // We know the operations are equal if the qubits are left in the zero state. + for (q, ctl) in qs.iter().zip(&ctls) { + assert!(sim.joint_probability(&[*q]).is_nearly_zero()); + assert!(sim.joint_probability(&[*ctl]).is_nearly_zero()); + } + + // Sparse state vector should have one entry for |0⟩. + assert_eq!(sim.state.len(), 1); + // If the operations are equal including the phase, the entry should be 1. 
+ assert!( + (sim.state + .first() + .expect("state should have at least one entry") + .1 + - Complex64::one()) + .is_nearly_zero() + ); +} + +#[test] +fn test_h() { + assert_operation_equal_referenced( + |sim, qs| { + sim.h(qs[0]); + }, + |sim, qs| { + sim.apply(&h(), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_x() { + assert_operation_equal_referenced( + |sim, qs| { + sim.x(qs[0]); + }, + |sim, qs| { + sim.apply(&x(), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_y() { + assert_operation_equal_referenced( + |sim, qs| { + sim.y(qs[0]); + }, + |sim, qs| { + sim.apply(&y(), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_z() { + assert_operation_equal_referenced( + |sim, qs| { + sim.z(qs[0]); + }, + |sim, qs| { + sim.apply(&z(), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_s() { + assert_operation_equal_referenced( + |sim, qs| { + sim.s(qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&s()), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_sadj() { + assert_operation_equal_referenced( + |sim, qs| { + sim.sadj(qs[0]); + }, + |sim, qs| { + sim.apply(&s(), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_cx() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcx(&[qs[0]], qs[1]); + }, + |sim, qs| { + sim.apply(&x(), &[qs[1]], Some(&[qs[0]])); + }, + 2, + ); +} + +#[test] +fn test_cz() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcz(&[qs[0]], qs[1]); + }, + |sim, qs| { + sim.apply(&z(), &[qs[1]], Some(&[qs[0]])); + }, + 2, + ); +} + +#[test] +fn test_swap() { + assert_operation_equal_referenced( + |sim, qs| { + sim.swap_qubit_ids(qs[0], qs[1]); + }, + |sim, qs| { + sim.apply(&swap(), &[qs[0], qs[1]], None); + }, + 2, + ); +} + +#[test] +fn test_rz() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rz(PI / 7.0, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rz(PI / 7.0)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rz_pi() { + assert_operation_equal_referenced( + |sim, qs| { + 
sim.rz(PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rz(PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(PI / 7.0, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(PI / 7.0)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx_pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx_2pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(2.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(2.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx_zero() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(0.0, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(0.0)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx_3pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(3.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(3.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_rx_4pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(4.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&rx(4.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(PI / 7.0, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&ry(PI / 7.0)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry_pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&ry(PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry_2pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(2.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&ry(2.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry_zero() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(0.0, qs[0]); + }, + |sim, qs| { + 
sim.apply(&adjoint(&ry(0.0)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry_3pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(3.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&ry(3.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_ry_4pi() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(4.0 * PI, qs[0]); + }, + |sim, qs| { + sim.apply(&adjoint(&ry(4.0 * PI)), &[qs[0]], None); + }, + 1, + ); +} + +#[test] +fn test_mcri() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcphase( + &qs[2..3], + Complex64::exp(Complex64::new(0.0, -(PI / 7.0) / 2.0)), + qs[1], + ); + }, + |sim, qs| { + sim.apply(&adjoint(&g(PI / 7.0)), &[qs[1]], Some(&qs[2..3])); + }, + 3, + ); +} + +#[test] +fn test_apply_four_qubit_unitary() { + let mut sim = QuantumSim::default(); + let qs: Vec = (0..4).map(|_| sim.allocate()).collect(); + + let mut unitary = Array2::eye(16); + unitary.swap((0, 0), (0, 1)); + unitary.swap((1, 0), (1, 1)); + + sim.apply(&unitary, &qs, None); + + assert!(sim.qubit_is_zero(qs[0])); + assert!(sim.qubit_is_zero(qs[1])); + assert!(sim.qubit_is_zero(qs[2])); + assert!((sim.joint_probability(&[qs[3]]) - 1.0).is_nearly_zero()); +} diff --git a/source/simulators/src/quantum_sparse_sim/nearly_zero.rs b/source/simulators/src/quantum_sparse_sim/nearly_zero.rs new file mode 100644 index 0000000000..7f3a4a9e79 --- /dev/null +++ b/source/simulators/src/quantum_sparse_sim/nearly_zero.rs @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use num_complex::Complex; + +/// `NearlyZero` trait allows for approximate evaluation of a value to the additive identity. 
+pub trait NearlyZero { + fn is_nearly_zero(&self) -> bool; +} + +impl NearlyZero for f64 { + fn is_nearly_zero(&self) -> bool { + self.max(0.0) - 0.0_f64.min(*self) <= 1e-10 + } +} + +impl NearlyZero for Complex +where + T: NearlyZero, +{ + fn is_nearly_zero(&self) -> bool { + self.re.is_nearly_zero() && self.im.is_nearly_zero() + } +} diff --git a/source/simulators/src/quantum_sparse_sim/tests.rs b/source/simulators/src/quantum_sparse_sim/tests.rs new file mode 100644 index 0000000000..5918b22748 --- /dev/null +++ b/source/simulators/src/quantum_sparse_sim/tests.rs @@ -0,0 +1,723 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +use super::*; +use std::f64::consts::PI; + +fn almost_equal(a: f64, b: f64) -> bool { + a.max(b) - b.min(a) <= 1e-10 +} + +// Test that basic allocation and release of qubits doesn't fail. +#[test] +fn test_alloc_release() { + let sim = &mut QuantumSim::default(); + for i in 0..16 { + assert_eq!(sim.allocate(), i); + } + sim.release(4); + sim.release(7); + sim.release(12); + assert_eq!(sim.allocate(), 4); + for i in 0..7 { + sim.release(i); + } + for i in 8..12 { + sim.release(i); + } + for i in 13..16 { + sim.release(i); + } +} + +/// Verifies that application of gates to a qubit results in the correct probabilities. 
+#[test] +fn test_probability() { + let mut sim = QuantumSim::default(); + let q = sim.allocate(); + let extra = sim.allocate(); + assert!(almost_equal(0.0, sim.joint_probability(&[q]))); + sim.x(q); + assert!(almost_equal(1.0, sim.joint_probability(&[q]))); + sim.x(q); + assert!(almost_equal(0.0, sim.joint_probability(&[q]))); + sim.h(q); + assert!(almost_equal(0.5, sim.joint_probability(&[q]))); + sim.h(q); + assert!(almost_equal(0.0, sim.joint_probability(&[q]))); + sim.x(q); + sim.h(q); + sim.s(q); + assert!(almost_equal(0.5, sim.joint_probability(&[q]))); + sim.sadj(q); + sim.h(q); + sim.x(q); + assert!(almost_equal(0.0, sim.joint_probability(&[q]))); + sim.release(extra); + sim.release(q); +} + +/// Verify that a qubit in superposition has probability corresponding the measured value and +/// can be operationally reset back into the ground state. +#[test] +fn test_measure() { + let mut sim = QuantumSim::default(); + let q = sim.allocate(); + let extra = sim.allocate(); + assert!(!sim.measure(q)); + sim.x(q); + assert!(sim.measure(q)); + let mut res = false; + while !res { + sim.h(q); + res = sim.measure(q); + assert!(almost_equal( + sim.joint_probability(&[q]), + if res { 1.0 } else { 0.0 } + )); + if res { + sim.x(q); + } + } + assert!(almost_equal(sim.joint_probability(&[q]), 0.0)); + sim.release(extra); + sim.release(q); +} + +// Verify that out of order release of non-zero qubits behaves as expected, namely qubits that +// are not released are still in the expected states, newly allocated qubits use the available spot +// and start in a zero state. +#[test] +fn test_out_of_order_release() { + let sim = &mut QuantumSim::default(); + for i in 0..5 { + assert_eq!(sim.allocate(), i); + sim.x(i); + } + + // Release out of order. + sim.release(3); + + // Remaining qubits should all still be in one. 
+ assert_eq!(sim.state.len(), 1); + assert!(!sim.joint_probability(&[0]).is_nearly_zero()); + assert!(!sim.joint_probability(&[1]).is_nearly_zero()); + assert!(!sim.joint_probability(&[2]).is_nearly_zero()); + assert!(!sim.joint_probability(&[4]).is_nearly_zero()); + + // Cheat and peak at the released location to make sure it has been zeroed out. + assert!(sim.check_joint_probability(&[3]).is_nearly_zero()); + + // Next allocation should be the empty spot, and it should be in zero state. + assert_eq!(sim.allocate(), 3); + assert!(sim.joint_probability(&[3]).is_nearly_zero()); + + for i in 0..5 { + sim.release(i); + } + assert_eq!(sim.state.len(), 1); +} + +/// Verify joint probability works as expected, namely that it corresponds to the parity of the +/// qubits. +#[test] +fn test_joint_probability() { + let mut sim = QuantumSim::default(); + let q0 = sim.allocate(); + let q1 = sim.allocate(); + assert!(almost_equal(0.0, sim.joint_probability(&[q0, q1]))); + sim.x(q0); + assert!(almost_equal(1.0, sim.joint_probability(&[q0, q1]))); + sim.x(q1); + assert!(almost_equal(0.0, sim.joint_probability(&[q0, q1]))); + assert!(almost_equal(1.0, sim.joint_probability(&[q0]))); + assert!(almost_equal(1.0, sim.joint_probability(&[q1]))); + sim.h(q0); + assert!(almost_equal(0.5, sim.joint_probability(&[q0, q1]))); + sim.release(q1); + sim.release(q0); +} + +/// Verify joint measurement works as expected, namely that it corresponds to the parity of the +/// qubits. 
+#[test] +fn test_joint_measurement() { + let mut sim = QuantumSim::default(); + let q0 = sim.allocate(); + let q1 = sim.allocate(); + assert!(!sim.joint_measure(&[q0, q1])); + sim.x(q0); + assert!(sim.joint_measure(&[q0, q1])); + sim.x(q1); + assert!(!sim.joint_measure(&[q0, q1])); + assert!(sim.joint_measure(&[q0])); + assert!(sim.joint_measure(&[q1])); + sim.h(q0); + let res = sim.joint_measure(&[q0, q1]); + assert!(almost_equal( + if res { 1.0 } else { 0.0 }, + sim.joint_probability(&[q0, q1]) + )); + sim.release(q1); + sim.release(q0); +} + +#[test] +fn test_force_collapse() { + let mut sim = QuantumSim::default(); + let q0 = sim.allocate(); + let q1 = sim.allocate(); + sim.h(q0); + sim.mcx(&[q0], q1); + assert!(almost_equal(0.5, sim.joint_probability(&[q0]))); + assert!(almost_equal(0.5, sim.joint_probability(&[q1]))); + sim.force_collapse(false, q0); + assert!(almost_equal(0.0, sim.joint_probability(&[q0]))); + assert!(almost_equal(0.0, sim.joint_probability(&[q1]))); + sim.release(q1); + sim.release(q0); +} + +#[test] +fn test_force_collapse_to_non_existent_state() { + let mut sim = QuantumSim::default(); + let q0 = sim.allocate(); + sim.x(q0); + assert!(almost_equal(1.0, sim.joint_probability(&[q0]))); + assert!(almost_equal(0.0, sim.force_collapse(false, q0))); + // The qubit should still be in the |1> state since the requested collapse state was not present, so the probability of measuring |1> should still be 1. + assert!(almost_equal(1.0, sim.joint_probability(&[q0]))); + sim.release(q0); +} + +/// Test multiple controls. 
+#[test] +fn test_multiple_controls() { + let mut sim = QuantumSim::default(); + let q0 = sim.allocate(); + let q1 = sim.allocate(); + let q2 = sim.allocate(); + assert!(almost_equal(0.0, sim.joint_probability(&[q0]))); + sim.h(q0); + assert!(almost_equal(0.5, sim.joint_probability(&[q0]))); + sim.h(q0); + assert!(almost_equal(0.0, sim.joint_probability(&[q0]))); + sim.mch(&[q1], q0); + assert!(almost_equal(0.0, sim.joint_probability(&[q0]))); + sim.x(q1); + sim.mch(&[q1], q0); + assert!(almost_equal(0.5, sim.joint_probability(&[q0]))); + sim.mch(&[q2, q1], q0); + assert!(almost_equal(0.5, sim.joint_probability(&[q0]))); + sim.x(q2); + sim.mch(&[q2, q1], q0); + assert!(almost_equal(0.0, sim.joint_probability(&[q0]))); + sim.x(q0); + sim.x(q1); + sim.release(q2); + sim.release(q1); + sim.release(q0); +} + +/// Verify that targets cannot be duplicated. +#[test] +#[should_panic(expected = "Duplicate qubit id '0' found in application.")] +fn test_duplicate_target() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + sim.mcx(&[q], q); + let _ = sim.dump(); +} + +/// Verify that controls cannot be duplicated. +#[test] +#[should_panic(expected = "Duplicate qubit id '1' found in application.")] +fn test_duplicate_control() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + let c = sim.allocate(); + sim.mcx(&[c, c], q); + let _ = sim.dump(); +} + +/// Verify that targets aren't in controls. +#[test] +#[should_panic(expected = "Duplicate qubit id '0' found in application.")] +fn test_target_in_control() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + let c = sim.allocate(); + sim.mcx(&[c, q], q); + let _ = sim.dump(); +} + +/// Large, entangled state handling. 
+#[test] +fn test_large_state() { + let mut sim = QuantumSim::new(None); + let ctl = sim.allocate(); + sim.h(ctl); + for _ in 0..4999 { + let q = sim.allocate(); + sim.mcx(&[ctl], q); + } + let _ = sim.measure(ctl); + for i in 0..5000 { + sim.release(i); + } +} + +/// Verify seeded RNG is predictable. +#[test] +fn test_seeded_rng() { + let mut sim = QuantumSim::new(None); + sim.set_rng_seed(42); + let q = sim.allocate(); + let mut val1 = 0_u64; + for i in 0..64 { + sim.h(q); + if sim.measure(q) { + val1 += 1 << i; + } + } + let mut sim = QuantumSim::new(None); + sim.set_rng_seed(42); + let q = sim.allocate(); + let mut val2 = 0_u64; + for i in 0..64 { + sim.h(q); + if sim.measure(q) { + val2 += 1 << i; + } + } + assert_eq!(val1, val2); +} + +/// Verify that dump after swap on released qubits doesn't crash. +#[test] +fn test_swap_dump() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + let inner_q = sim.allocate(); + sim.swap_qubit_ids(q, inner_q); + sim.release(inner_q); + println!("{}", sim.dump()); +} + +/// Verify that swap preserves queued rotations. +#[test] +fn test_swap_rotations() { + let mut sim = QuantumSim::new(None); + let (q1, q2) = (sim.allocate(), sim.allocate()); + sim.rx(PI / 7.0, q1); + sim.ry(PI / 7.0, q2); + sim.swap_qubit_ids(q1, q2); + sim.rx(-PI / 7.0, q2); + sim.ry(-PI / 7.0, q1); + assert!(sim.joint_probability(&[q1]).is_nearly_zero()); + assert!(sim.joint_probability(&[q2]).is_nearly_zero()); +} + +/// Verify that two queued Rx rotations that sum to zero are treated as +/// a no-op. +#[test] +fn test_rx_queue_nearly_zero() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + sim.rx(PI / 4.0, q); + assert_eq!(sim.state.len(), 1); + sim.rx(-PI / 4.0, q); + assert_eq!(sim.state.len(), 1); + assert!(sim.joint_probability(&[q]).is_nearly_zero()); +} + +/// Verify that two queued Ry rotations that sum to zero are treated as +/// a no-op. 
+#[test] +fn test_ry_queue_nearly_zero() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + sim.ry(PI / 4.0, q); + assert_eq!(sim.state.len(), 1); + sim.ry(-PI / 4.0, q); + assert_eq!(sim.state.len(), 1); + assert!(sim.joint_probability(&[q]).is_nearly_zero()); +} + +/// Verifies that an Rx rotation by PI, which becomes an X gate, is correctly flushed. +#[test] +fn test_rx_pi_flushed() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + sim.rx(PI, q); + assert!(almost_equal( + sim.joint_probability(&[q]), + sim.joint_probability(&[q]) + )); + assert!(!sim.joint_probability(&[q]).is_nearly_zero()); +} + +/// Verifies that an Ry rotation by PI, which becomes an Y gate, is correctly flushed. +#[test] +fn test_ry_pi_flushed() { + let mut sim = QuantumSim::new(None); + let q = sim.allocate(); + sim.ry(PI, q); + assert!(almost_equal( + sim.joint_probability(&[q]), + sim.joint_probability(&[q]) + )); + assert!(!sim.joint_probability(&[q]).is_nearly_zero()); +} + +/// Verifies that when a controlled Ry(PI) is recognized as equivalent to a +/// controlled -iY (and handed as such), the state vector is not corrupted +#[test] +fn test_mcry_pi() { + let mut sim = QuantumSim::new(None); + let q1 = sim.allocate(); + let q2 = sim.allocate(); + sim.h(q1); + sim.x(q1); + sim.mcry(&[q1], PI, q2); + sim.x(q1); + // Expected result is an equal superposition of |01⟩ and |10⟩ + assert!(almost_equal(sim.joint_probability(&[q1, q2]), 1.0)); +} + +/// Verifies that when a controlled Ry(2*PI) is recognized as equivalent to a +/// controlled -I (and handed as such), the state vector is not corrupted +#[test] +fn test_mcry_2pi() { + let mut sim = QuantumSim::new(None); + let q1 = sim.allocate(); + let q2 = sim.allocate(); + sim.h(q1); + sim.mcry(&[q1], 2.0 * PI, q2); + sim.h(q1); + // Expected result is |10⟩ because CRy(2pi) = Z ⊗ I, so conjugating + // with Hadamards on the left makes it equivalent to a bit flip X ⊗ I + 
assert!(almost_equal(sim.joint_probability(&[q1, q2]), 1.0)); +} + +/// Utility for testing operation equivalence. +fn assert_operation_equal_referenced(mut op: F1, mut reference: F2, count: usize) +where + F1: FnMut(&mut QuantumSim, &[usize]), + F2: FnMut(&mut QuantumSim, &[usize]), +{ + enum QueuedOp { + NoOp, + H, + Rx, + Ry, + } + + for inner_op in [QueuedOp::NoOp, QueuedOp::H, QueuedOp::Rx, QueuedOp::Ry] { + let mut sim = QuantumSim::default(); + + // Allocte the control we use to verify behavior. + let ctl = sim.allocate(); + sim.h(ctl); + + // Allocate the requested number of targets, entangling the control with them. + let mut qs = vec![]; + for _ in 0..count { + let q = sim.allocate(); + sim.mcx(&[ctl], q); + qs.push(q); + } + + // To test queuing, try the op after running each of the different intermediate operations that + // can be queued. + match inner_op { + QueuedOp::NoOp => (), + QueuedOp::H => { + for &q in &qs { + sim.h(q); + } + } + QueuedOp::Rx => { + for &q in &qs { + sim.rx(PI / 7.0, q); + } + } + QueuedOp::Ry => { + for &q in &qs { + sim.ry(PI / 7.0, q); + } + } + } + + op(&mut sim, &qs); + + // Trigger a flush between the op and expected adjoint reference to ensure the reference is + // run without any queued, commuted operations. + let _ = sim.joint_probability(&qs); + + reference(&mut sim, &qs); + + // Perform the adjoint of any additional ops. We check the joint probability of the target + // qubits before and after to force a flush of the operation queue. This helps us verify queuing, as the + // original operation will have used the queue and commuting while the adjoint perform here will not. + let _ = sim.joint_probability(&qs); + match inner_op { + QueuedOp::NoOp => (), + QueuedOp::H => { + for &q in &qs { + sim.h(q); + } + } + QueuedOp::Rx => { + for &q in &qs { + sim.rx(PI / -7.0, q); + } + } + QueuedOp::Ry => { + for &q in &qs { + sim.ry(PI / -7.0, q); + } + } + } + let _ = sim.joint_probability(&qs); + + // Undo the entanglement. 
+ for q in &qs { + sim.mcx(&[ctl], *q); + } + sim.h(ctl); + + // We know the operations are equal if the qubits are left in the zero state. + assert!(sim.joint_probability(&[ctl]).is_nearly_zero()); + for q in qs { + assert!(sim.joint_probability(&[q]).is_nearly_zero()); + } + + // Sparse state vector should have one entry for |0⟩. + // Dump the state first to force a flush of any queued operations. + println!("{}", sim.dump()); + assert_eq!(sim.state.len(), 1); + } +} + +#[test] +fn test_h() { + assert_operation_equal_referenced( + |sim, qs| { + sim.h(qs[0]); + }, + |sim, qs| { + sim.h(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_x() { + assert_operation_equal_referenced( + |sim, qs| { + sim.x(qs[0]); + }, + |sim, qs| { + sim.x(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_y() { + assert_operation_equal_referenced( + |sim, qs| { + sim.y(qs[0]); + }, + |sim, qs| { + sim.y(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_z() { + assert_operation_equal_referenced( + |sim, qs| { + sim.z(qs[0]); + }, + |sim, qs| { + sim.z(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_s() { + assert_operation_equal_referenced( + |sim, qs| { + sim.s(qs[0]); + }, + |sim, qs| { + sim.sadj(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_sadj() { + assert_operation_equal_referenced( + |sim, qs| { + sim.sadj(qs[0]); + }, + |sim, qs| { + sim.s(qs[0]); + }, + 1, + ); +} + +#[test] +fn test_cx() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcx(&[qs[0]], qs[1]); + }, + |sim, qs| { + sim.mcx(&[qs[0]], qs[1]); + }, + 2, + ); +} + +#[test] +fn test_cz() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcz(&[qs[0]], qs[1]); + }, + |sim, qs| { + sim.mcz(&[qs[0]], qs[1]); + }, + 2, + ); +} + +#[test] +fn test_swap() { + assert_operation_equal_referenced( + |sim, qs| { + sim.swap_qubit_ids(qs[0], qs[1]); + }, + |sim, qs| { + sim.swap_qubit_ids(qs[0], qs[1]); + }, + 2, + ); +} + +#[test] +fn test_rz() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rz(PI / 7.0, qs[0]); + }, 
+ |sim, qs| { + sim.rz(-PI / 7.0, qs[0]); + }, + 1, + ); +} + +#[test] +fn test_rx() { + assert_operation_equal_referenced( + |sim, qs| { + sim.rx(PI / 7.0, qs[0]); + }, + |sim, qs| { + sim.rx(-PI / 7.0, qs[0]); + }, + 1, + ); +} + +#[test] +fn test_ry() { + assert_operation_equal_referenced( + |sim, qs| { + sim.ry(PI / 7.0, qs[0]); + }, + |sim, qs| { + sim.ry(-PI / 7.0, qs[0]); + }, + 1, + ); +} + +#[test] +fn test_mcri() { + assert_operation_equal_referenced( + |sim, qs| { + sim.mcphase( + &qs[2..3], + Complex64::exp(Complex64::new(0.0, -(PI / 7.0) / 2.0)), + qs[1], + ); + }, + |sim, qs| { + sim.mcphase( + &qs[2..3], + Complex64::exp(Complex64::new(0.0, (PI / 7.0) / 2.0)), + qs[1], + ); + }, + 3, + ); +} + +#[test] +fn test_op_queue_flushes_at_limit() { + let mut sim = QuantumSim::default(); + let q = sim.allocate(); + for _ in 0..10_002 { + sim.x(q); + } + assert_eq!(sim.op_queue.len(), 2); + assert_eq!(sim.state.len(), 1); +} + +#[test] +fn test_cx_after_h_ry_executes_queued_operations_in_order() { + assert_operation_equal_referenced( + |sim, qs| { + sim.h(qs[0]); + sim.ry(PI, qs[0]); + sim.h(qs[1]); + sim.mcx(&[qs[1]], qs[0]); + }, + |sim, qs| { + sim.mcx(&[qs[1]], qs[0]); + sim.h(qs[1]); + sim.ry(-PI, qs[0]); + sim.h(qs[0]); + }, + 2, + ); +} + +#[test] +fn test_global_phase_dropped_when_all_qubits_released() { + let mut sim = QuantumSim::default(); + let q = sim.allocate(); + sim.x(q); + sim.z(q); + sim.release(q); + let _ = sim.allocate(); + let (state, count) = sim.get_state(); + assert_eq!(count, 1); + assert_eq!(state.len(), 1); + let (index, value) = state.first().expect("state should have at least one entry"); + assert_eq!(index, &BigUint::zero()); + assert_eq!(value, &Complex64::one()); +} From 91592e9b19b68429ed2c0418b00fb67b9ce1bb6f Mon Sep 17 00:00:00 2001 From: "Stefan J. 
Wernli" Date: Tue, 21 Apr 2026 11:25:22 -0700 Subject: [PATCH 2/2] Delete leftover file, rename create and struct --- .../qsc_data_structures/src/index_map.rs | 316 ------------------ source/compiler/qsc_eval/src/backend.rs | 8 +- source/simulators/src/lib.rs | 6 +- ...parse_sim.rs => sparse_state_simulator.rs} | 28 +- .../matrix_testing.rs | 10 +- .../nearly_zero.rs | 0 .../tests.rs | 56 ++-- 7 files changed, 54 insertions(+), 370 deletions(-) delete mode 100644 source/compiler/qsc_data_structures/src/index_map.rs rename source/simulators/src/{quantum_sparse_sim.rs => sparse_state_simulator.rs} (98%) rename source/simulators/src/{quantum_sparse_sim => sparse_state_simulator}/matrix_testing.rs (98%) rename source/simulators/src/{quantum_sparse_sim => sparse_state_simulator}/nearly_zero.rs (100%) rename source/simulators/src/{quantum_sparse_sim => sparse_state_simulator}/tests.rs (93%) diff --git a/source/compiler/qsc_data_structures/src/index_map.rs b/source/compiler/qsc_data_structures/src/index_map.rs deleted file mode 100644 index b9c70f81a3..0000000000 --- a/source/compiler/qsc_data_structures/src/index_map.rs +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright (c) Microsoft Corporation. -// Licensed under the MIT License. 
- -use std::{ - fmt::{self, Debug, Formatter}, - iter::Enumerate, - marker::PhantomData, - option::Option, - slice, vec, -}; - -pub struct IndexMap { - _keys: PhantomData, - values: Vec>, -} - -impl IndexMap -where - K: Into, - V: Default, -{ - pub fn get_mut_or_default(&mut self, key: K) -> &mut V { - let index: usize = key.into(); - if index >= self.values.len() { - self.values.resize_with(index + 1, Option::default); - } - self.values - .get_mut(index) - .expect("IndexMap::get_mut_or_default: index out of bounds") - .get_or_insert_with(Default::default) - } -} - -impl IndexMap { - #[must_use] - pub fn new() -> Self { - Self::default() - } - - #[must_use] - pub fn is_empty(&self) -> bool { - self.values.iter().all(Option::is_none) - } - - // `Iter` does implement `Iterator`, but it has an additional bound on `K`. - #[allow(clippy::iter_not_returning_iterator)] - #[must_use] - pub fn iter(&self) -> Iter<'_, K, V> { - Iter { - _keys: PhantomData, - base: self.values.iter().enumerate(), - } - } - - // `Iter` does implement `Iterator`, but it has an additional bound on `K`. 
- #[allow(clippy::iter_not_returning_iterator)] - pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { - IterMut { - _keys: PhantomData, - base: self.values.iter_mut().enumerate(), - } - } - - pub fn drain(&mut self) -> Drain<'_, K, V> { - Drain { - _keys: PhantomData, - base: self.values.drain(..).enumerate(), - } - } - - #[must_use] - pub fn values(&self) -> Values<'_, V> { - Values { - base: self.values.iter(), - } - } - - pub fn values_mut(&mut self) -> ValuesMut<'_, V> { - ValuesMut { - base: self.values.iter_mut(), - } - } - - pub fn retain(&mut self, mut f: F) - where - F: FnMut(K, &V) -> bool, - K: From, - { - for (k, v) in self.values.iter_mut().enumerate() { - let remove = if let Some(value) = v { - !f(K::from(k), value) - } else { - false - }; - if remove { - *v = None; - } - } - } - - pub fn clear(&mut self) { - self.values.clear(); - } -} - -impl, V> IndexMap { - pub fn insert(&mut self, key: K, value: V) { - let index = key.into(); - if index >= self.values.len() { - self.values.resize_with(index + 1, || None); - } - self.values[index] = Some(value); - } - - pub fn contains_key(&self, key: K) -> bool { - let index: usize = key.into(); - self.values.get(index).is_some_and(Option::is_some) - } - - pub fn get(&self, key: K) -> Option<&V> { - let index: usize = key.into(); - self.values.get(index).and_then(Option::as_ref) - } - - pub fn get_mut(&mut self, key: K) -> Option<&mut V> { - let index: usize = key.into(); - self.values.get_mut(index).and_then(Option::as_mut) - } - - pub fn remove(&mut self, key: K) { - let index: usize = key.into(); - if index < self.values.len() { - self.values[index] = None; - } - } -} - -impl Clone for IndexMap { - fn clone(&self) -> Self { - Self { - _keys: PhantomData, - values: self.values.clone(), - } - } -} - -impl Debug for IndexMap { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - f.debug_struct("IndexMap") - .field( - "values", - &self - .values - .iter() - .enumerate() - .filter_map(|(k, v)| v.as_ref().map(|val| 
format!("{k:?}: {val:?}"))) - .collect::>(), - ) - .finish() - } -} - -impl Default for IndexMap { - fn default() -> Self { - Self { - _keys: PhantomData, - values: Vec::default(), - } - } -} - -impl, V> IntoIterator for IndexMap { - type Item = (K, V); - - type IntoIter = IntoIter; - - fn into_iter(self) -> Self::IntoIter { - IntoIter { - _keys: PhantomData, - base: self.values.into_iter().enumerate(), - } - } -} - -impl<'a, K: From, V> IntoIterator for &'a IndexMap { - type Item = (K, &'a V); - - type IntoIter = Iter<'a, K, V>; - - fn into_iter(self) -> Self::IntoIter { - self.iter() - } -} - -impl, V> FromIterator<(K, V)> for IndexMap { - fn from_iter>(iter: T) -> Self { - let iter = iter.into_iter(); - let mut map = Self::new(); - let (lo, hi) = iter.size_hint(); - map.values.reserve(hi.unwrap_or(lo)); - for (key, value) in iter { - map.insert(key, value); - } - map - } -} - -pub struct Iter<'a, K, V> { - _keys: PhantomData, - base: Enumerate>>, -} - -impl<'a, K: From, V> Iterator for Iter<'a, K, V> { - type Item = (K, &'a V); - - fn next(&mut self) -> Option { - loop { - if let (index, Some(value)) = self.base.next()? { - break Some((index.into(), value)); - } - } - } -} - -pub struct IterMut<'a, K, V> { - _keys: PhantomData, - base: Enumerate>>, -} - -impl, V> DoubleEndedIterator for Iter<'_, K, V> { - fn next_back(&mut self) -> Option { - loop { - if let (index, Some(value)) = self.base.next_back()? { - break Some((index.into(), value)); - } - } - } -} - -impl<'a, K: From, V> Iterator for IterMut<'a, K, V> { - type Item = (K, &'a mut V); - - fn next(&mut self) -> Option { - loop { - if let (index, Some(value)) = self.base.next()? { - break Some((index.into(), value)); - } - } - } -} - -pub struct IntoIter { - _keys: PhantomData, - base: Enumerate>>, -} - -impl, V> Iterator for IntoIter { - type Item = (K, V); - - fn next(&mut self) -> Option { - loop { - if let (index, Some(value)) = self.base.next()? 
{ - break Some((index.into(), value)); - } - } - } -} - -pub struct Drain<'a, K, V> { - _keys: PhantomData, - base: Enumerate>>, -} - -impl, V> Iterator for Drain<'_, K, V> { - type Item = (K, V); - - fn next(&mut self) -> Option { - loop { - if let (index, Some(value)) = self.base.next()? { - break Some((index.into(), value)); - } - } - } -} - -pub struct Values<'a, V> { - base: slice::Iter<'a, Option>, -} - -impl<'a, V> Iterator for Values<'a, V> { - type Item = &'a V; - - fn next(&mut self) -> Option { - loop { - if let Some(value) = self.base.next()? { - break Some(value); - } - } - } -} - -pub struct ValuesMut<'a, V> { - base: slice::IterMut<'a, Option>, -} - -impl<'a, V> Iterator for ValuesMut<'a, V> { - type Item = &'a mut V; - - fn next(&mut self) -> Option { - loop { - if let Some(value) = self.base.next()? { - break Some(value); - } - } - } -} diff --git a/source/compiler/qsc_eval/src/backend.rs b/source/compiler/qsc_eval/src/backend.rs index 3db62e0e0c..36f4a42536 100644 --- a/source/compiler/qsc_eval/src/backend.rs +++ b/source/compiler/qsc_eval/src/backend.rs @@ -8,7 +8,7 @@ use ndarray::Array2; use num_bigint::BigUint; use num_complex::Complex; use num_traits::Zero; -use qdk_simulators::QuantumSim; +use qdk_simulators::SparseStateSim; use qdk_simulators::cpu_full_state_simulator::noise::{Fault, PauliFault}; use qdk_simulators::noise_config::{CumulativeNoiseConfig, CumulativeNoiseTable}; use rand::{Rng, RngCore}; @@ -503,7 +503,7 @@ impl SequentialAllocator { /// Default backend used when targeting sparse simulation. pub struct SparseSim { /// Noiseless Sparse simulator to be used by this instance. - pub sim: QuantumSim, + pub sim: SparseStateSim, /// Noise configuration for this simulator instance, which defines the probabilities of different faults occurring during simulation. pub noise_config: Option>, /// Pauli noise that is applied after a gate or before a measurement is executed. 
@@ -530,7 +530,7 @@ impl SparseSim { #[must_use] pub fn new() -> Self { Self { - sim: QuantumSim::new(None), + sim: SparseStateSim::new(None), noise_config: None, noise: PauliNoise::default(), loss: f64::zero(), @@ -549,7 +549,7 @@ impl SparseSim { #[must_use] pub fn new_with_noise_config(noise_config: CumulativeNoiseConfig) -> Self { Self { - sim: QuantumSim::new(None), + sim: SparseStateSim::new(None), noise_config: Some(noise_config), noise: PauliNoise::default(), loss: f64::zero(), diff --git a/source/simulators/src/lib.rs b/source/simulators/src/lib.rs index bb68461f36..baaa9990c4 100644 --- a/source/simulators/src/lib.rs +++ b/source/simulators/src/lib.rs @@ -5,12 +5,12 @@ pub mod bytecode; pub mod cpu_full_state_simulator; mod gpu_full_state_simulator; pub mod noise_config; -pub mod quantum_sparse_sim; +pub mod sparse_state_simulator; pub mod stabilizer_simulator; pub use gpu_full_state_simulator::*; -pub use quantum_sparse_sim::QuantumSim; -pub use quantum_sparse_sim::nearly_zero::NearlyZero; +pub use sparse_state_simulator::SparseStateSim; +pub use sparse_state_simulator::nearly_zero::NearlyZero; /// A qubit ID. pub type QubitID = usize; diff --git a/source/simulators/src/quantum_sparse_sim.rs b/source/simulators/src/sparse_state_simulator.rs similarity index 98% rename from source/simulators/src/quantum_sparse_sim.rs rename to source/simulators/src/sparse_state_simulator.rs index 5789ed4b71..3b9a7b1039 100644 --- a/source/simulators/src/quantum_sparse_sim.rs +++ b/source/simulators/src/sparse_state_simulator.rs @@ -28,9 +28,9 @@ type SparseStateMap = FxHashMap; const QUEUE_LIMIT: usize = 10_000; const DEFAULT_INITIAL_SIZE: usize = 50; -/// The `QuantumSim` struct contains the necessary state for tracking the simulation. Each instance of a -/// `QuantumSim` represents an independant simulation. -pub struct QuantumSim { +/// The `SparseStateSim` struct contains the necessary state for tracking the simulation. 
Each instance of a +/// `SparseStateSim` represents an independant simulation. +pub struct SparseStateSim { /// The structure that describes the current quantum state. pub(crate) state: SparseState, @@ -74,20 +74,20 @@ pub(crate) enum FlushLevel { HRxRy, } -impl Default for QuantumSim { +impl Default for SparseStateSim { fn default() -> Self { Self::new(None) } } /// Provides the common set of functionality across all quantum simulation types. -impl QuantumSim { +impl SparseStateSim { /// Creates a new sparse state quantum simulator object with empty initial state (no qubits allocated, no operations buffered). #[must_use] pub fn new(rng: Option) -> Self { let initial_state = vec![(BigUint::zero(), Complex64::one())]; - QuantumSim { + SparseStateSim { state: initial_state, id_map: IndexMap::with_capacity(DEFAULT_INITIAL_SIZE), rng: RefCell::new(rng.unwrap_or_else(StdRng::from_entropy)), @@ -1299,15 +1299,15 @@ fn apply_ops( for (ctls, target, op) in ops { if ctls.iter().all(|c| index.bit(*c)) { match op { - OpCode::X => QuantumSim::x_transform((index, amplitude), *target), - OpCode::Y => QuantumSim::y_transform((index, amplitude), *target), - OpCode::Z => QuantumSim::z_transform((index, amplitude), *target), - OpCode::S => QuantumSim::s_transform((index, amplitude), *target), - OpCode::Sadj => QuantumSim::sadj_transform((index, amplitude), *target), - OpCode::T => QuantumSim::t_transform((index, amplitude), *target), - OpCode::Tadj => QuantumSim::tadj_transform((index, amplitude), *target), + OpCode::X => SparseStateSim::x_transform((index, amplitude), *target), + OpCode::Y => SparseStateSim::y_transform((index, amplitude), *target), + OpCode::Z => SparseStateSim::z_transform((index, amplitude), *target), + OpCode::S => SparseStateSim::s_transform((index, amplitude), *target), + OpCode::Sadj => SparseStateSim::sadj_transform((index, amplitude), *target), + OpCode::T => SparseStateSim::t_transform((index, amplitude), *target), + OpCode::Tadj => 
SparseStateSim::tadj_transform((index, amplitude), *target), OpCode::Rz(theta) => { - QuantumSim::rz_transform((index, amplitude), *theta, *target); + SparseStateSim::rz_transform((index, amplitude), *theta, *target); } } } diff --git a/source/simulators/src/quantum_sparse_sim/matrix_testing.rs b/source/simulators/src/sparse_state_simulator/matrix_testing.rs similarity index 98% rename from source/simulators/src/quantum_sparse_sim/matrix_testing.rs rename to source/simulators/src/sparse_state_simulator/matrix_testing.rs index 017da04a46..d00580f252 100644 --- a/source/simulators/src/quantum_sparse_sim/matrix_testing.rs +++ b/source/simulators/src/sparse_state_simulator/matrix_testing.rs @@ -7,7 +7,7 @@ use num_complex::Complex64; use num_traits::One; use num_traits::Zero; -use super::{QuantumSim, controlled, nearly_zero::NearlyZero}; +use super::{SparseStateSim, controlled, nearly_zero::NearlyZero}; use core::f64::consts::PI; /// Returns a unitary matrix representing the `X` operation. @@ -266,10 +266,10 @@ fn controlled_extension() { /// Utility for testing operation equivalence. fn assert_operation_equal_referenced(mut op: F1, mut reference: F2, count: usize) where - F1: FnMut(&mut QuantumSim, &[usize]), - F2: FnMut(&mut QuantumSim, &[usize]), + F1: FnMut(&mut SparseStateSim, &[usize]), + F2: FnMut(&mut SparseStateSim, &[usize]), { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); // Allocate the controls we use to verify behavior. // Allocate the requested number of targets, entangling the control with them. 
@@ -632,7 +632,7 @@ fn test_mcri() { #[test] fn test_apply_four_qubit_unitary() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let qs: Vec = (0..4).map(|_| sim.allocate()).collect(); let mut unitary = Array2::eye(16); diff --git a/source/simulators/src/quantum_sparse_sim/nearly_zero.rs b/source/simulators/src/sparse_state_simulator/nearly_zero.rs similarity index 100% rename from source/simulators/src/quantum_sparse_sim/nearly_zero.rs rename to source/simulators/src/sparse_state_simulator/nearly_zero.rs diff --git a/source/simulators/src/quantum_sparse_sim/tests.rs b/source/simulators/src/sparse_state_simulator/tests.rs similarity index 93% rename from source/simulators/src/quantum_sparse_sim/tests.rs rename to source/simulators/src/sparse_state_simulator/tests.rs index 5918b22748..c11d077f76 100644 --- a/source/simulators/src/quantum_sparse_sim/tests.rs +++ b/source/simulators/src/sparse_state_simulator/tests.rs @@ -10,7 +10,7 @@ fn almost_equal(a: f64, b: f64) -> bool { // Test that basic allocation and release of qubits doesn't fail. #[test] fn test_alloc_release() { - let sim = &mut QuantumSim::default(); + let sim = &mut SparseStateSim::default(); for i in 0..16 { assert_eq!(sim.allocate(), i); } @@ -32,7 +32,7 @@ fn test_alloc_release() { /// Verifies that application of gates to a qubit results in the correct probabilities. #[test] fn test_probability() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q = sim.allocate(); let extra = sim.allocate(); assert!(almost_equal(0.0, sim.joint_probability(&[q]))); @@ -60,7 +60,7 @@ fn test_probability() { /// can be operationally reset back into the ground state. #[test] fn test_measure() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q = sim.allocate(); let extra = sim.allocate(); assert!(!sim.measure(q)); @@ -88,7 +88,7 @@ fn test_measure() { // and start in a zero state. 
#[test] fn test_out_of_order_release() { - let sim = &mut QuantumSim::default(); + let sim = &mut SparseStateSim::default(); for i in 0..5 { assert_eq!(sim.allocate(), i); sim.x(i); @@ -121,7 +121,7 @@ fn test_out_of_order_release() { /// qubits. #[test] fn test_joint_probability() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q0 = sim.allocate(); let q1 = sim.allocate(); assert!(almost_equal(0.0, sim.joint_probability(&[q0, q1]))); @@ -141,7 +141,7 @@ fn test_joint_probability() { /// qubits. #[test] fn test_joint_measurement() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q0 = sim.allocate(); let q1 = sim.allocate(); assert!(!sim.joint_measure(&[q0, q1])); @@ -163,7 +163,7 @@ fn test_joint_measurement() { #[test] fn test_force_collapse() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q0 = sim.allocate(); let q1 = sim.allocate(); sim.h(q0); @@ -179,7 +179,7 @@ fn test_force_collapse() { #[test] fn test_force_collapse_to_non_existent_state() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q0 = sim.allocate(); sim.x(q0); assert!(almost_equal(1.0, sim.joint_probability(&[q0]))); @@ -192,7 +192,7 @@ fn test_force_collapse_to_non_existent_state() { /// Test multiple controls. 
#[test] fn test_multiple_controls() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q0 = sim.allocate(); let q1 = sim.allocate(); let q2 = sim.allocate(); @@ -222,7 +222,7 @@ fn test_multiple_controls() { #[test] #[should_panic(expected = "Duplicate qubit id '0' found in application.")] fn test_duplicate_target() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); sim.mcx(&[q], q); let _ = sim.dump(); @@ -232,7 +232,7 @@ fn test_duplicate_target() { #[test] #[should_panic(expected = "Duplicate qubit id '1' found in application.")] fn test_duplicate_control() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); let c = sim.allocate(); sim.mcx(&[c, c], q); @@ -243,7 +243,7 @@ fn test_duplicate_control() { #[test] #[should_panic(expected = "Duplicate qubit id '0' found in application.")] fn test_target_in_control() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); let c = sim.allocate(); sim.mcx(&[c, q], q); @@ -253,7 +253,7 @@ fn test_target_in_control() { /// Large, entangled state handling. #[test] fn test_large_state() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let ctl = sim.allocate(); sim.h(ctl); for _ in 0..4999 { @@ -269,7 +269,7 @@ fn test_large_state() { /// Verify seeded RNG is predictable. #[test] fn test_seeded_rng() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); sim.set_rng_seed(42); let q = sim.allocate(); let mut val1 = 0_u64; @@ -279,7 +279,7 @@ fn test_seeded_rng() { val1 += 1 << i; } } - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); sim.set_rng_seed(42); let q = sim.allocate(); let mut val2 = 0_u64; @@ -295,7 +295,7 @@ fn test_seeded_rng() { /// Verify that dump after swap on released qubits doesn't crash. 
#[test] fn test_swap_dump() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); let inner_q = sim.allocate(); sim.swap_qubit_ids(q, inner_q); @@ -306,7 +306,7 @@ fn test_swap_dump() { /// Verify that swap preserves queued rotations. #[test] fn test_swap_rotations() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let (q1, q2) = (sim.allocate(), sim.allocate()); sim.rx(PI / 7.0, q1); sim.ry(PI / 7.0, q2); @@ -321,7 +321,7 @@ fn test_swap_rotations() { /// a no-op. #[test] fn test_rx_queue_nearly_zero() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); sim.rx(PI / 4.0, q); assert_eq!(sim.state.len(), 1); @@ -334,7 +334,7 @@ fn test_rx_queue_nearly_zero() { /// a no-op. #[test] fn test_ry_queue_nearly_zero() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); sim.ry(PI / 4.0, q); assert_eq!(sim.state.len(), 1); @@ -346,7 +346,7 @@ fn test_ry_queue_nearly_zero() { /// Verifies that an Rx rotation by PI, which becomes an X gate, is correctly flushed. #[test] fn test_rx_pi_flushed() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); sim.rx(PI, q); assert!(almost_equal( @@ -359,7 +359,7 @@ fn test_rx_pi_flushed() { /// Verifies that an Ry rotation by PI, which becomes an Y gate, is correctly flushed. 
#[test] fn test_ry_pi_flushed() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q = sim.allocate(); sim.ry(PI, q); assert!(almost_equal( @@ -373,7 +373,7 @@ fn test_ry_pi_flushed() { /// controlled -iY (and handed as such), the state vector is not corrupted #[test] fn test_mcry_pi() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q1 = sim.allocate(); let q2 = sim.allocate(); sim.h(q1); @@ -388,7 +388,7 @@ fn test_mcry_pi() { /// controlled -I (and handed as such), the state vector is not corrupted #[test] fn test_mcry_2pi() { - let mut sim = QuantumSim::new(None); + let mut sim = SparseStateSim::new(None); let q1 = sim.allocate(); let q2 = sim.allocate(); sim.h(q1); @@ -402,8 +402,8 @@ fn test_mcry_2pi() { /// Utility for testing operation equivalence. fn assert_operation_equal_referenced(mut op: F1, mut reference: F2, count: usize) where - F1: FnMut(&mut QuantumSim, &[usize]), - F2: FnMut(&mut QuantumSim, &[usize]), + F1: FnMut(&mut SparseStateSim, &[usize]), + F2: FnMut(&mut SparseStateSim, &[usize]), { enum QueuedOp { NoOp, @@ -413,7 +413,7 @@ where } for inner_op in [QueuedOp::NoOp, QueuedOp::H, QueuedOp::Rx, QueuedOp::Ry] { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); // Allocte the control we use to verify behavior. let ctl = sim.allocate(); @@ -678,7 +678,7 @@ fn test_mcri() { #[test] fn test_op_queue_flushes_at_limit() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q = sim.allocate(); for _ in 0..10_002 { sim.x(q); @@ -708,7 +708,7 @@ fn test_cx_after_h_ry_executes_queued_operations_in_order() { #[test] fn test_global_phase_dropped_when_all_qubits_released() { - let mut sim = QuantumSim::default(); + let mut sim = SparseStateSim::default(); let q = sim.allocate(); sim.x(q); sim.z(q);