diff --git a/.github/labeler.yml b/.github/labeler.yml index 8e0c08c95c..dea78c8fb3 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -1,3 +1,3 @@ # Add/remove 'doc needed' label if issue/PR contains the line '- [x] This PR requires documentation updates when merged.' "doc needed": - - '- \[x\] This PR requires documentation updates when merged.' + - '- \[(x|X)\] This PR requires documentation updates when merged.' diff --git a/Cargo.lock b/Cargo.lock index 2cf58bb327..acc15e4fb8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,19 +4,20 @@ version = 3 [[package]] name = "acir" -version = "0.10.3" -source = "git+https://github.com/noir-lang/acvm?rev=46c645233228c35bc22b6eb5fb5d1d3c0a33d507#46c645233228c35bc22b6eb5fb5d1d3c0a33d507" +version = "0.11.0" +source = "git+https://github.com/noir-lang/acvm?rev=b248e606dd69c25d33ae77c5c5c0541adbf80cd6#b248e606dd69c25d33ae77c5c5c0541adbf80cd6" dependencies = [ "acir_field", "flate2", "rmp-serde", "serde", + "thiserror", ] [[package]] name = "acir_field" -version = "0.10.3" -source = "git+https://github.com/noir-lang/acvm?rev=46c645233228c35bc22b6eb5fb5d1d3c0a33d507#46c645233228c35bc22b6eb5fb5d1d3c0a33d507" +version = "0.11.0" +source = "git+https://github.com/noir-lang/acvm?rev=b248e606dd69c25d33ae77c5c5c0541adbf80cd6#b248e606dd69c25d33ae77c5c5c0541adbf80cd6" dependencies = [ "ark-bn254", "ark-ff", @@ -28,8 +29,8 @@ dependencies = [ [[package]] name = "acvm" -version = "0.10.3" -source = "git+https://github.com/noir-lang/acvm?rev=46c645233228c35bc22b6eb5fb5d1d3c0a33d507#46c645233228c35bc22b6eb5fb5d1d3c0a33d507" +version = "0.11.0" +source = "git+https://github.com/noir-lang/acvm?rev=b248e606dd69c25d33ae77c5c5c0541adbf80cd6#b248e606dd69c25d33ae77c5c5c0541adbf80cd6" dependencies = [ "acir", "acvm_stdlib", @@ -39,18 +40,18 @@ dependencies = [ "k256", "num-bigint", "num-traits", - "sha2 0.9.9", + "sha2 0.10.6", + "sha3", "thiserror", ] [[package]] name = "acvm-backend-barretenberg" -version = "0.0.0" -source = 
"git+https://github.com/noir-lang/aztec_backend?rev=148521f851d22a1411b8491905585d3c77e22ee1#148521f851d22a1411b8491905585d3c77e22ee1" +version = "0.1.2" +source = "git+https://github.com/noir-lang/acvm-backend-barretenberg?rev=030a7e7b9ba842f3d307dbab178962b63d0dedcf#030a7e7b9ba842f3d307dbab178962b63d0dedcf" dependencies = [ "acvm", "barretenberg-sys", - "blake2", "dirs 3.0.2", "futures-util", "getrandom", @@ -58,14 +59,15 @@ dependencies = [ "pkg-config", "reqwest", "rust-embed", + "thiserror", "tokio", "wasmer", ] [[package]] name = "acvm_stdlib" -version = "0.10.3" -source = "git+https://github.com/noir-lang/acvm?rev=46c645233228c35bc22b6eb5fb5d1d3c0a33d507#46c645233228c35bc22b6eb5fb5d1d3c0a33d507" +version = "0.11.0" +source = "git+https://github.com/noir-lang/acvm?rev=b248e606dd69c25d33ae77c5c5c0541adbf80cd6#b248e606dd69c25d33ae77c5c5c0541adbf80cd6" dependencies = [ "acir", ] @@ -367,13 +369,11 @@ dependencies = [ [[package]] name = "blake2" -version = "0.9.2" +version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a4e37d16930f5459780f5621038b6382b9bb37c19016f39fb6b5808d831f174" +checksum = "46502ad458c9a52b69d4d4d32775c788b7a1b85e8bc9d482d92250fc0e3f8efe" dependencies = [ - "crypto-mac 0.8.0", - "digest 0.9.0", - "opaque-debug", + "digest 0.10.6", ] [[package]] @@ -810,16 +810,6 @@ dependencies = [ "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b584a330336237c1eecd3e94266efb216c56ed91225d634cb2991c5f3fd1aeab" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "crypto-mac" version = "0.10.1" @@ -951,6 +941,7 @@ checksum = "8168378f4e5023e7218c89c891c0fd8ecdb5e5e4f18cb78f38cf245dd021e76f" dependencies = [ "block-buffer 0.10.4", "crypto-common", + "subtle", ] [[package]] @@ -1434,7 +1425,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"c1441c6b1e930e2817404b5046f1f989899143a12bf92de603b69f4e0aee1e15" dependencies = [ - "crypto-mac 0.10.1", + "crypto-mac", "digest 0.9.0", ] @@ -1672,6 +1663,15 @@ dependencies = [ "sha2 0.9.9", ] +[[package]] +name = "keccak" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +dependencies = [ + "cpufeatures", +] + [[package]] name = "lazy_static" version = "1.4.0" @@ -1960,6 +1960,7 @@ dependencies = [ "noirc_errors", "rustc-hash", "serde", + "small-ord-set", "smol_str", "strum", "strum_macros", @@ -2816,6 +2817,16 @@ dependencies = [ "digest 0.10.6", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.6", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.4" @@ -2865,6 +2876,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "small-ord-set" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf7035a2b2268a5be8c1395738565b06beda836097e12021cdefc06b127a0e7e" +dependencies = [ + "smallvec", +] + [[package]] name = "smallvec" version = "1.10.0" diff --git a/Cargo.toml b/Cargo.toml index 3c08a8804e..987737a065 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ edition = "2021" rust-version = "1.66" [workspace.dependencies] -acvm = "0.10.3" +acvm = "0.11.0" arena = { path = "crates/arena" } fm = { path = "crates/fm" } iter-extended = { path = "crates/iter-extended" } @@ -52,4 +52,5 @@ wasm-bindgen = { version = "0.2.83", features = ["serde-serialize"] } wasm-bindgen-test = "0.3.33" [patch.crates-io] -acvm = { package = "acvm", git = "https://github.com/noir-lang/acvm", rev = "46c645233228c35bc22b6eb5fb5d1d3c0a33d507" } +acvm = { package = "acvm", git = "https://github.com/noir-lang/acvm", rev = 
"b248e606dd69c25d33ae77c5c5c0541adbf80cd6" } +acvm-backend-barretenberg = { git = "https://github.com/noir-lang/acvm-backend-barretenberg", rev = "030a7e7b9ba842f3d307dbab178962b63d0dedcf" } diff --git a/README.md b/README.md index 1830e43016..f6977ff23e 100644 --- a/README.md +++ b/README.md @@ -6,7 +6,7 @@ Noir is a Domain Specific Language for SNARK proving systems. It has been design ## Quick Start -Read the installation section [here](https://noir-lang.org/getting_started/nargo/nargo_installation). +Read the installation section [here](https://noir-lang.org/getting_started/nargo_installation). Once you have read through the documentation, you can visit [Awesome Noir](https://github.com/noir-lang/awesome-noir) to run some of the examples that others have created. diff --git a/crates/nargo/src/ops/codegen_verifier.rs b/crates/nargo/src/ops/codegen_verifier.rs index ead125699b..2a0b54df86 100644 --- a/crates/nargo/src/ops/codegen_verifier.rs +++ b/crates/nargo/src/ops/codegen_verifier.rs @@ -1,10 +1,8 @@ use acvm::SmartContract; -use crate::NargoError; - -pub fn codegen_verifier( - backend: &impl SmartContract, +pub fn codegen_verifier( + backend: &B, verification_key: &[u8], -) -> Result { - Ok(backend.eth_contract_from_vk(verification_key)) +) -> Result { + backend.eth_contract_from_vk(verification_key) } diff --git a/crates/nargo/src/ops/execute.rs b/crates/nargo/src/ops/execute.rs index e9a2cf4a67..664b519e88 100644 --- a/crates/nargo/src/ops/execute.rs +++ b/crates/nargo/src/ops/execute.rs @@ -1,5 +1,6 @@ +use acvm::pwg::PartialWitnessGeneratorStatus; +use acvm::PartialWitnessGenerator; use acvm::{acir::circuit::Circuit, acir::native_types::WitnessMap, pwg::block::Blocks}; -use acvm::{PartialWitnessGenerator, PartialWitnessGeneratorStatus}; use crate::NargoError; @@ -9,7 +10,8 @@ pub fn execute_circuit( mut initial_witness: WitnessMap, ) -> Result { let mut blocks = Blocks::default(); - let solver_status = backend.solve(&mut initial_witness, &mut blocks, 
circuit.opcodes)?; + let solver_status = + acvm::pwg::solve(backend, &mut initial_witness, &mut blocks, circuit.opcodes)?; if matches!(solver_status, PartialWitnessGeneratorStatus::RequiresOracleData { .. }) { todo!("Add oracle support to nargo execute") } diff --git a/crates/nargo/src/ops/preprocess.rs b/crates/nargo/src/ops/preprocess.rs index f8d4eb5a82..3be8151f9c 100644 --- a/crates/nargo/src/ops/preprocess.rs +++ b/crates/nargo/src/ops/preprocess.rs @@ -1,26 +1,23 @@ use acvm::ProofSystemCompiler; -use iter_extended::vecmap; +use iter_extended::try_vecmap; use noirc_driver::{CompiledContract, CompiledProgram}; -use crate::{ - artifacts::{ - contract::{PreprocessedContract, PreprocessedContractFunction}, - program::PreprocessedProgram, - }, - NargoError, +use crate::artifacts::{ + contract::{PreprocessedContract, PreprocessedContractFunction}, + program::PreprocessedProgram, }; // TODO: pull this from backend. const BACKEND_IDENTIFIER: &str = "acvm-backend-barretenberg"; -pub fn preprocess_program( - backend: &impl ProofSystemCompiler, +pub fn preprocess_program( + backend: &B, compiled_program: CompiledProgram, -) -> Result { +) -> Result { // TODO: currently `compiled_program`'s bytecode is already optimized for the backend. // In future we'll need to apply those optimizations here. 
let optimized_bytecode = compiled_program.circuit; - let (proving_key, verification_key) = backend.preprocess(&optimized_bytecode); + let (proving_key, verification_key) = backend.preprocess(&optimized_bytecode)?; Ok(PreprocessedProgram { backend: String::from(BACKEND_IDENTIFIER), @@ -31,17 +28,17 @@ pub fn preprocess_program( }) } -pub fn preprocess_contract( - backend: &impl ProofSystemCompiler, +pub fn preprocess_contract( + backend: &B, compiled_contract: CompiledContract, -) -> Result { - let preprocessed_contract_functions = vecmap(compiled_contract.functions, |func| { +) -> Result { + let preprocessed_contract_functions = try_vecmap(compiled_contract.functions, |func| { // TODO: currently `func`'s bytecode is already optimized for the backend. // In future we'll need to apply those optimizations here. let optimized_bytecode = func.bytecode; - let (proving_key, verification_key) = backend.preprocess(&optimized_bytecode); + let (proving_key, verification_key) = backend.preprocess(&optimized_bytecode)?; - PreprocessedContractFunction { + Ok(PreprocessedContractFunction { name: func.name, function_type: func.function_type, abi: func.abi, @@ -49,8 +46,8 @@ pub fn preprocess_contract( bytecode: optimized_bytecode, proving_key, verification_key, - } - }); + }) + })?; Ok(PreprocessedContract { name: compiled_contract.name, diff --git a/crates/nargo/src/ops/prove.rs b/crates/nargo/src/ops/prove.rs index 921cc71c67..523b2ed5ed 100644 --- a/crates/nargo/src/ops/prove.rs +++ b/crates/nargo/src/ops/prove.rs @@ -1,15 +1,11 @@ use acvm::acir::{circuit::Circuit, native_types::WitnessMap}; use acvm::ProofSystemCompiler; -use crate::NargoError; - -pub fn prove_execution( - backend: &impl ProofSystemCompiler, +pub fn prove_execution( + backend: &B, circuit: &Circuit, solved_witness: WitnessMap, proving_key: &[u8], -) -> Result, NargoError> { - let proof = backend.prove_with_pk(circuit, solved_witness, proving_key); - - Ok(proof) +) -> Result, B::Error> { + 
backend.prove_with_pk(circuit, solved_witness, proving_key) } diff --git a/crates/nargo/src/ops/verify.rs b/crates/nargo/src/ops/verify.rs index 09dfac1a03..f2145e3cf8 100644 --- a/crates/nargo/src/ops/verify.rs +++ b/crates/nargo/src/ops/verify.rs @@ -1,16 +1,12 @@ use acvm::acir::{circuit::Circuit, native_types::WitnessMap}; use acvm::ProofSystemCompiler; -use crate::NargoError; - -pub fn verify_proof( - backend: &impl ProofSystemCompiler, +pub fn verify_proof( + backend: &B, circuit: &Circuit, proof: &[u8], public_inputs: WitnessMap, verification_key: &[u8], -) -> Result { - let valid_proof = backend.verify_with_vk(proof, public_inputs, circuit, verification_key); - - Ok(valid_proof) +) -> Result { + backend.verify_with_vk(proof, public_inputs, circuit, verification_key) } diff --git a/crates/nargo_cli/Cargo.toml b/crates/nargo_cli/Cargo.toml index 5db5d70eff..74db54c8ef 100644 --- a/crates/nargo_cli/Cargo.toml +++ b/crates/nargo_cli/Cargo.toml @@ -37,7 +37,7 @@ termcolor = "1.1.2" color-eyre = "0.6.2" # Backends -acvm-backend-barretenberg = { git = "https://github.com/noir-lang/aztec_backend", rev = "148521f851d22a1411b8491905585d3c77e22ee1", default-features = false } +acvm-backend-barretenberg = { version = "0.1.2", default-features = false } [dev-dependencies] tempdir = "0.3.7" @@ -50,4 +50,3 @@ default = ["plonk_bn254"] # The plonk backend can only use bn254, so we do not specify the field plonk_bn254 = ["acvm-backend-barretenberg/native"] plonk_bn254_wasm = ["acvm-backend-barretenberg/wasm"] - diff --git a/crates/nargo_cli/src/cli/check_cmd.rs b/crates/nargo_cli/src/cli/check_cmd.rs index 3049c830de..9664930466 100644 --- a/crates/nargo_cli/src/cli/check_cmd.rs +++ b/crates/nargo_cli/src/cli/check_cmd.rs @@ -1,5 +1,5 @@ use crate::{errors::CliError, resolver::Resolver}; -use acvm::ProofSystemCompiler; +use acvm::Backend; use clap::Args; use iter_extended::btree_map; use noirc_abi::{AbiParameter, AbiType, MAIN_RETURN_NAME}; @@ -17,15 +17,21 @@ pub(crate) 
struct CheckCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: CheckCommand, config: NargoConfig) -> Result<(), CliError> { - check_from_path(config.program_dir, &args.compile_options)?; +pub(crate) fn run( + backend: &B, + args: CheckCommand, + config: NargoConfig, +) -> Result<(), CliError> { + check_from_path(backend, config.program_dir, &args.compile_options)?; println!("Constraint system successfully built!"); Ok(()) } -fn check_from_path>(p: P, compile_options: &CompileOptions) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +fn check_from_path>( + backend: &B, + p: P, + compile_options: &CompileOptions, +) -> Result<(), CliError> { let mut driver = Resolver::resolve_root_manifest(p.as_ref(), backend.np_language())?; driver.check_crate(compile_options).map_err(|_| CliError::CompilationError)?; @@ -148,12 +154,13 @@ d2 = ["", "", ""] let pass_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(format!("{TEST_DATA_DIR}/pass")); + let backend = crate::backends::ConcreteBackend::default(); let config = CompileOptions::default(); let paths = std::fs::read_dir(pass_dir).unwrap(); for path in paths.flatten() { let path = path.path(); assert!( - super::check_from_path(path.clone(), &config).is_ok(), + super::check_from_path(&backend, path.clone(), &config).is_ok(), "path: {}", path.display() ); @@ -166,12 +173,13 @@ d2 = ["", "", ""] let fail_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(format!("{TEST_DATA_DIR}/fail")); + let backend = crate::backends::ConcreteBackend::default(); let config = CompileOptions::default(); let paths = std::fs::read_dir(fail_dir).unwrap(); for path in paths.flatten() { let path = path.path(); assert!( - super::check_from_path(path.clone(), &config).is_err(), + super::check_from_path(&backend, path.clone(), &config).is_err(), "path: {}", path.display() ); @@ -183,13 +191,14 @@ d2 = ["", "", ""] let pass_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")) 
.join(format!("{TEST_DATA_DIR}/pass_dev_mode")); + let backend = crate::backends::ConcreteBackend::default(); let config = CompileOptions { allow_warnings: true, ..Default::default() }; let paths = std::fs::read_dir(pass_dir).unwrap(); for path in paths.flatten() { let path = path.path(); assert!( - super::check_from_path(path.clone(), &config).is_ok(), + super::check_from_path(&backend, path.clone(), &config).is_ok(), "path: {}", path.display() ); diff --git a/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs b/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs index f23502a15b..d38433e2d1 100644 --- a/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs +++ b/crates/nargo_cli/src/cli/codegen_verifier_cmd.rs @@ -4,6 +4,7 @@ use crate::{ cli::compile_cmd::compile_circuit, constants::CONTRACT_DIR, constants::TARGET_DIR, errors::CliError, }; +use acvm::Backend; use clap::Args; use nargo::ops::{codegen_verifier, preprocess_program}; use noirc_driver::CompileOptions; @@ -18,9 +19,11 @@ pub(crate) struct CodegenVerifierCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: CodegenVerifierCommand, config: NargoConfig) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +pub(crate) fn run( + backend: &B, + args: CodegenVerifierCommand, + config: NargoConfig, +) -> Result<(), CliError> { // TODO(#1201): Should this be a utility function? let circuit_build_path = args .circuit_name @@ -30,12 +33,14 @@ pub(crate) fn run(args: CodegenVerifierCommand, config: NargoConfig) -> Result<( Some(circuit_build_path) => read_program_from_file(circuit_build_path)?, None => { let compiled_program = - compile_circuit(&backend, config.program_dir.as_ref(), &args.compile_options)?; - preprocess_program(&backend, compiled_program)? + compile_circuit(backend, config.program_dir.as_ref(), &args.compile_options)?; + preprocess_program(backend, compiled_program) + .map_err(CliError::ProofSystemCompilerError)? 
} }; - let smart_contract_string = codegen_verifier(&backend, &preprocessed_program.verification_key)?; + let smart_contract_string = codegen_verifier(backend, &preprocessed_program.verification_key) + .map_err(CliError::SmartContractError)?; let contract_dir = config.program_dir.join(CONTRACT_DIR); create_named_dir(&contract_dir, "contract"); diff --git a/crates/nargo_cli/src/cli/compile_cmd.rs b/crates/nargo_cli/src/cli/compile_cmd.rs index 78b5200316..531560b87d 100644 --- a/crates/nargo_cli/src/cli/compile_cmd.rs +++ b/crates/nargo_cli/src/cli/compile_cmd.rs @@ -1,4 +1,4 @@ -use acvm::ProofSystemCompiler; +use acvm::Backend; use iter_extended::try_vecmap; use noirc_driver::{CompileOptions, CompiledProgram, Driver}; use std::path::Path; @@ -27,19 +27,22 @@ pub(crate) struct CompileCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: CompileCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run( + backend: &B, + args: CompileCommand, + config: NargoConfig, +) -> Result<(), CliError> { let circuit_dir = config.program_dir.join(TARGET_DIR); - let backend = crate::backends::ConcreteBackend::default(); - // If contracts is set we're compiling every function in a 'contract' rather than just 'main'. 
if args.contracts { - let mut driver = setup_driver(&backend, &config.program_dir)?; + let mut driver = setup_driver(backend, &config.program_dir)?; let compiled_contracts = driver .compile_contracts(&args.compile_options) .map_err(|_| CliError::CompilationError)?; - let preprocessed_contracts = - try_vecmap(compiled_contracts, |contract| preprocess_contract(&backend, contract))?; + let preprocessed_contracts = try_vecmap(compiled_contracts, |contract| { + preprocess_contract(backend, contract).map_err(CliError::ProofSystemCompilerError) + })?; for contract in preprocessed_contracts { save_contract_to_file( &contract, @@ -48,25 +51,26 @@ pub(crate) fn run(args: CompileCommand, config: NargoConfig) -> Result<(), CliEr ); } } else { - let program = compile_circuit(&backend, &config.program_dir, &args.compile_options)?; - let preprocessed_program = preprocess_program(&backend, program)?; + let program = compile_circuit(backend, &config.program_dir, &args.compile_options)?; + let preprocessed_program = + preprocess_program(backend, program).map_err(CliError::ProofSystemCompilerError)?; save_program_to_file(&preprocessed_program, &args.circuit_name, circuit_dir); } Ok(()) } -fn setup_driver( - backend: &impl ProofSystemCompiler, +fn setup_driver( + backend: &B, program_dir: &Path, ) -> Result { Resolver::resolve_root_manifest(program_dir, backend.np_language()) } -pub(crate) fn compile_circuit( - backend: &impl ProofSystemCompiler, +pub(crate) fn compile_circuit( + backend: &B, program_dir: &Path, compile_options: &CompileOptions, -) -> Result { +) -> Result> { let mut driver = setup_driver(backend, program_dir)?; driver.compile_main(compile_options).map_err(|_| CliError::CompilationError) } diff --git a/crates/nargo_cli/src/cli/execute_cmd.rs b/crates/nargo_cli/src/cli/execute_cmd.rs index eeb4ff135a..b597dfb17c 100644 --- a/crates/nargo_cli/src/cli/execute_cmd.rs +++ b/crates/nargo_cli/src/cli/execute_cmd.rs @@ -1,10 +1,11 @@ use std::path::Path; +use 
acvm::acir::circuit::Circuit; use acvm::acir::native_types::WitnessMap; -use acvm::PartialWitnessGenerator; +use acvm::Backend; use clap::Args; use noirc_abi::input_parser::{Format, InputValue}; -use noirc_abi::InputMap; +use noirc_abi::{Abi, InputMap}; use noirc_driver::{CompileOptions, CompiledProgram}; use super::fs::{inputs::read_inputs_from_file, witness::save_witness_to_dir}; @@ -25,9 +26,13 @@ pub(crate) struct ExecuteCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: ExecuteCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run( + backend: &B, + args: ExecuteCommand, + config: NargoConfig, +) -> Result<(), CliError> { let (return_value, solved_witness) = - execute_with_path(&config.program_dir, &args.compile_options)?; + execute_with_path(backend, &config.program_dir, &args.compile_options)?; println!("Circuit witness successfully solved"); if let Some(return_value) = return_value { @@ -43,35 +48,34 @@ pub(crate) fn run(args: ExecuteCommand, config: NargoConfig) -> Result<(), CliEr Ok(()) } -fn execute_with_path( +fn execute_with_path( + backend: &B, program_dir: &Path, compile_options: &CompileOptions, -) -> Result<(Option, WitnessMap), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - - let compiled_program = compile_circuit(&backend, program_dir, compile_options)?; +) -> Result<(Option, WitnessMap), CliError> { + let CompiledProgram { abi, circuit } = compile_circuit(backend, program_dir, compile_options)?; // Parse the initial witness values from Prover.toml let (inputs_map, _) = - read_inputs_from_file(program_dir, PROVER_INPUT_FILE, Format::Toml, &compiled_program.abi)?; + read_inputs_from_file(program_dir, PROVER_INPUT_FILE, Format::Toml, &abi)?; - let solved_witness = execute_program(&backend, &compiled_program, &inputs_map)?; + let solved_witness = execute_program(backend, circuit, &abi, &inputs_map)?; - let public_abi = compiled_program.abi.public_abi(); + let public_abi = 
abi.public_abi(); let (_, return_value) = public_abi.decode(&solved_witness)?; Ok((return_value, solved_witness)) } -pub(crate) fn execute_program( - backend: &impl PartialWitnessGenerator, - compiled_program: &CompiledProgram, +pub(crate) fn execute_program( + backend: &B, + circuit: Circuit, + abi: &Abi, inputs_map: &InputMap, -) -> Result { - let initial_witness = compiled_program.abi.encode(inputs_map, None)?; +) -> Result> { + let initial_witness = abi.encode(inputs_map, None)?; - let solved_witness = - nargo::ops::execute_circuit(backend, compiled_program.circuit.clone(), initial_witness)?; + let solved_witness = nargo::ops::execute_circuit(backend, circuit, initial_witness)?; Ok(solved_witness) } diff --git a/crates/nargo_cli/src/cli/fs/inputs.rs b/crates/nargo_cli/src/cli/fs/inputs.rs index dd9ce19972..eaf94cc22f 100644 --- a/crates/nargo_cli/src/cli/fs/inputs.rs +++ b/crates/nargo_cli/src/cli/fs/inputs.rs @@ -4,7 +4,7 @@ use noirc_abi::{ }; use std::{collections::BTreeMap, path::Path}; -use crate::errors::CliError; +use crate::errors::FilesystemError; use super::write_to_file; @@ -20,14 +20,14 @@ pub(crate) fn read_inputs_from_file>( file_name: &str, format: Format, abi: &Abi, -) -> Result<(InputMap, Option), CliError> { +) -> Result<(InputMap, Option), FilesystemError> { if abi.is_empty() { return Ok((BTreeMap::new(), None)); } let file_path = path.as_ref().join(file_name).with_extension(format.ext()); if !file_path.exists() { - return Err(CliError::MissingTomlFile(file_name.to_owned(), file_path)); + return Err(FilesystemError::MissingTomlFile(file_name.to_owned(), file_path)); } let input_string = std::fs::read_to_string(file_path).unwrap(); @@ -43,7 +43,7 @@ pub(crate) fn write_inputs_to_file>( path: P, file_name: &str, format: Format, -) -> Result<(), CliError> { +) -> Result<(), FilesystemError> { let file_path = path.as_ref().join(file_name).with_extension(format.ext()); // We must insert the return value into the `InputMap` in order for it to be 
written to file. diff --git a/crates/nargo_cli/src/cli/fs/mod.rs b/crates/nargo_cli/src/cli/fs/mod.rs index d860f722fd..4ebce3b332 100644 --- a/crates/nargo_cli/src/cli/fs/mod.rs +++ b/crates/nargo_cli/src/cli/fs/mod.rs @@ -4,7 +4,7 @@ use std::{ path::{Path, PathBuf}, }; -use crate::errors::CliError; +use crate::errors::FilesystemError; pub(super) mod inputs; pub(super) mod program; @@ -32,11 +32,11 @@ pub(super) fn write_to_file(bytes: &[u8], path: &Path) -> String { } } -pub(super) fn load_hex_data>(path: P) -> Result, CliError> { - let hex_data: Vec<_> = - std::fs::read(&path).map_err(|_| CliError::PathNotValid(path.as_ref().to_path_buf()))?; +pub(super) fn load_hex_data>(path: P) -> Result, FilesystemError> { + let hex_data: Vec<_> = std::fs::read(&path) + .map_err(|_| FilesystemError::PathNotValid(path.as_ref().to_path_buf()))?; - let raw_bytes = hex::decode(hex_data).map_err(CliError::HexArtifactNotValid)?; + let raw_bytes = hex::decode(hex_data).map_err(FilesystemError::HexArtifactNotValid)?; Ok(raw_bytes) } diff --git a/crates/nargo_cli/src/cli/fs/program.rs b/crates/nargo_cli/src/cli/fs/program.rs index a3b5f4026b..871a602383 100644 --- a/crates/nargo_cli/src/cli/fs/program.rs +++ b/crates/nargo_cli/src/cli/fs/program.rs @@ -2,7 +2,7 @@ use std::path::{Path, PathBuf}; use nargo::artifacts::{contract::PreprocessedContract, program::PreprocessedProgram}; -use crate::errors::CliError; +use crate::errors::FilesystemError; use super::{create_named_dir, write_to_file}; @@ -35,10 +35,11 @@ fn save_build_artifact_to_file, T: ?Sized + serde::Serialize>( pub(crate) fn read_program_from_file>( circuit_path: P, -) -> Result { +) -> Result { let file_path = circuit_path.as_ref().with_extension("json"); - let input_string = std::fs::read(&file_path).map_err(|_| CliError::PathNotValid(file_path))?; + let input_string = + std::fs::read(&file_path).map_err(|_| FilesystemError::PathNotValid(file_path))?; let program = serde_json::from_slice(&input_string).expect("could not 
deserialize program"); diff --git a/crates/nargo_cli/src/cli/fs/proof.rs b/crates/nargo_cli/src/cli/fs/proof.rs index 4a77595a54..3a54aa908f 100644 --- a/crates/nargo_cli/src/cli/fs/proof.rs +++ b/crates/nargo_cli/src/cli/fs/proof.rs @@ -1,6 +1,6 @@ use std::path::{Path, PathBuf}; -use crate::{constants::PROOF_EXT, errors::CliError}; +use crate::{constants::PROOF_EXT, errors::FilesystemError}; use super::{create_named_dir, write_to_file}; @@ -8,7 +8,7 @@ pub(crate) fn save_proof_to_dir>( proof: &[u8], proof_name: &str, proof_dir: P, -) -> Result { +) -> Result { create_named_dir(proof_dir.as_ref(), "proof"); let proof_path = proof_dir.as_ref().join(proof_name).with_extension(PROOF_EXT); diff --git a/crates/nargo_cli/src/cli/fs/witness.rs b/crates/nargo_cli/src/cli/fs/witness.rs index 70c872d9b2..0fcee53163 100644 --- a/crates/nargo_cli/src/cli/fs/witness.rs +++ b/crates/nargo_cli/src/cli/fs/witness.rs @@ -3,17 +3,17 @@ use std::path::{Path, PathBuf}; use acvm::acir::native_types::WitnessMap; use super::{create_named_dir, write_to_file}; -use crate::{constants::WITNESS_EXT, errors::CliError}; +use crate::{constants::WITNESS_EXT, errors::FilesystemError}; pub(crate) fn save_witness_to_dir>( witness: WitnessMap, witness_name: &str, witness_dir: P, -) -> Result { +) -> Result { create_named_dir(witness_dir.as_ref(), "witness"); let witness_path = witness_dir.as_ref().join(witness_name).with_extension(WITNESS_EXT); - let buf: Vec = witness.into(); + let buf: Vec = witness.try_into().expect("Could not serialize witness"); write_to_file(buf.as_slice(), &witness_path); diff --git a/crates/nargo_cli/src/cli/gates_cmd.rs b/crates/nargo_cli/src/cli/gates_cmd.rs index 9fe9f5c7a5..88e11c683e 100644 --- a/crates/nargo_cli/src/cli/gates_cmd.rs +++ b/crates/nargo_cli/src/cli/gates_cmd.rs @@ -1,4 +1,4 @@ -use acvm::ProofSystemCompiler; +use acvm::Backend; use clap::Args; use noirc_driver::CompileOptions; use std::path::Path; @@ -15,17 +15,20 @@ pub(crate) struct GatesCommand { 
compile_options: CompileOptions, } -pub(crate) fn run(args: GatesCommand, config: NargoConfig) -> Result<(), CliError> { - count_gates_with_path(config.program_dir, &args.compile_options) +pub(crate) fn run( + backend: &B, + args: GatesCommand, + config: NargoConfig, +) -> Result<(), CliError> { + count_gates_with_path(backend, config.program_dir, &args.compile_options) } -fn count_gates_with_path>( +fn count_gates_with_path>( + backend: &B, program_dir: P, compile_options: &CompileOptions, -) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - - let compiled_program = compile_circuit(&backend, program_dir.as_ref(), compile_options)?; +) -> Result<(), CliError> { + let compiled_program = compile_circuit(backend, program_dir.as_ref(), compile_options)?; let num_opcodes = compiled_program.circuit.opcodes.len(); println!( @@ -34,7 +37,9 @@ fn count_gates_with_path>( num_opcodes ); - let exact_circuit_size = backend.get_exact_circuit_size(&compiled_program.circuit); + let exact_circuit_size = backend + .get_exact_circuit_size(&compiled_program.circuit) + .map_err(CliError::ProofSystemCompilerError)?; println!("Backend circuit size: {exact_circuit_size}"); Ok(()) diff --git a/crates/nargo_cli/src/cli/mod.rs b/crates/nargo_cli/src/cli/mod.rs index 2bb92925e5..d41dc1a815 100644 --- a/crates/nargo_cli/src/cli/mod.rs +++ b/crates/nargo_cli/src/cli/mod.rs @@ -1,6 +1,5 @@ use clap::{Args, Parser, Subcommand}; use const_format::formatcp; -use noirc_abi::InputMap; use noirc_driver::CompileOptions; use std::path::{Path, PathBuf}; @@ -68,17 +67,19 @@ pub fn start_cli() -> eyre::Result<()> { config.program_dir = find_package_root(&config.program_dir)?; } + let backend = crate::backends::ConcreteBackend::default(); + match command { - NargoCommand::New(args) => new_cmd::run(args, config), - NargoCommand::Check(args) => check_cmd::run(args, config), - NargoCommand::Compile(args) => compile_cmd::run(args, config), - NargoCommand::Execute(args) => 
execute_cmd::run(args, config), - NargoCommand::Prove(args) => prove_cmd::run(args, config), - NargoCommand::Verify(args) => verify_cmd::run(args, config), - NargoCommand::Test(args) => test_cmd::run(args, config), - NargoCommand::Gates(args) => gates_cmd::run(args, config), - NargoCommand::CodegenVerifier(args) => codegen_verifier_cmd::run(args, config), - NargoCommand::PrintAcir(args) => print_acir_cmd::run(args, config), + NargoCommand::New(args) => new_cmd::run(&backend, args, config), + NargoCommand::Check(args) => check_cmd::run(&backend, args, config), + NargoCommand::Compile(args) => compile_cmd::run(&backend, args, config), + NargoCommand::Execute(args) => execute_cmd::run(&backend, args, config), + NargoCommand::Prove(args) => prove_cmd::run(&backend, args, config), + NargoCommand::Verify(args) => verify_cmd::run(&backend, args, config), + NargoCommand::Test(args) => test_cmd::run(&backend, args, config), + NargoCommand::Gates(args) => gates_cmd::run(&backend, args, config), + NargoCommand::CodegenVerifier(args) => codegen_verifier_cmd::run(&backend, args, config), + NargoCommand::PrintAcir(args) => print_acir_cmd::run(&backend, args, config), }?; Ok(()) @@ -86,10 +87,18 @@ pub fn start_cli() -> eyre::Result<()> { // helper function which tests noir programs by trying to generate a proof and verify it pub fn prove_and_verify(proof_name: &str, program_dir: &Path, show_ssa: bool) -> bool { - let compile_options = CompileOptions { show_ssa, allow_warnings: false, show_output: false }; + let backend = crate::backends::ConcreteBackend::default(); + + let compile_options = CompileOptions { + show_ssa, + allow_warnings: false, + show_output: false, + experimental_ssa: false, + }; let proof_dir = program_dir.join(PROOFS_DIR); match prove_cmd::prove_with_path( + &backend, Some(proof_name.to_owned()), program_dir, &proof_dir, diff --git a/crates/nargo_cli/src/cli/new_cmd.rs b/crates/nargo_cli/src/cli/new_cmd.rs index 9d39f8d1d8..5868c1e820 100644 --- 
a/crates/nargo_cli/src/cli/new_cmd.rs +++ b/crates/nargo_cli/src/cli/new_cmd.rs @@ -5,6 +5,7 @@ use crate::{ use super::fs::{create_named_dir, write_to_file}; use super::{NargoConfig, CARGO_PKG_VERSION}; +use acvm::Backend; use clap::Args; use const_format::formatcp; use std::path::{Path, PathBuf}; @@ -27,19 +28,24 @@ compiler_version = "{CARGO_PKG_VERSION}" ); const EXAMPLE: &str = r#"fn main(x : Field, y : pub Field) { - constrain x != y; + assert(x != y); } #[test] fn test_main() { main(1, 2); - + // Uncomment to make test fail // main(1, 1); } "#; -pub(crate) fn run(args: NewCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run( + // Backend is currently unused, but we might want to use it to inform the "new" template in the future + _backend: &B, + args: NewCommand, + config: NargoConfig, +) -> Result<(), CliError> { let package_dir = config.program_dir.join(args.package_name); if package_dir.exists() { diff --git a/crates/nargo_cli/src/cli/print_acir_cmd.rs b/crates/nargo_cli/src/cli/print_acir_cmd.rs index 38b841121b..420c57c6a0 100644 --- a/crates/nargo_cli/src/cli/print_acir_cmd.rs +++ b/crates/nargo_cli/src/cli/print_acir_cmd.rs @@ -1,3 +1,4 @@ +use acvm::Backend; use clap::Args; use noirc_driver::CompileOptions; use std::path::Path; @@ -14,17 +15,20 @@ pub(crate) struct PrintAcirCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: PrintAcirCommand, config: NargoConfig) -> Result<(), CliError> { - print_acir_with_path(config.program_dir, &args.compile_options) +pub(crate) fn run( + backend: &B, + args: PrintAcirCommand, + config: NargoConfig, +) -> Result<(), CliError> { + print_acir_with_path(backend, config.program_dir, &args.compile_options) } -fn print_acir_with_path>( +fn print_acir_with_path>( + backend: &B, program_dir: P, compile_options: &CompileOptions, -) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - - let compiled_program = compile_circuit(&backend, 
program_dir.as_ref(), compile_options)?; +) -> Result<(), CliError> { + let compiled_program = compile_circuit(backend, program_dir.as_ref(), compile_options)?; println!("{}", compiled_program.circuit); Ok(()) diff --git a/crates/nargo_cli/src/cli/prove_cmd.rs b/crates/nargo_cli/src/cli/prove_cmd.rs index cecdee23fe..d2adc8d4af 100644 --- a/crates/nargo_cli/src/cli/prove_cmd.rs +++ b/crates/nargo_cli/src/cli/prove_cmd.rs @@ -1,10 +1,11 @@ use std::path::{Path, PathBuf}; +use acvm::Backend; use clap::Args; use nargo::artifacts::program::PreprocessedProgram; -use nargo::ops::{preprocess_program, prove_execution}; +use nargo::ops::{preprocess_program, prove_execution, verify_proof}; use noirc_abi::input_parser::Format; -use noirc_driver::{CompileOptions, CompiledProgram}; +use noirc_driver::CompileOptions; use super::NargoConfig; use super::{ @@ -16,7 +17,7 @@ use super::{ }, }; use crate::{ - cli::{execute_cmd::execute_program, verify_cmd::verify_proof}, + cli::execute_cmd::execute_program, constants::{PROOFS_DIR, PROVER_INPUT_FILE, TARGET_DIR, VERIFIER_INPUT_FILE}, errors::CliError, }; @@ -38,7 +39,11 @@ pub(crate) struct ProveCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: ProveCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run( + backend: &B, + args: ProveCommand, + config: NargoConfig, +) -> Result<(), CliError> { let proof_dir = config.program_dir.join(PROOFS_DIR); let circuit_build_path = args @@ -46,6 +51,7 @@ pub(crate) fn run(args: ProveCommand, config: NargoConfig) -> Result<(), CliErro .map(|circuit_name| config.program_dir.join(TARGET_DIR).join(circuit_name)); prove_with_path( + backend, args.proof_name, config.program_dir, proof_dir, @@ -57,41 +63,35 @@ pub(crate) fn run(args: ProveCommand, config: NargoConfig) -> Result<(), CliErro Ok(()) } -pub(crate) fn prove_with_path>( +pub(crate) fn prove_with_path>( + backend: &B, proof_name: Option, program_dir: P, proof_dir: P, circuit_build_path: Option, 
check_proof: bool, compile_options: &CompileOptions, -) -> Result, CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +) -> Result, CliError> { let preprocessed_program = match circuit_build_path { Some(circuit_build_path) => read_program_from_file(circuit_build_path)?, None => { - let compiled_program = - compile_circuit(&backend, program_dir.as_ref(), compile_options)?; - preprocess_program(&backend, compiled_program)? + let compiled_program = compile_circuit(backend, program_dir.as_ref(), compile_options)?; + preprocess_program(backend, compiled_program) + .map_err(CliError::ProofSystemCompilerError)? } }; let PreprocessedProgram { abi, bytecode, proving_key, verification_key, .. } = preprocessed_program; - let compiled_program = CompiledProgram { abi, circuit: bytecode }; // Parse the initial witness values from Prover.toml - let (inputs_map, _) = read_inputs_from_file( - &program_dir, - PROVER_INPUT_FILE, - Format::Toml, - &compiled_program.abi, - )?; + let (inputs_map, _) = + read_inputs_from_file(&program_dir, PROVER_INPUT_FILE, Format::Toml, &abi)?; - let solved_witness = execute_program(&backend, &compiled_program, &inputs_map)?; + let solved_witness = execute_program(backend, bytecode.clone(), &abi, &inputs_map)?; // Write public inputs into Verifier.toml - let public_abi = compiled_program.abi.clone().public_abi(); + let public_abi = abi.public_abi(); let (public_inputs, return_value) = public_abi.decode(&solved_witness)?; write_inputs_to_file( @@ -102,19 +102,18 @@ pub(crate) fn prove_with_path>( Format::Toml, )?; - let proof = prove_execution(&backend, &compiled_program.circuit, solved_witness, &proving_key)?; + let proof = prove_execution(backend, &bytecode, solved_witness, &proving_key) + .map_err(CliError::ProofSystemCompilerError)?; if check_proof { - let no_proof_name = "".into(); - verify_proof( - &backend, - &compiled_program, - public_inputs, - return_value, - &proof, - &verification_key, - no_proof_name, - )?; + let 
public_inputs = public_abi.encode(&public_inputs, return_value)?; + let valid_proof = + verify_proof(backend, &bytecode, &proof, public_inputs, &verification_key) + .map_err(CliError::ProofSystemCompilerError)?; + + if !valid_proof { + return Err(CliError::InvalidProof("".into())); + } } let proof_path = if let Some(proof_name) = proof_name { diff --git a/crates/nargo_cli/src/cli/test_cmd.rs b/crates/nargo_cli/src/cli/test_cmd.rs index 6e2ead684b..57ef5acdb1 100644 --- a/crates/nargo_cli/src/cli/test_cmd.rs +++ b/crates/nargo_cli/src/cli/test_cmd.rs @@ -1,6 +1,6 @@ use std::{io::Write, path::Path}; -use acvm::{acir::native_types::WitnessMap, ProofSystemCompiler}; +use acvm::{acir::native_types::WitnessMap, Backend}; use clap::Args; use nargo::ops::execute_circuit; use noirc_driver::{CompileOptions, Driver}; @@ -21,19 +21,21 @@ pub(crate) struct TestCommand { compile_options: CompileOptions, } -pub(crate) fn run(args: TestCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run(backend: &B, args: TestCommand, config: NargoConfig) -> Result<(), CliError> +where + B: Backend, +{ let test_name: String = args.test_name.unwrap_or_else(|| "".to_owned()); - run_tests(&config.program_dir, &test_name, &args.compile_options) + run_tests(backend, &config.program_dir, &test_name, &args.compile_options) } -fn run_tests( +fn run_tests( + backend: &B, program_dir: &Path, test_name: &str, compile_options: &CompileOptions, -) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +) -> Result<(), CliError> { let mut driver = Resolver::resolve_root_manifest(program_dir, backend.np_language())?; driver.check_crate(compile_options).map_err(|_| CliError::CompilationError)?; @@ -50,7 +52,7 @@ fn run_tests( writeln!(writer, "Testing {test_name}...").expect("Failed to write to stdout"); writer.flush().ok(); - match run_test(test_name, test_function, &driver, compile_options) { + match run_test(backend, test_name, test_function, &driver, 
compile_options) { Ok(_) => { writer.set_color(ColorSpec::new().set_fg(Some(Color::Green))).ok(); writeln!(writer, "ok").ok(); @@ -73,21 +75,20 @@ fn run_tests( Ok(()) } -fn run_test( +fn run_test( + backend: &B, test_name: &str, main: FuncId, driver: &Driver, config: &CompileOptions, -) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +) -> Result<(), CliError> { let program = driver .compile_no_check(config, main) .map_err(|_| CliError::Generic(format!("Test '{test_name}' failed to compile")))?; // Run the backend to ensure the PWG evaluates functions like std::hash::pedersen, // otherwise constraints involving these expressions will not error. - match execute_circuit(&backend, program.circuit, WitnessMap::new()) { + match execute_circuit(backend, program.circuit, WitnessMap::new()) { Ok(_) => Ok(()), Err(error) => { let writer = StandardStream::stderr(ColorChoice::Always); diff --git a/crates/nargo_cli/src/cli/verify_cmd.rs b/crates/nargo_cli/src/cli/verify_cmd.rs index 07b7e351ee..07ba12d389 100644 --- a/crates/nargo_cli/src/cli/verify_cmd.rs +++ b/crates/nargo_cli/src/cli/verify_cmd.rs @@ -1,16 +1,17 @@ use super::compile_cmd::compile_circuit; use super::fs::{inputs::read_inputs_from_file, load_hex_data, program::read_program_from_file}; -use super::{InputMap, NargoConfig}; +use super::NargoConfig; use crate::{ constants::{PROOFS_DIR, PROOF_EXT, TARGET_DIR, VERIFIER_INPUT_FILE}, errors::CliError, }; -use acvm::ProofSystemCompiler; + +use acvm::Backend; use clap::Args; use nargo::artifacts::program::PreprocessedProgram; use nargo::ops::preprocess_program; -use noirc_abi::input_parser::{Format, InputValue}; -use noirc_driver::{CompileOptions, CompiledProgram}; +use noirc_abi::input_parser::Format; +use noirc_driver::CompileOptions; use std::path::{Path, PathBuf}; /// Given a proof and a program, verify whether the proof is valid @@ -26,7 +27,11 @@ pub(crate) struct VerifyCommand { compile_options: CompileOptions, } 
-pub(crate) fn run(args: VerifyCommand, config: NargoConfig) -> Result<(), CliError> { +pub(crate) fn run( + backend: &B, + args: VerifyCommand, + config: NargoConfig, +) -> Result<(), CliError> { let proof_path = config.program_dir.join(PROOFS_DIR).join(&args.proof).with_extension(PROOF_EXT); @@ -34,68 +39,49 @@ pub(crate) fn run(args: VerifyCommand, config: NargoConfig) -> Result<(), CliErr .circuit_name .map(|circuit_name| config.program_dir.join(TARGET_DIR).join(circuit_name)); - verify_with_path(config.program_dir, proof_path, circuit_build_path, args.compile_options) + verify_with_path( + backend, + &config.program_dir, + proof_path, + circuit_build_path.as_ref(), + args.compile_options, + ) } -fn verify_with_path>( +fn verify_with_path>( + backend: &B, program_dir: P, proof_path: PathBuf, circuit_build_path: Option

, compile_options: CompileOptions, -) -> Result<(), CliError> { - let backend = crate::backends::ConcreteBackend::default(); - +) -> Result<(), CliError> { let preprocessed_program = match circuit_build_path { Some(circuit_build_path) => read_program_from_file(circuit_build_path)?, None => { let compiled_program = - compile_circuit(&backend, program_dir.as_ref(), &compile_options)?; - preprocess_program(&backend, compiled_program)? + compile_circuit(backend, program_dir.as_ref(), &compile_options)?; + preprocess_program(backend, compiled_program) + .map_err(CliError::ProofSystemCompilerError)? } }; let PreprocessedProgram { abi, bytecode, verification_key, .. } = preprocessed_program; - let compiled_program = CompiledProgram { abi, circuit: bytecode }; // Load public inputs (if any) from `VERIFIER_INPUT_FILE`. - let public_abi = compiled_program.abi.clone().public_abi(); + let public_abi = abi.public_abi(); let (public_inputs_map, return_value) = read_inputs_from_file(program_dir, VERIFIER_INPUT_FILE, Format::Toml, &public_abi)?; - verify_proof( - &backend, - &compiled_program, - public_inputs_map, - return_value, - &load_hex_data(&proof_path)?, - &verification_key, - proof_path, - ) -} - -pub(crate) fn verify_proof( - backend: &impl ProofSystemCompiler, - compiled_program: &CompiledProgram, - public_inputs_map: InputMap, - return_value: Option, - proof: &[u8], - verification_key: &[u8], - proof_name: PathBuf, -) -> Result<(), CliError> { - let public_abi = compiled_program.abi.clone().public_abi(); let public_inputs = public_abi.encode(&public_inputs_map, return_value)?; + let proof = load_hex_data(&proof_path)?; - let valid_proof = nargo::ops::verify_proof( - backend, - &compiled_program.circuit, - proof, - public_inputs, - verification_key, - )?; + let valid_proof = + nargo::ops::verify_proof(backend, &bytecode, &proof, public_inputs, &verification_key) + .map_err(CliError::ProofSystemCompilerError)?; if valid_proof { Ok(()) } else { - 
Err(CliError::InvalidProof(proof_name)) + Err(CliError::InvalidProof(proof_path)) } } diff --git a/crates/nargo_cli/src/errors.rs b/crates/nargo_cli/src/errors.rs index f6537b550e..15d1917a5d 100644 --- a/crates/nargo_cli/src/errors.rs +++ b/crates/nargo_cli/src/errors.rs @@ -1,3 +1,4 @@ +use acvm::{Backend, ProofSystemCompiler, SmartContract}; use hex::FromHexError; use nargo::NargoError; use noirc_abi::errors::{AbiError, InputParserError}; @@ -7,11 +8,7 @@ use thiserror::Error; use crate::resolver::DependencyResolutionError; #[derive(Debug, Error)] -pub(crate) enum CliError { - #[error("{0}")] - Generic(String), - #[error("Error: destination {} already exists", .0.display())] - DestinationAlreadyExists(PathBuf), +pub(crate) enum FilesystemError { #[error("Error: {} is not a valid path\nRun either `nargo compile` to generate missing build artifacts or `nargo prove` to construct a proof", .0.display())] PathNotValid(PathBuf), #[error("Error: could not parse hex build artifact (proof, proving and/or verification keys, ACIR checksum) ({0})")] @@ -21,6 +18,18 @@ pub(crate) enum CliError { )] MissingTomlFile(String, PathBuf), + /// Input parsing error + #[error(transparent)] + InputParserError(#[from] InputParserError), +} + +#[derive(Debug, Error)] +pub(crate) enum CliError { + #[error("{0}")] + Generic(String), + #[error("Error: destination {} already exists", .0.display())] + DestinationAlreadyExists(PathBuf), + #[error("Failed to verify proof {}", .0.display())] InvalidProof(PathBuf), @@ -31,15 +40,23 @@ pub(crate) enum CliError { #[error("Failed to compile circuit")] CompilationError, - /// Input parsing error - #[error(transparent)] - InputParserError(#[from] InputParserError), - /// ABI encoding/decoding error #[error(transparent)] AbiError(#[from] AbiError), + /// Filesystem errors + #[error(transparent)] + FilesystemError(#[from] FilesystemError), + /// Error from Nargo #[error(transparent)] NargoError(#[from] NargoError), + + /// Backend error caused by a 
function on the SmartContract trait + #[error(transparent)] + SmartContractError(::Error), // Unfortunately, Rust won't let us `impl From` over an Associated Type on a generic + + /// Backend error caused by a function on the ProofSystemCompiler trait + #[error(transparent)] + ProofSystemCompilerError(::Error), // Unfortunately, Rust won't let us `impl From` over an Associated Type on a generic } diff --git a/crates/nargo_cli/tests/compile_tests_data/pass/basic.nr b/crates/nargo_cli/tests/compile_tests_data/pass/basic.nr index 6a678f93fe..90c0d7ffd3 100644 --- a/crates/nargo_cli/tests/compile_tests_data/pass/basic.nr +++ b/crates/nargo_cli/tests/compile_tests_data/pass/basic.nr @@ -1,4 +1,4 @@ fn main(x : Field, y : Field) { - constrain x != y; + assert(x != y); } \ No newline at end of file diff --git a/crates/nargo_cli/tests/compile_tests_data/pass/basic_import.nr b/crates/nargo_cli/tests/compile_tests_data/pass/basic_import.nr index 212d0f1359..bb61c0f1ed 100644 --- a/crates/nargo_cli/tests/compile_tests_data/pass/basic_import.nr +++ b/crates/nargo_cli/tests/compile_tests_data/pass/basic_import.nr @@ -7,5 +7,5 @@ fn main(x : Field, y : Field) { let _k = std::hash::pedersen([x]); let _l = hello(x); - constrain x != import::hello(y); + assert(x != import::hello(y)); } diff --git a/crates/nargo_cli/tests/target_tests_data/pass/basic/src/main.nr b/crates/nargo_cli/tests/target_tests_data/pass/basic/src/main.nr index 6a678f93fe..90c0d7ffd3 100644 --- a/crates/nargo_cli/tests/target_tests_data/pass/basic/src/main.nr +++ b/crates/nargo_cli/tests/target_tests_data/pass/basic/src/main.nr @@ -1,4 +1,4 @@ fn main(x : Field, y : Field) { - constrain x != y; + assert(x != y); } \ No newline at end of file diff --git a/crates/nargo_cli/tests/target_tests_data/pass/import/src/main.nr b/crates/nargo_cli/tests/target_tests_data/pass/import/src/main.nr index 58fb0c3f3f..cb6476480d 100644 --- a/crates/nargo_cli/tests/target_tests_data/pass/import/src/main.nr +++ 
b/crates/nargo_cli/tests/target_tests_data/pass/import/src/main.nr @@ -5,5 +5,5 @@ fn main(x : Field, y : Field) { let _k = dep::std::hash::pedersen([x]); let _l = hello(x); - constrain x != import::hello(y); + assert(x != import::hello(y)); } diff --git a/crates/nargo_cli/tests/test_data/1_mul/src/main.nr b/crates/nargo_cli/tests/test_data/1_mul/src/main.nr index e423eb6566..4587b4b594 100644 --- a/crates/nargo_cli/tests/test_data/1_mul/src/main.nr +++ b/crates/nargo_cli/tests/test_data/1_mul/src/main.nr @@ -5,5 +5,5 @@ fn main(mut x: u32, y: u32, z: u32) { x *= x; //144 x *= x; //20736 x *= x; //429 981 696 - constrain x == z; + assert(x == z); } diff --git a/crates/nargo_cli/tests/test_data/2_div/src/main.nr b/crates/nargo_cli/tests/test_data/2_div/src/main.nr index 6df7549255..00608cb697 100644 --- a/crates/nargo_cli/tests/test_data/2_div/src/main.nr +++ b/crates/nargo_cli/tests/test_data/2_div/src/main.nr @@ -1,6 +1,6 @@ // Testing integer division: 7/3 = 2 fn main(mut x: u32, y: u32, z: u32) { let a = x % y; - constrain x / y == z; - constrain a == x - z*y; + assert(x / y == z); + assert(a == x - z*y); } diff --git a/crates/nargo_cli/tests/test_data/3_add/src/main.nr b/crates/nargo_cli/tests/test_data/3_add/src/main.nr index 73ed46a8e5..2884415b81 100644 --- a/crates/nargo_cli/tests/test_data/3_add/src/main.nr +++ b/crates/nargo_cli/tests/test_data/3_add/src/main.nr @@ -1,8 +1,8 @@ // Test integer addition: 3 + 4 = 7 fn main(mut x: u32, y: u32, z: u32) { x += y; - constrain x == z; + assert(x == z); x *= 8; - constrain x>9; + assert(x>9); } diff --git a/crates/nargo_cli/tests/test_data/4_sub/src/main.nr b/crates/nargo_cli/tests/test_data/4_sub/src/main.nr index 242be90970..80fc0177e4 100644 --- a/crates/nargo_cli/tests/test_data/4_sub/src/main.nr +++ b/crates/nargo_cli/tests/test_data/4_sub/src/main.nr @@ -1,5 +1,5 @@ // Test unsafe integer subtraction with underflow: 12 - 2418266113 = 1876701195 modulo 2^32 fn main(mut x: u32, y: u32, z: u32) { x -= y; - 
constrain x == z; + assert(x == z); } diff --git a/crates/nargo_cli/tests/test_data/5_over/src/main.nr b/crates/nargo_cli/tests/test_data/5_over/src/main.nr index 8701c1e632..4fdff16c5c 100644 --- a/crates/nargo_cli/tests/test_data/5_over/src/main.nr +++ b/crates/nargo_cli/tests/test_data/5_over/src/main.nr @@ -2,8 +2,8 @@ // Test odd bits integer fn main(mut x: u32, y: u32) { x = x * x; - constrain y == x; + assert(y == x); let c:u3 = 2; - constrain c > x as u3; + assert(c > x as u3); } diff --git a/crates/nargo_cli/tests/test_data/6/src/main.nr b/crates/nargo_cli/tests/test_data/6/src/main.nr index 61be34e6d8..8b350de16c 100644 --- a/crates/nargo_cli/tests/test_data/6/src/main.nr +++ b/crates/nargo_cli/tests/test_data/6/src/main.nr @@ -12,9 +12,9 @@ fn main(x: [u8; 5], result: pub [u8; 32]) { let mut digest = std::hash::sha256(x); digest[0] = 5 as u8; digest = std::hash::sha256(x); - constrain digest == result; + assert(digest == result); let y = [12,45,78,41]; let h = std::hash::mimc_bn254(y); - constrain h == 18226366069841799622585958305961373004333097209608110160936134895615261821931; + assert(h == 18226366069841799622585958305961373004333097209608110160936134895615261821931); } diff --git a/crates/nargo_cli/tests/test_data/6_array/src/main.nr b/crates/nargo_cli/tests/test_data/6_array/src/main.nr index 3537740f1e..9593c56524 100644 --- a/crates/nargo_cli/tests/test_data/6_array/src/main.nr +++ b/crates/nargo_cli/tests/test_data/6_array/src/main.nr @@ -1,14 +1,13 @@ //Basic tests for arrays fn main(x: [u32; 5], y: [u32; 5], mut z: u32, t: u32) { let mut c = 2301; - let _idx = (z - 5*t - 5) as Field; z = y[4]; //Test 1: for i in 0..5 { c = z*z*y[i]; z -= c; } - constrain (z==0); //y[4]=0, so c and z are always 0 + assert(z==0); //y[4]=0, so c and z are always 0 //Test 2: c = 2301 as u32; @@ -17,7 +16,7 @@ fn main(x: [u32; 5], y: [u32; 5], mut z: u32, t: u32) { c = z*z*x[i]; z += x[i]*y[i] - c; } - constrain (z==3814912846); + assert(z==3814912846); //Test 3: c 
= 2300001 as u32; @@ -29,7 +28,7 @@ fn main(x: [u32; 5], y: [u32; 5], mut z: u32, t: u32) { z *= c; } } - constrain (z==41472); + assert(z==41472); //Test 4: z = y[4]; @@ -39,33 +38,17 @@ fn main(x: [u32; 5], y: [u32; 5], mut z: u32, t: u32) { z += x[i+j] - y[i+j]; } } - constrain (z ==11539); + assert(z ==11539); //Test 5: let cc = if z < 1 { x } else { y }; - constrain cc[0] == y[0]; + assert(cc[0] == y[0]); // Test 6: for-each loops for y_elem in y { for x_elem in x { - constrain x_elem != y_elem; + assert(x_elem != y_elem); } } - - //dynamic array test - TODO uncomment the call below when activating dynamic arrays - //dyn_array(x, idx, idx - 3); } -// fn dyn_array(mut x: [u32; 5], y: Field, z: Field) { -// constrain x[y] == 111; -// constrain x[z] == 101; -// x[z] = 0; -// constrain x[y] == 111; -// constrain x[1] == 0; -// if y as u32 < 10 { -// x[y] = x[y] - 2; -// } else { -// x[y] = 0; -// } -// constrain x[4] == 109; -// } \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/7/src/main.nr b/crates/nargo_cli/tests/test_data/7/src/main.nr index ec01ea7c4b..a6bba97864 100644 --- a/crates/nargo_cli/tests/test_data/7/src/main.nr +++ b/crates/nargo_cli/tests/test_data/7/src/main.nr @@ -6,5 +6,5 @@ use dep::std; fn main(x: [u8; 5], result: [u8; 32]) { let digest = std::hash::blake2s(x); - constrain digest == result; + assert(digest == result); } diff --git a/crates/nargo_cli/tests/test_data/7_function/src/main.nr b/crates/nargo_cli/tests/test_data/7_function/src/main.nr index 96ca9759a8..5a23b49387 100644 --- a/crates/nargo_cli/tests/test_data/7_function/src/main.nr +++ b/crates/nargo_cli/tests/test_data/7_function/src/main.nr @@ -15,20 +15,20 @@ fn f2(mut x: Field) -> Field{ // Simple example fn test0(mut a: Field) { a = f2(a); - constrain a == 3; + assert(a == 3); } // Nested call fn test1(mut a: Field) { a = f1(a); - constrain a == 4; + assert(a == 4); } fn test2(z: Field, t: u32 ) { let a = z + t as Field; - constrain a == 64; + assert(a 
== 64); let e = pow(z, t as Field); - constrain e == 714924299; + assert(e == 714924299); } fn pow(base: Field, exponent: Field) -> Field { @@ -46,7 +46,7 @@ fn test3(x: [u8; 3]) -> [u8; 3] { for i in 0..3 { buffer[i] = x[i]; } - constrain buffer == x; + assert(buffer == x); buffer } @@ -59,7 +59,7 @@ fn test_multiple2() -> my_struct { } fn test_multiple3(x: u32, y: u32) { - constrain x == y; + assert(x == y); } struct my_struct { @@ -73,18 +73,18 @@ struct my2 { } fn test_multiple4(s: my_struct) { - constrain s.a == s.b+2; + assert(s.a == s.b+2); } fn test_multiple5(a: (u32, u32)) { - constrain a.0 == a.1+2; + assert(a.0 == a.1+2); } fn test_multiple6(a: my2, b: my_struct, c: (my2, my_struct)) { test_multiple4(a.aa); test_multiple5((b.a, b.b)); - constrain c.0.aa.a == c.1.a; + assert(c.0.aa.a == c.1.a); } @@ -110,28 +110,28 @@ fn main(x: u32 , y: u32 , a: Field, arr1: [u32; 9], arr2: [u32; 9]) { ab = ab + a; (x,ab) }; - constrain my_block.1 == 4; + assert(my_block.1 == 4); test0(a); test1(a); test2(x as Field, y); - constrain bar()[0] == 0; + assert(bar()[0] == 0); let mut b = [0 as u8, 5 as u8, 2 as u8]; let c = test3(b); - constrain b == c; + assert(b == c); b[0] = 1 as u8; let cc = test3(b); - constrain c != cc; + assert(c != cc); let e = test_multiple(x, y); - constrain e.1 == e.0 + 54 as u32; + assert(e.1 == e.0 + 54 as u32); let d = test_multiple2(); - constrain d.b == d.a + 2 as u32; + assert(d.b == d.a + 2 as u32); test_multiple3(y, y); //Regression test for issue #628: let result = first(arr_to_field(arr1), arr_to_field(arr2)); - constrain result[0] == arr1[0] as Field; + assert(result[0] == arr1[0] as Field); } diff --git a/crates/nargo_cli/tests/test_data/8_integration/src/main.nr b/crates/nargo_cli/tests/test_data/8_integration/src/main.nr index 57dca4e2ac..56b02650c2 100644 --- a/crates/nargo_cli/tests/test_data/8_integration/src/main.nr +++ b/crates/nargo_cli/tests/test_data/8_integration/src/main.nr @@ -57,7 +57,7 @@ fn iterate1(mut a0: u32) -> u32{ 
} fn array_noteq(a: [u32; 4], b: [u32; 4]) { - constrain a != b; + assert(a != b); } fn test3(mut b: [Field; 4]) -> [Field; 4] { @@ -105,7 +105,7 @@ fn iterate3( mut hash: [u32; 8]) -> [u32; 8] { g = f; a = t1+t2; } - constrain a == 2470696267; + assert(a == 2470696267); hash[0] = hash[0] + a; hash[1] = hash[1] + b; hash[2] = hash[2] + c; @@ -126,7 +126,7 @@ fn test5() { sha_hash = iterate2(sha_hash); - constrain sha_hash[0] == 9; + assert(sha_hash[0] == 9); } @@ -244,31 +244,31 @@ fn sig1(x: u32) -> u32 { fn main(a: [u32; 100], b: [u32; 100], c: [u32; 4], mut d: [u32; 4], m: [u8; 32]) { let e = matrix_mul_10(a,b); - constrain e[6] == 1866842232; + assert(e[6] == 1866842232); let f = matrix_mul_2(c,d); - constrain f[3] == 2082554100; + assert(f[3] == 2082554100); let mut a = [1 as u32, 2, 3, 4]; a = test4(a); - constrain a[3] == 20; + assert(a[3] == 20); a = test4(c); - constrain a[3] == c[1] * 10; + assert(a[3] == c[1] * 10); d[0] += c[0]; d[0] += c[1]; - constrain d[0] == 2739986880; + assert(d[0] == 2739986880); let h = iterate1(1); - constrain h == 4; + assert(h == 4); let x = d; array_noteq(x, [d[0], d[1], d[2], 0]); let mut h5 = [d[0] as Field, d[1] as Field, d[2] as Field, d[3] as Field]; let t5 = test3(h5); - constrain t5[3] == 3; + assert(t5[3] == 3); h5 = test3(h5); - constrain h5[3] == 3; + assert(h5[3] == 3); test5(); @@ -279,5 +279,5 @@ fn main(a: [u32; 100], b: [u32; 100], c: [u32; 4], mut d: [u32; 4], m: [u8; 32]) sha_hash = iterate3(sha_hash); let h6 = test6(m); - constrain h6[0]== 523008072; //31.. 3800709683; + assert(h6[0]== 523008072); //31.. 
3800709683 } diff --git a/crates/nargo_cli/tests/test_data/9_conditional/src/main.nr b/crates/nargo_cli/tests/test_data/9_conditional/src/main.nr index 0f37f3e92f..48ac639ecf 100644 --- a/crates/nargo_cli/tests/test_data/9_conditional/src/main.nr +++ b/crates/nargo_cli/tests/test_data/9_conditional/src/main.nr @@ -17,11 +17,11 @@ fn call_intrinsic(x: [u8; 5], result: [u8; 32]) { let mut digest = std::hash::sha256(x); digest[0] = 5 as u8; digest = std::hash::sha256(x); - constrain digest == result; + assert(digest == result); } fn must_be_zero(x: u8) { - constrain x == 0; + assert(x == 0); } fn test3 (x: u8) { @@ -41,19 +41,19 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ let arr: [u8; 2] = [1, 2]; if arr[0] != arr[1] { for i in 0..1 { - constrain i != 2; + assert(i != 2); } } //Issue reported in #421 if a == c[0] { - constrain c[0] == 0; + assert(c[0] == 0); } else { if a == c[1] { - constrain c[1] == 0; + assert(c[1] == 0); } else { if a == c[2] { - constrain c[2] == 0; + assert(c[2] == 0); } } } @@ -67,25 +67,25 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ c1 = c1 + as_bits[0] as Field; if i == 0 { - constrain arr[i] == 1;// 1 + assert(arr[i] == 1);// 1 for k in 0..2 { - constrain as_bits_hardcode_1[k] == as_bits[k]; + assert(as_bits_hardcode_1[k] == as_bits[k]); } } if i == 1 { - constrain arr[i] == 2;//2 + assert(arr[i] == 2);//2 for k in 0..2 { - constrain as_bits_hardcode_1[k] != as_bits[k]; + assert(as_bits_hardcode_1[k] != as_bits[k]); } } } - constrain c1==1; + assert(c1==1); //Regression for Issue #579 let result1_true = test(true); - constrain result1_true.array_param[0] == 1; + assert(result1_true.array_param[0] == 1); let result1_false = test(false); - constrain result1_false.array_param[0] == 0; + assert(result1_false.array_param[0] == 0); //Test case for short-circuit let mut data = [0 as u32; 32]; @@ -104,23 +104,23 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ } } } - 
constrain data[31] == 0; - constrain ba != 13; + assert(data[31] == 0); + assert(ba != 13); //regression for short-circuit2 if 35 == a { - constrain false; + assert(false); } bar(a as Field); if a == 3 { c = test4(); } - constrain c[1] != 2; + assert(c[1] != 2); call_intrinsic(x, result); //Test case for conditional with arrays from function parameters let b = sort([1,2,3,4]); - constrain b[0] == 1; + assert(b[0] == 1); if a == 0 { must_be_zero(0); @@ -130,9 +130,9 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ c[0] = 1; c[1] = c[2] / a + 11 % a; let f1 = a as Field; - constrain 10/f1 != 0; + assert(10/f1 != 0); } - constrain c[0] == 3; + assert(c[0] == 3); let mut y = 0; if a == 0 { @@ -141,9 +141,9 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ } else { y = 5; } - constrain y == result[0]; + assert(y == result[0]); c = sort(c); - constrain c[0]==0; + assert(c[0]==0); //test 1 let mut x: u32 = 0; @@ -153,16 +153,16 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ x = 6; } else { x = 2; - constrain x == 2; + assert(x == 2); } } else { x = 5; - constrain x == 5; + assert(x == 5); } if c[0] == 0 { x = 3; } - constrain x == 2; + assert(x == 2); //test2: loops! 
x = 0; @@ -172,19 +172,19 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ x = i as u32 +2; } } - constrain x == 0; + assert(x == 0); test3(1); if a == 0 { c = test4(); } else { - constrain c[1] != 2; + assert(c[1] != 2); } if false { c[1] = 5; } - constrain c[1] == 2; + assert(c[1] == 2); test5(4); @@ -195,7 +195,7 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ } else { c_661 = issue_661_foo(issue_661_bar(c), x); } - constrain c_661[0] < 20000; + assert(c_661[0] < 20000); // Test case for function synchronisation let mut c_sync = 0; @@ -204,7 +204,7 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ } else { c_sync = foo2() + foo2(); } - constrain c_sync == 6; + assert(c_sync == 6); // Regression for predicate simplification safe_inverse(0); @@ -213,7 +213,7 @@ fn main(a: u32, mut c: [u32; 4], x: [u8; 5], result: pub [u8; 32]){ fn test5(a : u32) { if a > 1 { let q = a / 2; - constrain q == 2; + assert(q == 2); } } diff --git a/crates/nargo_cli/tests/test_data/array_dynamic/Nargo.toml b/crates/nargo_cli/tests/test_data/array_dynamic/Nargo.toml new file mode 100644 index 0000000000..e0b467ce5d --- /dev/null +++ b/crates/nargo_cli/tests/test_data/array_dynamic/Nargo.toml @@ -0,0 +1,5 @@ +[package] +authors = [""] +compiler_version = "0.1" + +[dependencies] \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/array_dynamic/Prover.toml b/crates/nargo_cli/tests/test_data/array_dynamic/Prover.toml new file mode 100644 index 0000000000..ff6f02ccda --- /dev/null +++ b/crates/nargo_cli/tests/test_data/array_dynamic/Prover.toml @@ -0,0 +1,6 @@ +x = [104, 101, 108, 108, 111] +z = "59" +t = "10" +index = [0,1,2,3,4] + + diff --git a/crates/nargo_cli/tests/test_data/array_dynamic/src/main.nr b/crates/nargo_cli/tests/test_data/array_dynamic/src/main.nr new file mode 100644 index 0000000000..a4073fd55c --- /dev/null +++ b/crates/nargo_cli/tests/test_data/array_dynamic/src/main.nr @@ -0,0 +1,28 
@@ + +fn main(x: [u32; 5], mut z: u32, t: u32, index: [Field;5]) { + let idx = (z - 5*t - 5) as Field; + //dynamic array test + dyn_array(x, idx, idx - 3); + + // regression for issue 1283 + let mut s = 0; + let x3 = [246,159,32,176,8]; + for i in 0..5 { + s += x3[index[i]]; + } + assert(s!=0); +} + +fn dyn_array(mut x: [u32; 5], y: Field, z: Field) { + assert(x[y] == 111); + assert(x[z] == 101); + x[z] = 0; + assert(x[y] == 111); + assert(x[1] == 0); + if y as u32 < 10 { + x[y] = x[y] - 2; + } else { + x[y] = 0; + } + assert(x[4] == 109); +} \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/array_len/src/main.nr b/crates/nargo_cli/tests/test_data/array_len/src/main.nr index 7ed9ebfd1c..29ee44ce92 100644 --- a/crates/nargo_cli/tests/test_data/array_len/src/main.nr +++ b/crates/nargo_cli/tests/test_data/array_len/src/main.nr @@ -13,19 +13,19 @@ fn nested_call(b: [Field]) -> Field { } fn main(len3: [u8; 3], len4: [Field; 4]) { - constrain len_plus_1(len3) == 4; - constrain len_plus_1(len4) == 5; - constrain add_lens(len3, len4) == 7; - constrain nested_call(len4) == 5; + assert(len_plus_1(len3) == 4); + assert(len_plus_1(len4) == 5); + assert(add_lens(len3, len4) == 7); + assert(nested_call(len4) == 5); // std::array::len returns a comptime value - constrain len4[len3.len()] == 4; + assert(len4[len3.len()] == 4); // test for std::array::sort let mut unsorted = len3; unsorted[0] = len3[1]; unsorted[1] = len3[0]; - constrain unsorted[0] > unsorted[1]; + assert(unsorted[0] > unsorted[1]); let sorted = unsorted.sort(); - constrain sorted[0] < sorted[1]; + assert(sorted[0] < sorted[1]); } diff --git a/crates/nargo_cli/tests/test_data/array_neq/src/main.nr b/crates/nargo_cli/tests/test_data/array_neq/src/main.nr index 1fc5d9579c..be734dea36 100644 --- a/crates/nargo_cli/tests/test_data/array_neq/src/main.nr +++ b/crates/nargo_cli/tests/test_data/array_neq/src/main.nr @@ -1,4 +1,4 @@ // Simple example of checking where two arrays are different fn 
main(a: [Field; 32], b: [Field; 32]) { - constrain a != b; + assert(a != b); } diff --git a/crates/nargo_cli/tests/test_data/assign_ex/src/main.nr b/crates/nargo_cli/tests/test_data/assign_ex/src/main.nr index 158da95935..b0626d63c8 100644 --- a/crates/nargo_cli/tests/test_data/assign_ex/src/main.nr +++ b/crates/nargo_cli/tests/test_data/assign_ex/src/main.nr @@ -1,6 +1,6 @@ fn main(x: Field, y: Field) { let mut z = x + y; - constrain z == 3; + assert(z == 3); z = x * y; - constrain z == 2; + assert(z == 2); } diff --git a/crates/nargo_cli/tests/test_data/bit_and/src/main.nr b/crates/nargo_cli/tests/test_data/bit_and/src/main.nr index 14b865d1a3..f4805960a3 100644 --- a/crates/nargo_cli/tests/test_data/bit_and/src/main.nr +++ b/crates/nargo_cli/tests/test_data/bit_and/src/main.nr @@ -4,15 +4,15 @@ fn main(x : Field, y : Field) { let x_as_u8 = x as u8; let y_as_u8 = y as u8; - constrain (x_as_u8 & y_as_u8) == x_as_u8; + assert((x_as_u8 & y_as_u8) == x_as_u8); //bitwise and with 1 bit: let flag = (x == 0) & (y == 16); - constrain flag; + assert(flag); //bitwise and with odd bits: let x_as_u11 = x as u11; let y_as_u11 = y as u11; - constrain (x_as_u11 & y_as_u11) == x_as_u11; + assert((x_as_u11 & y_as_u11) == x_as_u11); } diff --git a/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Nargo.toml b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Nargo.toml new file mode 100644 index 0000000000..e0b467ce5d --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Nargo.toml @@ -0,0 +1,5 @@ +[package] +authors = [""] +compiler_version = "0.1" + +[dependencies] \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Prover.toml b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Prover.toml new file mode 100644 index 0000000000..cfd62c406c --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/Prover.toml @@ -0,0 +1 @@ +x = 64 diff --git 
a/crates/nargo_cli/tests/test_data/bit_shifts_comptime/src/main.nr b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/src/main.nr new file mode 100644 index 0000000000..c1c6890feb --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_comptime/src/main.nr @@ -0,0 +1,13 @@ +fn main(x: u64) { + let two: u64 = 2; + let three: u64 = 3; + + // comptime shifts on comptime values + assert(two << 2 == 8); + assert((two << 3) / 8 == two); + assert((three >> 1) == 1); + + // comptime shifts on runtime values + assert(x << 1 == 128); + assert(x >> 2 == 16); +} diff --git a/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Nargo.toml b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Nargo.toml new file mode 100644 index 0000000000..e0b467ce5d --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Nargo.toml @@ -0,0 +1,5 @@ +[package] +authors = [""] +compiler_version = "0.1" + +[dependencies] \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Prover.toml b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Prover.toml new file mode 100644 index 0000000000..67bf6a6a23 --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/Prover.toml @@ -0,0 +1,2 @@ +x = 64 +y = 1 diff --git a/crates/nargo_cli/tests/test_data/bit_shifts_runtime/src/main.nr b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/src/main.nr new file mode 100644 index 0000000000..903a5f3546 --- /dev/null +++ b/crates/nargo_cli/tests/test_data/bit_shifts_runtime/src/main.nr @@ -0,0 +1,12 @@ +fn main(x: u64, y: u64) { + // These are currently unimplemented and panic with "ShiftLeft and ShiftRight operations with shifts which are only known at runtime are not yet implemented." 
+ // See: https://github.com/noir-lang/noir/issues/1265 + + // runtime shifts on comptime values + assert(64 << y == 128); + assert(64 >> y == 32); + + // runtime shifts on runtime values + assert(x << y == 128); + assert(x >> y == 32); +} diff --git a/crates/nargo_cli/tests/test_data/bool_not/src/main.nr b/crates/nargo_cli/tests/test_data/bool_not/src/main.nr index 035c063087..d6b4d7a9fa 100644 --- a/crates/nargo_cli/tests/test_data/bool_not/src/main.nr +++ b/crates/nargo_cli/tests/test_data/bool_not/src/main.nr @@ -1,5 +1,5 @@ use dep::std; fn main(x: u1) { - constrain !x == 0; + assert(!x == 0); } diff --git a/crates/nargo_cli/tests/test_data/bool_or/src/main.nr b/crates/nargo_cli/tests/test_data/bool_or/src/main.nr index 147cc23a92..4a74027e4a 100644 --- a/crates/nargo_cli/tests/test_data/bool_or/src/main.nr +++ b/crates/nargo_cli/tests/test_data/bool_or/src/main.nr @@ -1,7 +1,7 @@ use dep::std; fn main(x: u1, y: u1) { - constrain x | y == 1; + assert(x | y == 1); - constrain x | y | x == 1; + assert(x | y | x == 1); } diff --git a/crates/nargo_cli/tests/test_data/cast_bool/src/main.nr b/crates/nargo_cli/tests/test_data/cast_bool/src/main.nr index e62f4b80dd..57af8120b3 100644 --- a/crates/nargo_cli/tests/test_data/cast_bool/src/main.nr +++ b/crates/nargo_cli/tests/test_data/cast_bool/src/main.nr @@ -1,6 +1,6 @@ fn main(x: Field, y: Field) { let z = x == y; let t = z as u8; - constrain t == 1; + assert(t == 1); } diff --git a/crates/nargo_cli/tests/test_data/comptime_array_access/src/main.nr b/crates/nargo_cli/tests/test_data/comptime_array_access/src/main.nr index 0c8242bca4..04f08bb70c 100644 --- a/crates/nargo_cli/tests/test_data/comptime_array_access/src/main.nr +++ b/crates/nargo_cli/tests/test_data/comptime_array_access/src/main.nr @@ -7,11 +7,11 @@ fn main(a: [Field; 3]) { // Nor should using it in an expression with a non-comptime variable. 
let two = i + ii; - constrain i == ii; + assert(i == ii); let elem2 = a[i]; - constrain elem1 == elem2; - constrain two == 2; + assert(elem1 == elem2); + assert(two == 2); } fn foo(x: Field) -> Field { x } diff --git a/crates/nargo_cli/tests/test_data/comptime_fail/src/main.nr b/crates/nargo_cli/tests/test_data/comptime_fail/src/main.nr index 9e861b5dc5..ad9ecc2f68 100644 --- a/crates/nargo_cli/tests/test_data/comptime_fail/src/main.nr +++ b/crates/nargo_cli/tests/test_data/comptime_fail/src/main.nr @@ -4,12 +4,12 @@ fn main(x: Field) { // Error here: let foo = my_const + x; - constrain array[foo] == x; + assert(array[foo] == x); let my_const2 = 3; - constrain array[my_const2] == 3; + assert(array[my_const2] == 3); // Using a comptime variable where a non-comptime variable is expected should be fine main(my_const2); - constrain x != 0; + assert(x != 0); } diff --git a/crates/nargo_cli/tests/test_data/comptime_recursion_regression/src/main.nr b/crates/nargo_cli/tests/test_data/comptime_recursion_regression/src/main.nr index 31d7d10975..0461fd9c4c 100644 --- a/crates/nargo_cli/tests/test_data/comptime_recursion_regression/src/main.nr +++ b/crates/nargo_cli/tests/test_data/comptime_recursion_regression/src/main.nr @@ -1,4 +1,4 @@ fn main(x: Field, y: Field) { let flag = (x == 1) | (y == 2); - constrain flag | false == flag; + assert(flag | false == flag); } diff --git a/crates/nargo_cli/tests/test_data/config.toml b/crates/nargo_cli/tests/test_data/config.toml index 1c7536af5a..80822d2237 100644 --- a/crates/nargo_cli/tests/test_data/config.toml +++ b/crates/nargo_cli/tests/test_data/config.toml @@ -2,7 +2,7 @@ # "1_mul", "2_div","3_add","4_sub","5_over", "6","6_array", "7_function","7","8_integration", "9_conditional", "10_slices", "assign_ex", "bool_not", "bool_or", "pedersen_check", "poseidonperm_x5_254", "poseidonsponge_x5_254", "pred_eq", "schnorr", "sha256", "tuples", # "array_len", "array_neq", "bit_and", "cast_bool", "comptime_array_access", "generics", 
"global_comptime", "main_bool_arg", "main_return", "merkle_insert", "modules", "modules_more", "scalar_mul", "simple_shield", "struct", "submodules", # Exclude "poseidonsponge_x5_254" and "sha2_byte" due to relatively long computation time and "sha2_blocks" due to very long computation time. -exclude = ["comptime_fail", "poseidonsponge_x5_254", "sha2_blocks", "sha2_byte"] +exclude = ["bit_shifts_runtime", "comptime_fail", "poseidonsponge_x5_254", "sha2_blocks", "sha2_byte"] # List of tests (as their directory name in test_data) expecting to fail: if the test pass, we report an error. diff --git a/crates/nargo_cli/tests/test_data/contracts/src/main.nr b/crates/nargo_cli/tests/test_data/contracts/src/main.nr index f236186d42..53e094eb4c 100644 --- a/crates/nargo_cli/tests/test_data/contracts/src/main.nr +++ b/crates/nargo_cli/tests/test_data/contracts/src/main.nr @@ -1,5 +1,5 @@ fn main(x : Field, y : pub Field) { - constrain x * 2 == y * 3; + assert(x * 2 == y * 3); } contract Foo { diff --git a/crates/nargo_cli/tests/test_data/ec_baby_jubjub/src/main.nr b/crates/nargo_cli/tests/test_data/ec_baby_jubjub/src/main.nr index ee9e2e2eee..3372e969d4 100644 --- a/crates/nargo_cli/tests/test_data/ec_baby_jubjub/src/main.nr +++ b/crates/nargo_cli/tests/test_data/ec_baby_jubjub/src/main.nr @@ -23,26 +23,36 @@ fn main() { let p2_affine = Gaffine::new(16540640123574156134436876038791482806971768689494387082833631921987005038935, 20819045374670962167435360035096875258406992893633759881276124905556507972311); let p3_affine = bjj_affine.add(p1_affine, p2_affine); - constrain p3_affine.eq(Gaffine::new(7916061937171219682591368294088513039687205273691143098332585753343424131937, - 14035240266687799601661095864649209771790948434046947201833777492504781204499)); + assert( + p3_affine.eq(Gaffine::new( + 7916061937171219682591368294088513039687205273691143098332585753343424131937, + 14035240266687799601661095864649209771790948434046947201833777492504781204499 + )) + ); // Test scalar 
multiplication let p4_affine = bjj_affine.mul(2, p1_affine); - constrain p4_affine.eq(Gaffine::new(6890855772600357754907169075114257697580319025794532037257385534741338397365, - 4338620300185947561074059802482547481416142213883829469920100239455078257889)); - constrain p4_affine.eq(bjj_affine.bit_mul([0,1], p1_affine)); + assert( + p4_affine.eq(Gaffine::new( + 6890855772600357754907169075114257697580319025794532037257385534741338397365, + 4338620300185947561074059802482547481416142213883829469920100239455078257889 + )) + ); + assert(p4_affine.eq(bjj_affine.bit_mul([0,1], p1_affine))); // Test subtraction let p5_affine = bjj_affine.subtract(p3_affine, p3_affine); - constrain p5_affine.eq(Gaffine::zero()); + assert(p5_affine.eq(Gaffine::zero())); // Check that these points are on the curve - constrain bjj_affine.contains(bjj_affine.gen) - & bjj_affine.contains(p1_affine) - & bjj_affine.contains(p2_affine) - & bjj_affine.contains(p3_affine) - & bjj_affine.contains(p4_affine) - & bjj_affine.contains(p5_affine); + assert( + bjj_affine.contains(bjj_affine.gen) & + bjj_affine.contains(p1_affine) & + bjj_affine.contains(p2_affine) & + bjj_affine.contains(p3_affine) & + bjj_affine.contains(p4_affine) & + bjj_affine.contains(p5_affine) + ); // Test CurveGroup equivalents let bjj = bjj_affine.into_group(); // Baby Jubjub @@ -54,23 +64,25 @@ fn main() { let p5 = p5_affine.into_group(); // Test addition - constrain p3.eq(bjj.add(p1, p2)); + assert(p3.eq(bjj.add(p1, p2))); // Test scalar multiplication - constrain p4.eq(bjj.mul(2, p1)); - constrain p4.eq(bjj.bit_mul([0,1], p1)); + assert(p4.eq(bjj.mul(2, p1))); + assert(p4.eq(bjj.bit_mul([0,1], p1))); // Test subtraction - constrain G::zero().eq(bjj.subtract(p3, p3)); - constrain p5.eq(G::zero()); + assert(G::zero().eq(bjj.subtract(p3, p3))); + assert(p5.eq(G::zero())); // Check that these points are on the curve - constrain bjj.contains(bjj.gen) - & bjj.contains(p1) - & bjj.contains(p2) - & bjj.contains(p3) - & 
bjj.contains(p4) - & bjj.contains(p5); + assert( + bjj.contains(bjj.gen) & + bjj.contains(p1) & + bjj.contains(p2) & + bjj.contains(p3) & + bjj.contains(p4) & + bjj.contains(p5) + ); // Test SWCurve equivalents of the above // First the affine representation @@ -83,26 +95,32 @@ fn main() { let p5_swcurve_affine = bjj_affine.map_into_swcurve(p5_affine); // Addition - constrain p3_swcurve_affine.eq( - bjj_swcurve_affine.add( - p1_swcurve_affine, - p2_swcurve_affine)); + assert( + p3_swcurve_affine.eq( + bjj_swcurve_affine.add( + p1_swcurve_affine, + p2_swcurve_affine + ) + ) + ); // Doubling - constrain p4_swcurve_affine.eq(bjj_swcurve_affine.mul(2, p1_swcurve_affine)); - constrain p4_swcurve_affine.eq(bjj_swcurve_affine.bit_mul([0,1], p1_swcurve_affine)); + assert(p4_swcurve_affine.eq(bjj_swcurve_affine.mul(2, p1_swcurve_affine))); + assert(p4_swcurve_affine.eq(bjj_swcurve_affine.bit_mul([0,1], p1_swcurve_affine))); // Subtraction - constrain SWGaffine::zero().eq(bjj_swcurve_affine.subtract(p3_swcurve_affine, p3_swcurve_affine)); - constrain p5_swcurve_affine.eq(SWGaffine::zero()); + assert(SWGaffine::zero().eq(bjj_swcurve_affine.subtract(p3_swcurve_affine, p3_swcurve_affine))); + assert(p5_swcurve_affine.eq(SWGaffine::zero())); // Check that these points are on the curve - constrain bjj_swcurve_affine.contains(bjj_swcurve_affine.gen) - & bjj_swcurve_affine.contains(p1_swcurve_affine) - & bjj_swcurve_affine.contains(p2_swcurve_affine) - & bjj_swcurve_affine.contains(p3_swcurve_affine) - & bjj_swcurve_affine.contains(p4_swcurve_affine) - & bjj_swcurve_affine.contains(p5_swcurve_affine); + assert( + bjj_swcurve_affine.contains(bjj_swcurve_affine.gen) & + bjj_swcurve_affine.contains(p1_swcurve_affine) & + bjj_swcurve_affine.contains(p2_swcurve_affine) & + bjj_swcurve_affine.contains(p3_swcurve_affine) & + bjj_swcurve_affine.contains(p4_swcurve_affine) & + bjj_swcurve_affine.contains(p5_swcurve_affine) + ); // Then the CurveGroup representation let bjj_swcurve = 
bjj.into_swcurve(); @@ -114,26 +132,25 @@ fn main() { let p5_swcurve = bjj.map_into_swcurve(p5); // Addition - constrain p3_swcurve.eq( - bjj_swcurve.add( - p1_swcurve, - p2_swcurve)); + assert(p3_swcurve.eq(bjj_swcurve.add(p1_swcurve,p2_swcurve))); // Doubling - constrain p4_swcurve.eq(bjj_swcurve.mul(2, p1_swcurve)); - constrain p4_swcurve.eq(bjj_swcurve.bit_mul([0,1], p1_swcurve)); + assert(p4_swcurve.eq(bjj_swcurve.mul(2, p1_swcurve))); + assert(p4_swcurve.eq(bjj_swcurve.bit_mul([0,1], p1_swcurve))); // Subtraction - constrain SWG::zero().eq(bjj_swcurve.subtract(p3_swcurve, p3_swcurve)); - constrain p5_swcurve.eq(SWG::zero()); + assert(SWG::zero().eq(bjj_swcurve.subtract(p3_swcurve, p3_swcurve))); + assert(p5_swcurve.eq(SWG::zero())); // Check that these points are on the curve - constrain bjj_swcurve.contains(bjj_swcurve.gen) - & bjj_swcurve.contains(p1_swcurve) - & bjj_swcurve.contains(p2_swcurve) - & bjj_swcurve.contains(p3_swcurve) - & bjj_swcurve.contains(p4_swcurve) - & bjj_swcurve.contains(p5_swcurve); + assert( + bjj_swcurve.contains(bjj_swcurve.gen) & + bjj_swcurve.contains(p1_swcurve) & + bjj_swcurve.contains(p2_swcurve) & + bjj_swcurve.contains(p3_swcurve) & + bjj_swcurve.contains(p4_swcurve) & + bjj_swcurve.contains(p5_swcurve) + ); // Test MontCurve conversions // First the affine representation @@ -146,26 +163,25 @@ fn main() { let p5_montcurve_affine = p5_affine.into_montcurve(); // Addition - constrain p3_montcurve_affine.eq( - bjj_montcurve_affine.add( - p1_montcurve_affine, - p2_montcurve_affine)); + assert(p3_montcurve_affine.eq(bjj_montcurve_affine.add(p1_montcurve_affine, p2_montcurve_affine))); // Doubling - constrain p4_montcurve_affine.eq(bjj_montcurve_affine.mul(2, p1_montcurve_affine)); - constrain p4_montcurve_affine.eq(bjj_montcurve_affine.bit_mul([0,1], p1_montcurve_affine)); + assert(p4_montcurve_affine.eq(bjj_montcurve_affine.mul(2, p1_montcurve_affine))); + assert(p4_montcurve_affine.eq(bjj_montcurve_affine.bit_mul([0,1], 
p1_montcurve_affine))); // Subtraction - constrain MGaffine::zero().eq(bjj_montcurve_affine.subtract(p3_montcurve_affine, p3_montcurve_affine)); - constrain p5_montcurve_affine.eq(MGaffine::zero()); + assert(MGaffine::zero().eq(bjj_montcurve_affine.subtract(p3_montcurve_affine, p3_montcurve_affine))); + assert(p5_montcurve_affine.eq(MGaffine::zero())); // Check that these points are on the curve - constrain bjj_montcurve_affine.contains(bjj_montcurve_affine.gen) - & bjj_montcurve_affine.contains(p1_montcurve_affine) - & bjj_montcurve_affine.contains(p2_montcurve_affine) - & bjj_montcurve_affine.contains(p3_montcurve_affine) - & bjj_montcurve_affine.contains(p4_montcurve_affine) - & bjj_montcurve_affine.contains(p5_montcurve_affine); + assert( + bjj_montcurve_affine.contains(bjj_montcurve_affine.gen) & + bjj_montcurve_affine.contains(p1_montcurve_affine) & + bjj_montcurve_affine.contains(p2_montcurve_affine) & + bjj_montcurve_affine.contains(p3_montcurve_affine) & + bjj_montcurve_affine.contains(p4_montcurve_affine) & + bjj_montcurve_affine.contains(p5_montcurve_affine) + ); // Then the CurveGroup representation let bjj_montcurve = bjj.into_montcurve(); @@ -177,35 +193,34 @@ fn main() { let p5_montcurve = p5_montcurve_affine.into_group(); // Addition - constrain p3_montcurve.eq( - bjj_montcurve.add( - p1_montcurve, - p2_montcurve)); - + assert(p3_montcurve.eq(bjj_montcurve.add(p1_montcurve, p2_montcurve))); + // Doubling - constrain p4_montcurve.eq(bjj_montcurve.mul(2, p1_montcurve)); - constrain p4_montcurve.eq(bjj_montcurve.bit_mul([0,1], p1_montcurve)); + assert(p4_montcurve.eq(bjj_montcurve.mul(2, p1_montcurve))); + assert(p4_montcurve.eq(bjj_montcurve.bit_mul([0,1], p1_montcurve))); // Subtraction - constrain MG::zero().eq(bjj_montcurve.subtract(p3_montcurve, p3_montcurve)); - constrain p5_montcurve.eq(MG::zero()); + assert(MG::zero().eq(bjj_montcurve.subtract(p3_montcurve, p3_montcurve))); + assert(p5_montcurve.eq(MG::zero())); // Check that these points are 
on the curve - constrain bjj_montcurve.contains(bjj_montcurve.gen) - & bjj_montcurve.contains(p1_montcurve) - & bjj_montcurve.contains(p2_montcurve) - & bjj_montcurve.contains(p3_montcurve) - & bjj_montcurve.contains(p4_montcurve) - & bjj_montcurve.contains(p5_montcurve); + assert( + bjj_montcurve.contains(bjj_montcurve.gen) & + bjj_montcurve.contains(p1_montcurve) & + bjj_montcurve.contains(p2_montcurve) & + bjj_montcurve.contains(p3_montcurve) & + bjj_montcurve.contains(p4_montcurve) & + bjj_montcurve.contains(p5_montcurve) + ); // Elligator 2 map-to-curve let ell2_pt_map = bjj_affine.elligator2_map(27); - constrain ell2_pt_map.eq(MGaffine::new(7972459279704486422145701269802978968072470631857513331988813812334797879121, 8142420778878030219043334189293412482212146646099536952861607542822144507872).into_tecurve()); + assert(ell2_pt_map.eq(MGaffine::new(7972459279704486422145701269802978968072470631857513331988813812334797879121, 8142420778878030219043334189293412482212146646099536952861607542822144507872).into_tecurve())); // SWU map-to-curve let swu_pt_map = bjj_affine.swu_map(5,27); - constrain swu_pt_map.eq(bjj_affine.map_from_swcurve(SWGaffine::new(2162719247815120009132293839392097468339661471129795280520343931405114293888, 5341392251743377373758788728206293080122949448990104760111875914082289313973))); + assert(swu_pt_map.eq(bjj_affine.map_from_swcurve(SWGaffine::new(2162719247815120009132293839392097468339661471129795280520343931405114293888, 5341392251743377373758788728206293080122949448990104760111875914082289313973)))); } } diff --git a/crates/nargo_cli/tests/test_data/generics/src/main.nr b/crates/nargo_cli/tests/test_data/generics/src/main.nr index 56078a304e..bfde9d3c95 100644 --- a/crates/nargo_cli/tests/test_data/generics/src/main.nr +++ b/crates/nargo_cli/tests/test_data/generics/src/main.nr @@ -5,7 +5,7 @@ struct Bar { } fn foo(bar: Bar) { - constrain bar.one == bar.two; + assert(bar.one == bar.two); } struct BigInt { @@ -15,12 +15,12 @@ struct 
BigInt { impl BigInt { // `N` is in scope of all methods in the impl fn first(first: BigInt, second: BigInt) -> Self { - constrain first.limbs != second.limbs; + assert(first.limbs != second.limbs); first } fn second(first: BigInt, second: Self) -> Self { - constrain first.limbs != second.limbs; + assert(first.limbs != second.limbs); second } } @@ -42,11 +42,16 @@ fn main(x: Field, y: Field) { let int1 = BigInt { limbs: [1] }; let int2 = BigInt { limbs: [2] }; let BigInt { limbs } = int1.second(int2).first(int1); - constrain limbs == int2.limbs; + assert(limbs == int2.limbs); // Test impl exclusively for Bar - constrain bar1.get_other() == bar1.other; + assert(bar1.get_other() == bar1.other); // Expected type error - // constrain bar2.get_other() == bar2.other; + // assert(bar2.get_other() == bar2.other); + + let one = x; + let two = y; + let nested_generics: Bar> = Bar { one, two, other: Bar { one, two, other: 0 } }; + assert(nested_generics.other.other == bar1.get_other()); } diff --git a/crates/nargo_cli/tests/test_data/global_consts/src/baz.nr b/crates/nargo_cli/tests/test_data/global_consts/src/baz.nr index 3471da4310..e52efc52ea 100644 --- a/crates/nargo_cli/tests/test_data/global_consts/src/baz.nr +++ b/crates/nargo_cli/tests/test_data/global_consts/src/baz.nr @@ -1,5 +1,5 @@ fn from_baz(x : [Field; crate::foo::MAGIC_NUMBER]) { for i in 0..crate::foo::MAGIC_NUMBER { - constrain x[i] == crate::foo::MAGIC_NUMBER; + assert(x[i] == crate::foo::MAGIC_NUMBER); }; } \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/global_consts/src/foo.nr b/crates/nargo_cli/tests/test_data/global_consts/src/foo.nr index c54a85ae12..2db74fb1ff 100644 --- a/crates/nargo_cli/tests/test_data/global_consts/src/foo.nr +++ b/crates/nargo_cli/tests/test_data/global_consts/src/foo.nr @@ -6,6 +6,6 @@ global TYPE_INFERRED = 42; fn from_foo(x : [Field; bar::N]) { for i in 0..bar::N { - constrain x[i] == bar::N; + assert(x[i] == bar::N); }; } \ No newline at end of file 
diff --git a/crates/nargo_cli/tests/test_data/global_consts/src/main.nr b/crates/nargo_cli/tests/test_data/global_consts/src/main.nr index fb48eb2b79..9bcca2b807 100644 --- a/crates/nargo_cli/tests/test_data/global_consts/src/main.nr +++ b/crates/nargo_cli/tests/test_data/global_consts/src/main.nr @@ -16,14 +16,14 @@ fn main(a: [Field; M + N - N], b: [Field; 30 + N / 2], c : pub [Field; foo::MAGI let test_struct = Dummy { x: d, y: c }; for i in 0..foo::MAGIC_NUMBER { - constrain c[i] == foo::MAGIC_NUMBER; - constrain test_struct.y[i] == foo::MAGIC_NUMBER; + assert(c[i] == foo::MAGIC_NUMBER); + assert(test_struct.y[i] == foo::MAGIC_NUMBER); } - constrain N != M; + assert(N != M); let expected: u32 = 42; - constrain foo::TYPE_INFERRED == expected; + assert(foo::TYPE_INFERRED == expected); let mut y = 5; let mut x = M; @@ -33,30 +33,30 @@ fn main(a: [Field; M + N - N], b: [Field; 30 + N / 2], c : pub [Field; foo::MAGI y = i; } - constrain y == 24; - constrain x == 10; + assert(y == 24); + assert(x == 10); let q = multiplyByM(3); - constrain q == 96; + assert(q == 96); arrays_neq(a, b); let t: [Field; T_LEN] = [N, M]; - constrain t[1] == 32; + assert(t[1] == 32); - constrain 15 == mysubmodule::my_helper(); + assert(15 == mysubmodule::my_helper()); let add_submodules_N = mysubmodule::N + foo::bar::N; - constrain 15 == add_submodules_N; + assert(15 == add_submodules_N); let add_from_bar_N = mysubmodule::N + foo::bar::from_bar(1); - constrain 15 == add_from_bar_N; + assert(15 == add_from_bar_N); // Example showing an array filled with (mysubmodule::N + 2) 0's let sugared = [0; mysubmodule::N + 2]; - constrain sugared[mysubmodule::N + 1] == 0; + assert(sugared[mysubmodule::N + 1] == 0); let arr: [Field; mysubmodule::N] = [N; 10]; - constrain (arr[0] == 5) & (arr[9] == 5); + assert((arr[0] == 5) & (arr[9] == 5)); foo::from_foo(d); baz::from_baz(c); @@ -67,7 +67,7 @@ fn multiplyByM(x: Field) -> Field { } fn arrays_neq(a: [Field; M], b: [Field; M]) { - constrain a != b; + 
assert(a != b); } mod mysubmodule { @@ -77,7 +77,7 @@ mod mysubmodule { global L: Field = 50; fn my_bool_or(x: u1, y: u1) { - constrain x | y == 1; + assert(x | y == 1); } fn my_helper() -> comptime Field { diff --git a/crates/nargo_cli/tests/test_data/higher-order-functions/src/main.nr b/crates/nargo_cli/tests/test_data/higher-order-functions/src/main.nr index 70b281951a..572e6603cc 100644 --- a/crates/nargo_cli/tests/test_data/higher-order-functions/src/main.nr +++ b/crates/nargo_cli/tests/test_data/higher-order-functions/src/main.nr @@ -2,16 +2,16 @@ use dep::std; fn main() -> pub Field { let f = if 3 * 7 > 200 { foo } else { bar }; - constrain f()[1] == 2; + assert(f()[1] == 2); // Lambdas: - constrain twice(|x| x * 2, 5) == 20; - constrain (|x, y| x + y + 1)(2, 3) == 6; + assert(twice(|x| x * 2, 5) == 20); + assert((|x, y| x + y + 1)(2, 3) == 6); // Closures: let a = 42; let g = || a; - constrain g() == 42; + assert(g() == 42); // Mutable variables cannot be captured, but you can // copy them into immutable variables and capture those: @@ -22,7 +22,7 @@ fn main() -> pub Field { // Add extra mutations to ensure we can mutate x without the // captured z changing. 
x = x + 1; - constrain (|y| y + z)(1) == 4; + assert((|y| y + z)(1) == 4); x = x + 1; let ret = twice(add1, 3); @@ -34,18 +34,18 @@ fn main() -> pub Field { /// Test the array functions in std::array fn test_array_functions() { let myarray: [i32; 3] = [1, 2, 3]; - constrain myarray.any(|n| n > 2); + assert(myarray.any(|n| n > 2)); let evens: [i32; 3] = [2, 4, 6]; - constrain evens.all(|n| n > 1); + assert(evens.all(|n| n > 1)); - constrain evens.fold(0, |a, b| a + b) == 12; - constrain evens.reduce(|a, b| a + b) == 12; + assert(evens.fold(0, |a, b| a + b) == 12); + assert(evens.reduce(|a, b| a + b) == 12); let descending = myarray.sort_via(|a, b| a > b); - constrain descending == [3, 2, 1]; + assert(descending == [3, 2, 1]); - constrain evens.map(|n| n / 2) == myarray; + assert(evens.map(|n| n / 2) == myarray); } fn foo() -> [u32; 2] { diff --git a/crates/nargo_cli/tests/test_data/if_else_chain/src/main.nr b/crates/nargo_cli/tests/test_data/if_else_chain/src/main.nr index af04fc7bdf..5105c18c7d 100644 --- a/crates/nargo_cli/tests/test_data/if_else_chain/src/main.nr +++ b/crates/nargo_cli/tests/test_data/if_else_chain/src/main.nr @@ -1,16 +1,16 @@ fn main(a: u32, mut c: [u32; 4]){ if a == c[0] { - constrain c[0] == 0; + assert(c[0] == 0); } else if a == c[1] { - constrain c[1] == 0; + assert(c[1] == 0); } else if a == c[2] { - constrain c[2] == 0; + assert(c[2] == 0); } else if a == c[3] { // expect to match this case - constrain c[3] == 0; + assert(c[3] == 0); } else { - constrain c[0] == 10; + assert(c[0] == 10); } } diff --git a/crates/nargo_cli/tests/test_data/keccak256/Nargo.toml b/crates/nargo_cli/tests/test_data/keccak256/Nargo.toml new file mode 100644 index 0000000000..e0b467ce5d --- /dev/null +++ b/crates/nargo_cli/tests/test_data/keccak256/Nargo.toml @@ -0,0 +1,5 @@ +[package] +authors = [""] +compiler_version = "0.1" + +[dependencies] \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/keccak256/Prover.toml 
b/crates/nargo_cli/tests/test_data/keccak256/Prover.toml new file mode 100644 index 0000000000..d65c4011d3 --- /dev/null +++ b/crates/nargo_cli/tests/test_data/keccak256/Prover.toml @@ -0,0 +1,35 @@ +x = 0xbd +result = [ + 0x5a, + 0x50, + 0x2f, + 0x9f, + 0xca, + 0x46, + 0x7b, + 0x26, + 0x6d, + 0x5b, + 0x78, + 0x33, + 0x65, + 0x19, + 0x37, + 0xe8, + 0x05, + 0x27, + 0x0c, + 0xa3, + 0xf3, + 0xaf, + 0x1c, + 0x0d, + 0xd2, + 0x46, + 0x2d, + 0xca, + 0x4b, + 0x3b, + 0x1a, + 0xbf, +] diff --git a/crates/nargo_cli/tests/test_data/keccak256/src/main.nr b/crates/nargo_cli/tests/test_data/keccak256/src/main.nr new file mode 100644 index 0000000000..8fafbaa4e2 --- /dev/null +++ b/crates/nargo_cli/tests/test_data/keccak256/src/main.nr @@ -0,0 +1,10 @@ +// Keccak256 example +// +use dep::std; + +fn main(x: Field, result: [u8; 32]) { + // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field + // The padding is taken care of by the program + let digest = std::hash::keccak256([x as u8]); + assert(digest == result); +} diff --git a/crates/nargo_cli/tests/test_data/main_bool_arg/src/main.nr b/crates/nargo_cli/tests/test_data/main_bool_arg/src/main.nr index 91a8db03ff..0615a7dbca 100644 --- a/crates/nargo_cli/tests/test_data/main_bool_arg/src/main.nr +++ b/crates/nargo_cli/tests/test_data/main_bool_arg/src/main.nr @@ -1,8 +1,8 @@ fn main(x : bool, y: [bool;2]) { if x { - constrain 1 != 2; + assert(1 != 2); } - constrain x; - constrain y[0] != y[1]; + assert(x); + assert(y[0] != y[1]); } diff --git a/crates/nargo_cli/tests/test_data/merkle_insert/src/main.nr b/crates/nargo_cli/tests/test_data/merkle_insert/src/main.nr index 3ab4efb64c..53d876272a 100644 --- a/crates/nargo_cli/tests/test_data/merkle_insert/src/main.nr +++ b/crates/nargo_cli/tests/test_data/merkle_insert/src/main.nr @@ -10,13 +10,13 @@ fn main( mimc_input: [Field; 4], ) { let old_leaf_exists = std::merkle::check_membership(old_root, old_leaf, index, old_hash_path); - 
constrain old_leaf_exists == 1; - constrain old_root == std::merkle::compute_root_from_leaf(old_leaf, index, old_hash_path); + assert(old_leaf_exists == 1); + assert(old_root == std::merkle::compute_root_from_leaf(old_leaf, index, old_hash_path)); let calculated_root = std::merkle::compute_merkle_root(leaf, index, old_hash_path); - constrain new_root == calculated_root; + assert(new_root == calculated_root); let h = std::hash::mimc_bn254(mimc_input); // Regression test for PR #891 std::println(h); - constrain h == 18226366069841799622585958305961373004333097209608110160936134895615261821931; + assert(h == 18226366069841799622585958305961373004333097209608110160936134895615261821931); } diff --git a/crates/nargo_cli/tests/test_data/modules/src/main.nr b/crates/nargo_cli/tests/test_data/modules/src/main.nr index 4a773c9ed6..167f7e671a 100644 --- a/crates/nargo_cli/tests/test_data/modules/src/main.nr +++ b/crates/nargo_cli/tests/test_data/modules/src/main.nr @@ -10,5 +10,5 @@ mod foo; // // To verify that proof, type `cargo run verify {proof_name}` fn main(x: Field, y: pub Field) { - constrain x != foo::hello(y); + assert(x != foo::hello(y)); } diff --git a/crates/nargo_cli/tests/test_data/modules_more/src/main.nr b/crates/nargo_cli/tests/test_data/modules_more/src/main.nr index 73f3a0a6d8..8862e5a865 100644 --- a/crates/nargo_cli/tests/test_data/modules_more/src/main.nr +++ b/crates/nargo_cli/tests/test_data/modules_more/src/main.nr @@ -2,5 +2,5 @@ mod foo; // An example of the module system fn main(x: Field, y: Field) { - constrain x != foo::bar::from_bar(y); + assert(x != foo::bar::from_bar(y)); } diff --git a/crates/nargo_cli/tests/test_data/modulus/src/main.nr b/crates/nargo_cli/tests/test_data/modulus/src/main.nr index 070d934976..4a13a6e06b 100644 --- a/crates/nargo_cli/tests/test_data/modulus/src/main.nr +++ b/crates/nargo_cli/tests/test_data/modulus/src/main.nr @@ -3,24 +3,24 @@ use dep::std; fn main(bn254_modulus_be_bytes : [u8; 32], bn254_modulus_be_bits : 
[u1; 254]) -> pub Field { let modulus_size = std::field::modulus_num_bits(); // NOTE: The constraints used in this circuit will only work when testing nargo with the plonk bn254 backend - constrain modulus_size == 254; + assert(modulus_size == 254); let modulus_be_byte_array = std::field::modulus_be_bytes(); for i in 0..32 { - constrain modulus_be_byte_array[i] == bn254_modulus_be_bytes[i]; + assert(modulus_be_byte_array[i] == bn254_modulus_be_bytes[i]); } let modulus_le_byte_array = std::field::modulus_le_bytes(); for i in 0..32 { - constrain modulus_le_byte_array[i] == bn254_modulus_be_bytes[31-i]; + assert(modulus_le_byte_array[i] == bn254_modulus_be_bytes[31-i]); } let modulus_be_bits = std::field::modulus_be_bits(); for i in 0..254 { - constrain modulus_be_bits[i] == bn254_modulus_be_bits[i]; + assert(modulus_be_bits[i] == bn254_modulus_be_bits[i]); } let modulus_le_bits = std::field::modulus_le_bits(); for i in 0..254 { - constrain modulus_le_bits[i] == bn254_modulus_be_bits[253-i]; + assert(modulus_le_bits[i] == bn254_modulus_be_bits[253-i]); } modulus_size diff --git a/crates/nargo_cli/tests/test_data/numeric_generics/src/main.nr b/crates/nargo_cli/tests/test_data/numeric_generics/src/main.nr index ebe50c4d0d..f1efafc19f 100644 --- a/crates/nargo_cli/tests/test_data/numeric_generics/src/main.nr +++ b/crates/nargo_cli/tests/test_data/numeric_generics/src/main.nr @@ -3,15 +3,15 @@ fn main() { let b = id([1, 2, 3]); let itWorks1 = MyStruct { data: a }; - constrain itWorks1.data[1] == 2; + assert(itWorks1.data[1] == 2); let itWorks2 = MyStruct { data: b }; - constrain itWorks2.data[1] == 2; + assert(itWorks2.data[1] == 2); let c = [1, 2]; let itAlsoWorks = MyStruct { data: c }; - constrain itAlsoWorks.data[1] == 2; + assert(itAlsoWorks.data[1] == 2); - constrain foo(itWorks2).data[0] == itWorks2.data[0] + 1; + assert(foo(itWorks2).data[0] == itWorks2.data[0] + 1); } fn id(x: [Field; I]) -> [Field; I] { @@ -25,7 +25,7 @@ struct MyStruct { impl MyStruct { fn 
insert(mut self: Self, index: comptime Field, elem: Field) -> Self { // Regression test for numeric generics on impls - constrain index as u64 < S as u64; + assert(index as u64 < S as u64); self.data[index] = elem; self diff --git a/crates/nargo_cli/tests/test_data/pedersen_check/src/main.nr b/crates/nargo_cli/tests/test_data/pedersen_check/src/main.nr index b727112ce5..37fc3f6118 100644 --- a/crates/nargo_cli/tests/test_data/pedersen_check/src/main.nr +++ b/crates/nargo_cli/tests/test_data/pedersen_check/src/main.nr @@ -2,8 +2,8 @@ use dep::std; fn main(x: Field, y: Field, salt: Field, out_x: Field, out_y: Field ) { let res = std::hash::pedersen([x, y]); - constrain res[0] == out_x; - constrain res[1] == out_y; + assert(res[0] == out_x); + assert(res[1] == out_y); let raw_data = [x,y]; let mut state = 0; @@ -12,6 +12,6 @@ fn main(x: Field, y: Field, salt: Field, out_x: Field, out_y: Field ) { } state += salt; let hash = std::hash::pedersen([state]); - constrain std::hash::pedersen([43])[0] == hash[0]; + assert(std::hash::pedersen([43])[0] == hash[0]); } diff --git a/crates/nargo_cli/tests/test_data/poseidon_bn254_hash/src/main.nr b/crates/nargo_cli/tests/test_data/poseidon_bn254_hash/src/main.nr index f2f1af7ab7..37621c732a 100644 --- a/crates/nargo_cli/tests/test_data/poseidon_bn254_hash/src/main.nr +++ b/crates/nargo_cli/tests/test_data/poseidon_bn254_hash/src/main.nr @@ -3,8 +3,8 @@ use dep::std::hash::poseidon; fn main(x1: [Field; 2], y1: pub Field, x2: [Field; 4], y2: pub Field) { let hash1 = poseidon::bn254::hash_2(x1); - constrain hash1 == y1; + assert(hash1 == y1); let hash2 = poseidon::bn254::hash_4(x2); - constrain hash2 == y2; + assert(hash2 == y2); } diff --git a/crates/nargo_cli/tests/test_data/poseidonsponge_x5_254/src/main.nr b/crates/nargo_cli/tests/test_data/poseidonsponge_x5_254/src/main.nr index f5135897f1..3addc1cec9 100644 --- a/crates/nargo_cli/tests/test_data/poseidonsponge_x5_254/src/main.nr +++ 
b/crates/nargo_cli/tests/test_data/poseidonsponge_x5_254/src/main.nr @@ -5,5 +5,5 @@ fn main(x: [Field; 7]) // Test optimised sponge let result = poseidon::bn254::sponge(x); - constrain result == 0x080ae1669d62f0197190573d4a325bfb8d8fc201ce3127cbac0c47a7ac81ac48; + assert(result == 0x080ae1669d62f0197190573d4a325bfb8d8fc201ce3127cbac0c47a7ac81ac48); } diff --git a/crates/nargo_cli/tests/test_data/pred_eq/src/main.nr b/crates/nargo_cli/tests/test_data/pred_eq/src/main.nr index c9c43b56c0..c7986cb7af 100644 --- a/crates/nargo_cli/tests/test_data/pred_eq/src/main.nr +++ b/crates/nargo_cli/tests/test_data/pred_eq/src/main.nr @@ -2,5 +2,5 @@ use dep::std; fn main(x: Field, y: Field) { let p = x == y; - constrain p == true; + assert(p == true); } diff --git a/crates/nargo_cli/tests/test_data/regression/src/main.nr b/crates/nargo_cli/tests/test_data/regression/src/main.nr index 2fcf41c8d7..06e35827d1 100644 --- a/crates/nargo_cli/tests/test_data/regression/src/main.nr +++ b/crates/nargo_cli/tests/test_data/regression/src/main.nr @@ -2,8 +2,8 @@ global NIBBLE_LENGTH: comptime Field = 16; fn compact_decode(input: [u8; N], length: Field) -> ([u4; NIBBLE_LENGTH], Field) { - constrain 2*input.len() as u64 <= NIBBLE_LENGTH as u64; - constrain length as u64 <= input.len() as u64; + assert(2*input.len() as u64 <= NIBBLE_LENGTH as u64); + assert(length as u64 <= input.len() as u64); let mut nibble = [0 as u4; NIBBLE_LENGTH]; @@ -43,7 +43,7 @@ fn compact_decode(input: [u8; N], length: Field) -> ([u4; NIBBLE_LENGTH], Fie fn enc(value: [u8; N], value_length: Field) -> ([u8; 32], Field) { - constrain value.len() as u8 >= value_length as u8; + assert(value.len() as u8 >= value_length as u8); let mut out_value = [0; 32]; if value_length == 0 { @@ -75,8 +75,8 @@ fn main(x: [u8; 5], z: Field) { //Issue 1144 let (nib, len) = compact_decode(x,z); - constrain len == 5; - constrain [nib[0], nib[1], nib[2], nib[3], nib[4]] == [15, 1, 12, 11, 8]; + assert(len == 5); + assert([nib[0], nib[1], 
nib[2], nib[3], nib[4]] == [15, 1, 12, 11, 8]); } @@ -96,6 +96,6 @@ fn enc_test() let enc_val1 = enc(val1,val1_length); - constrain enc_val1.0 == [0x94,0xb8,0x8f,0x61,0xe6,0xfb,0xda,0x83,0xfb,0xff,0xfa,0xbe,0x36,0x41,0x12,0x13,0x74,0x80,0x39,0x80,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]; - constrain enc_val1.1 == 21; + assert(enc_val1.0 == [0x94,0xb8,0x8f,0x61,0xe6,0xfb,0xda,0x83,0xfb,0xff,0xfa,0xbe,0x36,0x41,0x12,0x13,0x74,0x80,0x39,0x80,0x18,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00]); + assert(enc_val1.1 == 21); } \ No newline at end of file diff --git a/crates/nargo_cli/tests/test_data/scalar_mul/src/main.nr b/crates/nargo_cli/tests/test_data/scalar_mul/src/main.nr index 72e022edc8..d9d267f1dc 100644 --- a/crates/nargo_cli/tests/test_data/scalar_mul/src/main.nr +++ b/crates/nargo_cli/tests/test_data/scalar_mul/src/main.nr @@ -17,6 +17,6 @@ fn main( pub_y = b_pub_y; } let res = std::scalar_mul::fixed_base(priv_key); - constrain res[0] == pub_x; - constrain res[1] == pub_y; + assert(res[0] == pub_x); + assert(res[1] == pub_y); } diff --git a/crates/nargo_cli/tests/test_data/schnorr/src/main.nr b/crates/nargo_cli/tests/test_data/schnorr/src/main.nr index 39676ca738..ec4f819858 100644 --- a/crates/nargo_cli/tests/test_data/schnorr/src/main.nr +++ b/crates/nargo_cli/tests/test_data/schnorr/src/main.nr @@ -6,5 +6,5 @@ fn main(message: [u8; 10], pub_key_x: Field, pub_key_y: Field, signature: [u8; 6 // Is there ever a situation where someone would want // to ensure that a signature was invalid? 
let x = std::schnorr::verify_signature(pub_key_x,pub_key_y,signature, message); - constrain x == 1; + assert(x == 1); } diff --git a/crates/nargo_cli/tests/test_data/sha256/src/main.nr b/crates/nargo_cli/tests/test_data/sha256/src/main.nr index bf2249c4fa..fd5340e238 100644 --- a/crates/nargo_cli/tests/test_data/sha256/src/main.nr +++ b/crates/nargo_cli/tests/test_data/sha256/src/main.nr @@ -15,5 +15,5 @@ fn main(x: Field, result: [u8; 32]) { // We use the `as` keyword here to denote the fact that we want to take just the first byte from the x Field // The padding is taken care of by the program let digest = std::hash::sha256([x as u8]); - constrain digest == result; + assert(digest == result); } diff --git a/crates/nargo_cli/tests/test_data/sha2_blocks/src/main.nr b/crates/nargo_cli/tests/test_data/sha2_blocks/src/main.nr index 7e687cd179..fcdcdb8684 100644 --- a/crates/nargo_cli/tests/test_data/sha2_blocks/src/main.nr +++ b/crates/nargo_cli/tests/test_data/sha2_blocks/src/main.nr @@ -5,18 +5,18 @@ fn main(x: [u8; 3], result256: [u8; 32], result512: [u8; 64]) { // One-block tests. let mut digest256 = std::sha256::digest(x); - constrain digest256 == result256; + assert(digest256 == result256); let mut digest512 = std::sha512::digest(x); - constrain digest512 == result512; + assert(digest512 == result512); // Two-block SHA256 test. 
Taken from https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/SHA256.pdf let y: [u8; 56] = [97,98,99,100,98,99,100,101,99,100,101,102,100,101,102,103,101,102,103,104,102,103,104,105,103,104,105,106,104,105,106,107,105,106,107,108,106,107,108,109,107,108,109,110,108,109,110,111,109,110,111,112,110,111,112,113]; // "abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq" digest256 = std::sha256::digest(y); - constrain digest256 == [36,141,106,97,210,6,56,184,229,192,38,147,12,62,96,57,163,60,228,89,100,255,33,103,246,236,237,212,25,219,6,193]; + assert(digest256 == [36,141,106,97,210,6,56,184,229,192,38,147,12,62,96,57,163,60,228,89,100,255,33,103,246,236,237,212,25,219,6,193]); // Two-block SHA256 test. Taken from https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Standards-and-Guidelines/documents/examples/SHA512.pdf let z: [u8; 112] = [97,98,99,100,101,102,103,104,98,99,100,101,102,103,104,105,99,100,101,102,103,104,105,106,100,101,102,103,104,105,106,107,101,102,103,104,105,106,107,108,102,103,104,105,106,107,108,109,103,104,105,106,107,108,109,110,104,105,106,107,108,109,110,111,105,106,107,108,109,110,111,112,106,107,108,109,110,111,112,113,107,108,109,110,111,112,113,114,108,109,110,111,112,113,114,115,109,110,111,112,113,114,115,116,110,111,112,113,114,115,116,117]; // "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu" digest512 = std::sha512::digest(z); - constrain digest512 == [142,149,155,117,218,227,19,218,140,244,247,40,20,252,20,63,143,119,121,198,235,159,127,161,114,153,174,173,182,136,144,24,80,29,40,158,73,0,247,228,51,27,153,222,196,181,67,58,199,211,41,238,182,221,38,84,94,150,229,91,135,75,233,9]; + assert(digest512 == 
[142,149,155,117,218,227,19,218,140,244,247,40,20,252,20,63,143,119,121,198,235,159,127,161,114,153,174,173,182,136,144,24,80,29,40,158,73,0,247,228,51,27,153,222,196,181,67,58,199,211,41,238,182,221,38,84,94,150,229,91,135,75,233,9]); } diff --git a/crates/nargo_cli/tests/test_data/sha2_byte/src/main.nr b/crates/nargo_cli/tests/test_data/sha2_byte/src/main.nr index 3458862b64..a7cc9daebb 100644 --- a/crates/nargo_cli/tests/test_data/sha2_byte/src/main.nr +++ b/crates/nargo_cli/tests/test_data/sha2_byte/src/main.nr @@ -4,8 +4,8 @@ use dep::std; fn main(x: Field, result256: [u8; 32], result512: [u8; 64]) { let digest256 = std::sha256::digest([x as u8]); - constrain digest256 == result256; + assert(digest256 == result256); let digest512 = std::sha512::digest([x as u8]); - constrain digest512 == result512; + assert(digest512 == result512); } diff --git a/crates/nargo_cli/tests/test_data/simple_shield/src/main.nr b/crates/nargo_cli/tests/test_data/simple_shield/src/main.nr index 20d41481c7..65d0ec954a 100644 --- a/crates/nargo_cli/tests/test_data/simple_shield/src/main.nr +++ b/crates/nargo_cli/tests/test_data/simple_shield/src/main.nr @@ -30,7 +30,7 @@ fn main( // Check that the input note nullifier is in the root let is_member = std::merkle::check_membership(note_root, note_commitment[0], index, note_hash_path); - constrain is_member == 1; + assert(is_member == 1); [nullifier[0], receiver_note_commitment[0]] } diff --git a/crates/nargo_cli/tests/test_data/strings/src/main.nr b/crates/nargo_cli/tests/test_data/strings/src/main.nr index ca0d1691f8..bee2370201 100644 --- a/crates/nargo_cli/tests/test_data/strings/src/main.nr +++ b/crates/nargo_cli/tests/test_data/strings/src/main.nr @@ -3,7 +3,7 @@ use dep::std; fn main(message : pub str<11>, y : Field, hex_as_string : str<4>, hex_as_field : Field) { let mut bad_message = "hello world"; - constrain message == "hello world"; + assert(message == "hello world"); bad_message = "helld world"; let x = 10; let z = x * 5; @@ 
-13,18 +13,18 @@ fn main(message : pub str<11>, y : Field, hex_as_string : str<4>, hex_as_field : std::println(x); let array = [1, 2, 3, 5, 8]; - constrain y == 5; // Change to y != 5 to see how the later print statements are not called + assert(y == 5); // Change to y != 5 to see how the later print statements are not called std::println(array); std::println(bad_message); - constrain message != bad_message; + assert(message != bad_message); let hash = std::hash::pedersen([x]); std::println(hash); - constrain hex_as_string == "0x41"; - // constrain hex_as_string != 0x41; This will fail with a type mismatch between str[4] and Field - constrain hex_as_field == 0x41; + assert(hex_as_string == "0x41"); + // assert(hex_as_string != 0x41); This will fail with a type mismatch between str[4] and Field + assert(hex_as_field == 0x41); } #[test] diff --git a/crates/nargo_cli/tests/test_data/struct/src/main.nr b/crates/nargo_cli/tests/test_data/struct/src/main.nr index b426339c1e..6d61393920 100644 --- a/crates/nargo_cli/tests/test_data/struct/src/main.nr +++ b/crates/nargo_cli/tests/test_data/struct/src/main.nr @@ -58,20 +58,20 @@ fn main(x: Field, y: Field) { let first = Foo::default(x,y); let p = Pair { first, second: 1 }; - constrain p.bar() == x; - constrain p.second == y; - constrain p.first.array[0] != p.first.array[1]; + assert(p.bar() == x); + assert(p.second == y); + assert(p.first.array[0] != p.first.array[1]); // Nested structs let (struct_from_tuple, a_bool) = test_struct_in_tuple(true,x,y); - constrain struct_from_tuple.my_bool == true; - constrain a_bool == true; - constrain struct_from_tuple.my_int == 5; - constrain struct_from_tuple.my_nest.a == 0; + assert(struct_from_tuple.my_bool == true); + assert(a_bool == true); + assert(struct_from_tuple.my_int == 5); + assert(struct_from_tuple.my_nest.a == 0); // Regression test for issue #670 let Animal { legs, eyes } = get_dog(); let six = legs + eyes as Field; - constrain six == 6; + assert(six == 6); } diff --git 
a/crates/nargo_cli/tests/test_data/struct_inputs/src/main.nr b/crates/nargo_cli/tests/test_data/struct_inputs/src/main.nr index e022f26947..fe77ed6eee 100644 --- a/crates/nargo_cli/tests/test_data/struct_inputs/src/main.nr +++ b/crates/nargo_cli/tests/test_data/struct_inputs/src/main.nr @@ -14,23 +14,23 @@ fn main(x : Field, y : pub myStruct, z: pub foo::bar::barStruct, a: pub foo::foo check_inner_struct(a, z); for i in 0 .. struct_from_bar.array.len() { - constrain struct_from_bar.array[i] == z.array[i]; + assert(struct_from_bar.array[i] == z.array[i]); } - constrain z.val == struct_from_bar.val; + assert(z.val == struct_from_bar.val); - constrain (struct_from_bar.val * x) == x; + assert((struct_from_bar.val * x) == x); - constrain x != y.bar; + assert(x != y.bar); - constrain y.message == "hello"; - constrain a.bar_struct.message == struct_from_bar.message; + assert(y.message == "hello"); + assert(a.bar_struct.message == struct_from_bar.message); a.bar_struct.array[1] } fn check_inner_struct(a: foo::fooStruct, z: foo::bar::barStruct) { - constrain a.bar_struct.val == z.val; + assert(a.bar_struct.val == z.val); for i in 0.. 
a.bar_struct.array.len() { - constrain a.bar_struct.array[i] == z.array[i]; + assert(a.bar_struct.array[i] == z.array[i]); } } diff --git a/crates/nargo_cli/tests/test_data/submodules/src/main.nr b/crates/nargo_cli/tests/test_data/submodules/src/main.nr index 90c778db11..9bfe382663 100644 --- a/crates/nargo_cli/tests/test_data/submodules/src/main.nr +++ b/crates/nargo_cli/tests/test_data/submodules/src/main.nr @@ -9,7 +9,7 @@ mod mysubmodule { use dep::std; fn my_bool_or(x: u1, y: u1) { - constrain x | y == 1; + assert(x | y == 1); } fn my_helper() {} diff --git a/crates/nargo_cli/tests/test_data/to_be_bytes/src/main.nr b/crates/nargo_cli/tests/test_data/to_be_bytes/src/main.nr index 1253656217..f5831e8c52 100644 --- a/crates/nargo_cli/tests/test_data/to_be_bytes/src/main.nr +++ b/crates/nargo_cli/tests/test_data/to_be_bytes/src/main.nr @@ -7,8 +7,8 @@ fn main(x : Field) -> pub [u8; 31] { for i in 0..31 { bytes[i] = byte_array[i]; } - constrain bytes[30] == 60; - constrain bytes[29] == 33; - constrain bytes[28] == 31; + assert(bytes[30] == 60); + assert(bytes[29] == 33); + assert(bytes[28] == 31); bytes } diff --git a/crates/nargo_cli/tests/test_data/to_bytes_integration/src/main.nr b/crates/nargo_cli/tests/test_data/to_bytes_integration/src/main.nr index 6f57b407da..1932b7556a 100644 --- a/crates/nargo_cli/tests/test_data/to_bytes_integration/src/main.nr +++ b/crates/nargo_cli/tests/test_data/to_bytes_integration/src/main.nr @@ -7,8 +7,8 @@ fn main(x : Field) { // The result of this byte array will be little-endian let le_byte_array = x.to_le_bytes(31); - constrain le_byte_array[0] == 60; - constrain le_byte_array[0] == be_byte_array[30]; - constrain le_byte_array[1] == be_byte_array[29]; - constrain le_byte_array[2] == be_byte_array[28]; + assert(le_byte_array[0] == 60); + assert(le_byte_array[0] == be_byte_array[30]); + assert(le_byte_array[1] == be_byte_array[29]); + assert(le_byte_array[2] == be_byte_array[28]); } \ No newline at end of file diff --git 
a/crates/nargo_cli/tests/test_data/tuples/src/main.nr b/crates/nargo_cli/tests/test_data/tuples/src/main.nr index ce25b9171c..b1d310b141 100644 --- a/crates/nargo_cli/tests/test_data/tuples/src/main.nr +++ b/crates/nargo_cli/tests/test_data/tuples/src/main.nr @@ -2,18 +2,28 @@ use dep::std; fn main(x: Field, y: Field) { let pair = (x, y); - constrain pair.0 == 1; - constrain pair.1 == 0; + assert(pair.0 == 1); + assert(pair.1 == 0); let (a, b) = if true { (0, 1) } else { (2, 3) }; - constrain a == 0; - constrain b == 1; + assert(a == 0); + assert(b == 1); - let (u,v) = if x as u32 <1 { - (x,x+1) + let (u,v) = if x as u32 < 1 { + (x, x + 1) } else { - (x+1,x) + (x + 1, x) }; - constrain u==x+1; - constrain v==x; + assert(u == x+1); + assert(v == x); + + // Test mutating tuples + let mut mutable = ((0, 0), 1, 2, 3); + mutable.0 = pair; + mutable.2 = 7; + assert(mutable.0.0 == 1); + assert(mutable.0.1 == 0); + assert(mutable.1 == 1); + assert(mutable.2 == 7); + assert(mutable.3 == 3); } diff --git a/crates/nargo_cli/tests/test_data/xor/src/main.nr b/crates/nargo_cli/tests/test_data/xor/src/main.nr index cc7caf17fa..e893c938fc 100644 --- a/crates/nargo_cli/tests/test_data/xor/src/main.nr +++ b/crates/nargo_cli/tests/test_data/xor/src/main.nr @@ -1,5 +1,5 @@ fn main(x : u32, y : pub u32) { let m = x ^ y; - constrain m != 10; + assert(m != 10); } \ No newline at end of file diff --git a/crates/noirc_abi/src/input_parser/toml.rs b/crates/noirc_abi/src/input_parser/toml.rs index 180cde4bf7..a737f78403 100644 --- a/crates/noirc_abi/src/input_parser/toml.rs +++ b/crates/noirc_abi/src/input_parser/toml.rs @@ -115,11 +115,7 @@ impl InputValue { InputValue::Field(new_value) } - TomlTypes::Bool(boolean) => { - let new_value = if boolean { FieldElement::one() } else { FieldElement::zero() }; - - InputValue::Field(new_value) - } + TomlTypes::Bool(boolean) => InputValue::Field(boolean.into()), TomlTypes::ArrayNum(arr_num) => { let array_elements = vecmap(arr_num, |elem_num| 
FieldElement::from(i128::from(elem_num))); @@ -132,13 +128,7 @@ impl InputValue { InputValue::Vec(array_elements) } TomlTypes::ArrayBool(arr_bool) => { - let array_elements = vecmap(arr_bool, |elem_bool| { - if elem_bool { - FieldElement::one() - } else { - FieldElement::zero() - } - }); + let array_elements = vecmap(arr_bool, FieldElement::from); InputValue::Vec(array_elements) } diff --git a/crates/noirc_driver/src/contract.rs b/crates/noirc_driver/src/contract.rs index a5600c3d21..c0a5453494 100644 --- a/crates/noirc_driver/src/contract.rs +++ b/crates/noirc_driver/src/contract.rs @@ -1,3 +1,4 @@ +use crate::program::{deserialize_circuit, serialize_circuit}; use acvm::acir::circuit::Circuit; use noirc_abi::Abi; use serde::{Deserialize, Serialize}; @@ -19,6 +20,7 @@ pub enum ContractFunctionType { Unconstrained, } +#[derive(Serialize, Deserialize)] pub struct CompiledContract { /// The name of the contract. pub name: String, @@ -33,7 +35,7 @@ pub struct CompiledContract { /// A contract function unlike a regular Noir program /// however can have additional properties. /// One of these being a function type. 
-#[derive(Debug)] +#[derive(Debug, Serialize, Deserialize)] pub struct ContractFunction { pub name: String, @@ -41,6 +43,7 @@ pub struct ContractFunction { pub abi: Abi, + #[serde(serialize_with = "serialize_circuit", deserialize_with = "deserialize_circuit")] pub bytecode: Circuit, } diff --git a/crates/noirc_driver/src/lib.rs b/crates/noirc_driver/src/lib.rs index 2fcef5bc57..a2fbed2188 100644 --- a/crates/noirc_driver/src/lib.rs +++ b/crates/noirc_driver/src/lib.rs @@ -10,7 +10,7 @@ use fm::FileType; use iter_extended::try_vecmap; use noirc_abi::FunctionSignature; use noirc_errors::{reporter, ReportedError}; -use noirc_evaluator::create_circuit; +use noirc_evaluator::{create_circuit, ssa_refactor::experimental_create_circuit}; use noirc_frontend::graph::{CrateId, CrateName, CrateType, LOCAL_CRATE}; use noirc_frontend::hir::def_map::{Contract, CrateDefMap}; use noirc_frontend::hir::Context; @@ -43,11 +43,15 @@ pub struct CompileOptions { /// Display output of `println` statements #[arg(long)] pub show_output: bool, + + /// Compile and optimize using the new experimental SSA pass + #[arg(long)] + pub experimental_ssa: bool, } impl Default for CompileOptions { fn default() -> Self { - Self { show_ssa: false, allow_warnings: false, show_output: true } + Self { show_ssa: false, allow_warnings: false, show_output: true, experimental_ssa: false } } } @@ -254,13 +258,25 @@ impl Driver { let np_language = self.language.clone(); let is_opcode_supported = acvm::default_is_opcode_supported(np_language.clone()); - match create_circuit( - program, - np_language, - is_opcode_supported, - options.show_ssa, - options.show_output, - ) { + let circuit_abi = if options.experimental_ssa { + experimental_create_circuit( + program, + np_language, + is_opcode_supported, + options.show_ssa, + options.show_output, + ) + } else { + create_circuit( + program, + np_language, + is_opcode_supported, + options.show_ssa, + options.show_output, + ) + }; + + match circuit_abi { Ok((circuit, abi)) 
=> Ok(CompiledProgram { circuit, abi }), Err(err) => { // The FileId here will be the file id of the file with the main file diff --git a/crates/noirc_evaluator/src/frontend.rs b/crates/noirc_evaluator/src/frontend.rs deleted file mode 100644 index 410f9f1a9b..0000000000 --- a/crates/noirc_evaluator/src/frontend.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod variable; diff --git a/crates/noirc_evaluator/src/frontend/variable.rs b/crates/noirc_evaluator/src/frontend/variable.rs deleted file mode 100644 index 449581cf93..0000000000 --- a/crates/noirc_evaluator/src/frontend/variable.rs +++ /dev/null @@ -1,23 +0,0 @@ -/// A variable in the SSA IR. -/// By definition, a variable can only be defined once. -/// -/// As in Cranelift, we also allow variable use before definition. -/// This will produce side-effects which will need to be handled -/// before sealing a block. -pub struct Variable(u32); - -impl From for Variable { - fn from(value: u32) -> Self { - Variable(value) - } -} -impl From for Variable { - fn from(value: u16) -> Self { - Variable(value as u32) - } -} -impl From for Variable { - fn from(value: u8) -> Self { - Variable(value as u32) - } -} diff --git a/crates/noirc_evaluator/src/lib.rs b/crates/noirc_evaluator/src/lib.rs index 438ada0167..4c70e15bcb 100644 --- a/crates/noirc_evaluator/src/lib.rs +++ b/crates/noirc_evaluator/src/lib.rs @@ -9,14 +9,11 @@ mod ssa; // SSA code to create the SSA based IR // for functions and execute different optimizations. pub mod ssa_refactor; -// Frontend helper module to translate a different AST -// into the SSA IR. -pub mod frontend; use acvm::{ acir::circuit::{opcodes::Opcode as AcirOpcode, Circuit, PublicInputs}, acir::native_types::{Expression, Witness}, - compiler::transformers::IsOpcodeSupported, + compiler::{optimizers::simplify::CircuitSimplifier, transformers::IsOpcodeSupported}, Language, }; use errors::{RuntimeError, RuntimeErrorKind}; @@ -86,6 +83,7 @@ pub fn create_circuit( opcodes, .. 
} = evaluator; + let simplifier = CircuitSimplifier::new(current_witness_index); let optimized_circuit = acvm::compiler::compile( Circuit { current_witness_index, @@ -95,6 +93,7 @@ pub fn create_circuit( }, np_language, is_opcode_supported, + &simplifier, ) .map_err(|_| RuntimeErrorKind::Spanless(String::from("produced an acvm compile error")))?; @@ -316,10 +315,6 @@ impl Evaluator { /// However, this intermediate representation is useful as it allows us to have /// intermediate Types which the core type system does not know about like Strings. fn parse_abi_alt(&mut self, ir_gen: &mut IrGenerator) { - // XXX: Currently, the syntax only supports public witnesses - // u8 and arrays are assumed to be private - // This is not a short-coming of the ABI, but of the grammar - // The new grammar has been conceived, and will be implemented. let main = ir_gen.program.main_mut(); let main_params = std::mem::take(&mut main.parameters); let abi_params = std::mem::take(&mut ir_gen.program.main_function_signature.0); diff --git a/crates/noirc_evaluator/src/ssa/acir_gen.rs b/crates/noirc_evaluator/src/ssa/acir_gen.rs index b964672e28..22b5390e2f 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen.rs @@ -46,7 +46,7 @@ impl Acir { //TODO we should rather follow the jumps current_block = block.left.map(|block_id| &ctx[block_id]); } - self.memory.acir_gen(evaluator); + self.memory.acir_gen(evaluator, ctx); Ok(()) } diff --git a/crates/noirc_evaluator/src/ssa/acir_gen/acir_mem.rs b/crates/noirc_evaluator/src/ssa/acir_gen/acir_mem.rs index 0c55f61ca2..ac3395d941 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen/acir_mem.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen/acir_mem.rs @@ -8,7 +8,10 @@ use crate::{ }; use acvm::{ acir::{ - circuit::{directives::Directive, opcodes::Opcode as AcirOpcode}, + circuit::{ + directives::Directive, + opcodes::{BlockId as AcirBlockId, MemOp, MemoryBlock, Opcode as AcirOpcode}, + }, 
native_types::{Expression, Witness}, }, FieldElement, @@ -22,15 +25,6 @@ use super::{ operations::{self}, }; -/// Represent a memory operation on the ArrayHeap, at the specified index -/// Operation is one for a store and 0 for a load -#[derive(Clone, Debug)] -pub(crate) struct MemOp { - operation: Expression, - value: Expression, - index: Expression, -} - type MemAddress = u32; enum ArrayType { @@ -137,7 +131,7 @@ impl ArrayHeap { outputs } - pub(crate) fn acir_gen(&self, evaluator: &mut Evaluator) { + pub(crate) fn acir_gen(&self, evaluator: &mut Evaluator, array_id: ArrayId, array_len: u32) { let (len, read_write) = match self.typ { ArrayType::Init(_, _) | ArrayType::WriteOnly => (0, true), ArrayType::ReadOnly(last) => (last.unwrap_or(self.trace.len()), false), @@ -147,6 +141,11 @@ impl ArrayHeap { if len == 0 { return; } + evaluator.opcodes.push(AcirOpcode::Block(MemoryBlock { + id: AcirBlockId(array_id.as_u32()), + len: array_len, + trace: self.trace.clone(), + })); let len_bits = AcirMem::bits(len); // permutations let mut in_counter = Vec::new(); @@ -318,9 +317,10 @@ impl AcirMem { let item = MemOp { operation: op, value, index }; self.array_heap_mut(*array_id).push(item); } - pub(crate) fn acir_gen(&self, evaluator: &mut Evaluator) { + pub(crate) fn acir_gen(&self, evaluator: &mut Evaluator, ctx: &SsaContext) { for mem in &self.virtual_memory { - mem.1.acir_gen(evaluator); + let array = &ctx.mem[*mem.0]; + mem.1.acir_gen(evaluator, array.id, array.len); } } } diff --git a/crates/noirc_evaluator/src/ssa/acir_gen/constraints.rs b/crates/noirc_evaluator/src/ssa/acir_gen/constraints.rs index 11371dc54a..2d0378c339 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen/constraints.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen/constraints.rs @@ -6,7 +6,7 @@ use crate::{ use acvm::{ acir::{ circuit::{ - directives::Directive, + directives::{Directive, QuotientDirective}, opcodes::{BlackBoxFuncCall, FunctionInput, Opcode as AcirOpcode}, }, 
native_types::{Expression, Witness}, @@ -265,13 +265,13 @@ pub(crate) fn range_constraint( let b_witness = evaluator.add_witness_to_cs(); let exp_big = BigUint::from(2_u128).pow(num_bits - 1); let exp = FieldElement::from_be_bytes_reduce(&exp_big.to_bytes_be()); - evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient { + evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient(QuotientDirective { a: Expression::from(witness), b: Expression::from_field(exp), q: b_witness, r: r_witness, predicate: None, - })); + }))); try_range_constraint(r_witness, num_bits - 1, evaluator); try_range_constraint(b_witness, 1, evaluator); @@ -283,10 +283,8 @@ pub(crate) fn range_constraint( let my_constraint = add(&res, -FieldElement::one(), &witness.into()); evaluator.push_opcode(AcirOpcode::Arithmetic(my_constraint)); } else { - let gate = AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall { - name: acvm::acir::BlackBoxFunc::RANGE, - inputs: vec![FunctionInput { witness, num_bits }], - outputs: vec![], + let gate = AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall::RANGE { + input: FunctionInput { witness, num_bits }, }); evaluator.push_opcode(gate); } @@ -311,13 +309,13 @@ pub(crate) fn bound_check( //2^s+a-b=q*2^s +r let expr = add(&r_witness.into(), two_s, &q_witness.into()); evaluator.push_opcode(AcirOpcode::Arithmetic(subtract(&sub, FieldElement::one(), &expr))); - evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient { + evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient(QuotientDirective { a: sub, b: Expression::from_field(two_s), q: q_witness, r: r_witness, predicate: None, - })); + }))); try_range_constraint(r_witness, max_bits, evaluator); evaluator.push_opcode(AcirOpcode::Arithmetic(boolean(q_witness))); q_witness @@ -504,13 +502,13 @@ pub(crate) fn evaluate_truncate( //1. 
Generate witnesses a,b,c let b_witness = evaluator.add_witness_to_cs(); let c_witness = evaluator.add_witness_to_cs(); - evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient { + evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient(QuotientDirective { a: lhs.clone(), b: Expression::from_field(exp), q: c_witness, r: b_witness, predicate: None, - })); + }))); try_range_constraint(b_witness, rhs, evaluator); //TODO propagate the error using ? try_range_constraint(c_witness, max_bits - rhs, evaluator); @@ -537,13 +535,13 @@ pub(crate) fn evaluate_udiv( let q_witness = evaluator.add_witness_to_cs(); let r_witness = evaluator.add_witness_to_cs(); let pa = mul_with_witness(evaluator, lhs, predicate); - evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient { + evaluator.push_opcode(AcirOpcode::Directive(Directive::Quotient(QuotientDirective { a: lhs.clone(), b: rhs.clone(), q: q_witness, r: r_witness, predicate: Some(predicate.clone()), - })); + }))); //r unreachable!("ICE: ShiftLeft and ShiftRight are replaced by multiplications and divisions in optimization pass."), + BinaryOp::Shl | BinaryOp::Shr(_) => todo!("ShiftLeft and ShiftRight operations with shifts which are only known at runtime are not yet implemented."), i @ BinaryOp::Assign => unreachable!("Invalid Instruction: {:?}", i), }; Some(binary_output) diff --git a/crates/noirc_evaluator/src/ssa/acir_gen/operations/bitwise.rs b/crates/noirc_evaluator/src/ssa/acir_gen/operations/bitwise.rs index f8ca271835..6801cdc124 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen/operations/bitwise.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen/operations/bitwise.rs @@ -40,8 +40,8 @@ pub(super) fn simplify_bitwise( let max = FieldElement::from((1_u128 << bit_size) - 1); let (field, var) = match (lhs.to_const(), rhs.to_const()) { - (Some(l_c), None) => (l_c == FieldElement::zero() || l_c == max).then_some((l_c, rhs))?, - (None, Some(r_c)) => (r_c == FieldElement::zero() || r_c == 
max).then_some((r_c, lhs))?, + (Some(l_c), None) => (l_c.is_zero() || l_c == max).then_some((l_c, rhs))?, + (None, Some(r_c)) => (r_c.is_zero() || r_c == max).then_some((r_c, lhs))?, _ => return None, }; @@ -121,9 +121,17 @@ pub(super) fn evaluate_bitwise( let bit_size = if bit_size % 2 == 1 { bit_size + 1 } else { bit_size }; assert!(bit_size < FieldElement::max_num_bits() - 1); let max = FieldElement::from((1_u128 << bit_size) - 1); - let bit_gate = match opcode { - BinaryOp::And => acvm::acir::BlackBoxFunc::AND, - BinaryOp::Xor => acvm::acir::BlackBoxFunc::XOR, + let gate = match opcode { + BinaryOp::And => AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { + lhs: FunctionInput { witness: a_witness, num_bits: bit_size }, + rhs: FunctionInput { witness: b_witness, num_bits: bit_size }, + output: result, + }), + BinaryOp::Xor => AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall::XOR { + lhs: FunctionInput { witness: a_witness, num_bits: bit_size }, + rhs: FunctionInput { witness: b_witness, num_bits: bit_size }, + output: result, + }), BinaryOp::Or => { a_witness = evaluator.create_intermediate_variable(constraints::subtract( &Expression::from_field(max), @@ -136,19 +144,15 @@ pub(super) fn evaluate_bitwise( &Expression::from(b_witness), )); // We do not have an OR gate yet, so we use the AND gate - acvm::acir::BlackBoxFunc::AND + AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall::AND { + lhs: FunctionInput { witness: a_witness, num_bits: bit_size }, + rhs: FunctionInput { witness: b_witness, num_bits: bit_size }, + output: result, + }) } _ => unreachable!("ICE: expected a bitwise operation"), }; - let gate = AcirOpcode::BlackBoxFuncCall(BlackBoxFuncCall { - name: bit_gate, - inputs: vec![ - FunctionInput { witness: a_witness, num_bits: bit_size }, - FunctionInput { witness: b_witness, num_bits: bit_size }, - ], - outputs: vec![result], - }); evaluator.opcodes.push(gate); if opcode == BinaryOp::Or { diff --git 
a/crates/noirc_evaluator/src/ssa/acir_gen/operations/intrinsics.rs b/crates/noirc_evaluator/src/ssa/acir_gen/operations/intrinsics.rs index ea7d3d9c6c..01d5fecc89 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen/operations/intrinsics.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen/operations/intrinsics.rs @@ -7,7 +7,7 @@ use crate::{ }, builtin, context::SsaContext, - mem::{ArrayId, Memory}, + mem::Memory, node::{self, Instruction, Node, NodeId, ObjectType}, }, Evaluator, @@ -19,6 +19,7 @@ use acvm::{ opcodes::{BlackBoxFuncCall, FunctionInput, Opcode as AcirOpcode}, }, native_types::{Expression, Witness}, + BlackBoxFunc, }, FieldElement, }; @@ -79,15 +80,70 @@ pub(crate) fn evaluate( } } Opcode::LowLevel(op) => { - let inputs = prepare_inputs(acir_gen, args, ctx, evaluator); - let output_count = op.definition().output_size.0 as u32; - outputs = - prepare_outputs(&mut acir_gen.memory, instruction_id, output_count, ctx, evaluator); - - let func_call = BlackBoxFuncCall { - name: op, - inputs, //witness + bit size - outputs: outputs.clone(), //witness + outputs = match op { + BlackBoxFunc::SHA256 | BlackBoxFunc::Blake2s => { + prepare_outputs(&mut acir_gen.memory, instruction_id, 32, ctx, evaluator) + } + BlackBoxFunc::Keccak256 => { + prepare_outputs(&mut acir_gen.memory, instruction_id, 32, ctx, evaluator) + } + BlackBoxFunc::Pedersen | BlackBoxFunc::FixedBaseScalarMul => { + prepare_outputs(&mut acir_gen.memory, instruction_id, 2, ctx, evaluator) + } + BlackBoxFunc::SchnorrVerify + | BlackBoxFunc::EcdsaSecp256k1 + | BlackBoxFunc::ComputeMerkleRoot + | BlackBoxFunc::HashToField128Security => { + prepare_outputs(&mut acir_gen.memory, instruction_id, 1, ctx, evaluator) + } + _ => panic!("Unsupported low level function {:?}", op), + }; + let func_call = match op { + BlackBoxFunc::SHA256 => BlackBoxFuncCall::SHA256 { + inputs: resolve_array(&args[0], acir_gen, ctx, evaluator), + outputs: outputs.to_vec(), + }, + BlackBoxFunc::Blake2s => BlackBoxFuncCall::Blake2s { + 
inputs: resolve_array(&args[0], acir_gen, ctx, evaluator), + outputs: outputs.to_vec(), + }, + BlackBoxFunc::Keccak256 => BlackBoxFuncCall::Keccak256 { + inputs: resolve_array(&args[0], acir_gen, ctx, evaluator), + outputs: outputs.to_vec(), + }, + BlackBoxFunc::Pedersen => BlackBoxFuncCall::Pedersen { + inputs: resolve_array(&args[0], acir_gen, ctx, evaluator), + outputs: outputs.to_vec(), + }, + BlackBoxFunc::FixedBaseScalarMul => BlackBoxFuncCall::FixedBaseScalarMul { + input: resolve_variable(&args[0], acir_gen, ctx, evaluator).unwrap(), + outputs: outputs.to_vec(), + }, + BlackBoxFunc::SchnorrVerify => BlackBoxFuncCall::SchnorrVerify { + public_key_x: resolve_variable(&args[0], acir_gen, ctx, evaluator).unwrap(), + public_key_y: resolve_variable(&args[1], acir_gen, ctx, evaluator).unwrap(), + signature: resolve_array(&args[2], acir_gen, ctx, evaluator), + message: resolve_array(&args[3], acir_gen, ctx, evaluator), + output: outputs[0], + }, + BlackBoxFunc::EcdsaSecp256k1 => BlackBoxFuncCall::EcdsaSecp256k1 { + public_key_x: resolve_array(&args[0], acir_gen, ctx, evaluator), + public_key_y: resolve_array(&args[1], acir_gen, ctx, evaluator), + signature: resolve_array(&args[2], acir_gen, ctx, evaluator), + hashed_message: resolve_array(&args[3], acir_gen, ctx, evaluator), + output: outputs[0], + }, + BlackBoxFunc::ComputeMerkleRoot => BlackBoxFuncCall::ComputeMerkleRoot { + leaf: resolve_variable(&args[0], acir_gen, ctx, evaluator).unwrap(), + index: resolve_variable(&args[1], acir_gen, ctx, evaluator).unwrap(), + hash_path: resolve_array(&args[2], acir_gen, ctx, evaluator), + output: outputs[0], + }, + BlackBoxFunc::HashToField128Security => BlackBoxFuncCall::HashToField128Security { + inputs: resolve_array(&args[0], acir_gen, ctx, evaluator), + output: outputs[0], + }, + _ => panic!("Unsupported low level function {:?}", op), }; evaluator.opcodes.push(AcirOpcode::BlackBoxFuncCall(func_call)); } @@ -139,64 +195,45 @@ pub(crate) fn evaluate( (outputs.len() == 
1).then(|| InternalVar::from(outputs[0])) } -// Transform the arguments of intrinsic functions into witnesses -fn prepare_inputs( - acir_gen: &mut Acir, - arguments: &[NodeId], - cfg: &SsaContext, - evaluator: &mut Evaluator, -) -> Vec { - let mut inputs: Vec = Vec::new(); - - for argument in arguments { - inputs.extend(resolve_node_id(argument, acir_gen, cfg, evaluator)); - } - inputs -} - -fn resolve_node_id( +fn resolve_variable( node_id: &NodeId, acir_gen: &mut Acir, cfg: &SsaContext, evaluator: &mut Evaluator, -) -> Vec { +) -> Option { let node_object = cfg.try_get_node(*node_id).expect("could not find node for {node_id}"); match node_object { node::NodeObject::Variable(v) => { - let node_obj_type = node_object.get_type(); - match node_obj_type { - // If the `Variable` represents a Pointer - // Then we know that it is an `Array` - node::ObjectType::ArrayPointer(a) => resolve_array(a, acir_gen, cfg, evaluator), - // If it is not a pointer, we attempt to fetch the witness associated with it - _ => match v.witness { - Some(w) => { - vec![FunctionInput { witness: w, num_bits: v.size_in_bits() }] - } - None => todo!("generate a witness"), - }, - } + Some(FunctionInput { witness: v.witness?, num_bits: v.size_in_bits() }) } _ => { // Upon the case that the `NodeObject` is not a `Variable`, // we attempt to fetch an associated `InternalVar`. // Otherwise, this is a internal compiler error. 
let internal_var = acir_gen.var_cache.get(node_id).expect("invalid input").clone(); - let witness = acir_gen - .var_cache - .get_or_compute_witness(internal_var, evaluator) - .expect("unexpected constant expression"); - vec![FunctionInput { witness, num_bits: node_object.size_in_bits() }] + let witness = acir_gen.var_cache.get_or_compute_witness(internal_var, evaluator)?; + Some(FunctionInput { witness, num_bits: node_object.size_in_bits() }) } } } fn resolve_array( - array_id: ArrayId, + node_id: &NodeId, acir_gen: &mut Acir, cfg: &SsaContext, evaluator: &mut Evaluator, ) -> Vec { + let node_object = cfg.try_get_node(*node_id).expect("could not find node for {node_id}"); + let array_id = match node_object { + node::NodeObject::Variable(_) => { + let node_obj_type = node_object.get_type(); + match node_obj_type { + node::ObjectType::ArrayPointer(a) => a, + _ => unreachable!(), + } + } + _ => todo!("generate a witness"), + }; let mut inputs = Vec::new(); let array = &cfg.mem[array_id]; diff --git a/crates/noirc_evaluator/src/ssa/acir_gen/operations/sort.rs b/crates/noirc_evaluator/src/ssa/acir_gen/operations/sort.rs index 903c900d03..f7f0586cdd 100644 --- a/crates/noirc_evaluator/src/ssa/acir_gen/operations/sort.rs +++ b/crates/noirc_evaluator/src/ssa/acir_gen/operations/sort.rs @@ -12,8 +12,8 @@ use crate::{ // Generate gates which ensure that out_expr is a permutation of in_expr // Returns the control bits of the sorting network used to generate the constrains pub(crate) fn evaluate_permutation( - in_expr: &Vec, - out_expr: &Vec, + in_expr: &[Expression], + out_expr: &[Expression], evaluator: &mut Evaluator, ) -> Vec { let bits = Vec::new(); @@ -27,9 +27,9 @@ pub(crate) fn evaluate_permutation( // Same as evaluate_permutation() but uses the provided witness as network control bits pub(crate) fn evaluate_permutation_with_witness( - in_expr: &Vec, - out_expr: &Vec, - bits: &Vec, + in_expr: &[Expression], + out_expr: &[Expression], + bits: &[Witness], evaluator: &mut 
Evaluator, ) { let (w, b) = permutation_layer(in_expr, bits, false, evaluator); @@ -47,14 +47,14 @@ pub(crate) fn evaluate_permutation_with_witness( // in both cases it returns the witness of the network configuration // if generate_witness is true, bits is ignored fn permutation_layer( - in_expr: &Vec, + in_expr: &[Expression], bits: &[Witness], generate_witness: bool, evaluator: &mut Evaluator, ) -> (Vec, Vec) { let n = in_expr.len(); if n == 1 { - return (Vec::new(), in_expr.clone()); + return (Vec::new(), in_expr.to_vec()); } let n1 = n / 2; @@ -114,10 +114,9 @@ fn permutation_layer( #[cfg(test)] mod test { use acvm::{ - acir::{circuit::opcodes::BlackBoxFuncCall, native_types::WitnessMap}, + acir::{ative_types::WitnessMap, circuit::opcodes::FunctionInput, native_types::Witness}, pwg::block::Blocks, FieldElement, OpcodeResolution, OpcodeResolutionError, PartialWitnessGenerator, - PartialWitnessGeneratorStatus, }; use crate::{ @@ -128,12 +127,118 @@ mod test { struct MockBackend {} impl PartialWitnessGenerator for MockBackend { - fn solve_black_box_function_call( + fn aes( + &self, + _initial_witness: &mut WitnessMap, + _inputs: &[FunctionInput], + _outputs: &[Witness], + ) -> Result { + panic!("Path not trodden by this test") + } + fn and( + &self, + _initial_witness: &mut WitnessMap, + _lhs: &FunctionInput, + _rhs: &FunctionInput, + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn xor( + &self, + _initial_witness: &mut WitnessMap, + _lhs: &FunctionInput, + _rhs: &FunctionInput, + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn range( + &self, + _initial_witness: &mut WitnessMap, + _input: &FunctionInput, + ) -> Result { + panic!("Path not trodden by this test") + } + fn sha256( + &self, + _initial_witness: &mut WitnessMap, + _inputs: &[FunctionInput], + _outputs: &[Witness], + ) -> Result { + panic!("Path not trodden by this test") + } + fn blake2s( + &self, + _initial_witness: &mut 
WitnessMap, + _inputs: &[FunctionInput], + _outputs: &[Witness], + ) -> Result { + panic!("Path not trodden by this test") + } + fn compute_merkle_root( + &self, + _initial_witness: &mut WitnessMap, + _leaf: &FunctionInput, + _index: &FunctionInput, + _hash_path: &[FunctionInput], + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn schnorr_verify( + &self, + _initial_witness: &mut WitnessMap, + _public_key_x: &FunctionInput, + _public_key_y: &FunctionInput, + _signature: &[FunctionInput], + _message: &[FunctionInput], + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn pedersen( + &self, + _initial_witness: &mut WitnessMap, + _inputs: &[FunctionInput], + _outputs: &[Witness], + ) -> Result { + panic!("Path not trodden by this test") + } + fn hash_to_field_128_security( + &self, + _initial_witness: &mut WitnessMap, + _inputs: &[FunctionInput], + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn ecdsa_secp256k1( + &self, + _initial_witness: &mut WitnessMap, + _public_key_x: &[FunctionInput], + _public_key_y: &[FunctionInput], + _signature: &[FunctionInput], + _message: &[FunctionInput], + _output: &Witness, + ) -> Result { + panic!("Path not trodden by this test") + } + fn fixed_base_scalar_mul( + &self, + _initial_witness: &mut WitnessMap, + _input: &FunctionInput, + _outputs: &[Witness], + ) -> Result { + panic!("Path not trodden by this test") + } + fn keccak256( &self, _initial_witness: &mut WitnessMap, - _func_call: &BlackBoxFuncCall, + _inputs: &[FunctionInput], + _outputs: &[Witness], ) -> Result { - unreachable!(); + panic!("Path not trodden by this test") } } @@ -179,9 +284,9 @@ mod test { // compute the network output by solving the constraints let backend = MockBackend {}; let mut blocks = Blocks::default(); - let solver_status = backend - .solve(&mut solved_witness, &mut blocks, eval.opcodes.clone()) - .expect("Could not solve permutation 
constraints"); + let solver_status = + solve(&backend, &mut solved_witness, &mut blocks, eval.opcodes.clone()) + .expect("Could not solve permutation constraints"); assert_eq!(solver_status, PartialWitnessGeneratorStatus::Solved, "Incomplete solution"); let mut b_val = Vec::new(); for i in 0..output.len() { diff --git a/crates/noirc_evaluator/src/ssa/builtin.rs b/crates/noirc_evaluator/src/ssa/builtin.rs index 8e402343bb..4f3741583d 100644 --- a/crates/noirc_evaluator/src/ssa/builtin.rs +++ b/crates/noirc_evaluator/src/ssa/builtin.rs @@ -73,6 +73,7 @@ impl Opcode { match op { // Pointers do not overflow BlackBoxFunc::SHA256 + | BlackBoxFunc::Keccak256 | BlackBoxFunc::Blake2s | BlackBoxFunc::Pedersen | BlackBoxFunc::FixedBaseScalarMul => BigUint::zero(), @@ -84,9 +85,6 @@ impl Opcode { BlackBoxFunc::AES => { todo!("ICE: AES is unimplemented") } - BlackBoxFunc::Keccak256 => { - todo!("ICE: Keccak256 is unimplemented") - } BlackBoxFunc::RANGE | BlackBoxFunc::AND | BlackBoxFunc::XOR => { unimplemented!("ICE: these opcodes do not have Noir builtin functions") } @@ -105,10 +103,7 @@ impl Opcode { Opcode::LowLevel(op) => { match op { BlackBoxFunc::AES => todo!("ICE: AES is unimplemented"), - BlackBoxFunc::Keccak256 => { - todo!("ICE: Keccak256 is unimplemented") - } - BlackBoxFunc::SHA256 | BlackBoxFunc::Blake2s => { + BlackBoxFunc::SHA256 | BlackBoxFunc::Blake2s | BlackBoxFunc::Keccak256 => { (32, ObjectType::unsigned_integer(8)) } BlackBoxFunc::ComputeMerkleRoot | BlackBoxFunc::HashToField128Security => { diff --git a/crates/noirc_evaluator/src/ssa/mem.rs b/crates/noirc_evaluator/src/ssa/mem.rs index e4a82a7e59..09fdc33b70 100644 --- a/crates/noirc_evaluator/src/ssa/mem.rs +++ b/crates/noirc_evaluator/src/ssa/mem.rs @@ -23,6 +23,10 @@ impl ArrayId { pub(crate) fn dummy() -> ArrayId { ArrayId(std::u32::MAX) } + + pub(crate) fn as_u32(&self) -> u32 { + self.0 + } } /// MemArray represents a contiguous array of elements of the same type. 
diff --git a/crates/noirc_evaluator/src/ssa/node.rs b/crates/noirc_evaluator/src/ssa/node.rs index 8819a96e1c..bec3c923a6 100644 --- a/crates/noirc_evaluator/src/ssa/node.rs +++ b/crates/noirc_evaluator/src/ssa/node.rs @@ -918,8 +918,10 @@ impl Binary { !res_type.is_native_field(), "ICE: comparisons are not implemented for field elements" ); - let res = if lhs < rhs { FieldElement::one() } else { FieldElement::zero() }; - return Ok(NodeEval::Const(res, ObjectType::boolean())); + return Ok(NodeEval::Const( + FieldElement::from(lhs < rhs), + ObjectType::boolean(), + )); } } BinaryOp::Ule => { @@ -931,8 +933,10 @@ impl Binary { !res_type.is_native_field(), "ICE: comparisons are not implemented for field elements" ); - let res = if lhs <= rhs { FieldElement::one() } else { FieldElement::zero() }; - return Ok(NodeEval::Const(res, ObjectType::boolean())); + return Ok(NodeEval::Const( + FieldElement::from(lhs <= rhs), + ObjectType::boolean(), + )); } } BinaryOp::Slt => (), @@ -942,8 +946,10 @@ impl Binary { return Ok(NodeEval::Const(FieldElement::zero(), ObjectType::boolean())); //n.b we assume the type of lhs and rhs is unsigned because of the opcode, we could also verify this } else if let (Some(lhs), Some(rhs)) = (lhs, rhs) { - let res = if lhs < rhs { FieldElement::one() } else { FieldElement::zero() }; - return Ok(NodeEval::Const(res, ObjectType::boolean())); + return Ok(NodeEval::Const( + FieldElement::from(lhs < rhs), + ObjectType::boolean(), + )); } } BinaryOp::Lte => { @@ -951,30 +957,30 @@ impl Binary { return Ok(NodeEval::Const(FieldElement::one(), ObjectType::boolean())); //n.b we assume the type of lhs and rhs is unsigned because of the opcode, we could also verify this } else if let (Some(lhs), Some(rhs)) = (lhs, rhs) { - let res = if lhs <= rhs { FieldElement::one() } else { FieldElement::zero() }; - return Ok(NodeEval::Const(res, ObjectType::boolean())); + return Ok(NodeEval::Const( + FieldElement::from(lhs <= rhs), + ObjectType::boolean(), + )); } } 
BinaryOp::Eq => { if self.lhs == self.rhs { return Ok(NodeEval::Const(FieldElement::one(), ObjectType::boolean())); } else if let (Some(lhs), Some(rhs)) = (lhs, rhs) { - if lhs == rhs { - return Ok(NodeEval::Const(FieldElement::one(), ObjectType::boolean())); - } else { - return Ok(NodeEval::Const(FieldElement::zero(), ObjectType::boolean())); - } + return Ok(NodeEval::Const( + FieldElement::from(lhs == rhs), + ObjectType::boolean(), + )); } } BinaryOp::Ne => { if self.lhs == self.rhs { return Ok(NodeEval::Const(FieldElement::zero(), ObjectType::boolean())); } else if let (Some(lhs), Some(rhs)) = (lhs, rhs) { - if lhs != rhs { - return Ok(NodeEval::Const(FieldElement::one(), ObjectType::boolean())); - } else { - return Ok(NodeEval::Const(FieldElement::zero(), ObjectType::boolean())); - } + return Ok(NodeEval::Const( + FieldElement::from(lhs != rhs), + ObjectType::boolean(), + )); } } BinaryOp::And => { diff --git a/crates/noirc_evaluator/src/ssa_refactor.rs b/crates/noirc_evaluator/src/ssa_refactor.rs index fc45071e57..3406122733 100644 --- a/crates/noirc_evaluator/src/ssa_refactor.rs +++ b/crates/noirc_evaluator/src/ssa_refactor.rs @@ -7,6 +7,35 @@ //! This module heavily borrows from Cranelift #![allow(dead_code)] +use crate::errors::RuntimeError; +use acvm::{acir::circuit::Circuit, compiler::transformers::IsOpcodeSupported, Language}; +use noirc_abi::Abi; + +use noirc_frontend::monomorphization::ast::Program; + +use self::acir_gen::Acir; + +mod acir_gen; mod ir; +mod opt; mod ssa_builder; pub mod ssa_gen; + +/// Optimize the given program by converting it into SSA +/// form and performing optimizations there. When finished, +/// convert the final SSA into ACIR and return it. 
+pub fn optimize_into_acir(program: Program) -> Acir { + ssa_gen::generate_ssa(program).inline_functions().into_acir() +} +/// Compiles the Program into ACIR and applies optimizations to the arithmetic gates +/// This is analogous to `ssa:create_circuit` and this method is called when one wants +/// to use the new ssa module to process Noir code. +pub fn experimental_create_circuit( + _program: Program, + _np_language: Language, + _is_opcode_supported: IsOpcodeSupported, + _enable_logging: bool, + _show_output: bool, +) -> Result<(Circuit, Abi), RuntimeError> { + todo!("this is a stub function for the new SSA refactor module") +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/acir_gen/mod.rs b/crates/noirc_evaluator/src/ssa_refactor/acir_gen/mod.rs new file mode 100644 index 0000000000..a0959db5db --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/acir_gen/mod.rs @@ -0,0 +1,26 @@ +//! This file holds the pass to convert from Noir's SSA IR to ACIR. +use super::ssa_gen::Ssa; + +/// Context struct for the acir generation pass. +/// May be similar to the Evaluator struct in the current SSA IR. 
+struct Context {} + +/// The output of the Acir-gen pass +pub struct Acir {} + +impl Ssa { + pub(crate) fn into_acir(self) -> Acir { + let mut context = Context::new(); + context.convert_ssa(self) + } +} + +impl Context { + fn new() -> Self { + Self {} + } + + fn convert_ssa(&mut self, _ssa: Ssa) -> Acir { + todo!() + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir.rs b/crates/noirc_evaluator/src/ssa_refactor/ir.rs index 1f6cca9157..d52f380d3d 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir.rs @@ -2,9 +2,11 @@ pub(crate) mod basic_block; pub(crate) mod cfg; pub(crate) mod constant; pub(crate) mod dfg; +pub(crate) mod dom; pub(crate) mod function; pub(crate) mod instruction; pub(crate) mod map; +pub(crate) mod post_order; pub(crate) mod printer; pub(crate) mod types; pub(crate) mod value; diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs index 8a3f74c4a6..30526bc296 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/basic_block.rs @@ -29,10 +29,10 @@ pub(crate) struct BasicBlock { pub(crate) type BasicBlockId = Id; impl BasicBlock { - /// Create a new BasicBlock with the given parameters. + /// Create a new BasicBlock with the given instructions. /// Parameters can also be added later via BasicBlock::add_parameter - pub(crate) fn new(parameters: Vec) -> Self { - Self { parameters, instructions: Vec::new(), terminator: None } + pub(crate) fn new(instructions: Vec) -> Self { + Self { parameters: Vec::new(), instructions, terminator: None } } /// Returns the parameters of this block @@ -57,6 +57,11 @@ impl BasicBlock { &self.instructions } + /// Retrieve a mutable reference to all instructions in this block. + pub(crate) fn instructions_mut(&mut self) -> &mut Vec { + &mut self.instructions + } + /// Sets the terminator instruction of this block. 
/// /// A properly-constructed block will always terminate with a TerminatorInstruction - @@ -76,7 +81,9 @@ impl BasicBlock { /// Iterate over all the successors of the currently block, as determined by /// the blocks jumped to in the terminator instruction. If there is no terminator /// instruction yet, this will iterate 0 times. - pub(crate) fn successors(&self) -> impl ExactSizeIterator { + pub(crate) fn successors( + &self, + ) -> impl ExactSizeIterator + DoubleEndedIterator { match &self.terminator { Some(TerminatorInstruction::Jmp { destination, .. }) => vec![*destination].into_iter(), Some(TerminatorInstruction::JmpIf { then_destination, else_destination, .. }) => { @@ -89,8 +96,10 @@ impl BasicBlock { /// Removes the given instruction from this block if present or panics otherwise. pub(crate) fn remove_instruction(&mut self, instruction: InstructionId) { + // Iterate in reverse here as an optimization since remove_instruction is most + // often called to remove instructions at the end of a block. let index = - self.instructions.iter().position(|id| *id == instruction).unwrap_or_else(|| { + self.instructions.iter().rev().position(|id| *id == instruction).unwrap_or_else(|| { panic!("remove_instruction: No such instruction {instruction:?} in block") }); self.instructions.remove(index); diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs index 67569c6a4c..3ab345f06b 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/dfg.rs @@ -13,7 +13,6 @@ use super::{ }; use acvm::FieldElement; -use iter_extended::vecmap; /// The DataFlowGraph contains most of the actual data in a function including /// its blocks, instructions, and values. This struct is largely responsible for @@ -69,22 +68,6 @@ impl DataFlowGraph { self.blocks.insert(BasicBlock::new(Vec::new())) } - /// Creates a new basic block with the given parameters. 
- /// After being created, the block is unreachable in the current function - /// until another block is made to jump to it. - pub(crate) fn make_block_with_parameters( - &mut self, - parameter_types: impl Iterator, - ) -> BasicBlockId { - self.blocks.insert_with_id(|entry_block| { - let parameters = vecmap(parameter_types.enumerate(), |(position, typ)| { - self.values.insert(Value::Param { block: entry_block, position, typ }) - }); - - BasicBlock::new(parameters) - }) - } - /// Get an iterator over references to each basic block within the dfg, paired with the basic /// block's id. /// @@ -95,6 +78,7 @@ impl DataFlowGraph { self.blocks.iter() } + /// Returns the parameters of the given block pub(crate) fn block_parameters(&self, block: BasicBlockId) -> &[ValueId] { self.blocks[block].parameters() } diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/dom.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/dom.rs new file mode 100644 index 0000000000..dba656838b --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/dom.rs @@ -0,0 +1,433 @@ +//! The dominator tree of a function, represented as a hash map of each reachable block id to its +//! immediate dominator. +//! +//! Dominator trees are useful for tasks such as identifying back-edges in loop analysis or +//! calculating dominance frontiers. + +use std::{cmp::Ordering, collections::HashMap}; + +use super::{ + basic_block::BasicBlockId, cfg::ControlFlowGraph, function::Function, post_order::PostOrder, +}; + +/// Dominator tree node. We keep one of these per reachable block. +#[derive(Clone, Default)] +struct DominatorTreeNode { + /// The block's idx in the control flow graph's reverse post-order + reverse_post_order_idx: u32, + + /// The block that immediately dominated that of the node in question. + /// + /// This will be None for the entry block, which has no immediate dominator. 
+ immediate_dominator: Option, +} + +impl DominatorTreeNode { + /// Updates the immediate dominator estimate, returning true if it has changed. + /// + /// This is used internally as a shorthand during `compute_dominator_tree`. + pub(self) fn update_estimate(&mut self, immediate_dominator: BasicBlockId) -> bool { + let immediate_dominator = Some(immediate_dominator); + if self.immediate_dominator == immediate_dominator { + false + } else { + self.immediate_dominator = immediate_dominator; + true + } + } +} + +/// The dominator tree for a single function. +pub(crate) struct DominatorTree { + /// The nodes of the dominator tree + /// + /// After dominator tree computation has complete, this will contain a node for every + /// reachable block, and no nodes for unreachable blocks. + nodes: HashMap, +} + +/// Methods for querying the dominator tree. +impl DominatorTree { + /// Is `block_id` reachable from the entry block? + pub(crate) fn is_reachable(&self, block_id: BasicBlockId) -> bool { + self.nodes.contains_key(&block_id) + } + + /// Returns the immediate dominator of `block_id`. + /// + /// A block is said to *dominate* `block_id` if all control flow paths from the function + /// entry to `block_id` must go through the block. + /// + /// The *immediate dominator* is the dominator that is closest to `block_id`. All other + /// dominators also dominate the immediate dominator. + /// + /// This returns `None` if `block_id` is not reachable from the entry block, or if it is the + /// entry block which has no dominators. + pub(crate) fn immediate_dominator(&self, block_id: BasicBlockId) -> Option { + self.nodes.get(&block_id).and_then(|node| node.immediate_dominator) + } + + /// Compare two blocks relative to the reverse post-order. 
+ pub(crate) fn reverse_post_order_cmp(&self, a: BasicBlockId, b: BasicBlockId) -> Ordering { + match (self.nodes.get(&a), self.nodes.get(&b)) { + (Some(a), Some(b)) => a.reverse_post_order_idx.cmp(&b.reverse_post_order_idx), + _ => unreachable!("Post order for unreachable block is undefined"), + } + } + + /// Returns `true` if `block_a_id` dominates `block_b_id`. + /// + /// This means that every control-flow path from the function entry to `block_b_id` must go + /// through `block_a_id`. + /// + /// This function panics if either of the blocks are unreachable. + /// + /// An instruction is considered to dominate itself. + pub(crate) fn dominates(&self, block_a_id: BasicBlockId, mut block_b_id: BasicBlockId) -> bool { + // Walk up the dominator tree from "b" until we encounter or pass "a". Doing the + // comparison on the reverse post-order may allows to test whether we have passed "a" + // without waiting until we reach the root of the tree. + loop { + match self.reverse_post_order_cmp(block_a_id, block_b_id) { + Ordering::Less => { + block_b_id = match self.immediate_dominator(block_b_id) { + Some(immediate_dominator) => immediate_dominator, + None => return false, // a is unreachable, so we climbed past the entry + } + } + Ordering::Greater => return false, + Ordering::Equal => return true, + } + } + } + + /// Allocate and compute a dominator tree from a pre-computed control flow graph and + /// post-order counterpart. + pub(crate) fn with_cfg_and_post_order(cfg: &ControlFlowGraph, post_order: &PostOrder) -> Self { + let mut dom_tree = DominatorTree { nodes: HashMap::new() }; + dom_tree.compute_dominator_tree(cfg, post_order); + dom_tree + } + + /// Allocate and compute a dominator tree for the given function. + /// + /// This approach computes the control flow graph and post-order internally and then + /// discards them. 
If either should be retained reuse it is better to instead pre-compute them + /// and build the dominator tree with `DominatorTree::with_cfg_and_post_order`. + pub(crate) fn with_function(func: &Function) -> Self { + let cfg = ControlFlowGraph::with_function(func); + let post_order = PostOrder::with_function(func); + Self::with_cfg_and_post_order(&cfg, &post_order) + } + + /// Build a dominator tree from a control flow graph using Keith D. Cooper's + /// "Simple, Fast Dominator Algorithm." + fn compute_dominator_tree(&mut self, cfg: &ControlFlowGraph, post_order: &PostOrder) { + // We'll be iterating over a reverse post-order of the CFG, skipping the entry block. + let (entry_block_id, entry_free_post_order) = post_order + .as_slice() + .split_last() + .expect("ICE: functions always have at least one block"); + + // Do a first pass where we assign reverse post-order indices to all reachable nodes. The + // entry block will be the only node with no immediate dominator. + self.nodes.insert( + *entry_block_id, + DominatorTreeNode { reverse_post_order_idx: 0, immediate_dominator: None }, + ); + for (i, &block_id) in entry_free_post_order.iter().rev().enumerate() { + // Indices have been displaced by 1 by the removal of the entry node + let reverse_post_order_idx = i as u32 + 1; + + // Due to the nature of the post-order traversal, every node we visit will have at + // least one predecessor that has previously been assigned during this loop. + let immediate_dominator = self.compute_immediate_dominator(block_id, cfg); + self.nodes.insert( + block_id, + DominatorTreeNode { + immediate_dominator: Some(immediate_dominator), + reverse_post_order_idx, + }, + ); + } + + // Now that we have reverse post-order indices for everything and initial immediate + // dominator estimates, iterate until convergence. + // + // If the function is free of irreducible control flow, this will exit after one iteration. 
+ let mut changed = true; + while changed { + changed = false; + for &block_id in entry_free_post_order.iter().rev() { + let immediate_dominator = self.compute_immediate_dominator(block_id, cfg); + changed = self + .nodes + .get_mut(&block_id) + .expect("Assigned in first pass") + .update_estimate(immediate_dominator); + } + } + } + + // Compute the immediate dominator for `block_id` using the pre-calculate immediate dominators + // of reachable nodes. + fn compute_immediate_dominator( + &self, + block_id: BasicBlockId, + cfg: &ControlFlowGraph, + ) -> BasicBlockId { + // Get an iterator with just the reachable, already visited predecessors to `block_id`. + // Note that during the first pass `node` was pre-populated with all reachable blocks. + let mut reachable_predecessors = + cfg.predecessors(block_id).filter(|pred_id| self.nodes.contains_key(pred_id)); + + // This function isn't called on unreachable blocks or the entry block, so the reverse + // post-order will contain at least one predecessor to this block. + let mut immediate_dominator = + reachable_predecessors.next().expect("block node must have one reachable predecessor"); + + for predecessor in reachable_predecessors { + immediate_dominator = self.common_dominator(immediate_dominator, predecessor); + } + + immediate_dominator + } + + /// Compute the common dominator of two basic blocks. + /// + /// Both basic blocks are assumed to be reachable. + fn common_dominator( + &self, + mut block_a_id: BasicBlockId, + mut block_b_id: BasicBlockId, + ) -> BasicBlockId { + loop { + match self.reverse_post_order_cmp(block_a_id, block_b_id) { + Ordering::Less => { + // "a" comes before "b" in the reverse post-order. Move "b" up. + block_b_id = self.nodes[&block_b_id] + .immediate_dominator + .expect("Unreachable basic block?"); + } + Ordering::Greater => { + // "b" comes before "a" in the reverse post-order. Move "a" up. 
+ block_a_id = self.nodes[&block_a_id] + .immediate_dominator + .expect("Unreachable basic block?"); + } + Ordering::Equal => break, + } + } + + debug_assert_eq!(block_a_id, block_b_id, "Unreachable block passed to common_dominator?"); + block_a_id + } +} + +#[cfg(test)] +mod tests { + use std::cmp::Ordering; + + use crate::ssa_refactor::{ + ir::{ + basic_block::BasicBlockId, dom::DominatorTree, function::Function, + instruction::TerminatorInstruction, map::Id, types::Type, + }, + ssa_builder::FunctionBuilder, + }; + + #[test] + fn empty() { + let func_id = Id::test_new(0); + let mut func = Function::new("func".into(), func_id); + let block0_id = func.entry_block(); + func.dfg.set_block_terminator( + block0_id, + TerminatorInstruction::Return { return_values: vec![] }, + ); + let dom_tree = DominatorTree::with_function(&func); + assert!(dom_tree.dominates(block0_id, block0_id)); + } + + // Testing setup for a function with an unreachable block2 + fn unreachable_node_setup( + ) -> (DominatorTree, BasicBlockId, BasicBlockId, BasicBlockId, BasicBlockId) { + // func() { + // block0(cond: u1): + // jmpif v0 block2() block3() + // block1(): + // jmp block2() + // block2(): + // jmp block3() + // block3(): + // return () + // } + let func_id = Id::test_new(0); + let mut builder = FunctionBuilder::new("func".into(), func_id); + + let cond = builder.add_parameter(Type::unsigned(1)); + let block1_id = builder.insert_block(); + let block2_id = builder.insert_block(); + let block3_id = builder.insert_block(); + + builder.terminate_with_jmpif(cond, block2_id, block3_id); + builder.switch_to_block(block1_id); + builder.terminate_with_jmp(block2_id, vec![]); + builder.switch_to_block(block2_id); + builder.terminate_with_jmp(block3_id, vec![]); + builder.switch_to_block(block3_id); + builder.terminate_with_return(vec![]); + + let ssa = builder.finish(); + let func = ssa.main(); + let block0_id = func.entry_block(); + + let dt = DominatorTree::with_function(func); + (dt, block0_id, 
block1_id, block2_id, block3_id) + } + + // Expected dominator tree + // block0 { + // block2 + // block3 + // } + + // Dominance matrix + // ✓: Row item dominates column item + // !: Querying row item's dominance of column item panics. (i.e. invalid) + // b0 b1 b2 b3 + // b0 ✓ ! ✓ ✓ + // b1 ! ! ! ! + // b2 ! ✓ + // b3 ! ✓ + // Note that from a local view block 1 dominates blocks 1,2 & 3, but since this block is + // unreachable, performing this query indicates an internal compiler error. + #[test] + fn unreachable_node_asserts() { + let (dt, b0, _b1, b2, b3) = unreachable_node_setup(); + + assert!(dt.dominates(b0, b0)); + assert!(dt.dominates(b0, b2)); + assert!(dt.dominates(b0, b3)); + + assert!(!dt.dominates(b2, b0)); + assert!(dt.dominates(b2, b2)); + assert!(!dt.dominates(b2, b3)); + + assert!(!dt.dominates(b3, b0)); + assert!(!dt.dominates(b3, b2)); + assert!(dt.dominates(b3, b3)); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b0_b1() { + let (dt, b0, b1, _b2, _b3) = unreachable_node_setup(); + dt.dominates(b0, b1); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b1_b0() { + let (dt, b0, b1, _b2, _b3) = unreachable_node_setup(); + dt.dominates(b1, b0); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b1_b1() { + let (dt, _b0, b1, _b2, _b3) = unreachable_node_setup(); + dt.dominates(b1, b1); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b1_b2() { + let (dt, _b0, b1, b2, _b3) = unreachable_node_setup(); + dt.dominates(b1, b2); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b1_b3() { + let (dt, _b0, b1, _b2, b3) = unreachable_node_setup(); + dt.dominates(b1, b3); + } + + #[test] + #[should_panic] + fn unreachable_node_panic_b3_b1() { + let (dt, _b0, b1, b2, _b3) = unreachable_node_setup(); + dt.dominates(b2, b1); + } + + #[test] + fn backwards_layout() { + // func { + // block0(): + // jmp block2() + // block1(): + // return () + // block2(): + // jump block1() + // } + let func_id = 
Id::test_new(0); + let mut builder = FunctionBuilder::new("func".into(), func_id); + let block1_id = builder.insert_block(); + let block2_id = builder.insert_block(); + + builder.terminate_with_jmp(block2_id, vec![]); + builder.switch_to_block(block1_id); + builder.terminate_with_return(vec![]); + builder.switch_to_block(block2_id); + builder.terminate_with_jmp(block1_id, vec![]); + + let ssa = builder.finish(); + let func = ssa.main(); + let block0_id = func.entry_block(); + + let dt = DominatorTree::with_function(func); + + // Expected dominance tree: + // block0 { + // block2 { + // block1 + // } + // } + + assert_eq!(dt.immediate_dominator(block0_id), None); + assert_eq!(dt.immediate_dominator(block1_id), Some(block2_id)); + assert_eq!(dt.immediate_dominator(block2_id), Some(block0_id)); + + assert_eq!(dt.reverse_post_order_cmp(block0_id, block0_id), Ordering::Equal); + assert_eq!(dt.reverse_post_order_cmp(block0_id, block1_id), Ordering::Less); + assert_eq!(dt.reverse_post_order_cmp(block0_id, block2_id), Ordering::Less); + + assert_eq!(dt.reverse_post_order_cmp(block1_id, block0_id), Ordering::Greater); + assert_eq!(dt.reverse_post_order_cmp(block1_id, block1_id), Ordering::Equal); + assert_eq!(dt.reverse_post_order_cmp(block1_id, block2_id), Ordering::Greater); + + assert_eq!(dt.reverse_post_order_cmp(block2_id, block0_id), Ordering::Greater); + assert_eq!(dt.reverse_post_order_cmp(block2_id, block1_id), Ordering::Less); + assert_eq!(dt.reverse_post_order_cmp(block2_id, block2_id), Ordering::Equal); + + // Dominance matrix: + // ✓: Row item dominates column item + // b0 b1 b2 + // b0 ✓ ✓ ✓ + // b1 ✓ + // b2 ✓ ✓ + + assert!(dt.dominates(block0_id, block0_id)); + assert!(dt.dominates(block0_id, block1_id)); + assert!(dt.dominates(block0_id, block2_id)); + + assert!(!dt.dominates(block1_id, block0_id)); + assert!(dt.dominates(block1_id, block1_id)); + assert!(!dt.dominates(block1_id, block2_id)); + + assert!(!dt.dominates(block2_id, block0_id)); + 
assert!(dt.dominates(block2_id, block1_id)); + assert!(dt.dominates(block2_id, block2_id)); + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/function.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/function.rs index 8d90a13911..f37448462b 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/function.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/function.rs @@ -2,6 +2,7 @@ use super::basic_block::BasicBlockId; use super::dfg::DataFlowGraph; use super::map::Id; use super::types::Type; +use super::value::ValueId; /// A function holds a list of instructions. /// These instructions are further grouped into Basic blocks @@ -10,7 +11,7 @@ use super::types::Type; /// To reference external functions its FunctionId can be used but this /// cannot be checked for correctness until inlining is performed. #[derive(Debug)] -pub struct Function { +pub(crate) struct Function { /// The first basic block in the function entry_block: BasicBlockId, @@ -54,6 +55,12 @@ impl Function { pub(crate) fn entry_block(&self) -> BasicBlockId { self.entry_block } + + /// Returns the parameters of this function. + /// The parameters will always match that of this function's entry block. + pub(crate) fn parameters(&self) -> &[ValueId] { + self.dfg.block_parameters(self.entry_block) + } } /// FunctionId is a reference for a function diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/instruction.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/instruction.rs index 66f8b1e3b1..812d12b23a 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/instruction.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/instruction.rs @@ -1,4 +1,5 @@ use acvm::acir::BlackBoxFunc; +use iter_extended::vecmap; use super::{basic_block::BasicBlockId, map::Id, types::Type, value::ValueId}; @@ -114,6 +115,42 @@ impl Instruction { Instruction::Load { .. } | Instruction::Call { .. 
} => InstructionResultType::Unknown, } } + + /// True if this instruction requires specifying the control type variables when + /// inserting this instruction into a DataFlowGraph. + pub(crate) fn requires_ctrl_typevars(&self) -> bool { + matches!(self.result_type(), InstructionResultType::Unknown) + } + + /// Maps each ValueId inside this instruction to a new ValueId, returning the new instruction. + /// Note that the returned instruction is fresh and will not have an assigned InstructionId + /// until it is manually inserted in a DataFlowGraph later. + pub(crate) fn map_values(&self, mut f: impl FnMut(ValueId) -> ValueId) -> Instruction { + match self { + Instruction::Binary(binary) => Instruction::Binary(Binary { + lhs: f(binary.lhs), + rhs: f(binary.rhs), + operator: binary.operator, + }), + Instruction::Cast(value, typ) => Instruction::Cast(f(*value), *typ), + Instruction::Not(value) => Instruction::Not(f(*value)), + Instruction::Truncate { value, bit_size, max_bit_size } => Instruction::Truncate { + value: f(*value), + bit_size: *bit_size, + max_bit_size: *max_bit_size, + }, + Instruction::Constrain(value) => Instruction::Constrain(f(*value)), + Instruction::Call { func, arguments } => Instruction::Call { + func: f(*func), + arguments: vecmap(arguments.iter().copied(), f), + }, + Instruction::Allocate { size } => Instruction::Allocate { size: *size }, + Instruction::Load { address } => Instruction::Load { address: f(*address) }, + Instruction::Store { address, value } => { + Instruction::Store { address: f(*address), value: f(*value) } + } + } + } } /// The possible return values for Instruction::return_types @@ -191,7 +228,7 @@ impl Binary { /// All binary operators are also only for numeric types. To implement /// e.g. equality for a compound type like a struct, one must add a /// separate Eq operation for each field and combine them later with And. 
-#[derive(Debug, PartialEq, Eq, Hash, Clone)] +#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] pub(crate) enum BinaryOp { /// Addition of lhs + rhs. Add, diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/map.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/map.rs index a99ff06c5f..43baf4430c 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/map.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/map.rs @@ -45,6 +45,18 @@ impl std::hash::Hash for Id { } } +impl PartialOrd for Id { + fn partial_cmp(&self, other: &Self) -> Option { + self.index.partial_cmp(&other.index) + } +} + +impl Ord for Id { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.index.cmp(&other.index) + } +} + impl Eq for Id {} impl PartialEq for Id { @@ -123,7 +135,7 @@ impl DenseMap { /// /// The id-element pairs are ordered by the numeric values of the ids. pub(crate) fn iter(&self) -> impl ExactSizeIterator, &T)> { - let ids_iter = (0..self.storage.len()).into_iter().map(|idx| Id::new(idx)); + let ids_iter = (0..self.storage.len()).map(|idx| Id::new(idx)); ids_iter.zip(self.storage.iter()) } } @@ -272,6 +284,12 @@ pub(crate) struct AtomicCounter { } impl AtomicCounter { + /// Create a new counter starting after the given Id. + /// Use AtomicCounter::default() to start at zero. + pub(crate) fn starting_after(id: Id) -> Self { + Self { next: AtomicUsize::new(id.index + 1), _marker: Default::default() } + } + /// Return the next fresh id pub(crate) fn next(&self) -> Id { Id::new(self.next.fetch_add(1, Ordering::Relaxed)) diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/post_order.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/post_order.rs new file mode 100644 index 0000000000..984f10a64a --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/post_order.rs @@ -0,0 +1,163 @@ +//! The post-order for a given function represented as a vector of basic block ids. +//! +//! 
This ordering is beneficial to the efficiency of various algorithms, such as those for dead +//! code elimination and calculating dominance trees. + +use std::collections::HashSet; + +use crate::ssa_refactor::ir::{basic_block::BasicBlockId, function::Function}; + +/// Depth-first traversal stack state marker for computing the cfg post-order. +enum Visit { + First, + Last, +} + +pub(crate) struct PostOrder(Vec); + +impl PostOrder { + pub(crate) fn as_slice(&self) -> &[BasicBlockId] { + self.0.as_slice() + } +} + +impl PostOrder { + /// Allocate and compute a function's block post-order. + pub(crate) fn with_function(func: &Function) -> Self { + PostOrder(Self::compute_post_order(func)) + } + + // Computes the post-order of the function by doing a depth-first traversal of the + // function's entry block's previously unvisited children. Each block is sequenced according + // to when the traversal exits it. + fn compute_post_order(func: &Function) -> Vec { + let mut stack = vec![(Visit::First, func.entry_block())]; + let mut visited: HashSet = HashSet::new(); + let mut post_order: Vec = Vec::new(); + + while let Some((visit, block_id)) = stack.pop() { + match visit { + Visit::First => { + if !visited.contains(&block_id) { + // This is the first time we pop the block, so we need to scan its + // successors and then revisit it. + visited.insert(block_id); + stack.push((Visit::Last, block_id)); + // Stack successors for visiting. Because items are taken from the top of the + // stack, we push the item that's due for a visit first to the top. + for successor_id in func.dfg[block_id].successors().rev() { + if !visited.contains(&successor_id) { + // This not-visited check would also be covered by the next + // iteration, but checking here saves an iteration per successor. + stack.push((Visit::First, successor_id)); + } + } + } + } + + Visit::Last => { + // We've finished all this node's successors. 
+ post_order.push(block_id); + } + } + } + post_order + } +} + +#[cfg(test)] +mod tests { + use crate::ssa_refactor::ir::{ + function::Function, instruction::TerminatorInstruction, map::Id, post_order::PostOrder, + types::Type, + }; + + #[test] + fn single_block() { + let func_id = Id::test_new(0); + let func = Function::new("func".into(), func_id); + let post_order = PostOrder::with_function(&func); + assert_eq!(post_order.0, [func.entry_block()]); + } + + #[test] + fn arb_graph_with_unreachable() { + // A → B C + // ↓ ↗ ↓ ↓ + // D ← E → F + // (`A` is entry block) + // Expected post-order working: + // A { + // B { + // E { + // D { + // B (seen) + // } -> push(D) + // F { + // } -> push(F) + // } -> push(E) + // } -> push(B) + // D (seen) + // } -> push(A) + // Result: + // D, F, E, B, A, (C dropped as unreachable) + + let func_id = Id::test_new(0); + let mut func = Function::new("func".into(), func_id); + let block_a_id = func.entry_block(); + let block_b_id = func.dfg.make_block(); + let block_c_id = func.dfg.make_block(); + let block_d_id = func.dfg.make_block(); + let block_e_id = func.dfg.make_block(); + let block_f_id = func.dfg.make_block(); + + // A → B • + // ↓ + // D • • + let cond_a = func.dfg.add_block_parameter(block_a_id, Type::unsigned(1)); + func.dfg.set_block_terminator( + block_a_id, + TerminatorInstruction::JmpIf { + condition: cond_a, + then_destination: block_b_id, + else_destination: block_d_id, + }, + ); + // • B • + // • ↓ • + // • E • + func.dfg.set_block_terminator( + block_b_id, + TerminatorInstruction::Jmp { destination: block_e_id, arguments: vec![] }, + ); + // • • • + // + // D ← E → F + let cond_e = func.dfg.add_block_parameter(block_e_id, Type::unsigned(1)); + func.dfg.set_block_terminator( + block_e_id, + TerminatorInstruction::JmpIf { + condition: cond_e, + then_destination: block_d_id, + else_destination: block_f_id, + }, + ); + // • B • + // ↗ + // D • • + func.dfg.set_block_terminator( + block_d_id, + 
TerminatorInstruction::Jmp { destination: block_b_id, arguments: vec![] }, + ); + // • • C + // • • ↓ + // • • F + func.dfg.set_block_terminator( + block_c_id, + TerminatorInstruction::Jmp { destination: block_f_id, arguments: vec![] }, + ); + + let post_order = PostOrder::with_function(&func); + assert_eq!(post_order.0, [block_d_id, block_f_id, block_e_id, block_b_id, block_a_id]); + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/ir/printer.rs b/crates/noirc_evaluator/src/ssa_refactor/ir/printer.rs index 2e46701788..3993a86261 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ir/printer.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ir/printer.rs @@ -64,7 +64,7 @@ fn value(function: &Function, id: ValueId) -> String { match &function.dfg[id] { Value::NumericConstant { constant, typ } => { let value = function.dfg[*constant].value(); - format!("{} {}", typ, value) + format!("{typ} {value}") } Value::Function(id) => id.to_string(), Value::Intrinsic(intrinsic) => intrinsic.to_string(), diff --git a/crates/noirc_evaluator/src/ssa_refactor/opt/inlining.rs b/crates/noirc_evaluator/src/ssa_refactor/opt/inlining.rs new file mode 100644 index 0000000000..6e7c984874 --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/opt/inlining.rs @@ -0,0 +1,420 @@ +//! This module defines the function inlining pass for the SSA IR. +//! The purpose of this pass is to inline the instructions of each function call +//! within the function caller. If all function calls are known, there will only +//! be a single function remaining when the pass finishes. 
+use std::collections::{HashMap, HashSet}; + +use iter_extended::vecmap; + +use crate::ssa_refactor::{ + ir::{ + basic_block::BasicBlockId, + function::{Function, FunctionId}, + instruction::{Instruction, InstructionId, TerminatorInstruction}, + value::{Value, ValueId}, + }, + ssa_builder::FunctionBuilder, + ssa_gen::Ssa, +}; + +/// An arbitrary limit to the maximum number of recursive call +/// frames at any point in time. +const RECURSION_LIMIT: u32 = 1000; + +impl Ssa { + /// Inline all functions within the IR. + /// + /// In the case of recursive functions, this will attempt + /// to recursively inline until the RECURSION_LIMIT is reached. + /// + /// Functions are recursively inlined into main until either we finish + /// inlining all functions or we encounter a function whose function id is not known. + /// When the later happens, the call instruction is kept in addition to the function + /// it refers to. The function it refers to is kept unmodified without any inlining + /// changes. This is because if the function's id later becomes known by a later + /// pass, we would need to re-run all of inlining anyway to inline it, so we might + /// as well save the work for later instead of performing it twice. + pub(crate) fn inline_functions(self) -> Ssa { + InlineContext::new(&self).inline_all(self) + } +} + +/// The context for the function inlining pass. +/// +/// This works using an internal FunctionBuilder to build a new main function from scratch. +/// Doing it this way properly handles importing instructions between functions and lets us +/// reuse the existing API at the cost of essentially cloning each of main's instructions. +struct InlineContext { + recursion_level: u32, + builder: FunctionBuilder, + + /// True if we failed to inline at least one call. If this is still false when finishing + /// inlining we can remove all other functions from the resulting Ssa struct and keep only + /// the function that was inlined into. 
+ failed_to_inline_a_call: bool, +} + +/// The per-function inlining context contains information that is only valid for one function. +/// For example, each function has its own DataFlowGraph, and thus each function needs a translation +/// layer to translate between BlockId to BlockId for the current function and the function to +/// inline into. The same goes for ValueIds, InstructionIds, and for storing other data like +/// parameter to argument mappings. +struct PerFunctionContext<'function> { + /// The source function is the function we're currently inlining into the function being built. + source_function: &'function Function, + + /// The shared inlining context for all functions. This notably contains the FunctionBuilder used + /// to build the function we're inlining into. + context: &'function mut InlineContext, + + /// Maps ValueIds in the function being inlined to the new ValueIds to use in the function + /// being inlined into. This mapping also contains the mapping from parameter values to + /// argument values. + values: HashMap, + + /// Maps BasicBlockIds in the function being inlined to the new BasicBlockIds to use in the + /// function being inlined into. + blocks: HashMap, + + /// Maps InstructionIds from the function being inlined to the function being inlined into. + instructions: HashMap, + + /// The TerminatorInstruction::Return in the source_function will be mapped to a jmp to + /// this block in the destination function instead. + return_destination: BasicBlockId, + + /// True if we're currently working on the main function. + inlining_main: bool, +} + +impl InlineContext { + /// Create a new context object for the function inlining pass. + /// This starts off with an empty mapping of instructions for main's parameters. 
+ /// The function being inlined into will always be the main function, although it is + /// actually a copy that is created in case the original main is still needed from a function + /// that could not be inlined calling it. + fn new(ssa: &Ssa) -> InlineContext { + let main_name = ssa.main().name().to_owned(); + let builder = FunctionBuilder::new(main_name, ssa.next_id.next()); + Self { builder, recursion_level: 0, failed_to_inline_a_call: false } + } + + /// Start inlining the main function and all functions reachable from it. + fn inline_all(mut self, ssa: Ssa) -> Ssa { + let main = ssa.main(); + let mut context = PerFunctionContext::new(&mut self, main); + context.inlining_main = true; + + // The main block is already inserted so we have to add it to context.blocks and add + // its parameters here. Failing to do so would cause context.translate_block() to add + // a fresh block for the entry block rather than use the existing one. + let entry_block = context.context.builder.current_function.entry_block(); + let original_parameters = context.source_function.parameters(); + + for parameter in original_parameters { + let typ = context.source_function.dfg.type_of_value(*parameter); + let new_parameter = context.context.builder.add_block_parameter(entry_block, typ); + context.values.insert(*parameter, new_parameter); + } + + context.blocks.insert(context.source_function.entry_block(), entry_block); + context.inline_blocks(&ssa); + self.finish(ssa) + } + + /// Inlines a function into the current function and returns the translated return values + /// of the inlined function. + fn inline_function(&mut self, ssa: &Ssa, id: FunctionId, arguments: &[ValueId]) -> &[ValueId] { + self.recursion_level += 1; + + if self.recursion_level > RECURSION_LIMIT { + panic!( + "Attempted to recur more than {RECURSION_LIMIT} times during function inlining." 
+ ); + } + + let source_function = &ssa.functions[&id]; + let mut context = PerFunctionContext::new(self, source_function); + + let parameters = source_function.parameters(); + assert_eq!(parameters.len(), arguments.len()); + context.values = parameters.iter().copied().zip(arguments.iter().copied()).collect(); + + let current_block = context.context.builder.current_block(); + context.blocks.insert(source_function.entry_block(), current_block); + + context.inline_blocks(ssa); + let return_destination = context.return_destination; + self.builder.block_parameters(return_destination) + } + + /// Finish inlining and return the new Ssa struct with the inlined version of main. + /// If any functions failed to inline, they are not removed from the final Ssa struct. + fn finish(self, mut ssa: Ssa) -> Ssa { + let mut new_ssa = self.builder.finish(); + assert_eq!(new_ssa.functions.len(), 1); + + // If we failed to inline any call, any function may still be reachable so we + // don't remove any from the final program. We could be more precise here and + // do a reachability analysis but it should be fine to keep the extra functions + // around longer if they are not called. + if self.failed_to_inline_a_call { + let new_main = new_ssa.functions.pop_first().unwrap().1; + ssa.main_id = new_main.id(); + ssa.functions.insert(new_main.id(), new_main); + ssa + } else { + new_ssa + } + } +} + +impl<'function> PerFunctionContext<'function> { + /// Create a new PerFunctionContext from the source function. + /// The value and block mappings for this context are initially empty except + /// for containing the mapping between parameters in the source_function and + /// the arguments of the destination function. + fn new(context: &'function mut InlineContext, source_function: &'function Function) -> Self { + // Create the block to return to but don't insert its parameters until we + // have the types of the actual return values later. 
+ Self { + return_destination: context.builder.insert_block(), + context, + source_function, + blocks: HashMap::new(), + instructions: HashMap::new(), + values: HashMap::new(), + inlining_main: false, + } + } + + /// Translates a ValueId from the function being inlined to a ValueId of the function + /// being inlined into. Note that this expects value ids for all Value::Instruction and + /// Value::Param values are already handled as a result of previous inlining of instructions + /// and blocks respectively. If these assertions trigger it means a value is being used before + /// the instruction or block that defines the value is inserted. + fn translate_value(&mut self, id: ValueId) -> ValueId { + if let Some(value) = self.values.get(&id) { + return *value; + } + + let new_value = match &self.source_function.dfg[id] { + value @ Value::Instruction { .. } => { + unreachable!("All Value::Instructions should already be known during inlining after creating the original inlined instruction. Unknown value {id} = {value:?}") + } + value @ Value::Param { .. } => { + unreachable!("All Value::Params should already be known from previous calls to translate_block. Unknown value {id} = {value:?}") + } + Value::NumericConstant { constant, typ } => { + let value = self.source_function.dfg[*constant].value(); + self.context.builder.numeric_constant(value, *typ) + } + Value::Function(function) => self.context.builder.import_function(*function), + Value::Intrinsic(intrinsic) => self.context.builder.import_intrinsic_id(*intrinsic), + }; + + self.values.insert(id, new_value); + new_value + } + + /// Translate a block id from the source function to one of the target function. + /// + /// If the block isn't already known, this will insert a new block into the target function + /// with the same parameter types as the source block. 
+ fn translate_block( + &mut self, + source_block: BasicBlockId, + block_queue: &mut Vec, + ) -> BasicBlockId { + if let Some(block) = self.blocks.get(&source_block) { + return *block; + } + + // The block is not yet inlined, queue it + block_queue.push(source_block); + + // The block is not already present in the function being inlined into so we must create it. + // The block's instructions are not copied over as they will be copied later in inlining. + let new_block = self.context.builder.insert_block(); + let original_parameters = self.source_function.dfg.block_parameters(source_block); + + for parameter in original_parameters { + let typ = self.source_function.dfg.type_of_value(*parameter); + let new_parameter = self.context.builder.add_block_parameter(new_block, typ); + self.values.insert(*parameter, new_parameter); + } + + self.blocks.insert(source_block, new_block); + new_block + } + + /// Try to retrieve the function referred to by the given Id. + /// Expects that the given ValueId belongs to the source_function. + /// + /// Returns None if the id is not known to refer to a function. + fn get_function(&mut self, mut id: ValueId) -> Option { + id = self.translate_value(id); + match self.context.builder[id] { + Value::Function(id) => Some(id), + Value::Intrinsic(_) => None, + _ => { + self.context.failed_to_inline_a_call = true; + None + } + } + } + + /// Inline all reachable blocks within the source_function into the destination function. 
+ fn inline_blocks(&mut self, ssa: &Ssa) { + let mut seen_blocks = HashSet::new(); + let mut block_queue = vec![self.source_function.entry_block()]; + + while let Some(source_block_id) = block_queue.pop() { + let translated_block_id = self.translate_block(source_block_id, &mut block_queue); + self.context.builder.switch_to_block(translated_block_id); + + seen_blocks.insert(source_block_id); + self.inline_block(ssa, source_block_id); + self.handle_terminator_instruction(source_block_id, &mut block_queue); + } + + self.context.builder.switch_to_block(self.return_destination); + } + + /// Inline each instruction in the given block into the function being inlined into. + /// This may recurse if it finds another function to inline if a call instruction is within this block. + fn inline_block(&mut self, ssa: &Ssa, block_id: BasicBlockId) { + let block = &self.source_function.dfg[block_id]; + for id in block.instructions() { + match &self.source_function.dfg[*id] { + Instruction::Call { func, arguments } => match self.get_function(*func) { + Some(function) => self.inline_function(ssa, *id, function, arguments), + None => self.push_instruction(*id), + }, + _ => self.push_instruction(*id), + } + } + } + + /// Inline a function call and remember the inlined return values in the values map + fn inline_function( + &mut self, + ssa: &Ssa, + call_id: InstructionId, + function: FunctionId, + arguments: &[ValueId], + ) { + let old_results = self.source_function.dfg.instruction_results(call_id); + let new_results = self.context.inline_function(ssa, function, arguments); + Self::insert_new_instruction_results(&mut self.values, old_results, new_results); + } + + /// Push the given instruction from the source_function into the current block of the + /// function being inlined into. 
+ fn push_instruction(&mut self, id: InstructionId) { + let instruction = self.source_function.dfg[id].map_values(|id| self.translate_value(id)); + let results = self.source_function.dfg.instruction_results(id); + + let ctrl_typevars = instruction + .requires_ctrl_typevars() + .then(|| vecmap(results, |result| self.source_function.dfg.type_of_value(*result))); + + let new_results = self.context.builder.insert_instruction(instruction, ctrl_typevars); + Self::insert_new_instruction_results(&mut self.values, results, new_results); + } + + /// Modify the values HashMap to remember the mapping between an instruction result's previous + /// ValueId (from the source_function) and its new ValueId in the destination function. + fn insert_new_instruction_results( + values: &mut HashMap, + old_results: &[ValueId], + new_results: &[ValueId], + ) { + assert_eq!(old_results.len(), new_results.len()); + for (old_result, new_result) in old_results.iter().zip(new_results) { + values.insert(*old_result, *new_result); + } + } + + /// Handle the given terminator instruction from the given source function block. + /// This will push any new blocks to the destination function as needed, add them + /// to the block queue, and set the terminator instruction for the current block. 
+ fn handle_terminator_instruction( + &mut self, + block_id: BasicBlockId, + block_queue: &mut Vec, + ) { + match self.source_function.dfg[block_id].terminator() { + Some(TerminatorInstruction::Jmp { destination, arguments }) => { + let destination = self.translate_block(*destination, block_queue); + let arguments = vecmap(arguments, |arg| self.translate_value(*arg)); + self.context.builder.terminate_with_jmp(destination, arguments); + } + Some(TerminatorInstruction::JmpIf { + condition, + then_destination, + else_destination, + }) => { + let condition = self.translate_value(*condition); + let then_block = self.translate_block(*then_destination, block_queue); + let else_block = self.translate_block(*else_destination, block_queue); + self.context.builder.terminate_with_jmpif(condition, then_block, else_block); + } + Some(TerminatorInstruction::Return { return_values }) => { + let return_values = vecmap(return_values, |value| self.translate_value(*value)); + + if self.inlining_main { + self.context.builder.terminate_with_return(return_values); + } else { + for value in &return_values { + // Add the block parameters for the return block here since we don't do + // it when inserting the block in PerFunctionContext::new + let typ = self.context.builder.current_function.dfg.type_of_value(*value); + self.context.builder.add_block_parameter(self.return_destination, typ); + } + self.context.builder.terminate_with_jmp(self.return_destination, return_values); + } + } + None => unreachable!("Block has no terminator instruction"), + } + } +} + +#[cfg(test)] +mod test { + use crate::ssa_refactor::{ + ir::{map::Id, types::Type}, + ssa_builder::FunctionBuilder, + }; + + #[test] + fn basic_inlining() { + // fn foo { + // b0(): + // v0 = call bar() + // return v0 + // } + // fn bar { + // b0(): + // return 72 + // } + let foo_id = Id::test_new(0); + let mut builder = FunctionBuilder::new("foo".into(), foo_id); + + let bar_id = Id::test_new(1); + let bar = 
builder.import_function(bar_id); + let results = builder.insert_call(bar, Vec::new(), vec![Type::field()]).to_vec(); + builder.terminate_with_return(results); + + builder.new_function("bar".into(), bar_id); + let expected_return = 72u128; + let seventy_two = builder.field_constant(expected_return); + builder.terminate_with_return(vec![seventy_two]); + + let ssa = builder.finish(); + assert_eq!(ssa.functions.len(), 2); + + let inlined = ssa.inline_functions(); + assert_eq!(inlined.functions.len(), 1); + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs b/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs new file mode 100644 index 0000000000..46ca7d443b --- /dev/null +++ b/crates/noirc_evaluator/src/ssa_refactor/opt/mod.rs @@ -0,0 +1,6 @@ +//! This folder contains each optimization pass for the SSA IR. +//! +//! Each pass is generally expected to mutate the SSA IR into a gradually +//! simpler form until the IR only has a single function remaining with 1 block within it. +//! Generally, these passes are also expected to minimize the final amount of instructions. +mod inlining; diff --git a/crates/noirc_evaluator/src/ssa_refactor/ssa_builder/mod.rs b/crates/noirc_evaluator/src/ssa_refactor/ssa_builder/mod.rs index aa67cbed58..f621503e59 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ssa_builder/mod.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ssa_builder/mod.rs @@ -9,7 +9,10 @@ use crate::ssa_refactor::ir::{ }; use super::{ - ir::instruction::{InstructionId, Intrinsic}, + ir::{ + basic_block::BasicBlock, + instruction::{InstructionId, Intrinsic}, + }, ssa_gen::Ssa, }; @@ -95,8 +98,13 @@ impl FunctionBuilder { self.current_function.dfg.add_block_parameter(block, typ) } + /// Returns the parameters of the given block in the current function. 
+ pub(crate) fn block_parameters(&self, block: BasicBlockId) -> &[ValueId] { + self.current_function.dfg.block_parameters(block) + } + /// Inserts a new instruction at the end of the current block and returns its results - fn insert_instruction( + pub(crate) fn insert_instruction( &mut self, instruction: Instruction, ctrl_typevars: Option>, @@ -113,6 +121,11 @@ impl FunctionBuilder { self.current_block = block; } + /// Returns the block currently being inserted into + pub(crate) fn current_block(&mut self) -> BasicBlockId { + self.current_block + } + /// Insert an allocate instruction at the end of the current block, allocating the /// given amount of field elements. Returns the result of the allocate instruction, /// which is always a Reference to the allocated data. @@ -228,8 +241,12 @@ impl FunctionBuilder { /// Retrieve a value reference to the given intrinsic operation. /// Returns None if there is no intrinsic matching the given name. pub(crate) fn import_intrinsic(&mut self, name: &str) -> Option { - Intrinsic::lookup(name) - .map(|intrinsic| self.current_function.dfg.import_intrinsic(intrinsic)) + Intrinsic::lookup(name).map(|intrinsic| self.import_intrinsic_id(intrinsic)) + } + + /// Retrieve a value reference to the given intrinsic operation. + pub(crate) fn import_intrinsic_id(&mut self, intrinsic: Intrinsic) -> ValueId { + self.current_function.dfg.import_intrinsic(intrinsic) } /// Removes the given instruction from the current block or panics otherwise. 
@@ -253,3 +270,11 @@ impl std::ops::Index for FunctionBuilder { &self.current_function.dfg[id] } } + +impl std::ops::Index for FunctionBuilder { + type Output = BasicBlock; + + fn index(&self, id: BasicBlockId) -> &Self::Output { + &self.current_function.dfg[id] + } +} diff --git a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/mod.rs b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/mod.rs index d6c5731e14..34317b7df2 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/mod.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/mod.rs @@ -2,7 +2,7 @@ mod context; mod program; mod value; -pub use program::Ssa; +pub(crate) use program::Ssa; use context::SharedContext; use iter_extended::vecmap; @@ -19,7 +19,7 @@ use super::ir::{instruction::BinaryOp, types::Type, value::ValueId}; /// Generates SSA for the given monomorphized program. /// /// This function will generate the SSA but does not perform any optimizations on it. -pub fn generate_ssa(program: Program) -> Ssa { +pub(crate) fn generate_ssa(program: Program) -> Ssa { let context = SharedContext::new(program); let main_id = Program::main_id(); @@ -31,6 +31,10 @@ pub fn generate_ssa(program: Program) -> Ssa { let mut function_context = FunctionContext::new(main.name.clone(), &main.parameters, &context); function_context.codegen_function_body(&main.body); + // Main has now been compiled and any other functions referenced within have been added to the + // function queue as they were found in codegen_ident. This queueing will happen each time a + // previously-unseen function is found so we need now only continue popping from this queue + // to generate SSA for each function used within the program. 
while let Some((src_function_id, dest_id)) = context.pop_next_function_in_queue() { let function = &context.program[src_function_id]; function_context.new_function(dest_id, function.name.clone(), &function.parameters); @@ -116,6 +120,15 @@ impl<'a> FunctionContext<'a> { } } + /// Codegen an array by allocating enough space for each element and inserting separate + /// store instructions until each element is stored. The store instructions will be separated + /// by add instructions to calculate the new offset address to store to next. + /// + /// In the case of arrays of structs, the structs are flattened such that each field will be + /// stored next to the other fields in memory. So an array such as [(1, 2), (3, 4)] is + /// stored the same as the array [1, 2, 3, 4]. + /// + /// The value returned from this function is always that of the allocate instruction. fn codegen_array(&mut self, elements: Vec, element_type: Tree) -> Values { let size = element_type.size_of_type() * elements.len(); let array = self.builder.insert_allocate(size.try_into().unwrap_or_else(|_| { @@ -206,6 +219,23 @@ impl<'a> FunctionContext<'a> { self.builder.insert_cast(lhs, typ).into() } + /// Codegens a for loop, creating three new blocks in the process. + /// The return value of a for loop is always a unit literal. + /// + /// For example, the loop `for i in start .. end { body }` is codegen'd as: + /// + /// v0 = ... codegen start ... + /// v1 = ... codegen end ... + /// br loop_entry(v0) + /// loop_entry(i: Field): + /// v2 = lt i v1 + /// brif v2, then: loop_body, else: loop_end + /// loop_body(): + /// v3 = ... codegen body ... + /// v4 = add 1, i + /// br loop_entry(v4) + /// loop_end(): + /// ... This is the current insert point after codegen_for finishes ... 
fn codegen_for(&mut self, for_expr: &ast::For) -> Values { let loop_entry = self.builder.insert_block(); let loop_body = self.builder.insert_block(); @@ -236,6 +266,30 @@ impl<'a> FunctionContext<'a> { self.unit_value() } + /// Codegens an if expression, handling the case of what to do if there is no 'else'. + /// + /// For example, the expression `if cond { a } else { b }` is codegen'd as: + /// + /// v0 = ... codegen cond ... + /// brif v0, then: then_block, else: else_block + /// then_block(): + /// v1 = ... codegen a ... + /// br end_if(v1) + /// else_block(): + /// v2 = ... codegen b ... + /// br end_if(v2) + /// end_if(v3: ?): // Type of v3 matches the type of a and b + /// ... This is the current insert point after codegen_if finishes ... + /// + /// As another example, the expression `if cond { a }` is codegen'd as: + /// + /// v0 = ... codegen cond ... + /// brif v0, then: then_block, else: end_block + /// then_block: + /// v1 = ... codegen a ... + /// br end_if() + /// end_if: // No block parameter is needed. Without an else, the unit value is always returned. + /// ... This is the current insert point after codegen_if finishes ... fn codegen_if(&mut self, if_expr: &ast::If) -> Values { let condition = self.codegen_non_tuple_expression(&if_expr.condition); @@ -287,6 +341,8 @@ impl<'a> FunctionContext<'a> { Self::get_field(tuple, field_index) } + /// Generate SSA for a function call. Note that calls to built-in functions + /// and intrinsics are also represented by the function call instruction. fn codegen_call(&mut self, call: &ast::Call) -> Values { let function = self.codegen_non_tuple_expression(&call.func); @@ -299,6 +355,10 @@ impl<'a> FunctionContext<'a> { self.insert_call(function, arguments, &call.return_type) } + /// Generate SSA for the given variable. + /// If the variable is immutable, no special handling is necessary and we can return the given + /// ValueId directly. 
If it is mutable, we'll need to allocate space for the value and store + /// the initial value before returning the allocate instruction. fn codegen_let(&mut self, let_expr: &ast::Let) -> Values { let mut values = self.codegen_expression(&let_expr.expression); diff --git a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs index 99d4945621..7f4b9a8dd2 100644 --- a/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs +++ b/crates/noirc_evaluator/src/ssa_refactor/ssa_gen/program.rs @@ -1,22 +1,43 @@ -use std::fmt::Display; +use std::{collections::BTreeMap, fmt::Display}; -use crate::ssa_refactor::ir::function::Function; +use iter_extended::btree_map; + +use crate::ssa_refactor::ir::{ + function::{Function, FunctionId}, + map::AtomicCounter, +}; /// Contains the entire SSA representation of the program. -pub struct Ssa { - functions: Vec, +pub(crate) struct Ssa { + pub(crate) functions: BTreeMap, + pub(crate) main_id: FunctionId, + pub(crate) next_id: AtomicCounter, } impl Ssa { - /// Create a new Ssa object from the given SSA functions - pub fn new(functions: Vec) -> Self { - Self { functions } + /// Create a new Ssa object from the given SSA functions. + /// The first function in this vector is expected to be the main function. 
+ pub(crate) fn new(functions: Vec) -> Self { + let main_id = functions.first().expect("Expected at least 1 SSA function").id(); + let mut max_id = main_id; + + let functions = btree_map(functions, |f| { + max_id = std::cmp::max(max_id, f.id()); + (f.id(), f) + }); + + Self { functions, main_id, next_id: AtomicCounter::starting_after(max_id) } + } + + /// Returns the entry-point function of the program + pub(crate) fn main(&self) -> &Function { + &self.functions[&self.main_id] } } impl Display for Ssa { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - for function in &self.functions { + for function in self.functions.values() { writeln!(f, "{function}")?; } Ok(()) diff --git a/crates/noirc_frontend/Cargo.toml b/crates/noirc_frontend/Cargo.toml index b5551d17f5..f3fc1c8375 100644 --- a/crates/noirc_frontend/Cargo.toml +++ b/crates/noirc_frontend/Cargo.toml @@ -18,6 +18,7 @@ thiserror.workspace = true smol_str.workspace = true serde.workspace = true rustc-hash = "1.1.0" +small-ord-set = "0.1.3" [dev-dependencies] strum = "0.24" diff --git a/crates/noirc_frontend/src/ast/mod.rs b/crates/noirc_frontend/src/ast/mod.rs index 6bd5c148d6..24004e34ff 100644 --- a/crates/noirc_frontend/src/ast/mod.rs +++ b/crates/noirc_frontend/src/ast/mod.rs @@ -16,7 +16,11 @@ use noirc_errors::Span; pub use statement::*; pub use structure::*; -use crate::{parser::ParserError, token::IntType, BinaryTypeOperator, CompTime}; +use crate::{ + parser::{ParserError, ParserErrorReason}, + token::IntType, + BinaryTypeOperator, CompTime, +}; use iter_extended::vecmap; /// The parser parses types as 'UnresolvedType's which @@ -148,13 +152,16 @@ pub enum Signedness { } impl UnresolvedTypeExpression { + // This large error size is justified because it improves parsing speeds by around 40% in + // release mode. See `ParserError` definition for further explanation. 
+ #[allow(clippy::result_large_err)] pub fn from_expr( expr: Expression, span: Span, ) -> Result { - Self::from_expr_helper(expr).map_err(|err| { + Self::from_expr_helper(expr).map_err(|err_expr| { ParserError::with_reason( - format!("Expression is invalid in an array-length type: '{err}'. Only unsigned integer constants, globals, generics, +, -, *, /, and % may be used in this context."), + ParserErrorReason::InvalidArrayLengthExpression(err_expr), span, ) }) diff --git a/crates/noirc_frontend/src/ast/statement.rs b/crates/noirc_frontend/src/ast/statement.rs index 5e0dd4e439..d4fabccea7 100644 --- a/crates/noirc_frontend/src/ast/statement.rs +++ b/crates/noirc_frontend/src/ast/statement.rs @@ -1,7 +1,7 @@ use std::fmt::Display; use crate::lexer::token::SpannedToken; -use crate::parser::ParserError; +use crate::parser::{ParserError, ParserErrorReason}; use crate::token::Token; use crate::{Expression, ExpressionKind, IndexExpression, MemberAccessExpression, UnresolvedType}; use iter_extended::vecmap; @@ -59,8 +59,10 @@ impl Statement { | Statement::Error => { // To match rust, statements always require a semicolon, even at the end of a block if semi.is_none() { - let reason = "Expected a ; separating these two statements".to_string(); - emit_error(ParserError::with_reason(reason, span)); + emit_error(ParserError::with_reason( + ParserErrorReason::MissingSeparatingSemi, + span, + )); } self } @@ -83,8 +85,10 @@ impl Statement { // for unneeded expressions like { 1 + 2; 3 } (_, Some(_), false) => Statement::Expression(expr), (_, None, false) => { - let reason = "Expected a ; separating these two statements".to_string(); - emit_error(ParserError::with_reason(reason, span)); + emit_error(ParserError::with_reason( + ParserErrorReason::MissingSeparatingSemi, + span, + )); Statement::Expression(expr) } diff --git a/crates/noirc_frontend/src/hir/resolution/errors.rs b/crates/noirc_frontend/src/hir/resolution/errors.rs index c57e4c890d..87257cbb84 100644 --- 
a/crates/noirc_frontend/src/hir/resolution/errors.rs +++ b/crates/noirc_frontend/src/hir/resolution/errors.rs @@ -57,7 +57,7 @@ pub enum ResolverError { #[error("Incorrect amount of arguments to generic type constructor")] IncorrectGenericCount { span: Span, struct_type: String, actual: usize, expected: usize }, #[error("{0}")] - ParserError(ParserError), + ParserError(Box), #[error("Function is not defined in a contract yet sets its contract visibility")] ContractFunctionTypeInNormalFunction { span: Span }, } @@ -252,7 +252,7 @@ impl From for Diagnostic { span, ) } - ResolverError::ParserError(error) => error.into(), + ResolverError::ParserError(error) => (*error).into(), ResolverError::ContractFunctionTypeInNormalFunction { span } => Diagnostic::simple_error( "Only functions defined within contracts can set their contract function type".into(), "Non-contract functions cannot be 'open'".into(), diff --git a/crates/noirc_frontend/src/hir/resolution/resolver.rs b/crates/noirc_frontend/src/hir/resolution/resolver.rs index 98cf5993ed..d80bca9df1 100644 --- a/crates/noirc_frontend/src/hir/resolution/resolver.rs +++ b/crates/noirc_frontend/src/hir/resolution/resolver.rs @@ -859,7 +859,7 @@ impl<'a> Resolver<'a> { let span = length.span; let length = UnresolvedTypeExpression::from_expr(*length, span).unwrap_or_else( |error| { - self.errors.push(ResolverError::ParserError(error)); + self.errors.push(ResolverError::ParserError(Box::new(error))); UnresolvedTypeExpression::Constant(0, span) }, ); @@ -1357,7 +1357,7 @@ mod test { let src = r#" fn main(x : Field) { let y = x + x; - constrain y == x; + assert(y == x); } "#; @@ -1369,7 +1369,7 @@ mod test { let src = r#" fn main(x : Field) { let y = x + x; - constrain x == x; + assert(x == x); } "#; @@ -1392,7 +1392,7 @@ mod test { let src = r#" fn main(x : Field) { let y = x + x; - constrain y == z; + assert(y == z); } "#; @@ -1428,7 +1428,7 @@ mod test { let src = r#" fn main(x : Field) { let y = 5; - constrain y == x; + 
assert(y == x); } "#; diff --git a/crates/noirc_frontend/src/hir/type_check/expr.rs b/crates/noirc_frontend/src/hir/type_check/expr.rs index aba44e36d2..8a91ecbfde 100644 --- a/crates/noirc_frontend/src/hir/type_check/expr.rs +++ b/crates/noirc_frontend/src/hir/type_check/expr.rs @@ -252,29 +252,13 @@ impl<'interner> TypeChecker<'interner> { let index_type = self.check_expression(&index_expr.index); let span = self.interner.expr_span(&index_expr.index); - self.unify(&index_type, &Type::comp_time(Some(span)), span, || { - // Specialize the error in the case the user has a Field, just not a `comptime` one. - if matches!(index_type, Type::FieldElement(..)) { - TypeCheckError::Unstructured { - msg: format!("Array index must be known at compile-time, but here a non-comptime {index_type} was used instead"), - span, - } - } else { - TypeCheckError::TypeMismatch { - expected_typ: "comptime Field".to_owned(), - expr_typ: index_type.to_string(), - expr_span: span, - } + index_type.make_subtype_of(&Type::field(Some(span)), span, &mut self.errors, || { + TypeCheckError::TypeMismatch { + expected_typ: "Field".to_owned(), + expr_typ: index_type.to_string(), + expr_span: span, } }); - // TODO: replace the above by the below in order to activate dynamic arrays - // index_type.make_subtype_of(&Type::field(Some(span)), span, errors, || { - // TypeCheckError::TypeMismatch { - // expected_typ: "Field".to_owned(), - // expr_typ: index_type.to_string(), - // expr_span: span, - // } - // }); let lhs_type = self.check_expression(&index_expr.collection); match lhs_type { @@ -471,18 +455,47 @@ impl<'interner> TypeChecker<'interner> { fn check_member_access(&mut self, access: expr::HirMemberAccess, expr_id: ExprId) -> Type { let lhs_type = self.check_expression(&access.lhs).follow_bindings(); + let span = self.interner.expr_span(&expr_id); + + match self.check_field_access(&lhs_type, &access.rhs.0.contents, span) { + Some((element_type, index)) => { + self.interner.set_field_index(expr_id, 
index); + element_type + } + None => Type::Error, + } + } + + /// This will verify that an expression in the form `lhs.rhs_name` has the given field and will push + /// a type error if it does not. If there is no error, the type of the struct/tuple field is returned + /// along with the index of the field in question. + /// + /// This function is abstracted from check_member_access so that it can be shared between + /// there and the HirLValue::MemberAccess case of check_lvalue. + pub(super) fn check_field_access( + &mut self, + lhs_type: &Type, + field_name: &str, + span: Span, + ) -> Option<(Type, usize)> { + let lhs_type = lhs_type.follow_bindings(); if let Type::Struct(s, args) = &lhs_type { let s = s.borrow(); - if let Some((field, index)) = s.get_field(&access.rhs.0.contents, args) { - self.interner.set_field_index(expr_id, index); - return field; + if let Some((field, index)) = s.get_field(field_name, args) { + return Some((field, index)); } } else if let Type::Tuple(elements) = &lhs_type { - if let Ok(index) = access.rhs.0.contents.parse::() { - if index < elements.len() { - self.interner.set_field_index(expr_id, index); - return elements[index].clone(); + if let Ok(index) = field_name.parse::() { + let length = elements.len(); + if index < length { + return Some((elements[index].clone(), index)); + } else { + self.errors.push(TypeCheckError::Unstructured { + msg: format!("Index {index} is out of bounds for this tuple {lhs_type} of length {length}"), + span, + }); + return None; } } } @@ -490,17 +503,13 @@ impl<'interner> TypeChecker<'interner> { // If we get here the type has no field named 'access.rhs'. // Now we specialize the error message based on whether we know the object type in question yet. if let Type::TypeVariable(..) 
= &lhs_type { - self.errors.push(TypeCheckError::TypeAnnotationsNeeded { - span: self.interner.expr_span(&access.lhs), - }); + self.errors.push(TypeCheckError::TypeAnnotationsNeeded { span }); } else if lhs_type != Type::Error { - self.errors.push(TypeCheckError::Unstructured { - msg: format!("Type {lhs_type} has no member named {}", access.rhs), - span: self.interner.expr_span(&access.lhs), - }); + let msg = format!("Type {lhs_type} has no member named {field_name}"); + self.errors.push(TypeCheckError::Unstructured { msg, span }); } - Type::Error + None } fn comparator_operand_type_rules( diff --git a/crates/noirc_frontend/src/hir/type_check/stmt.rs b/crates/noirc_frontend/src/hir/type_check/stmt.rs index c5ad701141..7bd5039240 100644 --- a/crates/noirc_frontend/src/hir/type_check/stmt.rs +++ b/crates/noirc_frontend/src/hir/type_check/stmt.rs @@ -142,49 +142,31 @@ impl<'interner> TypeChecker<'interner> { (typ.clone(), HirLValue::Ident(ident, typ)) } HirLValue::MemberAccess { object, field_name, .. 
} => { - let (result, object) = self.check_lvalue(*object, assign_span); + let (lhs_type, object) = self.check_lvalue(*object, assign_span); let object = Box::new(object); - let mut error = |typ| { - self.errors.push(TypeCheckError::Unstructured { - msg: format!("Type {typ} has no member named {field_name}"), - span: field_name.span(), - }); - (Type::Error, None) - }; - - let (typ, field_index) = match result { - Type::Struct(def, args) => { - match def.borrow().get_field(&field_name.0.contents, &args) { - Some((field, index)) => (field, Some(index)), - None => error(Type::Struct(def.clone(), args)), - } - } - Type::Error => (Type::Error, None), - other => error(other), - }; + let span = field_name.span(); + let (typ, field_index) = self + .check_field_access(&lhs_type, &field_name.0.contents, span) + .unwrap_or((Type::Error, 0)); + let field_index = Some(field_index); (typ.clone(), HirLValue::MemberAccess { object, field_name, field_index, typ }) } HirLValue::Index { array, index, .. } => { let index_type = self.check_expression(&index); let expr_span = self.interner.expr_span(&index); - self.unify(&index_type, &Type::comp_time(Some(expr_span)), expr_span, || { - TypeCheckError::TypeMismatch { - expected_typ: "comptime Field".to_owned(), + index_type.make_subtype_of( + &Type::field(Some(expr_span)), + expr_span, + &mut self.errors, + || TypeCheckError::TypeMismatch { + expected_typ: "Field".to_owned(), expr_typ: index_type.to_string(), expr_span, - } - }); - //TODO replace the above by the below in order to activate dynamic arrays - // index_type.make_subtype_of(&Type::field(Some(expr_span)), expr_span, || { - // TypeCheckError::TypeMismatch { - // expected_typ: "Field".to_owned(), - // expr_typ: index_type.to_string(), - // expr_span, - // } - // }); + }, + ); let (result, array) = self.check_lvalue(*array, assign_span); let array = Box::new(array); diff --git a/crates/noirc_frontend/src/hir_def/types.rs b/crates/noirc_frontend/src/hir_def/types.rs index 
be7d90e089..9a6f83ddd5 100644 --- a/crates/noirc_frontend/src/hir_def/types.rs +++ b/crates/noirc_frontend/src/hir_def/types.rs @@ -652,7 +652,7 @@ impl std::fmt::Display for Type { write!(f, "fn({}) -> {}", args.join(", "), ret) } Type::Vec(element) => { - write!(f, "Vec<{}>", element) + write!(f, "Vec<{element}>") } } } diff --git a/crates/noirc_frontend/src/lexer/lexer.rs b/crates/noirc_frontend/src/lexer/lexer.rs index 5e0d99cfed..2c8583ef2c 100644 --- a/crates/noirc_frontend/src/lexer/lexer.rs +++ b/crates/noirc_frontend/src/lexer/lexer.rs @@ -162,9 +162,8 @@ impl<'a> Lexer<'a> { if self.peek_char_is('=') { self.next_char(); Ok(Token::GreaterEqual.into_span(start, start + 1)) - } else if self.peek_char_is('>') { - self.next_char(); - Ok(Token::ShiftRight.into_span(start, start + 1)) + // Note: There is deliberately no case for RightShift. We always lex >> as + // two separate Greater tokens to help the parser parse nested generic types. } else { Ok(prev_token.into_single_span(start)) } @@ -387,7 +386,8 @@ fn test_single_double_char() { Token::Assign, Token::Equal, Token::ShiftLeft, - Token::ShiftRight, + Token::Greater, + Token::Greater, Token::EOF, ]; diff --git a/crates/noirc_frontend/src/lexer/token.rs b/crates/noirc_frontend/src/lexer/token.rs index bfcd0f4be5..fe0e3bf1f9 100644 --- a/crates/noirc_frontend/src/lexer/token.rs +++ b/crates/noirc_frontend/src/lexer/token.rs @@ -189,7 +189,7 @@ impl fmt::Display for Token { } } -#[derive(PartialEq, Eq, Hash, Debug, Clone)] +#[derive(PartialEq, Eq, Hash, Debug, Clone, Ord, PartialOrd)] /// The different kinds of tokens that are possible in the target language pub enum TokenKind { Token(Token), diff --git a/crates/noirc_frontend/src/node_interner.rs b/crates/noirc_frontend/src/node_interner.rs index d8ea11ae89..f3cb00ae9c 100644 --- a/crates/noirc_frontend/src/node_interner.rs +++ b/crates/noirc_frontend/src/node_interner.rs @@ -586,15 +586,11 @@ impl NodeInterner { #[allow(deprecated)] pub fn foreign(&self, 
opcode: &str) -> bool { let is_supported = acvm::default_is_opcode_supported(self.language.clone()); - let black_box_func = match acvm::acir::BlackBoxFunc::lookup(opcode) { - Some(black_box_func) => black_box_func, + let black_box_func_call = match acvm::acir::BlackBoxFunc::lookup(opcode) { + Some(black_box_func) => BlackBoxFuncCall::dummy(black_box_func), None => return false, }; - is_supported(&Opcode::BlackBoxFuncCall(BlackBoxFuncCall { - name: black_box_func, - inputs: Vec::new(), - outputs: Vec::new(), - })) + is_supported(&Opcode::BlackBoxFuncCall(black_box_func_call)) } pub fn push_delayed_type_check(&mut self, f: TypeCheckFn) { diff --git a/crates/noirc_frontend/src/parser/errors.rs b/crates/noirc_frontend/src/parser/errors.rs index 7f19ef7f06..d4a294482a 100644 --- a/crates/noirc_frontend/src/parser/errors.rs +++ b/crates/noirc_frontend/src/parser/errors.rs @@ -1,65 +1,77 @@ -use std::collections::BTreeSet; - use crate::lexer::token::Token; -use crate::BinaryOp; +use crate::Expression; +use small_ord_set::SmallOrdSet; +use thiserror::Error; use iter_extended::vecmap; use noirc_errors::CustomDiagnostic as Diagnostic; use noirc_errors::Span; +use super::labels::ParsingRuleLabel; + +#[derive(Debug, Clone, PartialEq, Eq, Error)] +pub enum ParserErrorReason { + #[error("Arrays must have at least one element")] + ZeroSizedArray, + #[error("Unexpected '{0}', expected a field name")] + ExpectedFieldName(Token), + #[error("Expected a ; separating these two statements")] + MissingSeparatingSemi, + #[error("constrain keyword is deprecated")] + ConstrainDeprecated, + #[error("Expression is invalid in an array-length type: '{0}'. Only unsigned integer constants, globals, generics, +, -, *, /, and % may be used in this context.")] + InvalidArrayLengthExpression(Expression), +} + +/// Represents a parsing error, or a parsing error in the making. 
+/// +/// `ParserError` is used extensively by the parser, as it is not only used to report badly formed +/// token streams, but also as a general intermediate that accumulates information as various +/// parsing rules are tried. This struct is constructed and destructed with a very high frequency +/// and as such, the time taken to do so significantly impacts parsing performance. For this +/// reason we use `SmallOrdSet` to avoid heap allocations for as long as possible - this greatly +/// inflates the size of the error, but this is justified by a resulting increase in parsing +/// speeds of approximately 40% in release mode. +/// +/// Both `expected_tokens` and `expected_labels` use `SmallOrdSet` sized 1. In the case of labels this +/// is optimal. In the case of tokens we stop here due to fast diminishing returns. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParserError { - expected_tokens: BTreeSet, - expected_labels: BTreeSet, + expected_tokens: SmallOrdSet<[Token; 1]>, + expected_labels: SmallOrdSet<[ParsingRuleLabel; 1]>, found: Token, - reason: Option, + reason: Option, span: Span, } impl ParserError { pub fn empty(found: Token, span: Span) -> ParserError { ParserError { - expected_tokens: BTreeSet::new(), - expected_labels: BTreeSet::new(), + expected_tokens: SmallOrdSet::new(), + expected_labels: SmallOrdSet::new(), found, reason: None, span, } } - pub fn expected(token: Token, found: Token, span: Span) -> ParserError { - let mut error = ParserError::empty(found, span); - error.expected_tokens.insert(token); - error - } - - pub fn expected_label(label: String, found: Token, span: Span) -> ParserError { + pub fn expected_label(label: ParsingRuleLabel, found: Token, span: Span) -> ParserError { let mut error = ParserError::empty(found, span); error.expected_labels.insert(label); error } - pub fn with_reason(reason: String, span: Span) -> ParserError { + pub fn with_reason(reason: ParserErrorReason, span: Span) -> ParserError { let mut error =
ParserError::empty(Token::EOF, span); error.reason = Some(reason); error } - - pub fn invalid_constrain_operator(operator: BinaryOp) -> ParserError { - let message = format!( - "Cannot use the {} operator in a constraint statement.", - operator.contents.as_string() - ); - let mut error = ParserError::empty(operator.contents.as_token(), operator.span()); - error.reason = Some(message); - error - } } impl std::fmt::Display for ParserError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut expected = vecmap(&self.expected_tokens, ToString::to_string); - expected.append(&mut vecmap(&self.expected_labels, Clone::clone)); + expected.append(&mut vecmap(&self.expected_labels, |label| format!("{label}"))); if expected.is_empty() { write!(f, "Unexpected {} in input", self.found) @@ -84,7 +96,19 @@ impl std::fmt::Display for ParserError { impl From for Diagnostic { fn from(error: ParserError) -> Diagnostic { match &error.reason { - Some(reason) => Diagnostic::simple_error(reason.clone(), String::new(), error.span), + Some(reason) => { + match reason { + ParserErrorReason::ConstrainDeprecated => Diagnostic::simple_warning( + "Use of deprecated keyword 'constrain'".into(), + "The 'constrain' keyword has been deprecated. 
Please use the 'assert' function instead.".into(), + error.span, + ), + other => { + + Diagnostic::simple_error(format!("{other}"), String::new(), error.span) + } + } + } None => { let primary = error.to_string(); Diagnostic::simple_error(primary, String::new(), error.span) @@ -95,7 +119,7 @@ impl From for Diagnostic { impl chumsky::Error for ParserError { type Span = Span; - type Label = String; + type Label = ParsingRuleLabel; fn expected_input_found(span: Self::Span, expected: Iter, found: Option) -> Self where @@ -103,7 +127,7 @@ impl chumsky::Error for ParserError { { ParserError { expected_tokens: expected.into_iter().map(|opt| opt.unwrap_or(Token::EOF)).collect(), - expected_labels: BTreeSet::new(), + expected_labels: SmallOrdSet::new(), found: found.unwrap_or(Token::EOF), reason: None, span, @@ -130,7 +154,7 @@ impl chumsky::Error for ParserError { self.reason = other.reason; } - assert_eq!(self.span, other.span); + self.span = self.span.merge(other.span); self } } diff --git a/crates/noirc_frontend/src/parser/labels.rs b/crates/noirc_frontend/src/parser/labels.rs new file mode 100644 index 0000000000..b43c10fb9e --- /dev/null +++ b/crates/noirc_frontend/src/parser/labels.rs @@ -0,0 +1,42 @@ +use std::fmt; + +use crate::token::TokenKind; + +/// Used to annotate parsing rules with extra context that can be presented to the user later in +/// the case of an error. 
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)] +pub enum ParsingRuleLabel { + Atom, + BinaryOperator, + Cast, + Expression, + FieldAccess, + Global, + IntegerType, + Parameter, + Pattern, + Statement, + Term, + TypeExpression, + TokenKind(TokenKind), +} + +impl fmt::Display for ParsingRuleLabel { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + ParsingRuleLabel::Atom => write!(f, "atom"), + ParsingRuleLabel::BinaryOperator => write!(f, "binary operator"), + ParsingRuleLabel::Cast => write!(f, "cast"), + ParsingRuleLabel::Expression => write!(f, "expression"), + ParsingRuleLabel::FieldAccess => write!(f, "field access"), + ParsingRuleLabel::Global => write!(f, "global"), + ParsingRuleLabel::IntegerType => write!(f, "integer type"), + ParsingRuleLabel::Parameter => write!(f, "parameter"), + ParsingRuleLabel::Pattern => write!(f, "pattern"), + ParsingRuleLabel::Statement => write!(f, "statement"), + ParsingRuleLabel::Term => write!(f, "term"), + ParsingRuleLabel::TypeExpression => write!(f, "type expression"), + ParsingRuleLabel::TokenKind(token_kind) => write!(f, "{:?}", token_kind), + } + } +} diff --git a/crates/noirc_frontend/src/parser/mod.rs b/crates/noirc_frontend/src/parser/mod.rs index 788c0eec89..a8b7f43fa5 100644 --- a/crates/noirc_frontend/src/parser/mod.rs +++ b/crates/noirc_frontend/src/parser/mod.rs @@ -7,6 +7,7 @@ //! This file is mostly helper functions and types for the parser. For the parser itself, //! see parser.rs. The definition of the abstract syntax tree can be found in the `ast` folder. 
mod errors; +mod labels; #[allow(clippy::module_inception)] mod parser; @@ -24,6 +25,7 @@ use acvm::FieldElement; use chumsky::prelude::*; use chumsky::primitive::Container; pub use errors::ParserError; +pub use errors::ParserErrorReason; use noirc_errors::Span; pub use parser::parse_program; @@ -176,7 +178,7 @@ where .try_map(move |peek, span| { if too_far.get_iter().any(|t| t == peek) { // This error will never be shown to the user - Err(ParserError::with_reason(String::new(), span)) + Err(ParserError::empty(Token::EOF, span)) } else { Ok(Recoverable::error(span)) } diff --git a/crates/noirc_frontend/src/parser/parser.rs b/crates/noirc_frontend/src/parser/parser.rs index 575a9403ea..98b4524756 100644 --- a/crates/noirc_frontend/src/parser/parser.rs +++ b/crates/noirc_frontend/src/parser/parser.rs @@ -24,9 +24,10 @@ //! be limited to cases like the above `fn` example where it is clear we shouldn't back out of the //! current parser to try alternative parsers in a `choice` expression. use super::{ - foldl_with_span, parameter_name_recovery, parameter_recovery, parenthesized, then_commit, - then_commit_ignore, top_level_statement_recovery, ExprParser, ForRange, NoirParser, - ParsedModule, ParserError, Precedence, SubModule, TopLevelStatement, + foldl_with_span, labels::ParsingRuleLabel, parameter_name_recovery, parameter_recovery, + parenthesized, then_commit, then_commit_ignore, top_level_statement_recovery, ExprParser, + ForRange, NoirParser, ParsedModule, ParserError, ParserErrorReason, Precedence, SubModule, + TopLevelStatement, }; use crate::ast::{Expression, ExpressionKind, LetStatement, Statement, UnresolvedType}; use crate::lexer::Lexer; @@ -113,7 +114,7 @@ fn top_level_statement( /// global_declaration: 'global' ident global_type_annotation '=' literal fn global_declaration() -> impl NoirParser { let p = ignore_then_commit( - keyword(Keyword::Global).labelled("global"), + keyword(Keyword::Global).labelled(ParsingRuleLabel::Global), 
ident().map(Pattern::Identifier), ); let p = then_commit(p, global_type_annotation()); @@ -273,7 +274,10 @@ fn lambda_parameters() -> impl NoirParser> { .recover_via(parameter_name_recovery()) .then(typ.or_not().map(|typ| typ.unwrap_or(UnresolvedType::Unspecified))); - parameter.separated_by(just(Token::Comma)).allow_trailing().labelled("parameter") + parameter + .separated_by(just(Token::Comma)) + .allow_trailing() + .labelled(ParsingRuleLabel::Parameter) } fn function_parameters<'a>( @@ -292,7 +296,10 @@ fn function_parameters<'a>( let parameter = full_parameter.or(self_parameter); - parameter.separated_by(just(Token::Comma)).allow_trailing().labelled("parameter") + parameter + .separated_by(just(Token::Comma)) + .allow_trailing() + .labelled(ParsingRuleLabel::Parameter) } /// This parser always parses no input and fails @@ -308,7 +315,7 @@ fn self_parameter() -> impl NoirParser<(Pattern, UnresolvedType, AbiVisibility)> let self_type = UnresolvedType::Named(path, vec![]); Ok((Pattern::Identifier(ident), self_type, AbiVisibility::Private)) } - _ => Err(ParserError::expected_label("parameter".to_owned(), found, span)), + _ => Err(ParserError::expected_label(ParsingRuleLabel::Parameter, found, span)), }) } @@ -406,7 +413,11 @@ fn token_kind(token_kind: TokenKind) -> impl NoirParser { if found.kind() == token_kind { Ok(found) } else { - Err(ParserError::expected_label(token_kind.to_string(), found, span)) + Err(ParserError::expected_label( + ParsingRuleLabel::TokenKind(token_kind.clone()), + found, + span, + )) } }) } @@ -446,8 +457,15 @@ fn constrain<'a, P>(expr_parser: P) -> impl NoirParser + 'a where P: ExprParser + 'a, { - ignore_then_commit(keyword(Keyword::Constrain).labelled("statement"), expr_parser) - .map(|expr| Statement::Constrain(ConstrainStatement(expr))) + ignore_then_commit( + keyword(Keyword::Constrain).labelled(ParsingRuleLabel::Statement), + expr_parser, + ) + .map(|expr| Statement::Constrain(ConstrainStatement(expr))) + .validate(|expr, span, 
emit| { + emit(ParserError::with_reason(ParserErrorReason::ConstrainDeprecated, span)); + expr + }) } fn assertion<'a, P>(expr_parser: P) -> impl NoirParser + 'a @@ -455,7 +473,7 @@ where P: ExprParser + 'a, { ignore_then_commit(keyword(Keyword::Assert), parenthesized(expr_parser)) - .labelled("statement") + .labelled(ParsingRuleLabel::Statement) .map(|expr| Statement::Constrain(ConstrainStatement(expr))) } @@ -463,7 +481,8 @@ fn declaration<'a, P>(expr_parser: P) -> impl NoirParser + 'a where P: ExprParser + 'a, { - let p = ignore_then_commit(keyword(Keyword::Let).labelled("statement"), pattern()); + let p = + ignore_then_commit(keyword(Keyword::Let).labelled(ParsingRuleLabel::Statement), pattern()); let p = p.then(optional_type_annotation()); let p = then_commit_ignore(p, just(Token::Assign)); let p = then_commit(p, expr_parser); @@ -497,14 +516,15 @@ fn pattern() -> impl NoirParser { choice((mut_pattern, tuple_pattern, struct_pattern, ident_pattern)) }) - .labelled("pattern") + .labelled(ParsingRuleLabel::Pattern) } fn assignment<'a, P>(expr_parser: P) -> impl NoirParser + 'a where P: ExprParser + 'a, { - let fallible = lvalue(expr_parser.clone()).then(assign_operator()).labelled("statement"); + let fallible = + lvalue(expr_parser.clone()).then(assign_operator()).labelled(ParsingRuleLabel::Statement); then_commit(fallible, expr_parser).map_with_span( |((identifier, operator), expression), span| { @@ -513,9 +533,22 @@ where ) } +/// Parse an assignment operator `=` optionally prefixed by a binary operator for a combined +/// assign statement shorthand. Notably, this must handle a few corner cases with how `>>` is +/// lexed as two separate greater-than operators rather than a single right-shift. 
fn assign_operator() -> impl NoirParser { let shorthand_operators = Token::assign_shorthand_operators(); - let shorthand_syntax = one_of(shorthand_operators).then_ignore(just(Token::Assign)); + // We need to explicitly check for right_shift here since it is actually + // two separate greater-than operators. + let shorthand_operators = right_shift_operator().or(one_of(shorthand_operators)); + let shorthand_syntax = shorthand_operators.then_ignore(just(Token::Assign)); + + // Since >> is lexed as two separate greater-thans, >>= is lexed as > >=, so + // we need to account for that case here as well. + let right_shift_fix = + just(Token::Greater).then(just(Token::GreaterEqual)).map(|_| Token::ShiftRight); + + let shorthand_syntax = shorthand_syntax.or(right_shift_fix); just(Token::Assign).or(shorthand_syntax) } @@ -530,7 +563,7 @@ where { let l_ident = ident().map(LValue::Ident); - let l_member_rhs = just(Token::Dot).ignore_then(ident()).map(LValueRhs::MemberAccess); + let l_member_rhs = just(Token::Dot).ignore_then(field_name()).map(LValueRhs::MemberAccess); let l_index = expr_parser .delimited_by(just(Token::LeftBracket), just(Token::RightBracket)) @@ -606,7 +639,7 @@ fn int_type() -> impl NoirParser { .then(filter_map(|span, token: Token| match token { Token::IntType(int_type) => Ok(int_type), unexpected => { - Err(ParserError::expected_label("integer type".to_string(), unexpected, span)) + Err(ParserError::expected_label(ParsingRuleLabel::IntegerType, unexpected, span)) } })) .map(UnresolvedType::from_int_token) @@ -652,7 +685,7 @@ fn array_type(type_parser: impl NoirParser) -> impl NoirParser impl NoirParser { recursive(|expr| expression_with_precedence(Precedence::lowest_type_precedence(), expr, true)) - .labelled("type expression") + .labelled(ParsingRuleLabel::TypeExpression) .try_map(UnresolvedTypeExpression::from_expr) } @@ -678,7 +711,7 @@ where fn expression() -> impl ExprParser { recursive(|expr| expression_with_precedence(Precedence::Lowest, expr, 
false)) - .labelled("expression") + .labelled(ParsingRuleLabel::Expression) } // An expression is a single term followed by 0 or more (OP subexpression)* @@ -695,9 +728,9 @@ where { if precedence == Precedence::Highest { if is_type_expression { - type_expression_term(expr_parser).boxed().labelled("term") + type_expression_term(expr_parser).boxed().labelled(ParsingRuleLabel::Term) } else { - term(expr_parser).boxed().labelled("term") + term(expr_parser).boxed().labelled(ParsingRuleLabel::Term) } } else { let next_precedence = @@ -711,7 +744,7 @@ where .then(then_commit(operator_with_precedence(precedence), next_expr).repeated()) .foldl(create_infix_expression) .boxed() - .labelled("expression") + .labelled(ParsingRuleLabel::Expression) } } @@ -722,14 +755,23 @@ fn create_infix_expression(lhs: Expression, (operator, rhs): (BinaryOp, Expressi Expression { span, kind: ExpressionKind::Infix(infix) } } +// Right-shift (>>) is issued as two separate > tokens by the lexer as this makes it easier +// to parse nested generic types. For normal expressions however, it means we have to manually +// parse two greater-than tokens as a single right-shift here. 
+fn right_shift_operator() -> impl NoirParser { + just(Token::Greater).then(just(Token::Greater)).map(|_| Token::ShiftRight) +} + fn operator_with_precedence(precedence: Precedence) -> impl NoirParser> { - filter_map(move |span, token: Token| { - if Precedence::token_precedence(&token) == Some(precedence) { - Ok(token.try_into_binary_op(span).unwrap()) - } else { - Err(ParserError::expected_label("binary operator".to_string(), token, span)) - } - }) + right_shift_operator() + .or(any()) // Parse any single token, we're validating it as an operator next + .try_map(move |token, span| { + if Precedence::token_precedence(&token) == Some(precedence) { + Ok(token.try_into_binary_op(span).unwrap()) + } else { + Err(ParserError::expected_label(ParsingRuleLabel::BinaryOperator, token, span)) + } + }) } fn term<'a, P>(expr_parser: P) -> impl NoirParser + 'a @@ -778,15 +820,17 @@ where .map(UnaryRhs::ArrayIndex); // `as Type` in `atom as Type` - let cast_rhs = - keyword(Keyword::As).ignore_then(parse_type()).map(UnaryRhs::Cast).labelled("cast"); + let cast_rhs = keyword(Keyword::As) + .ignore_then(parse_type()) + .map(UnaryRhs::Cast) + .labelled(ParsingRuleLabel::Cast); // `.foo` or `.foo(args)` in `atom.foo` or `atom.foo(args)` let member_rhs = just(Token::Dot) .ignore_then(field_name()) .then(parenthesized(expression_list(expr_parser.clone())).or_not()) .map(UnaryRhs::MemberAccess) - .labelled("field access"); + .labelled(ParsingRuleLabel::FieldAccess); let rhs = choice((call_rhs, array_rhs, cast_rhs, member_rhs)); @@ -877,10 +921,7 @@ where .delimited_by(just(Token::LeftBracket), just(Token::RightBracket)) .validate(|elements, span, emit| { if elements.is_empty() { - emit(ParserError::with_reason( - "Arrays must have at least one element".to_owned(), - span, - )); + emit(ParserError::with_reason(ParserErrorReason::ZeroSizedArray, span)); } ExpressionKind::array(elements) }) @@ -938,7 +979,7 @@ where .map_with_span(Expression::new) .or(parenthesized(expr_parser.clone())) 
.or(tuple(expr_parser)) - .labelled("atom") + .labelled(ParsingRuleLabel::Atom) } /// Atoms within type expressions are limited to only variables, literals, and parenthesized @@ -951,7 +992,7 @@ where .or(literal()) .map_with_span(Expression::new) .or(parenthesized(expr_parser)) - .labelled("atom") + .labelled(ParsingRuleLabel::Atom) } fn tuple

(expr_parser: P) -> impl NoirParser @@ -966,8 +1007,7 @@ fn field_name() -> impl NoirParser { ident().or(token_kind(TokenKind::Literal).validate(|token, span, emit| match token { Token::Int(_) => Ident::from(Spanned::from(span, token.to_string())), other => { - let reason = format!("Unexpected '{other}', expected a field name"); - emit(ParserError::with_reason(reason, span)); + emit(ParserError::with_reason(ParserErrorReason::ExpectedFieldName(other), span)); Ident::error(span) } })) @@ -1196,10 +1236,12 @@ mod test { ); } - /// This is the standard way to declare a constrain statement + /// Deprecated constrain usage test #[test] fn parse_constrain() { - parse_with(constrain(expression()), "constrain x == y").unwrap(); + let errors = parse_with(constrain(expression()), "constrain x == y").unwrap_err(); + assert_eq!(errors.len(), 1); + assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); // Currently we disallow constrain statements where the outer infix operator // produces a value. This would require an implicit `==` which @@ -1217,7 +1259,9 @@ mod test { for operator in disallowed_operators { let src = format!("constrain x {} y;", operator.as_string()); - parse_with(constrain(expression()), &src).unwrap_err(); + let errors = parse_with(constrain(expression()), &src).unwrap_err(); + assert_eq!(errors.len(), 2); + assert!(format!("{}", errors.first().unwrap()).contains("deprecated")); } // These are general cases which should always work. 
@@ -1226,7 +1270,7 @@ mod test { // The first (inner) `==` is a predicate which returns 0/1 // The outer layer is an infix `==` which is // associated with the Constrain statement - parse_all( + let errors = parse_all_failing( constrain(expression()), vec![ "constrain ((x + y) == k) + z == y", @@ -1236,8 +1280,11 @@ mod test { "constrain x + x ^ x == y | m", ], ); + assert_eq!(errors.len(), 5); + assert!(errors.iter().all(|err| { format!("{}", err).contains("deprecated") })); } + /// This is the standard way to declare an assert statement #[test] fn parse_assert() { parse_with(assertion(expression()), "assert(x == y)").unwrap(); @@ -1533,9 +1580,9 @@ mod test { ("let = ", 2, "let $error: unspecified = Error"), ("let", 3, "let $error: unspecified = Error"), ("foo = one two three", 1, "foo = plain::one"), - ("constrain", 1, "constrain Error"), + ("constrain", 2, "constrain Error"), ("assert", 1, "constrain Error"), - ("constrain x ==", 1, "constrain (plain::x == Error)"), + ("constrain x ==", 2, "constrain (plain::x == Error)"), ("assert(x ==)", 1, "constrain (plain::x == Error)"), ]; diff --git a/crates/wasm/src/compile.rs b/crates/wasm/src/compile.rs index 18bd30029b..ecf2b78936 100644 --- a/crates/wasm/src/compile.rs +++ b/crates/wasm/src/compile.rs @@ -95,19 +95,7 @@ pub fn compile(args: JsValue) -> JsValue { .compile_contracts(&options.compile_options) .unwrap_or_else(|_| panic!("Contract compilation failed")); - // Flatten each contract into a list of its functions, each being assigned a unique name. 
- let collected_compiled_programs: Vec<_> = compiled_contracts - .into_iter() - .flat_map(|contract| { - let contract_id = format!("{}-{}", options.circuit_name, &contract.name); - contract.functions.into_iter().map(move |contract_function| { - let program_name = format!("{}-{}", contract_id, contract_function.name); - (program_name, contract_function.bytecode) - }) - }) - .collect(); - - ::from_serde(&collected_compiled_programs).unwrap() + ::from_serde(&compiled_contracts).unwrap() } else { let main = driver.main_function().unwrap_or_else(|_| panic!("Could not find main function!")); diff --git a/examples_failing/ecdsa_secp256k1/src/main.nr b/examples_failing/ecdsa_secp256k1/src/main.nr index 6ee7a98a89..43a4f78e63 100644 --- a/examples_failing/ecdsa_secp256k1/src/main.nr +++ b/examples_failing/ecdsa_secp256k1/src/main.nr @@ -13,5 +13,5 @@ fn main(hashed_message : [32]u8, pub_key_x : [32]u8, pub_key_y : [32]u8, signatu // Is there ever a situation where someone would want // to ensure that a signature was invalid? 
let x = std::ecdsa_secp256k1::verify_signature(pub_key_x,pub_key_y,signature, hashed_message); - constrain x == 1; + assert(x == 1); } diff --git a/examples_failing/pow_const/src/main.nr b/examples_failing/pow_const/src/main.nr index 4355935d2a..9b742417e1 100644 --- a/examples_failing/pow_const/src/main.nr +++ b/examples_failing/pow_const/src/main.nr @@ -1,6 +1,6 @@ use dep::std; fn main(_x : Field) { - constrain std::pow_const(2,3) == _x; + assert(std::pow_const(2,3) == _x); } diff --git a/flake.lock b/flake.lock index 6a6aff10f7..8d6445d3cf 100644 --- a/flake.lock +++ b/flake.lock @@ -10,11 +10,11 @@ ] }, "locked": { - "lastModified": 1682626614, - "narHash": "sha256-TC535frlYLUTDZ2iHAtUtKpMJWngL1MFxzEXhOfeCo8=", + "lastModified": 1683314474, + "narHash": "sha256-gfHYpOnVTfS+4fhScBhfkB/e5z+jPFCi8zSy+aEh+8s=", "owner": "AztecProtocol", "repo": "barretenberg", - "rev": "7b5598890c1fa4ee485a4a0015fcb23b5389392e", + "rev": "ad615ee7dc931d3dbea041e47c96b9d8dccebf98", "type": "github" }, "original": { diff --git a/noir_stdlib/src/ec/montcurve.nr b/noir_stdlib/src/ec/montcurve.nr index fad5e5e0a9..e917661f0f 100644 --- a/noir_stdlib/src/ec/montcurve.nr +++ b/noir_stdlib/src/ec/montcurve.nr @@ -82,13 +82,13 @@ mod affine { // Curve constructor fn new(j: Field, k: Field, gen: Point) -> Self { // Check curve coefficients - constrain k != 0; - constrain j*j != 4; + assert(k != 0); + assert(j*j != 4); let curve = Self {j, k, gen}; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } @@ -180,10 +180,10 @@ mod affine { let z = ZETA; // Non-square Field element required for map // Check whether curve is admissible - constrain j != 0; + assert(j != 0); let l = (j*j - 4)/(k*k); - constrain l != 0; - constrain is_square(l) == false; + assert(l != 0); + assert(is_square(l) == false); let x1 = safe_inverse(1+z*u*u)*(0 - (j/k)); @@ -284,13 +284,13 @@ mod curvegroup { // Curve constructor fn new(j: Field, k: Field, gen: 
Point) -> Self { // Check curve coefficients - constrain k != 0; - constrain j*j != 4; + assert(k != 0); + assert(j*j != 4); let curve = Self {j, k, gen}; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } diff --git a/noir_stdlib/src/ec/swcurve.nr b/noir_stdlib/src/ec/swcurve.nr index 8e2a996e92..eae4f375e4 100644 --- a/noir_stdlib/src/ec/swcurve.nr +++ b/noir_stdlib/src/ec/swcurve.nr @@ -71,12 +71,12 @@ mod affine { // Curve constructor fn new(a: Field, b: Field, gen: Point) -> Curve { // Check curve coefficients - constrain 4*a*a*a + 27*b*b != 0; + assert(4*a*a*a + 27*b*b != 0); let curve = Curve { a, b, gen }; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } @@ -164,7 +164,7 @@ mod affine { // where g(x) = x^3 + a*x + b. swu_map(c,z,.) then maps a Field element to a point on curve c. fn swu_map(self, z: Field, u: Field) -> Point { // Check whether curve is admissible - constrain self.a*self.b != 0; + assert(self.a*self.b != 0); let Curve {a, b, gen: _gen} = self; @@ -248,12 +248,12 @@ mod curvegroup { // Curve constructor fn new(a: Field, b: Field, gen: Point) -> Curve { // Check curve coefficients - constrain 4*a*a*a + 27*b*b != 0; + assert(4*a*a*a + 27*b*b != 0); let curve = Curve { a, b, gen }; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } diff --git a/noir_stdlib/src/ec/tecurve.nr b/noir_stdlib/src/ec/tecurve.nr index 43c9f5d201..8611e4270c 100644 --- a/noir_stdlib/src/ec/tecurve.nr +++ b/noir_stdlib/src/ec/tecurve.nr @@ -81,12 +81,12 @@ mod affine { // Curve constructor fn new(a: Field, d: Field, gen: Point) -> Curve { // Check curve coefficients - constrain a*d*(a-d) != 0; + assert(a*d*(a-d) != 0); let curve = Curve {a, d, gen}; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } @@ -286,12 +286,12 @@ 
mod curvegroup { // Curve constructor fn new(a: Field, d: Field, gen: Point) -> Curve { // Check curve coefficients - constrain a*d*(a-d) != 0; + assert(a*d*(a-d) != 0); let curve = Curve { a, d, gen }; // gen should be on the curve - constrain curve.contains(curve.gen); + assert(curve.contains(curve.gen)); curve } diff --git a/noir_stdlib/src/hash.nr b/noir_stdlib/src/hash.nr index 614918ae49..0618897d20 100644 --- a/noir_stdlib/src/hash.nr +++ b/noir_stdlib/src/hash.nr @@ -12,6 +12,9 @@ fn pedersen(_input : [Field; N]) -> [Field; 2] {} #[foreign(hash_to_field_128_security)] fn hash_to_field(_input : [Field; N]) -> Field {} +#[foreign(keccak256)] +fn keccak256(_input : [u8; N]) -> [u8; 32] {} + // mimc-p/p implementation // constants are (publicly generated) random numbers, for instance using keccak as a ROM. // You must use constants generated for the native field diff --git a/noir_stdlib/src/hash/poseidon.nr b/noir_stdlib/src/hash/poseidon.nr index 7ac365c499..416f740bbd 100644 --- a/noir_stdlib/src/hash/poseidon.nr +++ b/noir_stdlib/src/hash/poseidon.nr @@ -20,9 +20,9 @@ fn config( mds: [Field; N]) -> PoseidonConfig { // Input checks - constrain t as u8 * (rf + rp) == ark.len() as u8; - constrain t * t == mds.len(); - constrain alpha != 0; + assert(t as u8 * (rf + rp) == ark.len() as u8); + assert(t * t == mds.len()); + assert(alpha != 0); PoseidonConfig {t, rf, rp, alpha, ark, mds} } @@ -34,7 +34,7 @@ fn permute( -> [Field; O] { let PoseidonConfig {t, rf, rp, alpha, ark, mds} = pos_conf; - constrain t == state.len(); + assert(t == state.len()); let mut count = 0; @@ -68,7 +68,7 @@ fn absorb( capacity: comptime Field, // Capacity; usually 1 msg: [Field; P]) // Arbitrary length message -> [Field; O] { - constrain pos_conf.t == rate + capacity; + assert(pos_conf.t == rate + capacity); let mut i = 0; diff --git a/noir_stdlib/src/hash/poseidon/bn254.nr b/noir_stdlib/src/hash/poseidon/bn254.nr index 355e7d13a5..9ba26dbd87 100644 --- 
a/noir_stdlib/src/hash/poseidon/bn254.nr +++ b/noir_stdlib/src/hash/poseidon/bn254.nr @@ -15,9 +15,9 @@ fn permute( let rf = 8; let rp = [56, 57, 56, 60, 60, 63, 64, 63, 60, 66, 60, 65, 70, 60, 64, 68][state.len() - 2]; - constrain t == state.len(); - constrain rf == config_rf as Field; - constrain rp == config_rp as Field; + assert(t == state.len()); + assert(rf == config_rf as Field); + assert(rp == config_rp as Field); let mut count = 0; @@ -73,7 +73,7 @@ fn absorb( msg: [Field; P] // Arbitrary length message ) -> [Field; O] { - constrain pos_conf.t == rate + capacity; + assert(pos_conf.t == rate + capacity); let mut i = 0;