From 35ac6ceb3ace7bfa0e0b624ded03f64ef5c43b96 Mon Sep 17 00:00:00 2001 From: "Stefan J. Wernli" Date: Tue, 6 Dec 2022 16:05:17 -0800 Subject: [PATCH] Update projects, docs, cpp files for QIR stdlib (#1578) * Update projects, docs, cpp files for QIR stdlib This change updates the remaining example and test projects for using the QIR stdlib via the Microsoft.Quantum.Simulator.Runtime as a QIR backend. This resolves #1560, resolves #1568, and resolves #1569. Note that it does not update the samples under examples\QIR\Simulation as these need to be fully rewritten and likely moved to https://github.com/qir-alliance/qir-runner (see Rewrite or Move docs in examples/QIR/Simulation #1577) * Fix development project QDK version * Update examples/QIR/Optimization/README.md Co-authored-by: Robin Kuzmin <9372582+kuzminrobin@users.noreply.github.com> * Use alternative method for copying build dependencies Co-authored-by: Robin Kuzmin <9372582+kuzminrobin@users.noreply.github.com> --- .gitignore | 1 + examples/QIR/Development/Development.csproj | 35 +- examples/QIR/Emission/Emission.csproj | 2 +- examples/QIR/JITCompilation/README.md | 70 +- examples/QIR/JITCompilation/qir-jit.py | 27 +- examples/QIR/Optimization/Hello/Hello.csproj | 23 +- examples/QIR/Optimization/Hello/Main.cpp | 9 - examples/QIR/Optimization/Hello/qir/Hello.ll | 1994 +---------------- examples/QIR/Optimization/README.md | 105 +- .../Simulation/Target/QirExecution.cs | 17 +- .../Simulation/Target/Simulation.csproj | 3 +- .../Tests.Compiler/ExecutionTests.fs | 2 +- 12 files changed, 68 insertions(+), 2220 deletions(-) diff --git a/.gitignore b/.gitignore index 1997f0054f..d76deb0c4a 100644 --- a/.gitignore +++ b/.gitignore @@ -368,6 +368,7 @@ src/ProjectTemplates/Quantum.Test1/.template.config/template.json src/QsCompiler/QirGeneration/QirGeneration.nuspec /examples/QIR/Development/qir/* /examples/QIR/Development/build +/examples/QIR/Optimization/Hello/build src/Telemetry/Tests/coverage.json 
src/Telemetry/.vscode/settings.json build/267DevDivSNKey2048.snk diff --git a/examples/QIR/Development/Development.csproj b/examples/QIR/Development/Development.csproj index 9d7ffd9207..08f3c26aa0 100644 --- a/examples/QIR/Development/Development.csproj +++ b/examples/QIR/Development/Development.csproj @@ -1,4 +1,4 @@ - + Exe @@ -9,8 +9,7 @@ - - + @@ -92,18 +91,9 @@ - #include "QirContext.hpp" - #include "QirRuntime.hpp" - #include "SimFactory.hpp" - - using namespace Microsoft::Quantum%3B - using namespace std%3B - extern "C" void Microsoft__Quantum__Qir__Development__RunExample()%3B // NOLINT int main(int argc, char* argv[]){ - unique_ptr<IRuntimeDriver> sim = CreateFullstateSimulator()%3B - QirContextScope qirctx(sim.get(), true /*trackAllocatedObjects*/)%3B Microsoft__Quantum__Qir__Development__RunExample()%3B return 0%3B } @@ -114,24 +104,17 @@ - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/any/native/include - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/osx-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/win-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/linux-x64/native - $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native/libMicrosoft.Quantum.Simulator.Runtime.dylib - $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll - $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native/libMicrosoft.Quantum.Simulator.Runtime.so + $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native -fseh-exceptions -lstdc++ - clang -std=c++17 -Wno-override-module $(ClangOptions) -o $(ExecutablePath) $(QirOutputPath)$(PathCompatibleAssemblyName).ll $(BuildOutputPath)/Main.cpp -I$(BuildOutputPath) -L$(BuildOutputPath) -lMicrosoft.Quantum.Qir.Runtime -lMicrosoft.Quantum.Qir.QSharp.Core -lMicrosoft.Quantum.Qir.QSharp.Foundation + -lomp + clang -std=c++17 -Wno-override-module 
$(ClangOptions) -o $(ExecutablePath) $(QirOutputPath)$(PathCompatibleAssemblyName).ll $(BuildOutputPath)/Main.cpp -I$(BuildOutputPath) -L$(BuildOutputPath) -lMicrosoft.Quantum.Simulator.Runtime $(OmpOptions) - <_QirRuntimeLibFiles Include="$(QirRuntimeLibs)/**/*.*" Exclude="$(QirRuntimeLibs)/**/*.exe" /> - <_QirRuntimeHeaderFiles Include="$(QirRuntimeHeaders)/**/*.hpp" /> - <_QirRuntimeHeaderFiles Include="$(QirRuntimeHeaders)/**/*.h" /> + <_SimulatorLibraries Include="$(SimulatorFolder)/*.*" /> - - - + diff --git a/examples/QIR/Emission/Emission.csproj b/examples/QIR/Emission/Emission.csproj index 890dd4471d..19b2e40787 100644 --- a/examples/QIR/Emission/Emission.csproj +++ b/examples/QIR/Emission/Emission.csproj @@ -1,4 +1,4 @@ - + Detailed diff --git a/examples/QIR/JITCompilation/README.md b/examples/QIR/JITCompilation/README.md index d54054147a..fc85d9bc83 100644 --- a/examples/QIR/JITCompilation/README.md +++ b/examples/QIR/JITCompilation/README.md @@ -38,7 +38,7 @@ The function below is all that is needed to JIT compile a QIR program, and consi - load QIR/simulator libraries - parse the QIR program - load the JIT compiler -- initialize the QIR Runtime and attach a simulator (see [next section](#building-the-project) for more info) +- initialize and attach a simulator (see [next section](#building-the-project) for more info) - run a QIR function ```py @@ -48,9 +48,8 @@ def main(qir_file, entry_point): llvm.initialize_native_target() llvm.initialize_native_asmprinter() - # Load the QIR Runtime libraries - for lib in runtime_libs: - llvm.load_library_permanently(lib) + # Load the simulator library + llvm.load_library_permanently(simulator_lib) # Parse the provided QIR module file = open(qir_file, 'r') @@ -60,16 +59,12 @@ def main(qir_file, entry_point): target = llvm.Target.from_default_triple().create_target_machine() jit_engine = llvm.create_mcjit_compiler(module, target) - # Initialize the QIR Runtime and simulator via exposed C wrapper - fun_ptr = 
llvm.address_of_symbol("InitQIRSim") - CFUNCTYPE(None)(fun_ptr)() - # Run the entry point of the QIR module fun_ptr = jit_engine.get_function_address(entry_point) CFUNCTYPE(None)(fun_ptr)() ``` -Here, `runtime_libs` is a hardcoded list of the system-specific QIR libraries. +Here, `simulator_lib` is the platform-specific path to the simulator library. The name of the QIR source file `qir_file` is provided as a command-line argument, as well the name of the entry point function `entry_point`. The way external functions with C-style linkage are invoked from Python is by using the ctypes module. @@ -78,44 +73,22 @@ The returned value is a raw pointer that can be converted to a function pointer This is done via the function `CFUNCTYPE(, ..)()`, which also specifies the usage of the standard C calling convention. Refer to the [ctypes documentation](https://docs.python.org/3/library/ctypes.html) for more information on calling external functions from Python. -The full Python script can be found in `jit-qir.py`. +The full Python script can be found in `qir-jit.py`. ## Building the project -Currently, it's not easily possible to set up a JIT for QIR entirely from Python, since the QIR Runtime and simulator are compiled from C++. -To get around this, a small library for which we can specify C-linkage can handle the QIR-specific initialization, which can then be invoked from Python directly using ctypes. 
-A small function declared with `extern "C"` linkage is sufficient to initialize the Runtime + simulator, as defined in `QIRinit.cpp`: - -```cpp -#include "QirContext.hpp" -#include "SimFactory.hpp" +Before being able to run QIR via LLVM's JIT compiler, we need to download the necessary simulator library from the [Quantum Simulators](https://www.nuget.org/packages/Microsoft.Quantum.Simulators/) NuGet package: -#include - -using namespace Microsoft::Quantum; - -extern "C" void InitQIRSim() -{ - // initialize Quantum Simulator and QIR Runtime - std::unique_ptr sim = CreateFullstateSimulator(); - InitializeQirContext(sim.release(), true); -} -``` - -Before being able run QIR via LLVM's JIT compiler, we need to download the necessary header and library files from the [QIR Runtime](https://www.nuget.org/packages/Microsoft.Quantum.Qir.Runtime) and [Quantum Simulators](https://www.nuget.org/packages/Microsoft.Quantum.Simulators/) NuGet packages: - -- **Linux** (installs mono for the NuGet CLI): +- **Linux** (install mono for the NuGet CLI): ```shell mkdir build sudo apt update && sudo apt install -y mono-complete curl https://dist.nuget.org/win-x86-commandline/latest/nuget.exe --output build/nuget mono build/nuget sources add -name nuget.org -source https://api.nuget.org/v3/index.json - mono build/nuget install Microsoft.Quantum.Qir.Runtime -Version 0.18.2106148911-alpha -DirectDownload -DependencyVersion Ignore -OutputDirectory tmp - cp tmp/Microsoft.Quantum.Qir.Runtime.0.18.2106148911-alpha/runtimes/any/native/include/* build - cp tmp/Microsoft.Quantum.Qir.Runtime.0.18.2106148911-alpha/runtimes/linux-x64/native/* build - mono build/nuget install Microsoft.Quantum.Simulators -Version 0.18.2106148911 -DirectDownload -DependencyVersion Ignore -OutputDirectory tmp - cp tmp/Microsoft.Quantum.Simulators.0.18.2106148911/runtimes/linux-x64/native/Microsoft.Quantum.Simulator.Runtime.dll build + mono build/nuget install Microsoft.Quantum.Simulators -Version 0.27.238334 
-DirectDownload -DependencyVersion Ignore -OutputDirectory tmp + cp tmp/Microsoft.Quantum.Simulators.0.27.238334/runtimes/linux-x64/native/libMicrosoft.Quantum.Simulator.Runtime.so build + cp tmp/Microsoft.Quantum.Simulators.0.27.238334/runtimes/linux-x64/native/libomp.so build rm -r tmp ``` @@ -124,28 +97,13 @@ Before being able run QIR via LLVM's JIT compiler, we need to download the neces ```shell mkdir build curl https://dist.nuget.org/win-x86-commandline/latest/nuget.exe --output build/nuget.exe - build/nuget install Microsoft.Quantum.Qir.Runtime -Version 0.18.2106148911-alpha -DirectDownload -DependencyVersion Ignore -OutputDirectory tmp - cp tmp/Microsoft.Quantum.Qir.Runtime.0.18.2106148911-alpha/runtimes/any/native/include/* build - cp tmp/Microsoft.Quantum.Qir.Runtime.0.18.2106148911-alpha/runtimes/win-x64/native/* build - build/nuget install Microsoft.Quantum.Simulators -Version 0.18.2106148911 -DirectDownload -DependencyVersion Ignore -OutputDirectory tmp - cp tmp/Microsoft.Quantum.Simulators.0.18.2106148911/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll build + build/nuget install Microsoft.Quantum.Simulators -Version 0.27.238334 -DirectDownload -DependencyVersion Ignore -OutputDirectory tmp + cp tmp/Microsoft.Quantum.Simulators.0.27.238334/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll build + cp tmp/Microsoft.Quantum.Simulators.0.27.238334/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.lib build + cp tmp/Microsoft.Quantum.Simulators.0.27.238334/runtimes/win-x64/native/libomp140.x86_64.dll build rm -r tmp ``` -Then compile the initialization library with the following command: - -- **Linux**: - - ```shell - clang++ -shared -fPIC QIRinit.cpp -Ibuild -o build/libQIRinit.so - ``` - -- **Windows**: - - ```shell - clang++ -shared QIRinit.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -Wl',/EXPORT:InitQIRSim' -o build/QIRinit.dll - ``` - ## Running a QIR program with 
JIT To run any QIR program through the JIT, simply run the Python script and provide the QIR source file name and entry point as command line arguments, for example: diff --git a/examples/QIR/JITCompilation/qir-jit.py b/examples/QIR/JITCompilation/qir-jit.py index 41310b42f3..b11155e9df 100644 --- a/examples/QIR/JITCompilation/qir-jit.py +++ b/examples/QIR/JITCompilation/qir-jit.py @@ -2,22 +2,14 @@ import llvmlite.binding as llvm from ctypes import CFUNCTYPE -linux_runtime_libs = ["build/libMicrosoft.Quantum.Qir.Runtime.so", - "build/libMicrosoft.Quantum.Qir.QSharp.Core.so", - "build/libMicrosoft.Quantum.Qir.QSharp.Foundation.so", - "build/Microsoft.Quantum.Simulator.Runtime.dll", - "build/libQIRinit.so"] - -windows_runtime_libs = ["build/Microsoft.Quantum.Qir.Runtime.dll", - "build/Microsoft.Quantum.Qir.QSharp.Core.dll", - "build/Microsoft.Quantum.Qir.QSharp.Foundation.dll", - "build/Microsoft.Quantum.Simulator.Runtime.dll", - "build/QIRinit.dll"] +linux_simulator_lib = "build/libMicrosoft.Quantum.Simulator.Runtime.so" + +windows_simulator_lib = "build/Microsoft.Quantum.Simulator.Runtime.dll" if platform.system() == "Linux": - runtime_libs = linux_runtime_libs + simulator_lib = linux_simulator_lib elif platform.system() == "Windows": - runtime_libs = windows_runtime_libs + simulator_lib = windows_simulator_lib else: raise Exception("unsupported platform") @@ -27,9 +19,8 @@ def main(qir_file, entry_point): llvm.initialize_native_target() llvm.initialize_native_asmprinter() - # Load the QIR Runtime libraries - for lib in runtime_libs: - llvm.load_library_permanently(lib) + # Load the simulator library + llvm.load_library_permanently(simulator_lib) # Parse the provided QIR module file = open(qir_file, 'r') @@ -39,10 +30,6 @@ def main(qir_file, entry_point): target = llvm.Target.from_default_triple().create_target_machine() jit_engine = llvm.create_mcjit_compiler(module, target) - # Initialize the QIR Runtime and simulator via exposed C wrapper - fun_ptr = 
llvm.address_of_symbol("InitQIRSim") - CFUNCTYPE(None)(fun_ptr)() - # Run the entry point of the QIR module fun_ptr = jit_engine.get_function_address(entry_point) CFUNCTYPE(None)(fun_ptr)() diff --git a/examples/QIR/Optimization/Hello/Hello.csproj b/examples/QIR/Optimization/Hello/Hello.csproj index 20cb127669..263feacd42 100644 --- a/examples/QIR/Optimization/Hello/Hello.csproj +++ b/examples/QIR/Optimization/Hello/Hello.csproj @@ -1,5 +1,4 @@ - - + Exe net6.0 @@ -8,27 +7,19 @@ - - + - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/any/native/include - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/osx-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/win-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/linux-x64/native - $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native/Microsoft.Quantum.Simulator.Runtime.dll - $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll - $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native/Microsoft.Quantum.Simulator.Runtime.dll + $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native - <_QirRuntimeLibFiles Include="$(QirRuntimeLibs)/**/*.*" Exclude="$(QirRuntimeLibs)/**/*.exe" /> - <_QirRuntimeHeaderFiles Include="$(QirRuntimeHeaders)/**/*.hpp" /> + <_SimulatorLibraries Include="$(SimulatorFolder)/*.*" /> - - - + diff --git a/examples/QIR/Optimization/Hello/Main.cpp b/examples/QIR/Optimization/Hello/Main.cpp index ddfe6b396f..3746a300c2 100644 --- a/examples/QIR/Optimization/Hello/Main.cpp +++ b/examples/QIR/Optimization/Hello/Main.cpp @@ -1,15 +1,6 @@ -#include "QirContext.hpp" -#include "QirRuntime.hpp" -#include "SimFactory.hpp" - -using namespace Microsoft::Quantum; -using namespace std; - extern "C" void Hello__HelloQ(); int main(int argc, char* argv[]){ - unique_ptr sim = CreateFullstateSimulator(); - QirContextScope 
qirctx(sim.get(), true /*trackAllocatedObjects*/); Hello__HelloQ(); return 0; } diff --git a/examples/QIR/Optimization/Hello/qir/Hello.ll b/examples/QIR/Optimization/Hello/qir/Hello.ll index 7feb85317c..16b37b98af 100644 --- a/examples/QIR/Optimization/Hello/qir/Hello.ll +++ b/examples/QIR/Optimization/Hello/qir/Hello.ll @@ -1,31 +1,8 @@ -%Range = type { i64, i64, i64 } -%Tuple = type opaque %String = type opaque -%Array = type opaque -%Callable = type opaque -%Result = type opaque -@PauliI = internal constant i2 0 -@PauliX = internal constant i2 1 -@PauliY = internal constant i2 -1 -@PauliZ = internal constant i2 -2 -@EmptyRange = internal constant %Range { i64 0, i64 1, i64 -1 } @0 = internal constant [21 x i8] c"Hello quantum world!\00" -@PartialApplication__1 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@MemoryManagement__1 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] -@PartialApplication__2 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__3 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] 
-@MemoryManagement__2 = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] -@PartialApplication__4 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] -@PartialApplication__5 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] -@PartialApplication__6 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] -@PartialApplication__7 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__8 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null] -@PartialApplication__9 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] -@PartialApplication__10 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] -@PartialApplication__11 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] -@PartialApplication__12 = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] +@1 = internal constant [3 x i8] c"()\00" define internal void @Hello__HelloQ__body() { entry: @@ -41,1972 +18,6 @@ declare void @__quantum__rt__message(%String*) declare void @__quantum__rt__string_update_reference_count(%String*, i32) -define internal { 
%String* }* @Microsoft__Quantum__Diagnostics__EnableTestingViaName__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - -declare %Tuple* @__quantum__rt__tuple_create(i64) - -define internal { %String* }* @Microsoft__Quantum__Diagnostics__Test__body(%String* %ExecutionTarget) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %ExecutionTarget, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %ExecutionTarget, i32 1) - ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__Attribute__body() { -entry: - ret %Tuple* null -} - -define internal { %String* }* @Microsoft__Quantum__Core__Deprecated__body(%String* %NewName) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %NewName, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %NewName, i32 1) - ret { %String* }* %1 -} - -define internal %Tuple* @Microsoft__Quantum__Core__EntryPoint__body() { -entry: - ret %Tuple* null -} - -define internal %Tuple* @Microsoft__Quantum__Core__Inline__body() { -entry: - ret %Tuple* null -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsic__body(%Array* 
%measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare void @__quantum__rt__array_update_alias_count(%Array*, i32) - -declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) - -declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) - -declare void @__quantum__qis__applyconditionallyintrinsic__body(%Array*, %Array*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void 
@__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void 
@__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) - -declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) - -declare void @__quantum__rt__callable_make_adjoint(%Callable*) - -declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) - -define internal void 
@Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* 
%measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicC__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void 
@__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void 
@__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple 
to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - 
-declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) - -declare void @__quantum__rt__callable_make_controlled(%Callable*) - -declare void @__quantum__rt__array_update_reference_count(%Array*, i32) - -define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void 
@Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr 
inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) - -declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) - -declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__body(%Array* %measurementResults, %Array* %resultsValues, 
%Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__adj(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp, %Callable* %onNonEqualOp) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onEqualOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onNonEqualOp__1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctl(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) - %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* 
%onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* 
%resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, 
%Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw 
(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, 
%Array** %3, align 8 - call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %2 = load %Callable*, %Callable** %1, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) - call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) - %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %4 = load %Array*, %Array** %3, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) - call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) - ret void -} - -define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, 
align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, 
i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyConditionallyIntrinsicCA__ctladj(%Array* %ctls, { %Array*, %Array*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResults = load %Array*, %Array** %1, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - %2 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %resultsValues = load %Array*, %Array** %2, align 8 - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %3 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onEqualOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 1) 
- %4 = getelementptr inbounds { %Array*, %Array*, %Callable*, %Callable* }, { %Array*, %Array*, %Callable*, %Callable* }* %0, i32 0, i32 3 - %onNonEqualOp = load %Callable*, %Callable** %4, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 1) - %5 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 - %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_make_controlled(%Callable* %9) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %9, %Callable** %7, align 8 - store %Array* %ctls, %Array** %8, align 8 - %onEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %5) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 1) - %10 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* - %12 = getelementptr inbounds { 
%Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 - %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 - %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onNonEqualOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %14) - call void @__quantum__rt__callable_make_controlled(%Callable* %14) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %14, %Callable** %12, align 8 - store %Array* %ctls, %Array** %13, align 8 - %onNonEqualOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %10) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 1) - call void @__quantum__qis__applyconditionallyintrinsic__body(%Array* %measurementResults, %Array* %resultsValues, %Callable* %onEqualOp__1, %Callable* %onNonEqualOp__1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onEqualOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* 
%onNonEqualOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onNonEqualOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %measurementResults, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %resultsValues, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onEqualOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onNonEqualOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* 
@__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { 
%Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, 
%Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -declare void @__quantum__qis__applyifelseintrinsic__body(%Result*, %Callable*, %Callable*) - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicA__adj(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void 
@__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) 
- ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicC__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void 
@__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %4) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void 
@__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load 
%Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp, %Callable* %onResultOneOp) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__adj(%Result* %measurementResult, 
%Callable* %onResultZeroOp, %Callable* %onResultOneOp) { -entry: - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultZeroOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void 
@__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctl(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load %Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void 
@__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** 
null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, 
i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, 
%Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define 
internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call 
void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { 
%Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Microsoft__Quantum__ClassicalControl__ApplyIfElseIntrinsicCA__ctladj(%Array* %ctls, { %Result*, %Callable*, %Callable* }* %0) { -entry: - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 1) - %1 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 0 - %measurementResult = load %Result*, %Result** %1, align 8 - %2 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 1 - %onResultZeroOp = load %Callable*, %Callable** %2, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 1) - %3 = getelementptr inbounds { %Result*, %Callable*, %Callable* }, { %Result*, %Callable*, %Callable* }* %0, i32 0, i32 2 - %onResultOneOp = load 
%Callable*, %Callable** %3, align 8 - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 1) - %4 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %5 = bitcast %Tuple* %4 to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultZeroOp, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %8) - call void @__quantum__rt__callable_make_controlled(%Callable* %8) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %8, %Callable** %6, align 8 - store %Array* %ctls, %Array** %7, align 8 - %onResultZeroOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %4) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 1) - %9 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* - %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 - %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 - %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %onResultOneOp, i1 false) - call void 
@__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %13) - call void @__quantum__rt__callable_make_controlled(%Callable* %13) - call void @__quantum__rt__array_update_reference_count(%Array* %ctls, i32 1) - store %Callable* %13, %Callable** %11, align 8 - store %Array* %ctls, %Array** %12, align 8 - %onResultOneOp__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2, %Tuple* %9) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 1) - call void @__quantum__qis__applyifelseintrinsic__body(%Result* %measurementResult, %Callable* %onResultZeroOp__1, %Callable* %onResultOneOp__1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultZeroOp__1, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %onResultOneOp__1, i32 -1) - call void @__quantum__rt__array_update_alias_count(%Array* %ctls, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultZeroOp, i32 -1) - call void @__quantum__rt__capture_update_alias_count(%Callable* 
%onResultOneOp, i32 -1) - call void @__quantum__rt__callable_update_alias_count(%Callable* %onResultOneOp, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store 
%Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 
ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call 
%Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 - %2 = load %Array*, %Array** %1, align 8 - %3 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %4 = bitcast %Tuple* %3 to { %Array*, %Tuple* }* - %5 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 0 - %6 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %4, i32 0, i32 1 - store %Array* %2, %Array** %5, align 8 - store %Tuple* null, %Tuple** %6, align 8 - %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 - %8 = load %Callable*, %Callable** %7, align 8 - %9 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %8, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %9) - call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 
0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { -entry: - %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Tuple* }* - %1 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 0 - %2 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %0, i32 0, i32 1 - %3 = load %Array*, %Array** %1, align 8 - %4 = load %Tuple*, %Tuple** %2, align 8 - %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* - %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 - %7 = load %Array*, %Array** %6, align 8 - %8 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %9 = bitcast %Tuple* %8 to { %Array*, %Tuple* }* - %10 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, 
i32 0, i32 0 - %11 = getelementptr inbounds { %Array*, %Tuple* }, { %Array*, %Tuple* }* %9, i32 0, i32 1 - store %Array* %7, %Array** %10, align 8 - store %Tuple* %4, %Tuple** %11, align 8 - %12 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Tuple* }* }* - %14 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 0 - %15 = getelementptr inbounds { %Array*, { %Array*, %Tuple* }* }, { %Array*, { %Array*, %Tuple* }* }* %13, i32 0, i32 1 - store %Array* %3, %Array** %14, align 8 - store { %Array*, %Tuple* }* %9, { %Array*, %Tuple* }** %15, align 8 - %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 - %17 = load %Callable*, %Callable** %16, align 8 - %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) - call void @__quantum__rt__callable_make_adjoint(%Callable* %18) - call void @__quantum__rt__callable_make_controlled(%Callable* %18) - call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) - call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) - call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) - call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) - ret void -} - -define internal { %String*, %String* }* @Microsoft__Quantum__Targeting__RequiresCapability__body(%String* %Level, %String* %Reason) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 mul nuw (i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64), i64 2)) - %1 = bitcast %Tuple* %0 to { %String*, %String* }* - %2 = getelementptr inbounds { %String*, 
%String* }, { %String*, %String* }* %1, i32 0, i32 0 - %3 = getelementptr inbounds { %String*, %String* }, { %String*, %String* }* %1, i32 0, i32 1 - store %String* %Level, %String** %2, align 8 - store %String* %Reason, %String** %3, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %Level, i32 1) - call void @__quantum__rt__string_update_reference_count(%String* %Reason, i32 1) - ret { %String*, %String* }* %1 -} - -define internal { %String* }* @Microsoft__Quantum__Targeting__TargetInstruction__body(%String* %__Item1__) { -entry: - %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint (i1** getelementptr (i1*, i1** null, i32 1) to i64)) - %1 = bitcast %Tuple* %0 to { %String* }* - %2 = getelementptr inbounds { %String* }, { %String* }* %1, i32 0, i32 0 - store %String* %__Item1__, %String** %2, align 8 - call void @__quantum__rt__string_update_reference_count(%String* %__Item1__, i32 1) - ret { %String* }* %1 -} - define void @Hello__HelloQ__Interop() #0 { entry: call void @Hello__HelloQ__body() @@ -2016,6 +27,9 @@ entry: define void @Hello__HelloQ() #1 { entry: call void @Hello__HelloQ__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) ret void } diff --git a/examples/QIR/Optimization/README.md b/examples/QIR/Optimization/README.md index 6ab4e29e8c..d873e8fe7f 100644 --- a/examples/QIR/Optimization/README.md +++ b/examples/QIR/Optimization/README.md @@ -304,10 +304,11 @@ Check out the full list of [LLVM passes](https://llvm.org/docs/Passes.html) for Since QIR code *is* LLVM IR, the usual code generation tools provided by LLVM can be used to produce an executable. However, in order to handle QIR-specific types and functions, proper linkage of the QIR runtime and simulator libraries is required. 
-### Obtaining the QIR runtime & simulator +### Obtaining the QIR standard library & simulator -The [QIR runtime](https://github.com/microsoft/qsharp-runtime/tree/main/src/Qir/Runtime) is distributed in the form of a NuGet package, from which we will pull the necessary library files. -The same goes for the [full state quantum simulator](https://learn.microsoft.com/azure/quantum/machines/full-state-simulator), which the QIR runtime can hook into to simulate the quantum program. +The [QIR standard library](https://github.com/qir-alliance/qir-runner/tree/main/stdlib) is a native library that exposes the functionality described in the +[QIR specification](https://github.com/qir-alliance/qir-spec), from which the required static libraries are compiled into the +[full state quantum simulator](https://learn.microsoft.com/azure/quantum/machines/full-state-simulator), which acts as the QIR backend to simulate the quantum program. In this section, the project file `Hello.csproj` is modified to generate these library files automatically. For convenience, a variable `BuildOutputPath` is defined with the following line added to the top-level `PropertyGroup` section: @@ -321,19 +322,17 @@ For convenience, a variable `BuildOutputPath` is defined with the following line ``` -All QIR runtime and simulator dependencies will be copied there. +All QIR dependencies will be copied there. -Next, the aforementioned NuGet package dependencies must be declared. -One for the runtime and one for the simulator, using the `PackageReference` command: +Next, the simulator NuGet package dependencies must be declared, using the `PackageReference` command: ```xml - ``` -The package versions should match the version of the QDK specified at the top of the file, however, the runtime is only available as an alpha version at the moment. +The package version should match the version of the QDK specified at the top of the file. 
The `GeneratePathProperty` will allow us to directly reference specific files in the packages later on. Lastly, a new build target is added called `GetDependencies`: @@ -344,34 +343,19 @@ Lastly, a new build target is added called `GetDependencies`: The property `AfterTargets` indicates the target is to be run after the regular build stage. -Inside, we simply copy library and C++ header files from the packages into the build folder with the `Copy` command: +Inside, we simply copy the simulator library from the packages into the build folder with the `Copy` command: ```xml - - ``` The variables used to specify source files must be defined appropriately for each operating system. For example, only these definitions would be active on Windows: ```xml - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/any/native/include - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/win-x64/native $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll ``` -Note the variable `$(PkgMicrosoft_Quantum_Qir_Runtime)` for example is only available because of the `GeneratePathProperty` in the `Microsoft.Quantum.Qir.Runtime` package declaration. - -Since `QirRuntimeHeaders` and `QirRuntimeLibs` only specify directories (whereas `SimulatorRuntime` specifies a single file), we further filter the files to be copied: - -```xml - <_QirRuntimeLibFiles Include="$(QirRuntimeLibs)/**/*.*" Exclude="$(QirRuntimeLibs)/**/*.exe" /> - <_QirRuntimeHeaderFiles Include="$(QirRuntimeHeaders)/**/*.hpp" /> -``` - -Only `.hpp` files from the QIR header directory will be copied, and no `.exe` files from QIR library directory. 
- Put together, the new `Hello.csproj` project file should look as follows: ```xml @@ -385,53 +369,31 @@ Put together, the new `Hello.csproj` project file should look as follows: - - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/any/native/include - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/osx-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/win-x64/native - $(PkgMicrosoft_Quantum_Qir_Runtime)/runtimes/linux-x64/native - $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native/Microsoft.Quantum.Simulator.Runtime.dll - $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native/Microsoft.Quantum.Simulator.Runtime.dll - $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native/Microsoft.Quantum.Simulator.Runtime.dll + $(PkgMicrosoft_Quantum_Simulators)/runtimes/osx-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/win-x64/native + $(PkgMicrosoft_Quantum_Simulators)/runtimes/linux-x64/native - <_QirRuntimeLibFiles Include="$(QirRuntimeLibs)/**/*.*" Exclude="$(QirRuntimeLibs)/**/*.exe" /> - <_QirRuntimeHeaderFiles Include="$(QirRuntimeHeaders)/**/*.hpp" /> + <_SimulatorLibraries Include="$(SimulatorFolder)/*.*" /> - - - + ``` Build the project again with `dotnet build` from the project root directory. -You should see the following important files appear in a folder named `build`, among others: +You should see the simulator library appear in a folder named `build`, among others: ``` build -├── Microsoft.Quantum.Qir.QSharp.Core.dll -├── Microsoft.Quantum.Qir.QSharp.Foundation.dll -├── Microsoft.Quantum.Qir.Runtime.dll ├── Microsoft.Quantum.Simulator.Runtime.dll -├── QirContext.hpp -├── QirRuntime.hpp -└── SimFactory.hpp -``` - -(**Linux**) The `Microsoft.Quantum.Qir.*` dynamic libraries will already have the right naming scheme for Clang to use, but the `Microsoft.Quantum.Simulator.Runtime` library needs to be renamed. -The proper name format is `lib.so`. 
- -Execute the following command from the project root directory: - -```bash -mv build/Microsoft.Quantum.Simulator.Runtime.dll build/libMicrosoft.Quantum.Simulator.Runtime.so +└── Microsoft.Quantum.Simulator.Runtime.lib ``` ### Adding a driver @@ -440,18 +402,9 @@ Trying to compile the QIR code in `Hello.ll` as is would present some problems, A small C++ driver program (`Main.cpp`) will handle the setup and invoke Q# operations or functions directly from the QIR code. ```cpp -#include "QirContext.hpp" -#include "QirRuntime.hpp" -#include "SimFactory.hpp" - -using namespace Microsoft::Quantum; -using namespace std; - extern "C" void Hello__HelloQ(); int main(int argc, char* argv[]){ - unique_ptr sim = CreateFullstateSimulator(); - QirContextScope qirctx(sim.get(), true /*trackAllocatedObjects*/); Hello__HelloQ(); return 0; } @@ -459,17 +412,6 @@ int main(int argc, char* argv[]){ The driver consists of the following elements: -* header files (to interface with the libraries): - - - `QirContext` : used to register the simulator with the QIR runtime - - `QirRuntime` : implements the types and functions defined in the [QIR specification](https://github.com/qir-alliance/qir-spec) - - `SimFactory` : provides the Q# simulator - -* namespaces : - - - `Microsoft::Quantum` : the QIR context and simulator live here - - `std` : needed for `unique_ptr` - * external function declarations : This is were we declare functions from other compilation units we'd like to invoke. @@ -478,11 +420,6 @@ The driver consists of the following elements: Normally, C++ function names would be transformed during compilation to include namespace and call argument information in the function name, known as [mangling](https://en.wikipedia.org/wiki/Name_mangling). We can check that the QIR function `Hello_HelloQ` indeed appears in the `Hello.ll` file with that name. 
-* simulator invocation: - - Here we create a Q# [full state simulator](https://docs.microsoft.com/azure/quantum/user-guide/machines/full-state-simulator) instance that will run our quantum program and register it with the current context. - Following this, everything is set up to call into Q# functions. - ### Compiling the program Multiple tools are available for this step, such as the LLVM static compiler + assembler + linker or the JIT compiler. @@ -491,13 +428,13 @@ Here, Clang is used again, this time to compile and link the `Hello.ll` Q# progr Invoke the following command on Windows: ```powershell -clang++ qir/Hello.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -o build/Hello.exe +clang++ qir/Hello.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe ``` On Linux: ```bash -clang++ qir/Hello.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe +clang++ qir/Hello.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe ``` Parameters: @@ -521,13 +458,13 @@ The same can be done with the optimized QIR code. 
On Windows: ```powershell -clang++ qir/Hello-dce-inline.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -o build/Hello.exe && ./build/Hello.exe +clang++ qir/Hello-dce-inline.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe && ./build/Hello.exe ``` On Linux: ```bash -clang++ qir/Hello-dce-inline.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe && ./build/Hello.exe +clang++ qir/Hello-dce-inline.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe && ./build/Hello.exe ``` As a last example, let's modify the Q# program `Program.qs` with a random bit generator and run through the whole process: @@ -556,10 +493,10 @@ Steps: * optimize the code `clang -S qir/Hello.ll -O3 -emit-llvm -o qir/Hello-o3.ll` * compile the code on Windows ```powershell - clang++ qir/Hello-o3.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -o build/Hello.exe + clang++ qir/Hello-o3.ll Main.cpp -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe ``` or Linux ```bash - clang++ qir/Hello.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Qir.Runtime' -l'Microsoft.Quantum.Qir.QSharp.Core' -l'Microsoft.Quantum.Qir.QSharp.Foundation' -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe + clang++ qir/Hello.ll Main.cpp -Wl,-rpath=build -Ibuild -Lbuild -l'Microsoft.Quantum.Simulator.Runtime' -o build/Hello.exe ``` * simulate the program `./build/Hello.exe` diff --git a/src/QsCompiler/TestTargets/Simulation/Target/QirExecution.cs b/src/QsCompiler/TestTargets/Simulation/Target/QirExecution.cs index e9f9b3be52..843e3dd2a7 
100644 --- a/src/QsCompiler/TestTargets/Simulation/Target/QirExecution.cs +++ b/src/QsCompiler/TestTargets/Simulation/Target/QirExecution.cs @@ -11,12 +11,6 @@ namespace Microsoft.Quantum.QsCompiler.Testing.Qir { public static class JitCompilation { - [DllImport("Microsoft.Quantum.Qir.QSharp.Core", ExactSpelling = true, CallingConvention = CallingConvention.Cdecl)] - public static extern IntPtr CreateFullstateSimulatorC(long seed); - - [DllImport("Microsoft.Quantum.Qir.Runtime", ExactSpelling = true, CallingConvention = CallingConvention.Cdecl)] // TODO(#1569): Requires refactoring for Rust QIR RT. - public static extern void InitializeQirContext(IntPtr driver, bool trackAllocatedObjects); - [UnmanagedFunctionPointer(CallingConvention.Cdecl)] private delegate void SimpleFunction(); @@ -79,13 +73,8 @@ public static unsafe void BuildAndRun(string pathToBitcode, params string[] func NativeLibrary.Load("omp", typeof(JitCompilation).Assembly, null); } - // Explicitly load dependent libraries so that they are already present in memory when pinvoke - // triggers for the Microsoft.Quantum.Qir.QSharp.Core call below. + // Explicitly load dependent libraries so that they are already present in memory. NativeLibrary.Load("Microsoft.Quantum.Simulator.Runtime", typeof(JitCompilation).Assembly, null); - NativeLibrary.Load("Microsoft.Quantum.Qir.Runtime", typeof(JitCompilation).Assembly, null); - NativeLibrary.Load("Microsoft.Quantum.Qir.QSharp.Foundation", typeof(JitCompilation).Assembly, null); - - InitializeQirContext(CreateFullstateSimulatorC(0), true); if (!File.Exists(pathToBitcode)) { @@ -111,9 +100,7 @@ public static unsafe void BuildAndRun(string pathToBitcode, params string[] func // Linux requires an additional explicit load of the libraries into MCJIT. // Full paths are not needed since .NET already loaded these into program memory above, // but without this explict load the JIT logic won't find them. 
- ExplicitLibraryLoad("libMicrosoft.Quantum.Qir.Runtime.so"); - ExplicitLibraryLoad("libMicrosoft.Quantum.Qir.QSharp.Foundation.so"); - ExplicitLibraryLoad("libMicrosoft.Quantum.Qir.QSharp.Core.so"); + ExplicitLibraryLoad("libMicrosoft.Quantum.Simulator.Runtime.so"); } var engine = modRef.CreateMCJITCompiler(); diff --git a/src/QsCompiler/TestTargets/Simulation/Target/Simulation.csproj b/src/QsCompiler/TestTargets/Simulation/Target/Simulation.csproj index bb0a2f8f36..8f274719d6 100644 --- a/src/QsCompiler/TestTargets/Simulation/Target/Simulation.csproj +++ b/src/QsCompiler/TestTargets/Simulation/Target/Simulation.csproj @@ -18,8 +18,7 @@ - - + diff --git a/src/QsCompiler/Tests.Compiler/ExecutionTests.fs b/src/QsCompiler/Tests.Compiler/ExecutionTests.fs index d21c20c1aa..36e4989ac8 100644 --- a/src/QsCompiler/Tests.Compiler/ExecutionTests.fs +++ b/src/QsCompiler/Tests.Compiler/ExecutionTests.fs @@ -162,7 +162,7 @@ type ExecutionTests(output: ITestOutputHelper) = "interpolated string" true or false, true, false, true, false 1, -1, 0 - 1.0, 2.0, 100000.0, 0.10000000000000001, -1.0, 0.0 + 1.0, 2.0, 100000.0, 0.1, -1.0, 0.0 Zero, One PauliZ, PauliX, PauliY, [PauliI] 1..3, 3..-1..1, 0..-1..0