diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml deleted file mode 100644 index cb807cc85..000000000 --- a/.github/workflows/ci.yaml +++ /dev/null @@ -1,92 +0,0 @@ -name: Continuous Integration - -on: [pull_request] - -jobs: - lint: - name: Lint check - runs-on: ubuntu-16.04 - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.6' - architecture: 'x64' - - name: Install Lint tools - run: pip install --upgrade pylint - - name: Lint All - run: ./scripts/lint_all.sh - - format: - name: Formatting check - runs-on: ubuntu-16.04 - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.6' - architecture: 'x64' - - name: Install Format tools - run: pip install --upgrade pip setuptools; pip install -r requirements.txt; sudo apt-get install -y clang-format - - name: Format Check - run: ./scripts/format_check.sh - - wheel-build: - name: Wheel test - runs-on: ubuntu-16.04 - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.6' - architecture: 'x64' - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Build Wheel Test - run: ./scripts/build_pip_package_test.sh - - name: Test Wheel - run: ./scripts/run_example.sh - - bazel-tests: - name: Library tests - runs-on: ubuntu-16.04 - needs: [lint, format] - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.6' - architecture: 'x64' - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Full Library Test - run: ./scripts/test_all.sh - - tutorials-test: - name: Tutorial tests - runs-on: ubuntu-16.04 - needs: [lint, format, wheel-build] - - steps: - - uses: actions/checkout@v1 - - uses: actions/setup-python@v1 - with: - python-version: '3.6' - architecture: 'x64' - - name: 
Install notebook dependencies - run: pip install --upgrade pip seaborn==0.10.0 - - name: Install Bazel on CI - run: ./scripts/ci_install.sh - - name: Configure CI TF - run: echo "Y\n" | ./configure.sh - - name: Build Wheel - run: ./scripts/build_pip_package_test.sh - - name: Test Notebooks - run: ./scripts/ci_validate_tutorials.sh \ No newline at end of file diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml deleted file mode 100644 index d33969c96..000000000 --- a/.github/workflows/stale.yml +++ /dev/null @@ -1,19 +0,0 @@ -name: Mark stale issues and pull requests - -on: - schedule: - - cron: "0 0 1 * *" - -jobs: - stale: - - runs-on: ubuntu-latest - - steps: - - uses: actions/stale@v1 - with: - repo-token: ${{ secrets.GITHUB_TOKEN }} - stale-issue-message: 'This issue has not had any activity in a month. Is it stale ?' - stale-pr-message: 'This PR has not had any activity in a month. Is it stale ?' - stale-issue-label: 'no-issue-activity' - stale-pr-label: 'no-pr-activity' diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index aaef90426..000000000 --- a/.pylintrc +++ /dev/null @@ -1,131 +0,0 @@ -[MASTER] - -max-line-length=80 -disable=all -output-format=colorized -score=no -reports=no -enable= - assert-on-tuple, - bad-indentation, - bad-option-value, - bad-reversed-sequence, - bad-super-call, - consider-merging-isinstance, - continue-in-finally, - cyclic-import, - dangerous-default-value, - duplicate-argument-name, - empty-docstring, - expression-not-assigned, - function-redefined, - import-self, - invalid-name, - relative-import, - inconsistent-mro, - init-is-generator, - line-too-long, - lost-exception, - missing-docstring, - missing-kwoa, - mixed-indentation, - mixed-line-endings, - multiple-imports, - no-else-return, - not-callable, - no-value-for-parameter, - nonexistent-operator, - not-in-loop, - pointless-statement, - redefined-builtin, - relative-import, - return-arg-in-generator, - return-in-init, - return-outside-function, 
- simplifiable-if-statement, - syntax-error, - too-many-function-args, - trailing-whitespace, - undefined-variable, - unexpected-keyword-arg, - unhashable-dict-key, - unnecessary-pass, - unreachable, - unrecognized-inline-option, - unused-import, - unnecessary-semicolon, - unused-variable, - unused-wildcard-import, - wildcard-import, - wrong-import-order, - wrong-import-position, - wrong-spelling-in-comment, - wrong-spelling-in-docstrin, - wildcard-import, - yield-outside-function, - -# Ignore long lines containing urls or pylint directives. -ignore-long-lines=^(\s*(#\s*)??)$ - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=yes - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,65}$ - -# Naming hint for function names -function-name-hint=[a-z_][a-z0-9_]{2,65}$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Naming hint for variable names -variable-name-hint=[a-z_][a-z0-9_]{0,30}$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Naming hint for constant names -const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Naming hint for attribute names -attr-name-hint=[a-z_][a-z0-9_]{0,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{0,30}$ - -# Naming hint for argument names -argument-name-hint=[a-z_][a-z0-9_]{0,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Naming hint for class attribute names -class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Naming hint for inline iteration names -inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression matching 
correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Naming hint for class names -class-name-hint=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Naming hint for module names -module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct method names -method-rgx=[a-z_][a-z0-9_]{2,60}$ - -# Naming hint for method names -method-name-hint=[a-z_][a-z0-9_]{2,60}$ \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 3d5b67e7e..000000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,62 +0,0 @@ -# Contributing - -## Contributor License Agreements - -We'd love to accept your patches! Before we can take them, we have to jump a -couple of legal hurdles. - -Please fill out either the individual or corporate Contributor License Agreement -(CLA). - -* If you are an individual writing original source code and you're sure you - own the intellectual property, then you'll need to sign an - [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html). -* If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a - [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html). - -Follow either of the two links above to access the appropriate CLA and -instructions for how to sign and return it. Once we receive it, we'll be able to -accept your pull requests. - -NOTE: Only original source code from you and other people that have signed the -CLA can be accepted into the main repository. - -## Code Reviews - -All submissions, including submissions by project members, require review. We -use GitHub pull requests for this purpose. 
Consult -[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more -information on using pull requests and the -[TensorFlow Community Guidelines](https://www.tensorflow.org/community/contribute) -for more information on contributor best practices. - -Before making any changes, we recommend opening an issue (if it doesn't already -exist) and discussing your proposed changes. This will let us give you advice -on the proposed changes. If the changes are minor, then feel free to make -them without discussion. - -## Code Standards - -We have some standards in place to ensure that incoming code is the highest -quality it can be. Before a code review can happen you should make sure that -the following tests are passing locally: - -1. `./scripts/test_all.sh` passes. We use TensorFlow's testing suite for our -testing and be sure that any code you add follows the structure they have -[outlined](https://www.tensorflow.org/api_docs/python/tf/test). - -2. `./scripts/lint_all.sh` passes. We use [pylint](https://www.pylint.org/) -to ensure that code has proper formatting and is lint free. - -3. `./scripts/format_check.sh` passes. We use -[yapf](https://github.com/google/yapf) along with -[clang format](https://clang.llvm.org/docs/ClangFormat.html) to ensure we have -consistent formatting everywhere. - -### Adding Modules - -If you are adding new modules, be sure to properly expose them to the user -using `__init__.py` files and update the `/scripts/import_test.py` file -to ensure that you are exposing them properly. - diff --git a/README.md b/README.md index a6d9c5c93..a84a114f2 100644 --- a/README.md +++ b/README.md @@ -1,55 +1,3 @@ -# TensorFlow Quantum +# Research -TensorFlow Quantum (TFQ) is a python framework for hybrid -quantum-classical machine learning that is primarily focused on -modeling quantum data. 
TFQ is an application framework developed to -allow quantum algorithms researchers and machine learning applications -researchers to explore computing workflows that leverage Google’s -quantum computing offerings, all from within TensorFlow. - - -## Motivation - -Quantum computing at Google has hit an exciting milestone with the achievment -of [Quantum Supremacy](https://www.nature.com/articles/s41586-019-1666-5). -In the wake of this demonstration, Google is now turning its attention to -developing and implementing new algorithms to run on its Quantum Computer -that have real world [applications](https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html). - -To provide users with the tools they need to program and simulate a quantum -computer, Google is working on [Cirq](https://github.com/quantumlib/Cirq). Cirq -is designed for quantum computing researchers who are interested in running and -designing algorithms that leverage existing (imperfect) quantum computers. - -TensorFlow Quantum provides users with the tools they need to interleave quantum -algorithms and logic designed in Cirq with the powerful and performant ML tools -from TensorFlow. With this connection we hope to unlock new and exciting paths -for Quantum Computing research that would not have otherwise been possible. - - -## Installation - -See the [installation instructions](https://github.com/tensorflow/quantum/blob/master/docs/install.md). - - -## Examples - -All of our examples can be found here in the form of -[Python notebook tutorials](https://github.com/tensorflow/quantum/tree/master/docs/tutorials) - - -## Report issues - -Report bugs or feature requests using the -[TensorFlow Quantum issue tracker](https://github.com/tensorflow/quantum/issues). - -In the meantime check out the [install instructions](./docs/install.md) to get -the experimental code running! - - -## Contributing - -We are eager to collaborate with you! 
TensorFlow Quantum is still a very young codebase, -if you have ideas for features that you would like added feel free to check out our -[Contributor Guidelines](https://github.com/tensorflow/quantum/blob/master/CONTRIBUTING.md) -to get started. +Each directory in this branch corresponds to an example application in the [TensorFlow Quantum whitepaper](https://arxiv.org/abs/2003.02989). diff --git a/WORKSPACE b/WORKSPACE deleted file mode 100644 index 7bb7c2655..000000000 --- a/WORKSPACE +++ /dev/null @@ -1,122 +0,0 @@ -# This file includes external dependencies that are required to compile the -# TensorFlow op. Maybe of them are specific versions used by the TensorFlow -# binary used. These are extracted from TF v2.0.0, but are also compatible -# with v1.14.0. - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "com_google_absl", - sha256 = "acd93f6baaedc4414ebd08b33bebca7c7a46888916101d8c0b8083573526d070", - strip_prefix = "abseil-cpp-43ef2148c0936ebf7cb4be6b19927a9d9d145b8f", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz", - "https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz", - ], -) - -http_archive( - name = "com_google_googletest", - sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86", - strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", - "https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", - ], -) - -http_archive( - name = "com_google_protobuf", - sha256 = "b9e92f9af8819bbbc514e2902aec860415b70209f31dfc8c4fa72515a5df9d59", - strip_prefix = "protobuf-310ba5ee72661c081129eb878c1bbcec936b20f0", - urls = [ - 
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz", - "https://github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz", - ], -) - -# Use this zlib rule that depends on github since it is more reliable than zlib.net. -http_archive( - name = "zlib", - build_file = "@com_google_protobuf//:third_party/zlib.BUILD", - sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", - strip_prefix = "zlib-1.2.11", - urls = ["https://github.com/madler/zlib/archive/v1.2.11.tar.gz"], -) - -load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") - -protobuf_deps() - -# com_google_protobuf depends on @bazel_skylib -http_archive( - name = "bazel_skylib", - sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d", - strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b", - urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"], -) - -http_archive( - name = "cirq", - sha256 = "e882a0bfbf47c75c69d70de354049d64bbec2ef0d114def7da36cf4867e7b57f", - strip_prefix = "Cirq-0.7.0", - urls = ["https://github.com/quantumlib/Cirq/archive/v0.7.0.zip"], -) - -# Added for crosstool in tensorflow. 
-http_archive( - name = "io_bazel_rules_closure", - sha256 = "5b00383d08dd71f28503736db0500b6fb4dda47489ff5fc6bed42557c07c6ba9", - strip_prefix = "rules_closure-308b05b2419edb5c8ee0471b67a40403df940149", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", - "https://github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", # 2019-06-13 - ], -) - -http_archive( - name = "org_tensorflow", - sha256 = "e82f3b94d863e223881678406faa5071b895e1ff928ba18578d2adbbc6b42a4c", - strip_prefix = "tensorflow-2.1.0", - urls = [ - "https://github.com/tensorflow/tensorflow/archive/v2.1.0.zip", - ], -) - -load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") - -tf_workspace(tf_repo_name = "@org_tensorflow") - -load("//third_party/tf:tf_configure.bzl", "tf_configure") - -tf_configure(name = "local_config_tf") - -http_archive( - name = "eigen", - # TODO(pmassey): Probably move this content in a third_party/eigen.BUILD file - build_file_content = """ -cc_library( - name = "eigen3", - textual_hdrs = glob(["Eigen/**", "unsupported/**"]), - visibility = ["//visibility:public"], -) - """, - sha256 = "7e7a57e33c59280a17a66e521396cd8b1a55d0676c9f807078522fda52114b5c", - strip_prefix = "eigen-eigen-8071cda5714d", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/bitbucket.org/eigen/eigen/get/8071cda5714d.tar.gz", - "https://bitbucket.org/eigen/eigen/get/8071cda5714d.tar.gz", - ], -) - -http_archive( - name = "six_archive", - build_file = "@com_google_protobuf//:six.BUILD", - sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a", - url = "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz#md5=34eed507548117b2ab523ab14b2f8b55", -) - -bind( - name = "six", - actual = "@six_archive//:six", -) diff --git a/benchmarks/BUILD b/benchmarks/BUILD deleted file mode 100644 index fd4e75c2e..000000000 
--- a/benchmarks/BUILD +++ /dev/null @@ -1,6 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) diff --git a/benchmarks/README.md b/benchmarks/README.md deleted file mode 100644 index b71bd1b04..000000000 --- a/benchmarks/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Tensorflow Quantum Benchmarks - -## Testing instructions -Benchmarks are currently tested separately from the main repository. To run -a benchmark testcase, simply _run_ the benchmark file like with any other unit test: -``` -bazel run benchmarks/scripts: -``` - -## Instructions to run -A benchmark can be run from the command line or a bash script by setting model -parameters via flags, separated from Bazel flags by a `--` delimiter. To run a benchmark with a set of specific parameters, use -the following command template: -``` -bazel run benchmarks/scripts: -- --benchmarks= -``` -Some notes on benchmark configuration: - - "all" is a valid option for the `benchmarks` flag, and will result in all benchmarks methods associated with that file to run. - - If a benchmark method runs twice with identical configurations, the most recent run will overwrite previous reports. 
- - For information on valid parameter flags and their descriptions see `flags.py` - - -### Sample benchmark experiments - -For example, to benchmark a dense depth-10 Clifford circuit over 5 qubits call: -``` -bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" \ - --cxxopt="-msse3" --cxxopt="-msse4" \ - benchmarks/scripts:benchmark_clifford_circuit -- \ - --n_moments 5 --n_qubits 4 \ - --benchmarks=benchmark_clifford_circuit_eager -``` -This will produce a proto benchmark report under `benchmarks/reports` corresponding to the chosen parameters: -``` -benchmarks/scripts/reports/CliffordBenchmarks.benchmark_clifford_circuit_4_5_1 -``` - - -To benchmark the parameter shift differentiation method on a random depth-10 4-qubit circuit with 10 parameters call, where the circuit will be differentiated -over 50 trials, each time over a batch of 10 circuits. -``` -bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" \ - --cxxopt="-msse3" --cxxopt="-msse4" \ - benchmarks/scripts:benchmark_op_gradients -- \ - --n_moments 10 --n_qubits 4 --n_symbols 10 \ - --n_runs 50 --batch_size 10 \ - --benchmarks=benchmark_parameter_shift -``` - diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/benchmarks/scripts/BUILD b/benchmarks/scripts/BUILD deleted file mode 100644 index 1f93be0d6..000000000 --- a/benchmarks/scripts/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_test( - name = "benchmark_clifford_circuit", - srcs = ["benchmark_clifford_circuit.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_random_circuit", - srcs = ["benchmark_random_circuit.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_op_gradients", - srcs = ["benchmark_op_gradients.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:batch_util", - "//tensorflow_quantum/core/ops:cirq_ops", - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/ops:tfq_utility_ops_py", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:linear_combination", - "//tensorflow_quantum/python/differentiators:parameter_shift", - "//tensorflow_quantum/python/differentiators:stochastic_differentiator", - "@local_config_tf//:test_log_pb2", - ], -) - -py_library( - name = "benchmark_util", - srcs = ["benchmark_util.py"], - deps = [ - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_util_test", - srcs = ["benchmark_util_test.py"], - python_version = "PY3", - deps = [ - ":benchmark_util", - "@local_config_tf//:test_log_pb2", - ], -) diff --git a/benchmarks/scripts/__init__.py b/benchmarks/scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/benchmarks/scripts/benchmark_clifford_circuit.py b/benchmarks/scripts/benchmark_clifford_circuit.py deleted file mode 100644 index 643eff790..000000000 --- a/benchmarks/scripts/benchmark_clifford_circuit.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Benchmark simulators against classically simulatable circuits.""" -import os -import time - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -from tensorflow_quantum.core.serialize.serializer import serialize_circuit -from models.random_clifford_circuit import random_clifford_circuit -import flags -import benchmark_util - -SEED = 48510234 -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_qubits=3, n_moments=5, op_density=0.99) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_qubits=4, n_moments=5, op_density=0.99) -ALL_PARAMS = [TEST_PARAMS_1, TEST_PARAMS_2] - - -class CliffordBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the Clifford benchmarking class.""" - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testBenchmarkCliffordCircuitEager(self, params): - """Test that Op constructs and runs correctly.""" - proto_file_path = os.path.join( - SRC, "reports/", - "CliffordBenchmarks.benchmark_clifford_circuit_{}_{}_{}".format( - params.n_qubits, params.n_moments, params.batch_size)) - self.addCleanup(os.remove, proto_file_path) - - bench = 
CliffordBenchmarks(params=params) - bench.benchmark_clifford_circuit_eager() - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual( - res.name, - "CliffordBenchmarks.benchmark_clifford_circuit_{}_{}_{}".format( - params.n_qubits, params.n_moments, params.batch_size)) - self.assertEqual( - res.extras.get("n_qubits").double_value, params.n_qubits) - self.assertEqual( - res.extras.get("n_moments").double_value, params.n_moments) - self.assertEqual( - res.extras.get("op_density").double_value, params.op_density) - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - -class CliffordBenchmarks(tf.test.Benchmark): - """Benchmark simulators against Clifford circuits. - - Flags: - --n_qubits --n_moments --op_density --batch_size --n_runs --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(CliffordBenchmarks, self).__init__() - # Allow input params for testing purposes. - self.params = params if params else flags.FLAGS - - def _simulate_circuit(self, circuit, params): - # TODO: implement backend switch - return tfq_simulate_ops.tfq_simulate_state( - [str(serialize_circuit(circuit))] * params.batch_size, ["None"], - [[0]] * params.batch_size) - - def benchmark_clifford_circuit_eager(self): - """tf.test.Benchmark does not provide eager benchmarks methods.""" - - qubits = cirq.GridQubit.rect(1, self.params.n_qubits) - circuit = random_clifford_circuit( - qubits, - self.params.n_moments, - self.params.op_density, - random_state=np.random.RandomState(SEED)) - - for _ in range(self.params.n_burn): - _ = self._simulate_circuit(circuit, self.params) - - deltas = [None] * self.params.n_runs - for i in range(self.params.n_runs): - start = time.perf_counter() - _ = self._simulate_circuit(circuit, self.params) - deltas[i] = time.perf_counter() - start - - extras = { - 'n_qubits': self.params.n_qubits, - 'n_moments': self.params.n_moments, - 'op_density': self.params.op_density, - 
'batch_size': self.params.batch_size, - "min_time": min(deltas), - } - name = "benchmark_clifford_circuit_{}_{}_{}".format( - self.params.n_qubits, self.params.n_moments, self.params.batch_size) - - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - benchmark_values = { - "iters": self.params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_op_gradients.py b/benchmarks/scripts/benchmark_op_gradients.py deleted file mode 100644 index 88f37d620..000000000 --- a/benchmarks/scripts/benchmark_op_gradients.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Benchmark differentiator methods.""" -import os -import time -import string - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -import benchmark_util -import flags - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import (linear_combination, - parameter_shift, - stochastic_differentiator - ) - -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_symbols=4, - n_qubits=3, - n_moments=5, - op_density=0.9) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_symbols=3, - n_qubits=4, - n_moments=5, - op_density=0.6) - - -class GradientBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the Gradient benchmarking class.""" - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'diff': [ - linear_combination.ForwardDifference(), - linear_combination.CentralDifference(), - parameter_shift.ParameterShift(), - stochastic_differentiator.SGDifferentiator(), - ], - 'params': [TEST_PARAMS_1, TEST_PARAMS_2] - }))) - def testBenchmarkGradient(self, diff, params): - """Test that op constructs and runs correctly.""" - - bench_name = "GradientBenchmarks.{}_{}_{}_{}_{}".format( - diff.__class__.__name__, params.n_qubits, params.n_moments, - params.batch_size, params.n_symbols) - proto_file_path = os.path.join(SRC, "reports/", "{}".format(bench_name)) - self.addCleanup(os.remove, proto_file_path) - - bench = GradientBenchmarks(params=params) - bench.setup() - bench._benchmark_tfq_differentiator(diff, params) - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual(res.name, bench_name) - self.assertEqual( - res.extras.get("n_qubits").double_value, params.n_qubits) - self.assertEqual( - 
res.extras.get("n_moments").double_value, params.n_moments) - self.assertEqual( - res.extras.get("op_density").double_value, params.op_density) - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - -class GradientBenchmarks(tf.test.Benchmark): - """Benchmarks for circuit differentiation. - - Flags: - --n_qubits --n_moments --op_density --n_runs --n_symbols --batch_size - --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(GradientBenchmarks, self).__init__() - self.params = params if params else flags.FLAGS - self.setup() - - def setup(self): - """Persistent variational circuit, parameters, and observables.""" - qubits = cirq.GridQubit.rect(1, self.params.n_qubits) - - # Generate arbitrary symbol set without name clashes. - symbol_names = set() - while len(symbol_names) < self.params.n_symbols: - symbol_names.add(''.join( - np.random.choice(list(string.ascii_uppercase), - size=4, - replace=True))) - symbol_names = list(symbol_names) - - circuit_batch, resolver_batch = util.random_symbol_circuit_resolver_batch( - qubits=qubits, - symbols=symbol_names, - batch_size=self.params.batch_size, - n_moments=self.params.n_moments, - p=self.params.op_density) - psums = util.random_pauli_sums(qubits, 1, self.params.batch_size) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - self.symbol_names = symbol_names - self.symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - self.programs = util.convert_to_tensor(circuit_batch) - self.psums = util.convert_to_tensor([psums]) - - def _benchmark_tfq_differentiator(self, differentiator, params): - """Common pipeline for benchmarking and reporting.""" - # for parametrization over a single differentiator instance - differentiator.refresh() - op = differentiator.generate_differentiable_op( - analytic_op=tfq_simulate_ops.tfq_simulate_expectation) - - for 
_ in range(params.n_burn): - op(self.programs, self.symbol_names, self.symbol_values_tensor, - self.psums) - - deltas = [None] * params.n_runs - for i in range(params.n_runs): - start = time.perf_counter() - with tf.GradientTape() as g: - g.watch(self.symbol_values_tensor) - expectations = op(self.programs, self.symbol_names, - self.symbol_values_tensor, self.psums) - g.gradient(expectations, self.symbol_values_tensor) - deltas[i] = time.perf_counter() - start - - # Name benchmark logs by differentiator classname. - name = "{}_{}_{}_{}_{}".format(differentiator.__class__.__name__, - params.n_qubits, params.n_moments, - params.batch_size, params.n_symbols) - - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - extras = { - 'n_qubits': params.n_qubits, - 'n_moments': params.n_moments, - 'op_density': params.op_density, - 'n_symbols': params.n_symbols, - 'batch_size': params.batch_size, - "min_time": min(deltas), - } - - benchmark_values = { - "iters": params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - def benchmark_finite_difference_forward(self): - """Benchmark the forward difference gradient method.""" - diff = linear_combination.ForwardDifference() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_finite_difference_central(self): - """Benchmark the central difference gradient method.""" - diff = linear_combination.CentralDifference() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_parameter_shift(self): - """Benchmark the parameter shift gradient method.""" - diff = parameter_shift.ParameterShift() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_stochastic_differentiator(self): - """Benchmark the default stochastic differentiator.""" - diff = 
stochastic_differentiator.SGDifferentiator() - self._benchmark_tfq_differentiator(diff, self.params) - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_random_circuit.py b/benchmarks/scripts/benchmark_random_circuit.py deleted file mode 100644 index 51d4ccfbf..000000000 --- a/benchmarks/scripts/benchmark_random_circuit.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Benchmark simulators against classically intractable 'supremacy' circuits.""" -import os -import time - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -from tensorflow_quantum.core.serialize.serializer import serialize_circuit -import flags -import benchmark_util - -SEED = 63536323 -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_rows=3, n_cols=5, n_moments=5) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_rows=4, n_cols=4, n_moments=20) - - -def make_random_circuit(n_rows, n_cols, depth): - """Generate a random unparameterized circuit of fixed depth.""" - return cirq.experiments.generate_boixo_2018_supremacy_circuits_v2_grid( - n_rows=n_rows, - n_cols=n_cols, - cz_depth=depth - 2, # Account for beginning/ending Hadamard layers - seed=SEED) - - -class RandomCircuitBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the random circuit benchmarking class.""" - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testBenchmarkRandomCircuit(self, params): - """Test that Op constructs and runs correctly.""" - proto_file_path = os.path.join( - SRC, "reports/", - "RandomCircuitBenchmarks.benchmark_random_circuit_{}_{}_{}".format( - params.n_rows, params.n_cols, params.n_moments)) - self.addCleanup(os.remove, proto_file_path) - - bench = RandomCircuitBenchmarks(params=params) - bench.benchmark_random_circuit() - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual( - res.name, - "RandomCircuitBenchmarks.benchmark_random_circuit_{}_{}_{}".format( - params.n_rows, params.n_cols, params.n_moments)) - self.assertEqual(res.extras.get("n_rows").double_value, params.n_rows) - 
self.assertEqual(res.extras.get("n_cols").double_value, params.n_cols) - self.assertEqual( - res.extras.get("n_moments").double_value, params.n_moments) - - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testRandomCircuitParams(self, params): - """Ensure that the random circuits are structured as advertised.""" - circuit = make_random_circuit(params.n_rows, params.n_cols, - params.n_moments) - self.assertEqual(len(circuit), params.n_moments) - self.assertEqual(len(circuit.all_qubits()), - params.n_rows * params.n_cols) - - -class RandomCircuitBenchmarks(tf.test.Benchmark): - """Benchmark simulators against random 'supremacy' circuits. - - Flags: - --n_rows --n_cols --n_moments --batch_size --n_runs --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(RandomCircuitBenchmarks, self).__init__() - # Allow input params for testing purposes. 
- self.params = params if params else flags.FLAGS - - def _simulate_circuit(self, circuit, params): - # TODO: implement backend switch - return tfq_simulate_ops.tfq_simulate_state( - [str(serialize_circuit(circuit))] * params.batch_size, ["None"], - [[0]] * params.batch_size) - - def benchmark_random_circuit(self): - """Benchmark simulator performance on a classically intractable circuit.""" - - circuit = make_random_circuit(self.params.n_rows, self.params.n_cols, - self.params.n_moments) - for _ in range(self.params.n_burn): - _ = self._simulate_circuit(circuit, self.params) - - deltas = [None] * self.params.n_runs - for i in range(self.params.n_runs): - start = time.perf_counter() - _ = self._simulate_circuit(circuit, self.params) - deltas[i] = time.perf_counter() - start - - extras = { - 'n_rows': self.params.n_rows, - 'n_cols': self.params.n_cols, - 'n_qubits': len(circuit.all_qubits()), - 'n_moments': self.params.n_moments, - 'batch_size': self.params.batch_size, - "min_time": min(deltas), - } - - name = "benchmark_random_circuit_{}_{}_{}".format( - self.params.n_rows, self.params.n_cols, self.params.n_moments) - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - benchmark_values = { - "iters": self.params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_util.py b/benchmarks/scripts/benchmark_util.py deleted file mode 100644 index 5b4bea2e5..000000000 --- a/benchmarks/scripts/benchmark_util.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility functions for benchmark tools.""" -import tensorflow as tf -import test_log_pb2 - - -def read_benchmark_entry(f): - s = tf.io.gfile.GFile(f, "rb").read() - entries = test_log_pb2.BenchmarkEntries.FromString(s) - return entries.entry[0] diff --git a/benchmarks/scripts/benchmark_util_test.py b/benchmarks/scripts/benchmark_util_test.py deleted file mode 100644 index bece69897..000000000 --- a/benchmarks/scripts/benchmark_util_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for utilities related to reading/running benchmarks.""" -import os -import tempfile - -import tensorflow as tf - -import test_log_pb2 -import benchmark_util - - -def _make_dummy_benchmark_report(): - """Make a serialized benchmark report.""" - entries = test_log_pb2.BenchmarkEntries() - entry = entries.entry.add() - entry.name = "dummy_report" - entry.iters = 1234 - entry.wall_time = 5678 - return entries.SerializeToString() - - -class ReadBenchmarkEntryTest(tf.test.TestCase): - """Test reading serialized benchmark results.""" - - def test_read_benchmark_entry(self): - """Test reading test_log protobuf contents.""" - - # Do temp file setup and queue teardown. - with tempfile.NamedTemporaryFile(prefix='ReadBenchmarkEntryTest', - dir=self.get_temp_dir(), - delete=False) as temp: - temp.write(_make_dummy_benchmark_report()) - self.addCleanup(lambda: os.remove(temp.name)) - - res = benchmark_util.read_benchmark_entry(temp.name) - self.assertEqual(res.name, "dummy_report") - self.assertEqual(res.iters, 1234) - self.assertEqual(res.wall_time, 5678) - - -if __name__ == '__main__': - tf.test.main() diff --git a/benchmarks/scripts/differentiators/BUILD b/benchmarks/scripts/differentiators/BUILD deleted file mode 100644 index 61241b346..000000000 --- a/benchmarks/scripts/differentiators/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_test( - name = "convergence_test", - srcs = ["convergence_test.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:batch_util", - "//tensorflow_quantum/core/ops:cirq_ops", - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:stochastic_differentiator", - ], -) diff --git a/benchmarks/scripts/differentiators/__init__.py b/benchmarks/scripts/differentiators/__init__.py deleted file mode 100644 index bf5b48863..000000000 --- a/benchmarks/scripts/differentiators/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== \ No newline at end of file diff --git a/benchmarks/scripts/differentiators/convergence_test.py b/benchmarks/scripts/differentiators/convergence_test.py deleted file mode 100644 index 42499d294..000000000 --- a/benchmarks/scripts/differentiators/convergence_test.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Testing for SGDifferentiator convergence & calculation consistency in TFQ.""" -import copy -import time - -import numpy as np -import tensorflow as tf -from absl.testing import parameterized - -import cirq -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import stochastic_differentiator -from tensorflow_quantum.core.ops import tfq_simulate_ops, batch_util - -# DISCLAIMER: Environment : Intel(R) Xeon(R) W-2135 CPU @ 3.70GHz, 12 cores. -# The overall tests take around 1 hours. -DIFFS_NUM_RUNS = [ - # The tests without sampling cost Hamiltonian take 1.5 hours. 
- # Case 1 : ParameterShift ~ 0.04 sec/shot - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=False, - stochastic_cost=False), 1), - # Case 2 : coordinate ~ 42 sec (0.04 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=False, - stochastic_cost=False), 1100), - # Case 3 : generator ~ 350 sec (0.023 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=True, - stochastic_cost=False), 15000), - # Case 4 : coordinate + generator ~ 400 sec ~ (0.020 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=False), 20000), - # The tests with sampling cost Hamiltonian takes around 3 hours - # Case 5 : cost ~ 35 sec (0.15 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=False, - stochastic_cost=True), 250), - # Case 6 : cost + coordinate ~ 160 sec (0.15 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=False, - stochastic_cost=True), 1200), - # Case 7 : cost + generator ~ 320 sec (0.13 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=True, - stochastic_cost=True), 2500), - # Case 8 : All ~ 2400 sec ~ 40 m (0.12 sec/shot) - # Increase error margin due to numerical stability of summing up gradients - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=True), 20000), -] - - -# TODO(jaeyoo): aggregate identical _cirq_simple_finite_difference functions -# in different tests into one python file and import it. 
-def _cirq_simple_finite_difference(circuit_batch, - resolvers, - symbol_names, - op_batch, - grid_spacing=0.0001): - simulator = cirq.sim.Simulator() - - init_vals = batch_util.batch_calculate_expectation(circuit_batch, resolvers, - op_batch, simulator) - grad_circuits = [] - grad_resolvers = [] - grad_pauli_sums = [] - for this_program, this_pauli_sums, this_resolver in \ - zip(circuit_batch, op_batch, resolvers): - for symbol in symbol_names: - perturbed_resolver = copy.deepcopy(this_resolver) - perturbed_resolver.param_dict[symbol] += grid_spacing - grad_circuits.append(this_program) - grad_pauli_sums.append(this_pauli_sums) - grad_resolvers.append(perturbed_resolver) - - # shape: [n_programs * len(symbol_names), n_pauli_sums] - results = np.array( - batch_util.batch_calculate_expectation(circuits=grad_circuits, - param_resolvers=grad_resolvers, - ops=grad_pauli_sums, - simulator=simulator)) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - gradient_generator = results.transpose().reshape( - (len(op_batch[0]), len(circuit_batch), len(symbol_names))) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - forward_pass_vals = np.transpose( - np.vstack([np.expand_dims(init_vals, axis=0)] * len(symbol_names)), - (2, 1, 0)) - - return np.sum(1 / grid_spacing * (gradient_generator - forward_pass_vals), - axis=0) - - -class StochasticGradientConvergenceTest(tf.test.TestCase, - parameterized.TestCase): - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator_num_runs': DIFFS_NUM_RUNS, - 'n_qubits': [3], - 'n_programs': [3], - 'n_ops': [3], - 'symbol_names': [['a', 'b']], - 'eps': [0.1] - }))) - def test_gradients_vs_cirq_finite_difference(self, differentiator_num_runs, - n_qubits, n_programs, n_ops, - symbol_names, eps): - """Convergence tests on SGDifferentiator variants.""" - - # TODO(trevormccrt): remove this once I build the user-facing op - # interface - differentiator, num_runs = differentiator_num_runs - 
differentiator.refresh() - op = differentiator.generate_differentiable_op( - analytic_op=tfq_simulate_ops.tfq_simulate_expectation) - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs) - - psums = [ - util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch - ] - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - # calculate tfq gradient - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - ops = util.convert_to_tensor(psums) - - def _get_gradient(): - with tf.GradientTape() as g: - g.watch(symbol_values_tensor) - expectations = op(programs, symbol_names, symbol_values_tensor, - ops) - return tf.cast(g.gradient(expectations, symbol_values_tensor), - dtype=tf.float64) - - # warm-up & initialize tfq_grads. 
- grads_sum = _get_gradient() - tfq_grads = grads_sum - - # calculate gradients in cirq using a very simple forward differencing - # scheme - cirq_grads = _cirq_simple_finite_difference(circuit_batch, - resolver_batch, - symbol_names, psums) - cnt = 1 - # Since self.assertAllClose() has more strict atol than that of - # np.allclose(), it is required to set smaller value to np.allclose() - total_time = 0 - while cnt < num_runs and (not np.allclose( - tfq_grads, cirq_grads, atol=eps * 0.9)): - cnt = cnt + 1 - s = time.time() - grads_sum = grads_sum + _get_gradient() - total_time += time.time() - s - tfq_grads = grads_sum / cnt - - self.assertAllClose(cirq_grads, tfq_grads, atol=eps) - print('Passed: count {}, total_time {} ({}sec/shot)'.format( - cnt, total_time, total_time / cnt)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/benchmarks/scripts/flags.py b/benchmarks/scripts/flags.py deleted file mode 100644 index eaf7e78e2..000000000 --- a/benchmarks/scripts/flags.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Command line flags shared between benchmarks.""" -from collections import namedtuple -from absl import flags as absl_flags - -FLAGS = absl_flags.FLAGS - -absl_flags.DEFINE_integer('n_qubits', - None, - 'Number of qubits in the benchmark circuit.', - lower_bound=2, - upper_bound=16) - -absl_flags.DEFINE_integer('n_moments', - None, - 'Depth of benchmark circuit.', - lower_bound=1) - -absl_flags.DEFINE_float( - 'op_density', - 0.99, - 'Density of operators in benchmark circuit, or the probability that a ' - 'given qubit in each moment is acted on by an operation.', - lower_bound=0, - upper_bound=0.99) # For compatibility with util lib - -absl_flags.DEFINE_integer('n_symbols', - 1, 'Number of symbols to parametrize a circuit by. ' - 'Use this to tune optimization convergence times.', - lower_bound=1) - -absl_flags.DEFINE_integer( - 'n_rows', - None, - 'Number of qubit rows in random circuit to benchmark.', - lower_bound=2) - -absl_flags.DEFINE_integer( - 'n_cols', - None, - 'Number of qubit columns in random circuit to benchmark.', - lower_bound=2) - -absl_flags.DEFINE_integer('batch_size', - 1, - 'The number of circuits to simulate in parallel.', - lower_bound=1) - -absl_flags.DEFINE_integer( - 'n_iters', - 1, "Number of rounds to run each benchmark, corresponding to" - "number of iterations in a training context. ", - lower_bound=1) - -# Benchmark metadata. -absl_flags.DEFINE_string('backend', None, - 'Which backend simulator to benchmark.') - -absl_flags.DEFINE_integer( - 'n_runs', - 1, - 'Number of times to run the model for its specified number of iterations ' - 'during benchmarking. For example, if a model is specified to be trained ' - 'for 50 iterations, `n_runs=10` would reset this model after training a ' - 'total of 10 times, resulting in a time overhead of 500 total iterations.', - lower_bound=1) - -absl_flags.DEFINE_integer('n_burn', - 0, - 'Number of burner runs. 
See `n_runs`.', - lower_bound=0) - - -def TEST_FLAGS(**kwargs): - """Create a set of test flags by kwarg assignment. - - This constructs a named tuple that mimics the interface of absl.flags. - Any command line flags defined with defaults will be present in the output - with their default value unless overwritten. - - Returns: - namedtuple containing valid flag names. - """ - base_flags = FLAGS.flag_values_dict() - updated = dict(base_flags, **kwargs) - return namedtuple('params', updated.keys())(**updated) diff --git a/benchmarks/scripts/flags_test.py b/benchmarks/scripts/flags_test.py deleted file mode 100644 index 6383809c6..000000000 --- a/benchmarks/scripts/flags_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for benchmark command line flags.""" - -import tensorflow as tf -from benchmarks.scripts import flags - - -class FlagsTest(tf.test.TestCase): - """Test the flag and test-flag interface.""" - - def test_test_flags_defaults(self): - """Test default values in TEST_FLAGS conform to flag defaults.""" - params = flags.TEST_FLAGS() - assert params.n_runs == 1 - assert params.n_burn == 0 - assert params.n_iters == 1 - - def test_test_flags(self): - """Test that kwargs convert to attributes.""" - params = flags.TEST_FLAGS(garbage="garbage value", other_garbage=123) - assert params.garbage == "garbage value" - assert params.other_garbate == 123 - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/models/BUILD b/benchmarks/scripts/models/BUILD deleted file mode 100644 index e5bd16e60..000000000 --- a/benchmarks/scripts/models/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -py_binary( - name = "random_clifford_circuit", - srcs = ["random_clifford_circuit.py"], - python_version = "PY3", -) - -py_test( - name = "random_clifford_circuit_test", - srcs = ["random_clifford_circuit_test.py"], - python_version = "PY3", - deps = [ - ":random_clifford_circuit", - ], -) diff --git a/benchmarks/scripts/models/__init__.py b/benchmarks/scripts/models/__init__.py deleted file mode 100644 index bf5b48863..000000000 --- a/benchmarks/scripts/models/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== \ No newline at end of file diff --git a/benchmarks/scripts/models/random_clifford_circuit.py b/benchmarks/scripts/models/random_clifford_circuit.py deleted file mode 100644 index a08a667b5..000000000 --- a/benchmarks/scripts/models/random_clifford_circuit.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from typing import Iterable - -import numpy as np -import cirq - - -def random_clifford_circuit(qubits, n_moments, op_density, random_state=None): - """Generate a dense circuit using elements of C2. - - Each layer will consist of a random number of one- or two-qubit Clifford - gates acting on a random subset of qubits. - Args: - qubits: The sequence of GridQubits that the circuit should act on. 
- Because the qubits on which an operation acts are chosen randomly, - not all given qubits may be acted upon. - n_moments: The number of moments in the generated circuit. - op_density: the expected fraction of qubits acted on in each - moment in half-open interval [0, 1]. - random_state: Optional random state or random state seed. - - Returns: - Clifford circuit with randomly chosen and assigned gates. - """ - if random_state and not isinstance(random_state, - (np.random.RandomState, int)): - raise TypeError("Random state input must be a numpy RandomState or an " - "integer seed to a random state.") - - if not isinstance(qubits, Iterable) or not all( - isinstance(q, cirq.GridQubit) for q in qubits): - raise TypeError("Must provide an iterable of GridQubits.") - - n_qubits = len(qubits) - if n_qubits < 2: - raise ValueError("Must provide at least 2 qubits to circuit generator.") - - rng = np.random - if isinstance(random_state, np.random.RandomState): - rng = random_state - elif isinstance(random_state, int): - rng = np.random.RandomState(random_state) - - cliffords_1q = (cirq.X, cirq.Y, cirq.Z, cirq.H) - cliffords_2q = (cirq.CZ, cirq.CNOT, cirq.SWAP) - moments = [] - for _ in range(n_moments): - moment_ops = [] - n_layer_qubits = rng.binomial(n_qubits, op_density) - layer_qubits = list( - rng.choice(qubits, size=n_layer_qubits, replace=False)) - while any(layer_qubits): - sampler = cliffords_1q - if len(layer_qubits) > 1: - sampler += cliffords_2q - gate = rng.choice(sampler) - gate_qubits = [layer_qubits.pop() for _ in range(gate.num_qubits())] - moment_ops.append(gate(*gate_qubits)) - moments += moment_ops - return cirq.Circuit(*moments) diff --git a/benchmarks/scripts/models/random_clifford_circuit_test.py b/benchmarks/scripts/models/random_clifford_circuit_test.py deleted file mode 100644 index c6d968ea0..000000000 --- a/benchmarks/scripts/models/random_clifford_circuit_test.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from absl.testing import parameterized -import cirq -import numpy as np -import tensorflow as tf - -from random_clifford_circuit import random_clifford_circuit - - -class RandomCliffordCircuitTest(parameterized.TestCase, tf.test.TestCase): - """Test the Random Clifford Circuit model.""" - - def test_random_clifford_circuit_inputs(self): - """Test for input validation.""" - qubits = cirq.GridQubit.rect(3, 2) - n_moments = 10 - op_density = 0.9 - with self.assertRaisesRegex(TypeError, 'RandomState'): - random_clifford_circuit(qubits, - n_moments, - op_density, - random_state="string") - with self.assertRaisesRegex(TypeError, 'RandomState'): - random_clifford_circuit(qubits, - n_moments, - op_density, - random_state=[1, 2, 3]) - - with self.assertRaisesRegex(TypeError, 'iterable'): - random_clifford_circuit(cirq.GridQubit(0, 0), - n_moments, - op_density, - random_state=None) - with self.assertRaisesRegex(TypeError, 'iterable'): - random_clifford_circuit(cirq.LineQubit(0), - n_moments, - op_density, - random_state=None) - - with self.assertRaisesRegex(ValueError, '2 qubits'): - random_clifford_circuit([cirq.GridQubit(0, 0)], - n_moments, - op_density, - random_state=None) - - def test_reproducible_circuit(self): - """Test that circuits are reproducible via random state seeding.""" - qubits = cirq.GridQubit.rect(4, 2) - 
n_moments = 13 - op_density = 0.8 - rng = np.random.RandomState(4902796) - - c1 = cirq.Circuit(*random_clifford_circuit( - qubits, n_moments, op_density, random_state=rng)) - - rng = np.random.RandomState(4902796) - c2 = cirq.Circuit(*random_clifford_circuit( - qubits, n_moments, op_density, random_state=rng)) - self.assertEqual(c1, c2) - - def test_only_cliffords(self): - """Test that the circuit contains only Cliffords.""" - qubits = cirq.GridQubit.rect(4, 2) - n_moments = 10 - op_density = 0.9 - circuit = cirq.Circuit( - *random_clifford_circuit(qubits, n_moments, op_density)) - cliffords = set( - [cirq.X, cirq.Y, cirq.Z, cirq.H, cirq.CZ, cirq.CNOT, cirq.SWAP]) - non_id_gates = [op.gate for op in circuit.all_operations()] - self.assertTrue(set(non_id_gates).issubset(cliffords)) - - @parameterized.parameters([5, 7, 11, 20]) - def test_random_clifford_circuit_depth(self, n_moments): - """Test that the circuit has the number of moments requested.""" - qubits = cirq.GridQubit.rect(3, 2) - op_density = 0.9 - circuit = cirq.Circuit( - *random_clifford_circuit(qubits, n_moments, op_density)) - self.assertEqual(len(circuit), n_moments) - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/reports/.gitignore b/benchmarks/scripts/reports/.gitignore deleted file mode 100644 index 005717ead..000000000 --- a/benchmarks/scripts/reports/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/binary_classifier/binary_classifier.ipynb b/binary_classifier/binary_classifier.ipynb new file mode 100644 index 000000000..a2116d378 --- /dev/null +++ b/binary_classifier/binary_classifier.ipynb @@ -0,0 +1,410 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TFQ_Example_BinaryClassifier.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": 
"mFq2aRw_w3cL", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eOzjTj_JxBnv", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Lusn46uoyCcv", + "colab_type": "text" + }, + "source": [ + "# Binary classification of quantum states\n", + "\n", + "Author : Antonio J. Martinez\n", + "\n", + "Contributors : Masoud Mohseni\n", + "\n", + "Created : 2020-Feb-14\n", + "\n", + "Last updated : 2020-Feb-29" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O8hXbFbkv_D_", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/binary_classifier/binary_classifier.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2j5_tMNP12Mq", + "colab_type": "text" + }, + "source": [ + "An elementary learning task is [binary classification](https://en.wikipedia.org/wiki/Binary_classification), a supervised task in which the learner is to distinguish which of two classes a given datapoint has been drawn from. 
Here, using ideas from the paper [Universal discriminative quantum neural networks](https://arxiv.org/abs/1805.08654) in the one-qubit setting, we train a hybrid quantum-classical neural network to distinguish between quantum data sources." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IrWw_xv4fs44", + "colab_type": "text" + }, + "source": [ + "## Import dependencies" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1eVDbG_2ZhMe", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade cirq==0.7.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "rFqxhKypZoSJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0\n", + "!pip install qutip" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xcDb1zbSdXKi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "kW2sb1rAfhwt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import cirq\n", + "import numpy as np\n", + "import qutip\n", + "import random\n", + "import sympy\n", + "import tensorflow as tf\n", + "import tensorflow_quantum as tfq\n", + "\n", + "# visualization tools\n", + "%matplotlib inline\n", + "import matplotlib.pyplot as plt\n", + "from cirq.contrib.svg import SVGCircuit" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hd1mo09k1Dt3", + "colab_type": "text" + }, + "source": [ + "## Quantum dataset\n", + "For our quantum dataset, you will generate two blobs on the surface of the Bloch sphere. The task will be to learn a model to distinguish members of these blobs. 
To do this, you first select two axes in the X-Z plane of the block sphere, then select random points uniformly distributed around them:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FUEawr8o1C2g", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def generate_dataset(qubit, theta_a, theta_b, num_samples):\n", + " \"\"\"Generate a dataset of points on `qubit` near the two given angles; labels\n", + " for the two clusters use a one-hot encoding.\n", + " \"\"\"\n", + " q_data = []\n", + " bloch = {\"a\": [[], [], []], \"b\": [[], [], []]}\n", + " labels = []\n", + " blob_size = abs(theta_a - theta_b) / 5\n", + " for _ in range(num_samples):\n", + " coin = random.random()\n", + " spread_x = np.random.uniform(-blob_size, blob_size)\n", + " spread_y = np.random.uniform(-blob_size, blob_size)\n", + " if coin < 0.5:\n", + " label = [1, 0]\n", + " angle = theta_a + spread_y\n", + " source = \"a\"\n", + " else:\n", + " label = [0, 1]\n", + " angle = theta_b + spread_y\n", + " source = \"b\"\n", + " labels.append(label)\n", + " q_data.append(cirq.Circuit(cirq.ry(-angle)(qubit), cirq.rx(-spread_x)(qubit)))\n", + " bloch[source][0].append(np.cos(angle))\n", + " bloch[source][1].append(np.sin(angle)*np.sin(spread_x))\n", + " bloch[source][2].append(np.sin(angle)*np.cos(spread_x))\n", + " return tfq.convert_to_tensor(q_data), np.array(labels), bloch" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5Y4Mr9SiF_AG", + "colab_type": "text" + }, + "source": [ + "Generate the dataset:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "o8u98FpJGCoq", + "colab_type": "code", + "colab": {} + }, + "source": [ + "qubit = cirq.GridQubit(0, 0)\n", + "theta_a = 1\n", + "theta_b = 4\n", + "num_samples = 200\n", + "q_data, labels, bloch_p = generate_dataset(qubit, theta_a, theta_b, num_samples)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": 
"kFRlGAMoHMya", + "colab_type": "text" + }, + "source": [ + "View the data set on the Bloch sphere:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GC37TFXdHMB1", + "colab_type": "code", + "outputId": "01a50089-5f97-47ef-f468-09bab0e4f5d3", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 391 + } + }, + "source": [ + "bloch = qutip.Bloch()\n", + "bloch.sphere_alpha = 0.0\n", + "bloch.frame_alpha = 0.05\n", + "bloch.vector_color[0] = bloch.point_color[0] = \"#a4c2f4ff\"\n", + "bloch.vector_color[1] = bloch.point_color[1] = \"#ffab40ff\"\n", + "bloch.add_points(bloch_p[\"a\"])\n", + "bloch.add_points(bloch_p[\"b\"])\n", + "vec = [[np.cos(theta_a),0,np.sin(theta_a)]]\n", + "bloch.add_vectors(vec)\n", + "vec = [[np.cos(theta_b),0,np.sin(theta_b)]]\n", + "bloch.add_vectors(vec)\n", + "bloch.show()" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXYAAAF2CAYAAAB6XrNlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nOy9aXBc13nn/Tv39o7GvhALwZ0ARFIi\nxV1ktNqiJVq2HDmOU05syXamMp5MMpOq9633w3yYpWqq5st8SFJ2KjNVSd6ZjON6HcWiHEUWae20\nREkkRVLcwH0FCRI70NvdzvuhcRq3N6AbaIDb/VWhSHTfvkvj3v95znOeRUgp8fDw8PC4f9Du9Al4\neHh4eFQWT9g9PDw87jM8Yffw8PC4z/CE3cPDw+M+wxN2Dw8Pj/sMT9g9PDw87jN8M7zvxUJ6eHh4\n3J2IYm94FruHh4fHfYYn7B4eHh73GZ6we3h4eNxneMLu4eHhcZ/hCbuHh4fHfYYn7B4eHh73GZ6w\ne3h4eNxneMLu4eHhcZ/hCbuHh4fHfYYn7B4eHh73GZ6we3h4eNxneMLu4eHhcZ/hCbuHh4fHfYYn\n7B4eHh73GZ6we9x3/Kf/9J8QQnDp0qU57Wf//v0IIfjxj39cmRPz8FggPGH3eKC4du0aP/jBD2hv\nbycYDLJs2TL+/b//9wwPD+dtu2PHDpqbm9mzZ88dOFMPj9njCbvHA8P58+fZtGkTf/u3f8vWrVv5\nsz/7M1asWMGf//mf89hjjzE4OJi1vaZpvPDCC7z33nuMjo7eobP28CgfT9g9Hhj+zb/5N9y6dYu/\n+Iu/4LXXXuO//bf/xjvvvMOf/dmf0dvby3/4D/8h7zMvvvgipmny5ptv3oEz9vCYHZ6wezwQnD9/\nnr1797Js2TL++I//OOu9//yf/zNVVVX87//9v4nFYlnvPfvss4TDYc8d43FP4Qm7xwPBu+++C8Cu\nXbvQtOzbvrq6mp07dxKPxzlw4EDWe5FIhGeffZY33
3wT0zQX7Hw9POaCJ+weDwS9vb0AdHV1FXx/\n9erVAJw5cybvvRdffJHR0VHee++9eTs/D49K4gm7xwOBWvysra0t+L56fWRkJO+9F154AU3TPHeM\nxz2DJ+weHjPQ0tLC9u3bef311+/0qXh4lIQn7B4PBMoiLxa2qF6vq6sr+P7Y2Bg1NTXzc3IeHhXG\nE3aPB4Lu7m6gsA8d4OzZs0BhH/yFCxc4fvw4L7744vydoIdHBfGE3eOB4OmnnwZg7969OI6T9d74\n+Di/+c1viEQibN++Pe+zyrfuCbvHvYIn7B4PBCtXrmTXrl1cunQpr/bLf/yP/5FYLMZ3v/tdqqqq\n8j67Z88e2tra2LJly0KdrofHnPDd6RPw8FgofvKTn7Bjxw7+9E//lLfffpuHHnqITz75hHfffZeu\nri7+63/9r3mfGRwcZP/+/fzhH/4hQog7cNYeHuXjWeweDwwrV67k4MGDvPLKK3zyySf89//+3zl/\n/jz/7t/9Ow4cOEBjY2PeZ9544w1s2/bcMB73FJ7F7vFA0dnZyd/+7d+WvP2ePXuIRqM888wz83hW\nHh6VxbPYPTyKkEwmeeutt3juuecIBoN3+nQ8PErGE3YPjyIcPXqUVatW8Z3vfOdOn4qHR1l4rhgP\njyJs27aNI0eO3OnT8PAoG0/YPe47nnrqKaB4FqmHx/2OkFJO9/60b3p43K/Yto3jOAghEEKg6/qd\nPiUPj1yKxt96FruHRw62bWNZFkIIpJRIKRFC5NVx9/C4W/Esdo/7jmL39Az3OjAl6rqu4/f7ATAM\nA4BAIOAlKXncTRS9GT1h97ijKIvY/QNkXCDqR21b7N/c12Z7LoZhoGlaRtTV66ZpIoQgEAhknZ/6\nf+6/Qggcx2F0dJSBgQEGBwdxHIf6+nrq6+upq6sjFArN+lw9PPCE3eNuQEqJ4zg4jpMlxu731b9q\nW7crxP2jaVqe8Of+qyhmZbtfl1JiWRa2bWcsc/f5WZaFZVkZYXefr+M43Lx5MyPgQ0NDDA4OMjIy\nQlVVFY2NjTQ0NKDrOiMjI4yMjDA8PIzP58uIfH19PStWrGD58uXerMCjVDxh97hzSCkzi5FKpN3v\nlbsvha7raJpWkYVNZa3ruo7Pl7/0pN73+XyZ4yUSCY4cOcJnn32GEIK2tjYaGxszP3V1dfj9/rxr\nVINGIpFgdHSU0dFRhoeHOXHiBLqus2PHDtauXev59D1mwhN2j4VHCbplWTiOk7Gy3eRa4rlWeLH9\nOo6DbdtZC5u6rs/a2s211guhfO23bt3i4MGDnD59mq6uLjZv3kxnZ+eM51zsx82FCxc4cOAAo6Oj\nPPbYYzz66KNZswQPDxeesHssDErMTdPEtm2ATLigEvZCbpTZogRe1VhXAl+OtTuTtQ5pUT969CgH\nDx4klUqxZcsWNmzYULDMrzqv6QTcfWy360l97ubNmxw8eJCrV6/y6KOPsm3bNmpqajw3jYcbT9g9\n5g+3KJmmmQkV9Pv9GZEtZK3PlfGEQ/+ITSQoWFSnIV3CqAaTUtw0alZRyFqPxWJ88MEHHDt2jM7O\nTjZs2EBXV1dmAMgVb3V8xWwGMLUv27YZGBjgs88+4/z583z9619n8eLFGXeQe4D0eCDxhN2jsigh\ndwuZco34fL55Dw08dc3g6GUTACEgGhTsWh/G7xNZVrymafh8vmnPxTCMzECkkFJy/Phx3nrrLdau\nXcuOHTuora0llUoB6ZlBriVebHF3rkgpOXXqFL/85S/ZtWsXq1atynNBqdmGJ/IPFF6Cksfccfu1\nlaApa9xxnCyBAYinHE5eMxmOObTV6axu0/HrIjMg5OJ2nxT7P0DSkBy5ZOK4zI6xhKS3z2TdkkDm\nnGzbzriFilnv6lrc742NjfHGG28wPDzM7/3e79HR0YHjOBiGgWmaSCkJBoOZWUGl3ErFEEKwZs0a\n6uvr+dnPfsbEx
ATbt2/PfI+WZWXCNNWg6i28Pth4FrvHtBSKaMl1rRiGkbc4Gk/ZvHU0hWmBRKIB\n0TA81TN7l4wSq/5Rh88uOJg2wKSgCkFLjc6uDZG883cv3uZate5FU4DPP/+ct99+m02bNrFz587M\noKWOr9wtStgXmrGxMf7hH/6BtrY2vvrVr6LrembAda9rqAQrz4q/r/FcMR6l4/aZF1qUVFaiaZqk\nUqmMxeu2Es/elJy+4eDIqdhynybY2e2nrcGf2dYtnEBJ/x+P2+z9YtJiVwlNwIoWwSNLfRkBVwOQ\n23pXriJloSs3zMTEBL/85S9JJBK88MILNDU1Zc7b7c9W6wh+v/+OWcWGYfCP//iP2LbNt771raxE\nJ3V+pmlmBjO/339Hz9dj3vCE3WNmlJDnRrPAVFEsZf0qS15N/ZX7RYnHp+dMzvdbWfvXNdiyMsDK\nVj9z5cCZFJdvW1gOCCR+TbJrfQC/Zue5egot3ipxT6VSHD58mP3797N9+3a2bt2ateCbK4aF4tnv\nBI7j8NZbb3Hx4kVeeeUVIpHCMxU1AAN5fyuPex5P2D2K415sdIufW8hhSiB1XUdKmbEGC031rw5Y\nfNSbwnK50nUBL2wOEw3N3XKUUnJ9yOb6kE00JFjV6ifozz53949yubhdSwMDA+zbt49gMMju3btp\namoqKVSy0GKrolg27Hzxq1/9ipGREb797W8XPVauFe/3+wkGg54Ff+/jCbtHPm5BV6IHZCx2SFt5\nyq2hLD0lktNN76WUHDhjcHnAQhNgO/Docj89HfOTbJNbjqDY747jkEqluHr1Knv27MG2bf7wD/+Q\nUCiU8UnPZNGqBdTcxKFCz1Ju/Zhc0Z+r8FuWxd/8zd+wYcMGtm7dOu22arbhLmrmuWjuabyoGI8p\ncgXdLeru6XohgVOfmcmyFULwWHeQdUv8jCccGqI6ocDcRGy6zM1iouout6u2v3r1Km+88Qbf+MY3\nePXVV6mpqcm4l9QMRV1/oe9A+ewLZYQWG1SKJSoVE/5SRd/n8/HNb36Tv/mbv2HJkiW0trYW3VYI\nQTAYxO/3k0qlMAwj405Tg7fH/YEn7A8QSpSVxW3bdkb4gsHgjA+3Ej639V6IwXGbI5cMYklJe4PO\nI0sDBHzli3pu0o/bb16OJaxcMVJKjhw5wv79+/mDP/gDmpubAYhEIlhWej1A07RMkpU7hNAt8u5q\nk8VKJEx3Te5ry51NuIW/UKZuIRobG9m1axevvvoq/+pf/asZSxBomkYoFMr4390LrZ7A3x94wv4A\n4E7zV24E5SMuZzFNiV+h7S1bcmXAYnDc4dxNKxNjfvaGxa1Rm+cfDc9ogeaKeK7IlZttmfYtWyRS\nNkG/4De/2c+pU6f44Q9/SHV1dWagULHoSvzD4XDmd/WjRF6JX6GCZqUwk+89dzBTA7D6DooJ/fr1\n67lw4QK/+tWv+PrXv17SeSg3jJq9KV+8ivn3wiTvXTxhv89xHIdkMpkX/lZudIRyURRaLE2Zkl8d\nSZA0ZNZiKYAjYTwhGZxwaKrOjiIpJuRKtJS4lGtBqpnF5VspDp43kSId620MwQ9+8AOqqqoyC6AK\nJdjKis210t0in0gkMlZ9MBisaJGuYu6jUoR+9+7d/I//8T84fvw469atK+l4arB0Rzup/5daksHj\n7sMT9vsUKSWpVIpEIpER5HA4PKusxJn86r19JvGUzMoEdSNEWvxzRbxSQq5wC99EUvLZBQcbPwIB\nmk644zEswgWzTYHMoKdmNu6FxVyRj8Vi2LZNMpnEMIzMQFlpN0a5Qv+Nb3yDf/iHf6C9vZ2GhoaS\nj+H3+zNuOoV7vcGz3u8tPGG/DzEMIyM8Pp+P6urqOYmOKupVzHobGLOLiroSn/qwD9Ocio/PtTbn\nQq7A6bpO/5gDQkuL+iSOA1cGLdaE0tdR6PtQIue23HOv2+fzEQ6H0TSNWNLh80t
JhmIxmqsFazuD\nVFdNH0oYTzlcHbTRBXQ2+bLCNGdiJqFvaWlh27ZtvPnmm/zu7/5uWZUu3S4p9Xuue8bj3sAT9vsI\nx3GIxWKkUik0TaO6uppgMFhwW8uWnO83uT3q0FCtsbrVj7/AAqcSjGLx6gBNNTr9o05G3KWUIB00\nkRavHV1B/L6phJ+5CHnCcOgfcYgEBc01WsZ1oCxwJU6p5DCWJdD0qVhzIcCvi4xvvNh5CCEybhm1\n71y3lRCCpOGw9wsT09JxpGAiadM3nOTLaw1CoWDB2VHfkMUHp1Ig0+dz+KLBs+vD1FfNbtAtJPSP\nPfYYf/EXf8HNmzdpaWnJy56dDuWSUmsxPp8vY8kX+h487k68v9J9gmVZTExMYFkWkUgkLxPRjeNI\n9h5NMJaQ2A5cG7I5d8Ni98YwPj37wVfW+nRWX0+Hnwv9JsmUjWk76EKyrFljdXuEuiodn68yC3Hn\nbpp8ds5AE4CAaMDhqTV+/D4ty20yODjIu2/8lLZNL+N2+esaLGv24TjmjFas23JXMwG3qAkhuHjb\nwrLTyR5CaCA0LCQDMUGLZmQyVJWbRkrJx2cNbHVSkwPhZ+dS7FofnvP3o84rEAiwY8cOPvnkE156\n6aUsF0sp9erdLil13bklGTzXzN2NJ+z3OI7jkEgkSCaT6LpObW1twYxIN9eHbMYnRR3SyUNxIx3V\nsmLR1GeVtT6dleY4DhoOux72cW0Q4imN1no/i+orW7Y3ZUoOnjdwJNiORDomowac69d5ZNlUQa6J\niQn+/u//nqeffIIV3VEOXUgxNOFQX6WxaUWQoB8Mo/RoFnXtKjRUuSOEECSMAusKUmCLANGojmEY\nWJZFPB5P70fzkzLzfVbDsfxKl3Nl8+bN/Pmf/znDw8M0NjZmZdyqxtzTdZ1yi7tlWZkZm3JRTTeD\n87jzeMJ+D6MiNNTiXSQSKckPOp50sHP0xXZgLJEtMLlipihU8THg11nVPn9ZjEMTDpoAy7GRTtqC\ndjQf/WNTLpVUKsVPf/pTNmzYwKOPPgrAM+uyLWH3QmMh4imHz84b9I/YhAKCjcsDLG70ZVw+7tlL\nW53gwq2pARIAAe31eiZW3F1W1zbj6NLBdHSENvWd1oYr/50FAgG2bt3K/v37efHFFzPuJZhaDFc/\n7ro400UK5Yq7F/N+9+L9Ve5BVAhjLBbDNE2qqqqoqqoqeXGrpUZPuzNc+DRoqZ36vIprVvt0x8Kr\njMXcWPj5fMirgmBZZlrUhUDo6UGkLpI+pm3b/PznP6etrY0nnnii6H6m869LKdl3LMn1QRvTTodp\n7j+d4taonbk+FTYqhKClRqO73YcmwK+nXT0bl/updgm1pmkEAgGi0SjhcJgNSzU0aSMtAyFtdCHZ\nvHJ+yixs3bqV3t5eRkZGsl7PDXlVg5ayzt2JYKq+vhqg1GeBzHfhcffhWez3GJZlkUwmM8IajUbL\nnhY3VmusWOTjQr+FIO3qXdyo01Y3Jexq/0KIrAdYWX7z0equGLZtE/LZdNRr3BgBm/TApGuwptOP\nlJJf/vKX6LrOV7/61RkzP4u9PzDukDRlVoEk20mHc7bUhjKLipZlZQa89Uv9PNQRYDzpUBvRps2w\nDQQCdC8J0FxncKE/CdJhSaOgJmQDlY84CYfDbNy4kY8++ojdu3fnva9mYyr6xW3Ju99TRd/caw2F\n3DQedw+esN8jqA4+KgMyEAhgOYLLgxKBxeJGX8m1WIQQbF0VpLvNz1DMoS4iqI9mW+uWZRFLSQ5f\njDM0LomEdDavitDesHC3TG6TjMfXRrky4HBt0KIqJOhu9xMJarzzzjsMDAzwve99b8ZZg3sWkovt\nFK6qZE3WRFMzFCVoKtQwFNAIBUoX5oaaAA01gYyLRsXCh0KhikedbN++nR//+Mc88cQTRKPRotu5\nK3fmlp5wx/C7RT835t8T97sHT9jvARzHIR6
P4zhOZvo8OG7y3gkDhI4EDl4weHptiEV1pQtMbZVG\nbU6Y3VSmqsU7x21MWwdNZzwFH5xK8ZUN2qxD88rBXd/FHUu+rEVjWcvUbXvw4EFOnDjBD37wgxkz\nQN3lhwvRXKPluah0DVa3ZUfDKMtd+ZlnixJMNQuLx+MEAoGKtraLRqM88sgjfPzxxzz77LMzbu+2\n1N2tBXOre6q1BvcsZqZFe4+Fw/Ox3+UoUYd0sSqVIXj4goUldSwnbWnaDnx8JlWwyqFCSsnwhM3g\nuJ23nYqWUP7z4aQfR/hBc1vycLbPnJ8LdeFuDhEIBIpa2L29vbz//vv8/u//PlVVVTPu153pWghd\nE3zpkTA1YYEQ6XWHh5f4WdyYLd7KzyylzJznXPD5fEQiEQKBAIZhEI/HM6V1K8GOHTv4/PPPM424\nS0XX9Sw/vGEYme/QHT7p9sF73B14FvtdTK6oCyEyrpjRZP6YHE+lIzR8BXQwYTj8+osk8aQEAQGf\n4MsPh9CF5PS1JEMTFq31OisXqTZqOppwsF37kJBXC6aS5NYpmc4avnbtGq+//jrf+c53Sk6dnykx\nCaC+SuNrmyMYlsSng1ZkWyXuStDm6kJRUTSBQIBkMkkymcSyrIp0PKqtrWXx4sWcPXu25BoyblTc\nuzuWXUXTqNmU+zUvUubO4wn7XYqankNa1FWhJkhbeNUhk5F4ttUd8KddB4X49JzBeHxqYdC0HN47\nPk4i5aQbTgud2xPpZKVn1vpoa/AhybYadQ1WLpqfW0ZZvyrLdTpxGBwc5Gc/+xkvvvgiHR0dJR8j\nt0PUdJRSZljFgBdKYJotmqYRiUQy6ymVcs/09PRw+vTpWQk7TLmgdF3PKoYWDoczryvfvCfsdx7v\nL3AXUkjUlUWkxGTLqiC6lk5LF0z1Ex2LS94/meTVAzH2fBrj2KUUpiW5MWyT7v0scWwTaZuMTDhY\njo7U/AgtnRY/GpcMTgj8uuBL60JEQ2IyFR82Lg+U5cMvFeUGAmYUdZWA9Mwzz9DV1VXyMdRCZyVF\np5A/ulKovIRKuWe6uro4d+7cnN0lagFZ1cpRC7+qpENuv1mPO4Nnsd9lFBJ1yE8WaqnVeWFTmAv9\nFo4jWdbiRxPw5ueJLHfJF1ctLg9Y+HWJaVqg/MyajhAaUoisSBAJxCb1o6lG5+ubw5g207ol5oI7\nE3KmyAopJb/4xS9Yu3YtGzduLOs4M/nXZ4MQIlM5s1ACUzlMJB0sG2ojU64i5Z7x+XyZ6BnLsgiF\nQmUfIxqNsmjRIi5evMjq1avLPr9cdF0nFApl1mTcg6Zntd95PGG/i1Dp52o67i7s5LbWFdGQxiNL\npyJBPjmbyvOBSykZj1msWKRxoV/iCA2Ejk8XdDTo9A3ZWZ+REha5EpWEEATm6S5RIXXKVzud6Jq2\n5N2PT2D7m3jyqafLPtZ8CLub3OiQUo9j2pL3TyQZGHcQQMAveGZdiNrIlDCq6BnlnpmYmJiVe6a7\nu5vTp09XRNhhKnlJibo7TFKFTnrcGbxh9S6hmKhD8dT+XOKpbJ+7dCykbeI4kuqInyfWRWlrCLCo\nVmf76iA7ugO01evomrLIYe1iPStzcr5Qflq1CDmdEA6M27z6cYx+s51w5+O8fjDJRLK86b7yr1da\n2N0DhhK5clwyRy4a3B5zsJ30wnQ8lRZ6y3Y4cdXgzc8TfHgqyfCEXdA9U45rpaenh97e3oq6StQ1\nq7WRQCCQCZn1XDJ3Ds9ivwtwF4oqNM0uVZSWNOncHLGxHQfpWJNlBzU0Xae9MUh9lcbixuxY4yfW\nhBiOOYxOWNSGHWqj85Perign8kVtv/9UElsKNF8wE9756dkUzzxcekXE2bSxm4nc/SkrtdSBGODq\nYH4t+1hK8u7xFIMTacEfmkgXbvvKhnR5X+Wecce+h0KhGY/V0NBAVVUV169fp7Ozs6xrLYa7xLHK\nOVAuGlX
H3Sv1u/B4FvsdRllexUS9nEW/5Yt8RIM20k6LutB8aLqPziZ92qSi+iqNjgZBVWh+Q9Xc\nou7OZpwO05LEcqxzCdweL90anI+F09z9K1S5BeV3nolgga9ASjKirrAdOHF1avHU5/MRjUazrPdS\nLOSenh5OnTo143blkNszVom5ihhS0U4eC4cn7HcQtVBaTNShdN+wKjkwHrfTnYN0P2Jyf8MT0z9U\nqgPPfIu6qjnj9/tL9r+ePPFFevaRQ1WwdOt7vv3rubgzNGfikaWBrBBVXYO2+vwMWMh3tQGEQiEi\nkUgm52GmY6qwx0oLrXLJuENKVdkB99/eY2HwhP0OofyQKvKhmKi6i28VQ2Vq2jZI0la6e/tEgRrg\nbtRDPl/CXk44o5vBwUH27dtLT5uTET8V2rlpRekuo/kS9pk6MJWSjdnZ5OPxh4I012jUVQk2LPOz\nsydIru7qGixtKjwYqsxVTdOIx+OZqKpCtLa24jgOt2/fnv7iykQlJrmLx7mbp6ticl526sLgOb/u\nEGpxKXehNBflQigkIko4bMfB7/MRCOjURm1GXYlLAmidIfZcWVnzIezlhDO6sW2bV199lSeffJLN\n3Y10LrI5329larc0REuPuCgl43QuFPLfu/3tM2VjdjT46Mgprvb4miAfnkohAEema7yvbitei0Ut\nuqu4csdxChoMQoiM1d7S0lL+xU6DrutZZY3dA6q7G5XXhWn+8YT9DqA668ylmp9lWVy5neLQBZOU\npVMTcdjeFeS3ekL8+otExj8b8gu2zFDvu5yMzHIoJ5wxl7fffpuamhq2bNkCwKI6fdbJUfOxcFoK\nKmFnNqVt2+t9/M52naEJh3BAEA2VNugqMVcLq4XusaVLl3LkyJGyrqUUlAFSLCpIrT94jTrmH0/Y\nFxjlglG9MEvB7Q9VQjEatzlw1sLBh9AE40nJu8eTvLg1wktbI9wec9A0aKrOt/ZNK11aIOAT87aw\nWKgxQ6mcO3eOEydO8Ed/9EcVEeT5Wjid6dyUpaoG8nK/B10TNNeUP5ip+jLxeDwj7u57rbW1lRs3\nbpS931JQJQeKzZDU/aDE3YuamR+8b3SBUbHqpYSnAVlTWjWVFUJwbQgQvrz64dcGLVa1+gtat6Yt\n+eh0ir7htEXVXKOxszuARvn+dcuWnLxmcm3QJhoSPLw0kIm8Ue4Xd6nXUpmYmGDPnj1885vfnLYh\nd6mogWs+LfbpFiLd4YAqyWwhUK6ZsYkE127HqQmbNNSlywDU1dVhmiaxWKykqpjlHhfI1HEvhOea\nmX88YV9ASvWr52LbdlY9Dl1PJxUhHMjRlOkejYPnDfqGp+Kmb405fHzGYGdX+d2Q3jmeZGg83Tt1\nOAZ9wwme3xCmOjxVr7vch1VKyWuvvcbGjRtZtmxZWeeTux+F6suqIjYKbVcoWqPQeZf7HfWP2py7\nkV5fWN4saaiafcmB2XDxts1n50BIDcsyWNxosqOnCr/fn7HaV61aVdFjqvh9wzBmHMQ818z84Qn7\nAqG65ZRbhlVZvyp0TN34yxf5OHndzGpKLQR5tcPdXLltZSXDSAk3RxykLE/YhydshiayG2I7Dhy/\narBlRfr8ZtNR58CBAxiGwZNPPpkRXfe/hV6bupbCVrPy80NxYZ5NTXW1L7VQ6R7EhBBcuGVx6IKV\nWeu4fFuyZYVgSZMo2QU3FxKGw6dnjcm/t47UBNeHLM5dj7GyPUJrays3b96suLADWX1yZyLXNeNu\nquIxezxhXwDcfvVSXTAw5XpRERZua6Y6rPHU2hCfnUsxkZTUVWlsWx0g6C8uppoG5BinQoAoFDQ9\nDUlTogmyarU7UjIRTwHhkop5uX8AxsbG+OCDD3j55ZenFVq377aQdVfMr+sW03hKMhxzqJmsxxII\nBAqKUDFhcr+ecT04knM3La4OOoQDcGuMrAQjRwqOXXZYVJ3KDATu63EPC
pWgfyS9xuJk2vppOPi5\nOW7TkUzS2NjIhQsXKnKsQqhwx1K3dbccBDxxnyOesC8AKq64VFF3Z2iqZJ5CvTpb63S+trl0P/RD\nHX6OXzUzgqNrsKxJoJc5/W2q0XMsf4mGSWdTICPqueKtfmzbYTwJ4cBUzXMhBPv27WPTpk00NTVl\nXlP/zkX0ctvhfXHF4PgVE11LhxG2PPRc1vuz2b+mafym1+LGyGS2aKzwtklbQ9dFxlVVCLfQ5/6U\nQyQo8tx0uiaoi4YIhQQNDQ385je/mZfENHfBunL274l75fCEfZ5xhzaWcoO7G064GxuoRaa5WHRr\nO/1oGvT2WUgJq1p1Vi8qfxak6EEAACAASURBVJ9+XfD4Q0H2n06BlFiWSVu9xspWf1aTZzdCCG6M\nSD45ZyNlWlR7OnxsWBbg/Pnz9Pf389JLL1U8QsI9II7GHU5cNXHklCUbbe7i+pBNQ1Tj6qCNrkFn\no2/amU8uE0nJjRE7y0IvRFO1Rjgcyorrz3U1udcD3N9huYLfXKNRE9EYiTk4ciqxq7vdTyCgsXjx\nYsbHxxkZGaGurq6i4q5mJO6uSqXiiXtl8IR9HlEuGFVitZTt1Q3t9qerjL5CVns5CCFYszjAmsXp\nc3HHmZeCW3Caow4vbNAYGDMJ+TRqqgKZhVsVz+z+SZmSj8/Gs8TvTJ9FYxTefPNNnn/++Yo3Q87N\nOO0fsfOsWM0X4NwNk5uj6YVoIeDwBYNn14dLbtqdMPJdU5AWU5+ePqRPg+1dwcziopqRqe++kEAX\nmvHkCr76rPrO3d/9lx8JcfqaSd+wTW1EY22nn0hwyv3U0tJCf39/pmpkJcRdnZ+6T9VAX47x4In7\n3PGEfR5RHW9mI+ruB0E9rHMV9lymy8gsJCa5QqkJaK7xZRpOT/fw3hi2yX3bcuDgqZu0tLRUrEZ4\n7jW4z7cqJBAaWQrs2Cb9Y/6pAWdSMw+eS/Hs+tKqRzZERe54ga7B2s50hqwQ0Fqro2lTNVTUoDrd\nekQpf5tCgu8eUNd2+li3pPAx2tvbGRkZobOzk3g8XhFxz3V9qdfKvW89cZ8bXmzRPKGKcpXSDEFV\nwJsu7X4+2o65LSk1bVbRCarsqrK4VEy6mn0o61BlNs5kkRVybQgkt25c5Stf+UrFrin3+mBK2Nvq\ndaIhkak7owlwrFRBF8pwrPTvWdcETzwUxKenWwjqGiyq1VizOEBNWONCv8UbhxMcOJMinkrvdza1\n2xXqu1fJPaoOuoq4crdSVH9L5RJ0V51sa2ujv78/ky9Qbn33XHIbwrhrxswGNWtV5+1ROp7FPk+U\naq27I1+mE8hSrbxScRyHC/0m5/slQhh0tWksbpiaxqsonEJWo6oHUk7c8aI6jbBfEHNkZuFVOjbd\n7QHq6urmdC3FyJ2RaELwlfVhevtM+kcdGqKCX/70/6Xn6T/GyNGzmkjpNo+UkrZ6P9/cpjM4WQKg\nJqyRNCW/OpJINwsHxhMWfcMWX98cwadP1ZKZabZTKurv5j4vt0Wfa9U3NTXx6aefIoQgEolkCojN\nttSFMk7c56DciLPFs9xnhyfs80Ap1nq5DSdgqv3adFl903H+psGxyyYpC8J+iBkSKdOCcuCcw0Md\ngo0rgtPuw11PvZyHTBOCXRvCHLtkcGPERnfiXD2+l9975dtlX0epFPLt+nTB2s4Aayf7TOwxE2xd\nFeDjMwaOk/axa4IZ6+sUwqeLrLaCF/tNLJdbXwKmDdcGbZa1+PJqyVQa96Dmji1XAt/U1MTAwACp\nVCoTEqpqzEQikbLuMTUTKORGVOtDs3XzeOJePp6wzwMzWeuFIl9KoZyKgeo46kG+PmTx2fmpyI2J\nFOTmqZ66bvPwEonfV9h6dKfFz+bhCvkFW1cHMQyDn/zk73nxxRfn9SEttUbM0mY/dVU6l2+nq0cu\nb/FlFhlnYjpLO2nKvO5IjpN+XX220
ELqfOK2qJUbxzRNQqFQRpiTySRjY2NUVVVl3G7ToYyUQvek\n+uxcr88T9/LwhL3CzGStK1GH8mqTK9x1vgu5ZNzTbnct97M3nRnD8QD6hm2WNuffFqpRcSWKNn34\n4Yd0dnayfPnyOe2nGLGkw4GzKW4OJgkGdTauCLNi0fQWcW0kuzF4Jehs9NHbZ2V970JAR8OUKFXa\nxVYu1dXVxONxqqurgamFzng8zvj4OMFgMBOqW6x8tLuERC5qPaASjT08cS8db/G0whiGkZfpqJht\nw4lc1MKbWthUQu9eJFMhZ2phTU5bRSaNYDI7NQe3RTZXUR8YGODQoUPs2rVrTvspuv8xi38+lODG\nZE2cpAGfnjO4Obzwi29NNToPL/FnmoXrGmxeGchrFj6XhdS5Ul1dzcTEROZ3de/W1NQQCoWwLItU\nKpV3fymjwb3eMl10T6EwzdngLaiWhmexV5DprPXZNpwohBLYVCqVKbZ0a0zy+WWHWArqIoLtXcGs\nZhRdbX4GxwtHgCj8vnRDBzfuGcZcRV1Kyb/8y7/wxBNPZCzESnL5tslHvZP1UZSICIHtQG+fSWv9\nwlt4azsDrG7zM5F0qAlr+PTCJQ8qvZBaKtFoNEvY3ecUjUazImXUbNG27awaPMFgcFojxd0DthLX\n5rbcF7Ko2r2E941UkGLWuruM7VxFXe1L3dRSSsYTDr85YzORTOvZcEzy62NJUq6WeEua0tajT09b\n5vVVgs3LBdGQCs8TPLchjJ5TN6aSZVVPnDhBPB5n69atc9pPMQ5fMF0+bfWfSR/vPPZSnskSDfgE\nDVG9oKgr3A2hF5JoNMr4+HjR9yORCIFAgFQqhWmamRkgTC1Oq7DKYmGN7jyMSqHuRzU79cjGs9gr\nRDFrXbkxlA9yNuKo3C1uq0fX9UzxqtPnY9iWDdrU/qVM12Zf2Zr2LaeTVQKsWeyfTKlPPxDdi/Nd\nRlJKhiYcEimL+ogkFJy920iRSqXYu3cv3/rWt+bNwoob+dUehRBoArra7+5b3b2QupB126PRKGNj\nY9Nuo2ocqaAAdW7hcDgr6kXNSNUMxH2vV1rY1cx3tk1M7nfu7rv9HqKQtZ67UFquqCsxd2fz5caO\npx8kH0gL6VjZ4l5gn0IIdJGulVLofAxL8utjCcbidnp/QvD0w35aZxFqnjAkN0dsgj44deQ3rFix\ngs7OzvJ3VCINUY2hCVcK6eT1bVzuz+spejeiwh8XUtirq6vp6+ubcTsVNROLxfD5fESj0azoGiDL\nTeOO3FI5EbMpLzAdylha6MHwXuDuv9vvAQpZ67MVdbWIphablCU3ne91ZVuQ3j4r/eDYJmg+0LRp\na7MXCwU8dtlgJOZgWxYIEJqPD08l+eb2CFoZD+Tl2yYfnzEyS7ZJq5vnds69I9J0PNYVZN+xBI6c\ndBNoGrs2hKgvo/F1uSh3WKVQDaEXSqiK+dhzcedbuDNM3Sghd88w3V2/1OuVvC5d17NmxZ6/PY0n\n7BVA+UXdvkdlnZQq6uoz4wmb41cdhmKSpmqdDcv9RAPT36y1EY3H14T55GyKeNKgKmizfXWA0DQV\nCouJ0fUhG9uebEwxaf07DozHJbVVpQm7ZUsOnDGyFmp9oRqujAZY1FzSLmZFXZXGb2+N0DdsYRnQ\n0RQkGChfRBJGugrkwLiTKQ1QTrXHuaDEUVm8872QOpOPHaaKxQkhqK6uJplMkkwmi0ZJuY0R96xT\n+eFLrXRaKnNpGn6/4gl7BTAMI8tF4q6lXkoSkdretOHtE3YmvT2Wsrk5kuDrWyKZ2uXFWNzoY3Gj\nD0dGcCanwoZhFI11h8KumIjfZkxKhKYjxGQPUwmhQOkPy1jCye/RJzRujsx/eJpPFyxu0DFNH35f\n9ndv2ZIjlwyuDNgEdFi3JN8va1qSfzmcIGVNLkRPOFy+bfO1zfkLy/OFstorbd0WIjfc0Y373nSX\nv
FDlB0rJUHVb8Wp/KpAgt3nMbPH87fl485Y5ohKB1M3tTrmf7qZVrhp3L9O+EZEXjmg56ZZ2paJN\n+h3Vza2m9YXIFXbHcVi7WEfXNTQtLSi6Bitby6tPXhXUKLROVltG/ZW5UGzg+uBUkrM3LBKGZDQh\nOXDWoKopuzXcxdsWpj0VLelISJmSqwOVGZQGx20+PJXk7WMJLvSbBWdObqt9viM+gsFgxpXoxm1h\nq/vJ/X0qq1v18Z0J5Q9XhcrU/T9dNE05qP2rGcKDjmexzxF3jK/yJ05XJkD5J5Xf0e0/NywjTxAd\nB1JW+Q+3u5ekuyZNsSxAZU011fh4flOIU9dMkqZkeYuPZc3lWY1Bv6Cn3UfvDRPbEQgkui5YX+HM\nznKIpxz6R5yssEfbgfql2/K2yx1cHZleCC6V/hGbz86nGE9I6qMa21YFqI/q3Bq1eed4MrP/2+MG\ng+MOW1bl1+dZKKtdCJHxszc0NGRZ6UKIoiUFNE3Ls9xnsr6Vn10NXEqEK2XBu10yD7q//cG98gqh\nMjLdC0qFHkT1wKjmxyor1B0C2dHoy8v81DRmHdGhpqjFLCT3A+te6K2v0tjRHeSZdSGWt8wuRHPD\n8gDhieMEzX7WLPbz1Y1haktsXDFXCtWZt5xMkEwWmi97sGmv92XK+iqEoOTkpvGEw7snkozG03Vi\nBscd9h1LYlhpN5B70LAdOHfTwigwcC+k1a787Or+VHkLM9WJUWWbVUOZmXCXiFZGTaH7cy7X68W3\np/GEfQ4o60DFHxdaTHILuhL+XEFX1FdpbFyenYK+cUWAujkKYu4D5C47AFNdbiqRhKQYGxvj9JH3\neX7bIjYsDxINLdytViikrjokCOesE+gajN04nvVac41Gd7sPTaRrq2sCHl7iL7mb0oV+K2/WpXIK\n4ql8oRFiqihYLirio5Lx37lIKQkGg0xMTGTdn6XOElSDdsuyZhR3t7C7Xyt0f85W4JVLRj13Dyqe\nK2YOuGtmKNdH7vvK3zdTyKKiqz3AikV+JpKSaEhMm61YDu4a61LKTCahShwpp7Z6KXz44Yds2rSJ\nqqqqiu2zVAoJuxCCZ9aFeP9kkvFEWjCWNvvoffsgsDtru0eXB+np8DMal9RFtLIWjp0iYtQ/ahd0\n5/h1iIYK73++I2TcLkEpZUmVHAsRCAQyfvpidZJg+kqY7vtTRdGochnllllQBtaDHN/uCfsccLtV\n3Ja6suTVomi5N6ZPF9SVGFpYLm6LBiCZTGbFJ1dCPIaHhzl58iT/9t/+WwBOXzf4/KJJOCD40iNB\nqkPz+6AVi9GvDmu8sClCwnDwaQK/T7C3iBCHAxrhWSwJLG/x51V0lBIu37bzyhr4dHhiTWja/ACf\nz5eZ7VWi0XduFrM76W0uf3uVnTpdGCTMHPfvvj/VwDMbw+NB97c/WFdbQWzbJplMZkUMuMO5gKIu\nl7sBdbMHAgH8fn8mPLISUQoffPABmzdvzrRcO3E1XcMllpK8cSjJxVvzN0WeLpRTEQ5oRWvOz5W6\nKo0dXQFCk5O3aEiwuFHPW5DVBGxc5qe5ZvpBTv2d5lod0V0BVImd3+/PuF0q4e5RnZema7FXakKX\nEnh3dFe5fnP17D2IUTKexT5LlD8xHA5n3BlzsdIXGnWzqwfbnfHqbnFWriU3NDREb28vf/Inf5J5\nzacLmPQj2w581Jvi+qDFllXBiif+lCLs0312aMJB0wR1kcKNpEthSbOfziYfjkz3Qz1yKYUQUyGU\nkBZ2n6+wXeVIyc3hdD5DW71OYA5Wu/qbqiiXQvemGjgqQSgUyrTYKxQpU27NGGV8KLdKOdb7nWhk\ncrfgCfssUCFayiJ3p03Ppc76QqFEPG7qfH7JYGjCIRoSbF4ZZFFdIPMAuaexpXRsAnj//ffZtm0b\n4XA481p1SDCRzLa0Lg/Y3BpL8FhXkLYKltMtR9jd1t9YwuHtL9L
RK0ioCgm+/HC4LP+6G1WTB2Dl\nIj+nr1vYbmHXYHFj/nUnDMlbRxKZEFcp4fGHgiyq0bPKTEyHu9GK2n46d0slhX2mMEhlsZfr9lMh\nxCpyZqYewQp3I5OZ+g/fT4gZpjYPbrxQEaSUJJNJEolEZup5p630sbjD9SGbgA+WNPlmdDMkEgmS\nKYN9J3VMe2pbXYPdG8PUTDaCUOKgrPuZRP727dv83d/9HX/6p39KMDgVm/3BySRXB4tPh7vbfWxY\nFii6UKy+84mJiayfZDKZidNXg6u7EYRFAK1hPSLchJMaZfzqAZKjfVnbQ/rh79j0+/irmjLZttKx\nsWN9OP0f4/P5iv4ogamrqyMajWZ+IpFI1r1wa9Tm4PkU40lJY1Rjy6pgwYStj8+kuNhvZT14AR+8\ntC2MZZpFO1jlijlM1XmfaUB+/fXXWbx4MRs3bpx2u3KwLIt4PI7P58u45NR5mqY564VayC5xUEqb\nRnXMcvv03gMU/QI9i71M3JEw6gG6k1b6+Zsmn503kDI9vf/8osFzj4aLhheqh79/XEc1sp56Dy7c\nNNmwPC3K7up87pZ7anaiHiz1//fff5/HHnssS9QBij1Ljm1gp+IcORnj5MkYq5pMkom0aMdisYyA\nx2Ix/H5/lnBWVVURCoUIh8NZQqsGn2AozJFbizAdAQg0X4imh77O0w9BbZUv85D/l//yX/i//u//\nh3/61MgSU6HpBGoWs2PtM1mDR+5PKpUiHo8zNDSUN+hEIpGsc45GozRGo0T1KCO3o1iTr7lF7uaI\nnWdNOQ7EUhDxZzfjmEnMSxXOSlrsChUGqerKqMVVxVwW6t3VIt2Z3sX2p76LBylCxhP2MnB3jlHx\nv5WIVJgtli05eH4q6cWR6UScI5cMfqsnlLe9u8WdBByZ/TBLyHIXKJRwq8HLLShqoXhgYICLFy/y\nta99LeuzpmkSH73FeP9tjNggZnwQIzaEmRwFKdEDVfgCVejBKkQ8ypLWWjo6OvJEvNTvWZ3PjRGB\nvJ3Kes9BcGPMx6LG7IEnEPChawZWjraFAxpLly6d9nhKWHKn+bZtZw1O6mdgcIhLNxMkklcYv32B\nifExHMehrq6OpqYmtNbfAi27RrIEwv70928YRiaSSYmjux/pbMRShRhWmkJhkIVi2WeDcnuq51FZ\n5MUMLJ/Pt6BVM+80nrCXiLpBlX/wTos6pKNMCjEwVtj6UiFuUuj09pl54Xe6BstbZr4mtytGCfz+\n/fvp7u7m6NGjDA4OMjg4yMDAAOPj40SqG5CBegJVjVQ1raZuaQP+cB2aPvWgawK+/EhoxiiRmVAh\nfJYj8xyJUoJZQL80IViz2M/Ja2ZG3HUN1i+dfTEpXdepqamhpqYm89pozGHfsQRCQhiILn2CXY+E\nCfksRkZGuH37Nn0DfdxyokhVgM0ysMbO8tavBmhqaqKuro66ujoaGhoqNlOcD4tdoTJT3YXyKlnq\nWFnvKmqmmLgvdNXMO40n7CWifLeBQKCon3OhqQqKgosgDdH8G9vdwOHCDYtkgWi0ng5fVp/UQhiG\nwfXr17l16xa3b99mYGCAW7dukUgkWLRoEbZt09DQQGdnJw0NDdTV1XHims3pPnvyYdIgx7LsbNTZ\nsCxATQWKhCnBaG/wIckubKVrsKzIwLVuiZ/qsMaZGya6Bj0dlW/O8dGZFCnX927ZcOBsil3rwzQ3\nN9PU1ESPlIzGLM7dMEgYDk0RG82oZWDApL+/n+PHjzMwMABAS0sLTU1NNDU10dLSQkdHR9aidanM\np7BDfqRMpVHWu9tyL2SVL2TVzDvNnVenewDVNFpZ6alU6q6IfPHpgo3L/Ry+aGI7aeHSRLpOi5vc\n9nwjcbNgXHVVMP+axsbGuHLlClevXuXq1asMDAywaNEiWltbaW5uZs2aNZw9exbTNHnhhRfyjiul\nxO9LAjbScYDJ9QkBIKiNaOz
oCqJpc/O7quNB+kEP+QRPrgnxUW8Sw0pHoTy6LFB0RiCEYFmLr6jw\nz3TMUrYbmlC+cDkZ+yi5NSwxDD1rP9GQYOPK8KRlWYMQLVn7MgyDWCzG6OgoAwMD3L59m7Nnz9LX\n10dtbS2dnZ0sWbKEzs5O6uvrZ/xO51vYlRsmmUxmqkhWuo6Lu5uSu0RG7nk8KFa7J+wzYBgGqVQK\nv99PMBjMJF7cDcIO6RIEzTU6Vwdtgr60OOXGhisXjEr2aKnRuDyQXcVQCGiICm7evJkl5IZhZETi\nueeeo729PeuBsW2bX/ziF3z3u9/NOzfl8w2H/Gi6zIhaVRBiCQeJw2jM5vKtVKbbk/pM7o96bzpy\nQx3b6nVe2hYhaUoCPrFg9dTVeaiBTf34hJGpta+IBKbWL0r1k6tIk9raWpYtW5Z53bZt+vv7uXLl\nCmfOnOHtt9/GcZzM36+zs5O2traCnY/mU9iBTCx6MpnMzHorjdtyL9aEvdKZvHcr9++VzRFVbS6V\nSmVW+IGs2jB3C/VRvWj7N7cLRp1zZ6PGhVuCoQmJPRlRYY1c4n/++F+IRqN0dnayYsUKnnzySRob\nG6cVmhMnTtDc3ExLS0vRbZY0+bh4y8K0JD0dfla2+vj8osHp62mVO34dlrb4JpN4ZCYCp5BVlyv0\n7v+7KyG6KzzmFv8qF/d5uPcPU4unbr9x7nmr83h0eYhDF9KljBHpOkCbVgfKbgzhjvJw34e6rtPe\n3k57ezvbt29HSsno6GhmoD569ChDQ0O0t7dnhL6zs7NimaczoVwyiUSCaDQ6b8dxV3hUfnf3faJi\n2+/2JMK54Al7AdylAXRdJxQKZW4AlcF2Nwn7dLjjfaWUXLt2jVOnTnHhwgVidpjG1mW01PpYsbKR\n337qT8ou2vXpp5/y+OOPT7tNOCB4bkO273ddZ4DzN9NNLSaScOGWQ1d7tsC5rV31O8DwhM2p6wbx\nlENng8aKVj/apNCp6836DmzJRAqiQZEV46+2y20y4T5WLpYtuTZoEUtJWmp16sJTgqjuidyBR/3e\n1eGnLhrg7I10Zu/qNt+sF4tVRuV07ishRGax9ZFHHgHSGdPXrl3j6tWrHDhwgFdffTXT7Wvp0qUs\nXbp03vzPqsyviuyZz05HSrRVQpO7UYgS9vvZar8/r2oOKEtdCXhu5MG9lJqsMmSvXbvGmTNnOHPm\nDOFwmNWrV/Pss8+ydOnSOd3Y165dIxaLsXr16vxjO5LDFwwuTCbbrFjkY9OKQMYdEvQLVizy0duX\nttoPXTBortGyZh6F3BLDEzZvn7CwHQ3QGEnAYFzyWz2BzPbuazrfb3DovAECpAPrOv08tDhbUKZz\n9bhfS5mSvceSpCyJ5QjO9EtWtOg8usxXski11Oq01OpYtuT4FYP3TyaxbagOCzYuD5Zc991ttZfz\nNwyFQqxatYpVq1YhpWTv3r0cOnSImpoa3n77bYaGhli9ejU9PT2sXLkyLydhrqj6L+4olvnC3WzG\nLe4PgtXuCXsO7kJDKunFjbsN3t1KIpHgzJkznDp1iosXL9La2kp3dzff//73aWhoyErqmAuffvop\nW7ZsKTjQHb5gcL5/qsrhhX4LKWHb6rRQJAyH8/1TDmdHwltHk3xzW2TazNkvrph5zSr6hh0SBgR0\nkRURMZF0OHzRwcGXDn0UcLJP0taoZVnKpYryuWsGSUvgSIEQ6WOf73dY3SqpL9P4fPd4kluusNTh\nmOTdE0meXhsqSdyVX3624mSaJq+99hrj4+M8/PDDNDc3s337dsbGxujt7eXw4cPs2bOHpUuX0t3d\nTXd3d8XcJ25/eymdl+aCEvdcyz23Afb9xt2tUAuMO6s01/KDqZjtu9FiHx0dpbe3l9OnT3P9+nWW\nLFlCV1cXL7zwQsEHcq5RCePj45w9e5bnn3++4PtuUYe0CF7ot9i6Km1ZXyzQkCItlCY9HcVre
hSK\n3ddEusaKP5TtlugbsvNi2W0Hrg5Ys3KBDIw7ebH/mgajcUl9den7GY05DE7k+7QdCV9cMWitLy1k\nUVmd5YbvxWIxfvazn1FXV8f3vvc93njjjUyCVU1NDVu2bGHLli0kk0nOnTvH6dOn2bdvH83NzfT0\n9NDd3U1TU1PJx8tF07RMIIJhGHlZqZVGPctqlqDE3d2h6n6z2j1hn0T53FSEQKEU5btt4bS/v5/T\np0/T29vLyMgIXV1dbNmyhW9961uZGiaFHnh39t9sb+hDhw6xdu3aWcVNA5i2zBNJSNct7+ko/rnO\nRp3ReHZEjwTqoxq2ZWddT8AnEBrgSkrSBIRmWVGyqVqjfyS7rrrjQF2kvP0lTVm0yEeqSDelQuRa\n7aUwODjI//k//4e1a9fyzDPPIIQglUoVdLmEQiHWrVvHunXrsCyLS5cu0dvby//6X/+LYDBId3c3\nPT09dHR0lH0fqUQilbg037Ngd/MN5QKa7cB4L+AJO9kt7tz9GAttB3dW2A3D4IsvvuDQoUPE43F6\nenrYtWsXS5YsyZyXYRhFr6ESWJbFwYMHefnll4tus7zFx8X+qYqGmoBlzVODZWeTj1PXrbx4+oFx\nh9G4Q20k3enp9lhaxFtqNXRN0NPh5+aIzcC4gzZZCvfxniCaACtnoFrcqHP4Itguw13XYPmi2U29\nezr8XLxlkTQktpO21le2aFQV6YBUjKYajUI6KEhHB5WDO+lmpvvy8uXL/PznP+fpp59m06ZNmddV\njsZ0+Hy+jG9+9+7d9PX1cfr0afbs2YOUkk2bNrF+/fqSEpBUBJG7pd58u2RgqtWgu5G8MuQ8Yb/P\ncNdPUanJxayHOxkR09/fz8GDBzl+/DhLly7lmWeeYeXKleliUFJyfdBmOGZRG5a01EiC81ii9Njx\n0yxa+jDBqsai22xeGcCRcGmyqcbSZh9bVk2dU0NUZ9OKAIcvpItvOc6U+B65ZLB1VYC9R5NT1q2A\nL60L0Vit8+VHwozEHJKGpKlGw6cXTlH36elonKOXTW6N2jRENdYvDcw6/DHgE7ywKcyVAZtYyqG1\nVqc2XH6YoK6lk6feO5nEcs0mOho01iyuTOhjLl988QW/+tWv+O3f/m1WrVqV9V4xi70YQgg6Ojro\n6OjgmWee4erVqxw6dIj333+f7u5uNm/ezOLFi0uy4iORSFbRtPnG3Q9VzXhmii66F3mghV1FwACZ\nadp0lu5C+9dN0+TkyZMcPHiQ0dFRNm7cyI9+9KOs+iOOlPz6WJLhCQfTlujSpLFW59n1xR/Uubhi\nzt00ODnaia9tKf/yeYKOBp2dPcG89m66JnisK8j21YGsY7pZ3eZnxSIfCUOSNB3eOpIu2nVt0Maw\nUsSSMstFvv90iq9vTjc2qavSwBWZWawOeySo8VhX5SI7dE1k1dMxzdnFfy+q0/nW9gjDMQfThtqI\nIByY3b01XeijlJL9+/dz8OBBvve977Fo0aK8z6dSqVnXKhdCsGTJEpYsWUI8Hufo0aO89tpr+Hw+\nNm3axCOPPDKtD12FLyUTlAAAIABJREFUQKqs1IWome72tysj7n6z2h9oYVcPg9/vzyTFTOfrW6iI\nmIGBAQ4dOsTRo0fp6Ohg586ddHV1FRxUrg/aDE846eJV0sZyYGhC0DdsV7zWScJw+OycgdDS3YGQ\ncH3I5vJtu2jxsGIDx+XbJkcumSRNSWudztZVAZa16Fy6lTZhb486eXVw4imJYUGwgFE7l85Jc2Eu\nlp6mCRqr5y4mxUIfbdvmjTfe4MaNG/zwhz/MMgjclGuxFyMSifDYY4+xfft2Ll26xKFDh3j33Xd5\n6KGH2Lx5M+3t7QU/p6Jk3IXC5hOVoarE/X4s6fvACruKgFFx6qX4pefTYrdtm1OnTnHo0OeMWRGW\nrVjNH7yylfaW+mk/NxxLi7qUMl2LRQgcqTEcc+hoKPyZ2
QrRrVEHx7ZAm1JWFWVSSlVIxc1hm4/P\nTJUbvj5o8+t4kqfWBrky2fS50BKiEOkG0HNFSslYQhKcp76nC02h0MdkMsnPf/5zdF3nlVdemVa4\nS/Gxl3s+y5cvZ/ny5UxMTPD555/z85//nHA4zObNm+np6ckzkOa7UFihc1SWuxqc79aIt9nwQAq7\nWkBxL54UKhrkZr5SrpPJJAcOHODgwYO0tHUSXvV1QviIAR+chW2YLG8p7netr9LwaWBOOmuF5kPX\nCld4zKXskEc7gSNlVgd0TaSTa8rhdJ+ZF9WirPHu9vSiai66lk4uKlbvxV1CYDoGx23eO5H2bTsS\nWh56Pn1Nc7D07wbfrDvCY2Jigp/+9Kd0dnaye/fuacVKSlkxi70Q0WiUxx9/nJ07d3L+/HkOHTrE\n3r17efjhh3nyySczobjuQmHznbikUJEyavHZE/Z7HHdLNGBG3/p8YJomn3zyCR9//DFdXV288sor\nnB+u5vzN7LZon5416Gz0FW0b19Go0xAV3B5xsIWGXxc012q01mqcumZw4qqJaacLYm1fHZx1D0+A\n86cO4qcbtAC2k47i0DXobi9vwc8uFOc4mfCztjPA+X4rUyyrIZr2Pa9q9WUKhRWilEHKkZJ3jyez\nSudGW7o40zd97Py9gLLar1+/zj/+4z+yfft2HnvssRkHHRUNNt+Cpmkaq1evZvXq1QwNDfHxxx/z\nk5/8hI0bN7Jz507C4TCBQCBTbmA+a8nknpdaQF2IejkLxQMn7G5rXU2/ZrLWK4llWRw+fJgPP/yQ\nJUuW8P3vfz+T7PHRpXi+C0LAeFJSX1Wk7ZcQPLnGz/VByUTKR2O1Tlu9zsVbFkcvT1nGfUM2755I\n8PyjkZItTMuWnO4z6RuyqQkLjp04y+99ax1Dpj/9WiQdxREpUO53Ola3+rk9lsqy2n1aOgxQE4K1\nnQE+v5iu3xJLSb78cHDGPq4ws+U8POHkdYjSdD8Xb00fO3+vcP78eV5//XV2797NunXrSvrMfFrr\nxaipqeHZZ5/l8ccf5/333+cv//Iv2bZtG9u3b8+4ZBZqIdVdXkDNeO4Hq/2BE/Zc63yhrHXHcTh6\n9Cjvv/8+LS0tfOc736GtrS1rm7qIxngiu8WPZUM86VBfVfhmU771zqbsjk6n+6w8d8doXDKecKie\nbFatrFxHSnqvm1zot/D7BGs7/bTX6/z6iyQjsXQc+e0xSdO6b1FdV0trUGPN4tl/F51NOusSfk5c\nTQ88tVWC3+oJZdwh3e0+evtM4ilJyoST10zWL5v+IVedk6Yj4BMUMuxnm7B0N/HZZ5/xwQcf8Du/\n8zt0dnaW/Lk7IezKbVZTU8PXvvY1du7cyXvvvcdf/uVfsnPnTtasWbNgC6mQ3YXJE/Z7EOVHU1ml\nylovp1ZEuX90KSUnT57k3XffJRqN8tJLL7FkyZKC265fFuDmSAIrJxP+w9MpfquHgq6IQgOTlHLG\nDEa3dfvpOYNLt9RAIPnwVIp1nX5GY+4MT4HQffReN9m4Ym5CIIRg3ZIAazr9OA55biZdE6xf6ufj\nM2mr/dR1k9VtvmlnBqW4YqrDGs01WnoReHJzxzZZ2zn7lPZKN4yYzfH37dvHmTNn+P73v09NTU1Z\nafJzCXWsFA0NDbz00kvcunWLd999l48++oht27bx6KOPll1tdLbkdmC617n3r6AMVMaZEmd3ksJM\nlOt/k1Jy9uxZ3nnnHXRd5/nnn2fFihXTPmy1EY3dj4Z4/WAy+7wdOHLRyBP23IFK0dtnkihQU6U6\nLDLWusK0ZLpui2tz24GzN0zy8941Jor0WZ0NmhBoRSZKy1t8nL5uMTw5uHxxxcwUEMulnFDHJ9eE\nOH7F4MqATTgg+Py9f6LlqR/M+hpKPe58YJomv/jFL4jFYvzwhz8kHA5n6u+XGpdd6YiYUig2u2pp\naeHb3/42169fZ+/ev
Rw8eJCnnnqKRx55ZEHWAFQTjvvBan9ghD1XBHO7ClWSK1eusG/fPlKpFE8/\n/TQ9PT0lP/x+nzbZcCL79UQBCzx3oFKcuGoWDBd8em22ZSqlxCy0kEk6YiR3LNM16JxmAbOSpBtT\nBHjneHqQO3/ToqfdT20Bl1Q5wu7TBRuWB9mwPP37R69dnfU53klrXRXyqq+v57vf/W7GylQx7aUK\neyKRmPciXOXS0dHByy+/zOnTp/noo4/46KOP+NKXvkR3d/e8Htfv92MYxoIUJptvHhhhz3VZFBPF\nuWAYBm+//TanTp3iy1/+MuvWrSt7/wEfVIUE44kp0RAC2nNKuRaz1gHMbDd9BndEjKrXEQ6IvONp\nIl0CoD6q8dk5A9syEZpOZ6Ofpc3zuxaRMiVnb5qMxSVt9TqttRo3J5OVPr9k8NTa4g/c3RB2uBAM\nDAzw05/+lHXr1vH000/nXXc5VQvHxsaKJi7NF6Wcl6ZprFixgo6ODm7cuMG+ffv44osv2L1797zF\nubvDHwOBwD1ttd+7Z14GqguPW9QrHQlz5coV/vqv/5pkMsmPfvSjWU8fhRA8/lCIoD+djOPToCYs\n2LQye7rsLmSUS3u9nlVkSgCLarWsWG13c4kn14SIBAU+DXSRLri1flmAlYv8PPcw3Dr5zzy3PsBO\n1wJnIUxbcuBMiv/voxj/9Emc09eNsqzapCn550NxvrhscvGWxSdnU1lRLNeHbPpH8ketO+3nXkgu\nX77M3/3d3/H4449nqjPmou6JUtyHo6Oj1NbWVvw8i1HO30r1Rl2yZAl/9Ed/RHV1NX/1V3/F6dOn\n5+383Fno9zIPhMWeW5VxLtZ67mdM0+Sdd97h+PHjfPWrX6Wnp2fO51tfpfHStggDYw4+Pf27+wF2\nW+uF2Lo6SPxEOqIF0l3vd3RnDwzKYpdSUhvR+MaWMKNxiV+HqtDUNZ7tPcmSljANNTP7YfefSnFz\nsqytaUuOXDLRNcHqttLcXWf6TAyLjL/fdmBowqG9XqdvOC3on18y+Mr6UNb3cafKCSw0x44d4623\n3uKb3/wmK1asKLpdOeV8R0dHi6b6zyel/K3cSUuBQICvfOUr9PT0sGfPHk6dOsVzzz0367LR0x0T\nuOfb5t27Z14GqtmwW8wq8Ue7du0ar732Gq2trfzoRz+q6BRRE4KW2sIP5UwhmiF/uqrhRNJByrSw\nz/QgpQtr5W9z7NixGXuaQtqFcjOnVrntqIiW0oR9JJbfyEIIaKvXMvseHHe4MmCztNmXs93CinrK\nlJy8ZnBrxKKpRrK2U5tT8td0SCn58MMPOXz4MC+//PK0jcMVpVYtXGhXTLmzK5W0pMIfly5dyr/+\n1/+aX//61/zVX/0VL7zwAl1dXRU7P03TFqy593zyQAi7exVeZZ2Wa627/9CWZfHee+9x5MgRnn/+\nedauXVu5k52BchZ9o6Hi11hKhcfh4WGGhoZYuXLljMcqsgabtwA7HW2Tlrk7/t5x0mGecQNOXUtX\n4jxyyWBxo54pL7DQrhjLlrz5eYKEkW4WMhSzuTyQ4Gubw/iLZAjPFtu2+ed//mf6+/v54Q9/SHV1\naa2aSrU8F9oVoyhnIM4tNRAIBNi9ezcPPfQQr7/+OqdOnfr/2Xvz6CjONM33F5GLVkC7hCS0IQRi\nRwhjFhswtgFDGWxX2ZSXsg3urnJVdc1M37n3TN85M32rZnpmeq3q6equ6p6226fbLDaYHWODbWyz\nGcy+mCUlIQmhBQkEQltuEfeP1JeKTOUSuUkgeM5Rlcnli4iM73vi/d7leVmyZEnUAp56GoXf6xj2\nPnZhoYsbJFKZwr1hjY2N/NM//RO3bt3i7bffjhmpN7c7+fhkN5uOdPHVhV66el1sJ3YfkQZ2tMTu\nD+fOnWPSpEm6sisSzBKpybJHhqRBhtIc/bZDSbaR9GSX9o1Rdn1/SoGJ5HiZSfkmzH1
DdfaqVDV5\n6skM5gK8dtOJ1d7fAUpRXWmjda0DNW4iQW9vL+vXr6erq4s33nhDN6lDvzsmkOXpdDrp6uoKadxI\nEc5DWAQye3s904CLi4v5yU9+gsFg4He/+x3V1dVROUetftT9imFvsWv9695B1FDQ1dXFjh07aGlp\nYdmyZUyePDlmZNLepfDld71uy7XhlpObnb08OzNOl3Sw1a5yp1thZIJ/90AwYldVlXPnzvHss8/q\nPu8FE+M4dMnKjQ4FCRdRTxyjP53UIEs8OTWe1g6Fzl6FzJEGd959nEli8hgzJ/ukBs7V2yjJNmIy\nSrqqTqOJbqsyoPOTQ3EJmUULt2/fZv369RQVFbF06dKw40FCA8XX9+/evUtycvKQZH+Eunb8SQ3E\nxcWxYsUKt5xCaWkpS5YsiajoSsiNBGteci/j/jzrEKD1r4frhrHZbHz44YfU1tZit9u5fPkyZ86c\nobOzMxanzJVG+wAXht2p0thuD2qtX7puY+uxbr680MvWY92crrX5/WygxdXU1ITT6SQ/X792QIJZ\n5smpCbw4J5GX5ibySOnABhzBIPXFFkqyTQOKqcpyjSTFucazOuBCn2tmsC2r0akGvG+BQXa9Hg00\nNjby7rvvMmPGDJYtWxY2uWhz2n1hKNww4bo3RF9UUUDkjbFjx/L222/jcDh47733Ilqb4ndzOKK7\nAxtMDHuLXWvNheOGERKo2dnZvPTSS0iSRE1NDVeuXOGTTz4hPT2d0tJSxo0bR25ublSe8A6nOqDA\nSFVVHHYnsjwwb13gTrfC6VqX/opICrx03U5uqsFnIFYEk33h3LlzTJkyJbxFGGU/s4BBlphWZObw\nZVenpUvX7YzLMWCUBtcVk5ZsYGK+S+tGll1xgPGjjWSOjJzYL1++zI4dO1ixYgXl5eURjRXMHTNU\n/vVwYTab3Va7L396fHw8q1at4quvvuLdd9/llVdeIT3df/tGf9D+bvern31YE7vWvy5uUihuGNHR\nferUqcybN4+enh4SExOZMWMGM2bMwOl0cu3aNSwWCzt37qSzs9Pd8Hfs2LFhZ8mMzTZx7aZnEBFV\nISvFHPD8m9qdAypWnQpcv+Xwm2Hji9gVReH8+fMBm1UHg8Op0m1TSYqT/Gqoh4OiTAMXG2S31MDZ\nOjsVRYO/8KYWminJkrl5107GSDNJ8ZGT+rFjxzhw4AA//OEPQ9opAbBtOTi6PV8zJiI/u9OvauGd\nO3fuyeIkfxCBU+GO8WVESZLEwoULGTlyJO+99x4vvfRS6L8l/U3CHxL7PQztdlSvRd3Q0MAHH3zA\nokWLqKiocH9fa/0YDAaKioooKiriqaee4s6dO1gsFi5cuMCuXbvIyspya1Dn5OToniA5qQamFZk4\nW+dyySSYJR4pMRFnMgQcI94sIUueGSoGGb/Nm/1t02traxkxYoRbTjhUWJrsnKzpdwFVlrqKnaIB\nSZKoKDbzeZ/UQE2LnbFZRjJGDf7iSzBL5IySMYfZq1RAURT27duHxWJhzZo1pKYG7poF+CZybzi6\nAza7vnPnjq7UyWgi0niIcMc4HI6AfvSKigqSk5PZsGEDK1euDDklUsTk7lc8EMQOoblhrly5wvbt\n20OeEKNGjaKyspLKykocDgd1dXVYLBY++ugjrFar22VTUlISNDWrPM/M+NEm7E4wyoq7IUIgjEk3\ncNokofSl4Um4skv8dWDyl/J47tw5pk6dqvu6tWjvUjhRY/PYbXxbZSNzpIGRCdEJ6eSkujTnxQ7l\nTJ2NJ6fdn9oedrudLVu20NPT4xby0oVgpK6BP4mBjo4Oxo0bF+ophw3vDLVwoPW1BwuQlpWV8fLL\nL7Nx40YWLlzIzJkzwzrf+xEPBLGH4oY5ceIEX375JS+//DJ5ef3dF0SnFb2FC0ajkbFjx7pzwG/d\nukVVVRWnTp1i+/btjB492k30WVlZPie7LEvEyWC
360txNMiuwqTz12y0diikJctMKTAR50dv3Bex\n2+12Ll26xOLFi3VdpzcabjoGBH5V1dXXdGR+9GL1M4pMNLU7AZXmdoWW205yUn1PZ1VV+a7BzuXr\nDpyqSlGmkYoSc9RcROESVWdnJxs3biQ9PZ0XXnghZpWOgti9hcGGSk4gUteG0WjU3UIvLy+PN998\nk/fff5+Ojg4WLlyo6/h60oHvZTwQxK4nG0ZVVb788kvOnTvHG2+84TfoEm5FmmoahSF9GpOzprP8\nWWhtdlnzGzduRFEUD2tea4kI3Qq9iz7eLFE5NnwZVovFQm5ubtitycxGCVnGw2KXJDBFeaalJhso\nyTJS3ezKXDhVa2dpim9X1cXrds7V93eTqmp2YHWozJ8QmZUfyaJvbW1l/fr1TJ06VTfZhAufio/b\nlvN2Zjcc/NDzw8ZEWLU7JucRLWL3rkYNhrS0NNasWcOGDRvo6OhgxYoVQY28h8R+H8CfCqKAqqrs\n2rWL5uZm1qxZ45fUwvUNWprsbveEJMGl67B4agnPjBuHqqrcvHkTi8XCsWPH2Lp1K/n5+W6iFxZV\nLPJpfU3eK1euRKR3U5hp5EytzZ2VI/qiFmREf6pNLTJR09yLiktTpq7NSVHmwONcuu7ZTUpR4Vqb\nE4dXn7y2u06OWazc7lIxG2FCnomJY0wBUzbDIana2lo2b97Mk08+yfTp00P+fjgY4I7x58oJwcUT\nKvQ2HNeDULXTk5OTef3119m0aRMffPABL730UkByF+d5v0oLDHti1+OGOXToEM3Nzbz++utB/Xah\n3mhFUTl5td/nrKrgUOFkjY2npyUgSRIZGRlkZGQwZ84crFYrV69exWKxcOTIEQwGA2PHjmX8+PEU\nFRVFVT9eq5/jOjeVqqoqFixYEPaYQqfm1FUbt7oU0pNlZhSbMevoWRoqkuJkxucauXjdFag9U2tj\njEZqQEDxYXWpeGred/UqfHam160maXXAmTo7NzqcPDE5ekJTZ86cYe/evUGFvILCmBichI39WVmy\nLOPc/hyq0oUkDY0VGs0ME63VrldKwGw2s3r1ajZt2sSePXtYsWJFwM8HSge+1/FAEHsgK0FYym+9\n9VZQUg/Fxy5gdag+9VLudPseJy4ujgkTJjBhwgQcDgdNTU3U1tZy6NAhNm/eTEFBgTvTRlf2RBBo\nJ29TUxMJCQkRjzsyUWZBAN30aKI830R1ix2H6pIasDQ5mJDn+fAryTJypanfapckyBopezTIrrnh\nGNDoGqDltkJ7l++es6EselVV+frrrzl16pRuIa+ACNFdIssykqMHRZKQh5DYo7XzFNrpoRYRGQwG\nnnvuOf75n/+Z48ePU1lZ6fezD4n9HoaiKJhMJp/E3tbWxrZt21i9erWufF5B7KG0zoo3SZiMYLV7\nvp4xInggV1EUcnJyGDNmDI899hi9vb1UV1dTVVXF119/TXx8vJvkCwoKwgq+abebFotlULMkogGT\nASaNieNMvesazvdJDWh3CNOKzFjtUNvqQMWlTT/Py7/u9MXqABJ0BmgmrscCdTqd7Ny5k9bWVt56\n6y398Qt/KY1h+sElSSUix0IE5xONjBhviIIlPUFULeLi4li9ejX/8i//QmZmJoWFhT4/dz/mrwsM\na2IPNJl6e3vZuHEjixcvDqmre6iQJIm5ZXF8fdHqEsiSXM0sKscGd/l4ywvHx8czadIkJk2ahKqq\nNDU1YbFY2L9/P62trRQVFTFu3DhKS0t1ZztopYwtFkvY2TBDBVVVGTfaSFWLnS6ritUB3zXYmV7U\n//saZIk54+N4ZJwZVfVdGVuYZeK7BsfAloIqZOp4CPtDb28vH3zwAXFxcbpcfR4I5Afftjx0q11S\ncSgyqgphcVYEfvlYaOYbjUZkWdYdRNUiPT2dVatWsXnzZt56660B60V7vvdjkdKwJ3ZflrWqqmzd\nupXi4mIqKip
0j+ctTaAXuWlGVs4ycP2WA6MskZ9uCFp2H6wZiCRJ5Obmkpuby4IFC+ju7qa6uhqL\nxcLnn3/OiBEj3AHYMWPG+I0xiAnb2dlJW1sbBQUFuq9rqCEWn9EgD5AaKBttJDHO87cLlN6YmiQz\ne5yJo5b+frGyBDPHmv0KqQVb8ELIq7i4mCVLlkQ3AB5GkFPuuzJFlTD4c8cYY9N2LlbNUISkbzgN\nqEtLS3n00Uf54IMPePPNNz3iV8FcuPc6Hhhi1y7C/fv3Y7VaWbp0aUjjRbIwE8wSpTn6Ap+hpjgC\nJCYmMmXKFKZMmYKiKDQ2NmKxWNi7dy/t7e2UlJS45Q60Mq3iN6murqa4uDgs5UstbtxxcqLGxt1e\nhcwRBmaVmgPqwkcLRZkGLl2XudXZLzXwaFloaZ9jc8wUZRm5cUfB5lDJGmUgIcyq0sbGRjZs2MC8\nefN49NFHwxoj2pAk4Y6RMHjvTb6/P6bHjmZGjBZijYTbgHru3Lk0Nzezc+dOnnvuOY9MsftV2REe\nAGI3Go1uojQYDHz33XecPXuWP/iDPwibxGKdAhWuCqWALMvk5+eTn5/PokWL6OzspKqqiqqqKvbu\n3Utqaqrbms/Ly0OSpKj41zu6Fb443y833Nju5NPTPax6JDGqejECWitQkiRmFJv5/JyQGnAwPs/k\n1zfuDwZZZnSq/u/4IqpLly6xc+dOvve970WlVWLUYExEtvXgVL3cMb6sdD2SBSEgVu4M0T4vkDxx\nIEiSxLPPPsu7777LkSNHmDt3rkcmnViL9xuGPbFrA55tbW3s3r2bV199laSkpJDH07piYgnxEIrW\nQkhOTmb69OlMnz4dp9NJQ0MDFouF3bt309HRQVFRERaLRVcLvECoah4oN+xQoPGWkzExyGP3Rk6K\nwd0fVQVOX7WxaPLgSg188803HDp0aEDlcljQk9IYClbtRlYUTnz7LU1NTaxatcr/Z0M9bhCffywt\nYL36Mf5gMplYvXo1//zP/0x2drY7mKqny9i9imFL7NrAqSjO2LlzJ08++SSjR48Oe9xwUh5DgbAW\nYrUIDAYDhYWFFBYW8uSTT9J2q4Ojx89iMNbx3nvvkZGR4c60GT16dEgT2u5kYPARF7lHG2IXJn4v\n8f9TCgxcv+nKa79+08n1NonsFMOA73qct92VsuTrWgO9JuIgYq59+umn1NTUsHbtWlJSUiK/yFW7\nA2eihIK+cWRghtPATEmFbf8nelWmAR4EsciI0UIbRA23wcaoUaN4/vnn2bZtGz/+8Y/9ZtLdLxjW\nxA792/SLFy9it9sjrvQLJ3c2FESr9Z0enK61cbHBgBo/hTFzpvJIqRlDbyNVVVVs3bqVnp4eDxni\nYAJVxVlGrt7wrPJUVcgNoQGFtlgq2H87nU53Y2+xCEfEQWEG1N5wbaFPXe3hqakJAxap9799CT4F\ny2EWsq69vb3s3LkTq9XKa6+9RkJCgvu8tMcKy8ccA+KVJBVFDVB9GmXEKnCqhXdf1HBQXFxMdnY2\nJ0+eZM6cOVE+w8HFA0HsiqLw1VdfsXjx4ognVzi57KFAEHus0dzu5NJ1u0viV3JNg+M1Tp6dVURJ\nSQlPP/007e3tVFVVcfbsWXbu3ElOTo7bN5+dnT3gPLNGGagoNnOq1oaiuNrZzRsf5xYh67EpnKm1\n09rhJDVJZvIYA0lxrnt1865Cl1UlY4Q0QGZYS4ziNxckKUkSZrPZ41xmjDXS0N6Dokrc6YWmOwaK\nsvxP9WDVvP406xVFobe3l02bNpGRkcELL7zg0YLR34PBm+i9/6KCAD5yWVJxqJLLzx6dowXEYBC7\nsNojIXaAhQsXsm7dOiorKyNqrzfUGLbErk1XunDhAvHx8ZGVcPch3JRHPfCVux4r1N90DOjbKUmu\nZh2lOa7rSk1NZdasWcyaNQu73e6WIf7www9xOBwewmVxca4MlLJcE6WjjdgcY
Da4FrTD4cDuVPj4\nlB2r3eWu6ehx0tju5OkpBr6pUrjTLVLxYGaJkdIcU1CiE4ThfR+S4w1MyDPzXV/rvDN1NsZkDJQa\n0Avfypsyt27dYtOmTUyfPp0FCxb4/FywnYe3W88f2YdMigGscY+0x9BGDQuDkTooKlEDNeHQg8zM\nTIqKijh27FjEMaehxLAlduHTczqdfPnll25dCKfTGVFKnyDdSC0DXxCLfDAs9gQfTTkk8CvxazKZ\n3G6ZpUuXcuvWLSwWCydOnGDr1q3k5eW5ZYrT0tKQALuGs67fVHF4+eCdCpyuU7ndrXo8ZE7UOCjM\nNPs9F4FArpJJY0xUN9uxOvxLDUSCmpoaPvroI5566ilmzJjh93N6CE1r4WtjBr7GEX+htnj0HMtl\ntbvSHv0gioHbwUodDFUYzBtiF7Zw4UL+5V/+hcrKSgwGw30pKzAsiV0sEoPBwKlTp0hLS6OkpASb\nzebuJBOp2H8sAqjh9GQNF6U5Ri422FEcCkgysgRmI+Tp9IenpKQwc+ZMKioqsFqt1NXVUV1dzbff\nfusav8+aLy4uJi4uDodqR1E9U8cUFW73tbjTwiDDzbtOUpMNXGmy09GtkJfmcqd4Ky36+63MRonJ\nBWZO9HVy8iU1EC5Onz7N3r17WbVqFaWlpRGPpyV/rdERjPC9ST4UMpNQcRqS/Ac1V+2GzYv0X4Sf\nYG6skwE8TiFCd4wIhmdkZFBeXs6hQ4d4/PHHY3CmscewJXZwWdVff/01q1evBlw33m63D2g4ECpE\nBD6aCKcnayRxcODhAAAgAElEQVRIMMs8PdXMh3vPkTNmPNmjZMpzJWQf7grvzBOtz1SWZRITE5k0\naRKTJ09GVVVaW1uxWCwcPXqUrVu3MmbMGApLJwHj0Hp1DTKkjTDQfdPpYckrKhgNsPtEN3an69+N\nt5zUtzlZqBEXC5ZpMW60kcuNdjp7XVIDF67ZmVEcvt9UaPafPXuW119/nZSUFN/HD5YDrlPrJRDh\na++JCOZ7EL3qqpz1B/n7n+GMZC3oLGgaDP+6FoLYQ4VoRCLkvRcsWMDvfvc7KioqopPhNMgY1sR+\n/Phx8vPzyc3NBfq7IEVqtWvzZqPljgm1J2s0cLvtOtLNEzz7fIU7w0SchzadUMDbOvSXCpiVlUVW\nVhbz5s3DarVSU1ODxWKhvb2RkUXz+yogDUzKN1KcbaapvQen4nLTyBLkpRm4fsvpJnVwpUw233YO\nUFoMdA8NssS0QjOH+qQGLjfaKcs1khQX+m/sdDrZsWMHbW1t7hZ2fglERy/SSCBJkk/LXtwvh8MB\ncjI4epAlFQkVWXJZ6eLnEvcvoJvBnzsmhFTLwS7NF92VQnHHqKrq5gTxu44cOZLp06dz+PBhnnnm\nmVieckwwbIndbrdz+PBh3njjDY/3BCk7nc6wSTkWhUqD6YYRENWmghjsdrtHDEIQiDivcM4tLi6O\n8vJyysvLUVWV600tXK5uoK76IrsONlBYPBZz/iKc9KckFmQYqLnh9PD/g4v0O3v8Ky36QmGmgYte\nUgNzQpQa6Onp4cMPPyQ+Pp433ngDk8l0T1Ukai116CP657ajbH4SFQmnKrsliWVJRUZF1hTv+UUU\nUi0j3R2HCm0MTG9Wi7+khfnz5/Pb3/6WOXPmhN3YfagwbIm9rq6O7OxsMjMzPd4TZCWe0OFYyKH2\nPw2GYG6YbqvC1RsOHE6VwkwTKSGWyfuCqqpUV1ezZMkSD7eSLMvu4oxoP2QkSSI/N4f83Bx4rJKe\nnh4On2+juSfOFdFTXVb74ctWphYYabnt2WLPqUDGyP5r11P04ktqYEIIUgPt7e2sX7+esWPH8vTT\nTw+YL/diEUs/0auAitr3uyqqhIqEQ5WhL8goiDcW5DsUu1CxNvUSu9Za9z7PxMREysrKsFgs9x2x\n378qNwGgqir19fUUFRX5fF+U60didYXry
/OFQAvg5l0nO4/3cLbOzoVrDj453cPVG/YBn9MDMYlt\nNht3797l5s2bjB492uVXNJgwmMweFnqskZCQgN2YAZInqTgddj7dtQml5yYSCgbZ5UqYUWxyi3KF\nkqkgpAYETl/VFx+5fv067777LpWVlSxdutTj/twXZeZ9LhNXFgwYZRWTrGA2x2E0Gt0GjtVqdbsW\no7kLHWz/uoAoItRzLaJdoL/de0FBAXV1ddE+xZhj2Fnswq1QV1fH8uXLfX5GWO3hCgdBeL48fwjk\nhjlebfMoyXcqrtcKMwdmiPgbW1t6D64HyI0bN8jJyQFDHPsv9HKjQ0EC8lIl5k4wYDQMzjN/RILE\nzU7P14wmM6+tfp7r9VVY6iw03+zArNzlujWPROc48vPzB1R1BsOMYpcvX8UlTtbc7iQnQAbQxYsX\n2bVrF88++yzjx48P7aKCpQrGSBp3ALxcKVu2bKG4uJgZM2ZgAI9UPmHoaOWig+5ogzTeGAr3IuhP\ne1QUxe169FeDUFRUxL59++6PB7kGw47YweUTvXXrljto6gvCWnE4HGHpQkSrUEmbmukLvlroOZxg\nd0Ccn7RsYZl7kzmSTPMdVz75tcYbjBkzhoOXXKQutuvX21VOXbUxq3RwxLMmF5hpuNkfPDXIkJ9u\nIDMtnsy0aUyf7vqNhXDZnj17uH37NiUlJRQXFzN+/HgPGWJ/SEmSKck2Ut3i2mWdqrWxNGXgNaqq\nyjfffMORI0d45ZVX/M6hgDuGaMkARBn19fUD+tmKSlmTyeQRgNVF8gEab2gVVQcbetIeVVV1yz74\nO0dVVUlOTiYpKYnm5uaINKYGG8OS2Ovr6ykoKAg6qUwmE3a7HbvdHjK5RyuAGkyiNy1ZpuWO5zHM\nRtefN8SC1Lp2hGulx6byyele7E4XgzvUyRRn3aL2jovU3WOoUNvqZFbk6dm6MCpRZllFApca7HRa\nVQozDBRne16cLMsUFBRQUFDA4sWL6ejo4MqVK1gsFvbt20dGRoY7bz43N9fvbzm10ERtq6vi9lan\nQm2rV169ovDJJ59QW1vLmjVrgqa53U8W3J07d7Db7aSlpXm8LghQWKTC3y4MDjGftH5oPWTtryp4\nsBDIVSpIHXCnN/r7HLg0ZK5evfqQ2IcadXV1fvsYaiFJktsfJ57uehdrqEEafwi2XZ01No5Pz/S4\nLWqAR8vi3J8XlpHwFYpr8h7z5FUrvTa1P19cNtDQneFTjjEG0ukBMTJB5pFx+jNVRo4cyYwZM5gy\nZQoGg8Ftze/YsYOuri4P4bLExH63R2KczIQ8Exeu9UkN1Pb72m02Gx999BF2u501a9aE1bThXsa1\na9cYM2bMgHmmnUfa97SZNto55nA4+lMDA7TYG8wqal8I5CoVLliTyRT0wSNJEsXFxZw5c4a5c+fG\n8pSjimFH7MK/rlfFUWhM2O12t1tGLyJVegzmhgEYlSSz6pFErrW5LM38dAOJcbLHlll7Hf4m6o07\nygAOV1SJ7BTXjkBY7QYZynLun5i6wWCgqKiIoqIinnrqKW7fvk1VVRXnz59n165dZGdnu635nJwc\nJuabqGpySQ10WV0XfffuXTZs2EB2djYrVqzwfz80PmVVkV2lVrISdnPpwYTYxXpDj0tRa8lr3TRO\nxYAsqRgkZYAxIKpNh5LYYWDaoyD1QGtFQDzsioqK2LlzZ8yE/2KBYUfsnZ2ddHZ2hrRtEqSotdz1\nINJCJb3pYGajxNi+tnpOp9NdPQvQelfCqUqMTjX6rBoVGJUo0WPzpHZFcRG+RH9x0KR8mXHZ934D\nX38ZFykpKVRWVlJZWYnD4XALl3300UdYrVZKS0tJSS+m0ZGPwejaJbzzzjtUVFTw2GOPBb5mrU9Z\nlZBE39BBkr+NBNeuXWPKlCk+3xNWuR6InaqqqjglBUWVsasGJEnF0JcjD4Ofv+7vPLXELh5IetM7\nxRpIS
kpi1KhRNDU1Rd48ZZAw7Ii9rq6OMWPGhPxkFX5FETDSc+PFMSIhdj1WjXcwVJIkHIqBfeds\nWB1CLdDGwonxfjM9ZhSb2XemF6cKqqKCpIIkexQByRIkJxiRJGXIF6YeBPvdjEajW5gMcAuXWSxn\nuVa/m4Qklw+9cmYF8+YHIXUvqJr/vddhtVrdqa2+EE7qryRJGM0J4OjGqbqMC4fYxRjjB00fJhC0\n2THCr24wGHSvVe01FBUVcfXq1YfEPlTo7Oxk5MiRYVmb2u0bEJTYtJZ+OAhGnoLQfblbjl+20m1V\nPajl4KVenn800WcaZFqygeUzE6hqtnPy9HnGjS2isSMeh2YAhwJN7Qp5KVJMif1uj0LDLScmAxRk\nhCfMFY7iXlpaGrNnz2b27NkcP36czz77DIATBz/h228OMK58mlu4TE/c5N7dz3iioaGB/zhuP8Zt\n+wa+aUxEfnYnzu3PoSidA+MrgdxMfa8b+v6Em8Zms+G02dwxq6F0x4gdtSDpUEgd+o2HUaNGcffu\n3Zida7Qx7Ig90kkkml+LVKigbhKzme7u7pCt9mBuGEHowgfvnWvbfNs5wF50KNDVqzIiwfU5p6Jy\nptZGdYsDWXJppY/PVthzcS/LHv+PNHZ4FuoYJBiZIAXXEIkAda12jlyxuZo8SHDqqo0l0xIYmRi6\ndRfOvVZVlf3793P+/HneevNH/P3v/w+/KD1Cmy2RKiWdo0dvsWXLFvLz892++fT0dI9juX+a+4TZ\n6+vrGSv517WRJAkcPaiS5NrJeb2vF9qKbKHJJAyEocpnlySJ3t5e4uPjQ1qfvlx997Jr0hvDktgj\nJSURTLXb7e4KPX8I1x3jz0csRJy0FoYv8h8RP9BnDnh0HzpebfNoVXeu3o7lup288kWkJBtITe7X\nUJElMBmhdLQJWVY8UuAihdWucvWGnW6rypUmTYMP1VVwdfKqzUO1MVZwOBzs2LGDW7dusXbtWndD\nc0mCzLhuMm0fMueJP8Wa+RJXr17FYrFw5MgRjEYjpaWleEtB3S/L/Nq1axCgJsplVQt99sjWjjBY\n4uJcmVsibqUNwA4WtGsp0h6m93rMyRsPid3PGCaTyT0pA5Uch+uO8Va90+agi+NrCd3hVLndrZAc\nJxNvdumffHau102SBhkm5pswGvrGU1VqWhwePnRVhR6HCUPaRHaf7GHZ9ASu33LQ2O4S1hqfayLe\nJKGq/ZkSkS7EbqvCx6d6cDgZoLsucPNu6LUAoS60np4ePvjgAxITE3n99dc9s5/SJ8HNC4AKx/4H\ncY//NRMmTGbChAmoqsqNGzewWCzYuoyYJYeG+vr+a7AqScOAoihcv37dpZgcABKgqpETl3Zei3ns\nnSqpJyMlUojjidhZqKTsbXjdb802HhJ7gHEEuWv1JHxNEJEzG4rVLixy4fYRC8LXDuHqDTtHLTZk\nyUWOZaONVJSYWTYjgSuNdqwOleIsI3lpmmOrGpfBgIszYHeApcnO9OI4yryKK8WijMbveP6aHZs9\ncJgxNTncTkD6vnfr1i3Wr19PWVkZTz311MDvzf0z2P9z6GwAxQaH/zMs+i2McOV9Z2dnk52dDeyj\np6eHqqoqLBYL1dXVJCYmulw2NTUUFBQMSlvDUNDc3MyoUaOCfk5GxYHkdpH5RBAJAfCdEKB10Tgc\nDux2u0fxXLShzbWPi4vDarWGnKroi9gfWuxDCEFI0boJgszFhPS1pQu1XZ62UYLIwvFVVASuBtBH\nLTacCoi8hapmB9kpBvLTjcwq9V3YI8sSeWkGGtsHyt+Cq8K0vds/3UqSFLCqtsuqUN3swO5QKcw0\nkjHSt2V/8+7A/Hn3OUqunUZFcWgyuqGgoaGBDz74gMcff5xZs2b5/lDcKJj/v1zkbr0Ntg44+J9c\n5B6f6vHRhIQEJk6cSFlZGSaTiebmZiwWC/v376e1tZWioiLGjRtHaWm
pLkKNNerr6xkzZgxYA39O\nktQ+dU0Jyd8dCyAhAMG7JcmyjNlsdsePBMGHUhgYCFojSWS/iJ10OMTuGVd5SOxDjmhvm0TgUpC7\n91YyVHeMVsZAWOj+Jk1Tu3OABeVQ4NpNJ/npgW/fnPFxHL5s5fqtgalsBhnSkiS+uWLldpfC6FQD\nE/NNmPoyVLxLzbW41elk35leFNX1gLA0O5hRZGJ83sBMkowRMu2dnuQuSzAh10hCnExRppF4c+gL\nRs9C++6779i9ezcrV66krKws8IDJeTDvf8BX/wGcVuhqhEP/Lyz4NRh9+/8lSSI3N5fc3FwWLFhA\nd3c3VVVVVFVV8fnnnzNixAjGjRvHuHEu4bKhSB+9du2a69qvBm6aIbJhFO/OS2E01QhGoMJS12bR\nBItl6Tm2WH9aN2Yk0h8Pif0eQyz8YUKn3F9QVY87RljowlIRAaZA8EV6sgQJOlQMzEaJhZPicThV\nLjXaOVtrRe2ruhuRIHG50eFuMN3epdBw08EzFQkevn9fE/pkzUDFydO1dkpHmzBoWOH6LQdVzY4B\npD69yER5fmQt6oK9f+TIEb755hteffVV/cVqaeUw+7/A4f8KKNB+CY79d5jzSw9pYX+B78TERKZO\nncrUqVNRFIXGxkYsFguffvop7e3tlJSUuOUO9AiXecCXGyRIxauQr37yySdhWvDKWFlSUd3lan0I\noaI2FKtYBFOFAaGtCA2VQIU/XbhP/bmB9MKXgNlDYh9ixDJVL1BQNZg7xle2i56JkpNiINEs0dmr\noqiuIJdBhrLRIUgfGCQmjzFz5dQXjMoay+QJJTTfdvBdQz/pKip0WlVabivkpPanp/larHd8uHBU\noMemkhzfn2p56JJ1gBuoIMMQEakHg6Io7Nmzh/r6etauXRu6OyR3Hkz/OZz+365/Nx6C07+F6b/w\ncD4Hu3eyLJOfn09+fj6LFi2is7PT7Zvfu3cvqamp7nTKvLy84IToy9oOkorY3NyM2WwmNTU14OcE\nJFScang+73CbVos1JcjZ1444ELT+dH9rKhxiF9/TvvaQ2IcQI0aM4Pbt2zEb33siCnIP5I4Rk08r\nOqZ34sqSxNPTErhwzUZTu5NRSTJTC8wkhtG383qdhXmzZ5CdYqDaK2NGoNum4Co38e9nzxwpc+2m\np3vHIENiXP/Ev9uj+vTUtoWRAeMP3gvNZrOxefNmnE4nb775ZvhCXqXPwZl/ALXvXlZvc/0BGBNR\nl28Lecjk5GSmT5/O9OnTcTqdbuGyXbt2cffuXbclX1pa6iFcFglE60O9kCVwqj7cMTqglfgFdAVa\ntdBa73a73Wfthha+/Ol+ryvEpji+akza29t1CQveKxh2xF5QUMCNGzfo6ekhISEhZsfR+t1tNpt7\nImr1Zrwnn3djA72IM0lUlEQWYLTZbHR0dLhbBY5JN3CtzeHhUlEU1w5BwJ+fvaLEzI2OPg31Pvae\nUxbnUfGaYJbwZSSNTIhOWz9v3L17l/Xr1zN69GiWL18euT9bDdyoOhLrzWAwUFhYSGFhIU8++SR3\n7tyhqqqKixcv8vHHH5OZmem25kePHh32saqqqgbor/uFMRHJ3idwpvYVKnn71/00EFENiQOt9SCB\nVl/wzkTzJ9alLd7T45sXFrteV5H351RVpaamhkWLFgX97r2CYUfsZrOZvLw8amtrKS8vj+mxhN9d\nO9GEy0Wrv6EN5ojXBntb19bWRnp6uvs88tMNFGUZqWlxYJBdVlrlWM+dgDbwpF08yfEyq2Yl0nDT\nicOpkpducLesE4gzSYzPNWJpcj08hAtpWpF+F5JetLS0sGHDBmbOnMn8+fNj/ttGe1s+atQoZs6c\nycyZM3E4HNTX12OxWNi6dSs9PT2UlpayKsQxu7u7aWlp0W9lrtqNBEg2G6osgy8L2I+/XVUU1L64\nUTSgbZShjWd5uzP1Fh2F0hTHl+J
qS0sLcXFxul1a9wKGHbGDS7Cnuro65sQO/ZruWuv27t27jBgx\nwr1F9BVdH2xib21t9WjIK0kSs8fFMbnARGevSmqSPECzRZynr0Ilo0GiKCvw9JlRbCZrlIHaVgfx\nJomyXFNULXZJkqiurmbLli0sXbrUr3ph1NFRj5RSFJOhjUYjJSUllJSUsGTJEtrb27FYLNjrTJgk\nz163qjHBb/VrdXU1RUVFIefVhxOjGuCGiQK0RpPdbsdms7mPoUdH3Xss0JcZ40tHvrq6mpKSkhCv\nYGgx7IhdCONv2xa6HzQSaCeccM8kJSUNIPChUr1ra2vz2Wk9KU4mKYCXR2h+hGOlSpJEfroxaFpm\nuDh58iT79+/nxRdfHFT/p3rkv8Kiv4XkzJgfKzU1lUceeQQe2Yvdbqe2ttYdhHU4HJTu2MG4ceMo\nKSkhLq7/RlZVVYXkXxcIVr/gjVi2wNMWyokqUrPZHPL6CSUzRsxz7TFqamr810DcoxiWxJ6VlYXV\naqW9vX3Qtk+qqmK3291+fTEZtSXUWtndwUZbWxuTJ08O+XuREHusoKoqX375JZcvX+bNN98kPT09\nvIG2LedPJ3XD5i89Xzcm+vcnq0D3DaTD/xme+Fswxi6O4w2TyeTOi1+6dKlbhvj48eNs27aNvLw8\nd+eoqqqqsHzCgkj13m+fYnbbfDeRB3TnxWvjU0ajkbi4OI/4VTjkrtdi11633W6noaGBH/zgByEd\nb6gx7IgdXDexuLiYmpoaZs6cGfPjCVIH11Y6ISGB3t5et19QRPm15zfY8GexB0OgtMehgMPhYOvW\nrdy+fdtDyCu8wQIE+L6/f+DrTd/Aof/s+u87VfDNL11yBPLgFx5JkkR6ejrp6ek8+uij2Gw2t3DZ\noUOH6O3t5dChQ24ZYr2dwUItofdZlBQoDTNIXrxWqtpbYkPUkYRD7noyY3z51+vr68nOzr7vWiUO\nS2IX7pjq6uqYE7t3xZuYjOAiofj4eLflHkqaYzThdDppb28P27INdXseK3R3d7uFvF5++eXISD0c\njH4UZvx7OP63Lt9281E49Wuo+L8CCKwMDsxmM+PHj2f8+PEkJSXR0dFBSkoKR44c4aOPPmLMmDFu\na9+7obUWoYheRdsNE0yqWmTNaCu3dacN68iM8bX7qK6udjdquZ8wrIn9s88+i6ml6YvUoV8Tw2az\nYTab3VaHIHitCNJguDfa29sZOXJk2AJVgeQFBgu3bt1i3bp1TJgwgYULFw6Z2p5avBw6mqFqveuF\nq7shMQfKXw05dzuq0Bzb7YBpgnmjEul96SNqamqwWCwcPHgQs9nsJvnCwkKPeREKsYsMr0iI3bsZ\ne7DevdqUyFDIXU9mjLfiKrj868uXB3At3aMYlsQOkJSUREZGBhcuXIhJtoRwsfgqYwbcxG6z2YiP\nj3db8mLiePdfjCVhtra2uvPXw4G/tMfBwrVr1/jggw9YuHAhlZWVbrfXkKH8NSTbDbjm6sDEhXcg\nMTus3O2oIcCx4+PjmThxIhMnTkRVVbdw2VdffUVLS4uHcFlKSoruzBi9rR19wbvdYzBC10Jb6Ken\nZwLoy4zxTmy4du0aPT099007PC2GJbGLifbEE0+wY8cOysvLoyqnqnWrBCpj1laiioWiLVRyd3vv\nK4mOlYxpuP51gUBpj7HGhQsX+Pjjj1m1apVHlsdQ7RxUoWtb+X9D701oPeV64/hfhD7YEFj4kiQx\nevRoRo8ezeOPP05PTw/V1dVuhcqkpCSKi4vdvWL93W9tgVAo8G73GK5ho3XL6GllGYzYvQPGqqqy\nb98+Fi1adE/ElkLFsCb2wsJCsrKy+Pbbb5kzZ05UxhbuFz16L6JtnlCv056bNjAkLJdY6VS3tbVR\nVFQU0RiDnR2jqiqHDx/m2LFjvPbaa+Tk5ET/IH4yXwJlbrhz6A1mmPsr2P8L6Ljqv1JVYPOigYQ9\
nlBZ+HxISEpg8eTKTJ09GURSampq4fPky+/fvZ8uWLW7hsnHjxnkIlwXMXffxuyoqOA1JKDZXO8Zo\n7VS12k1iXH8ItKa8/euXLl3CarUyderUiM5vqDCsiV1VVZ588knee+89pk+fHrHEgEjBEjnrwWA0\nGt0NdcWE857IguC1vkbh4olWK7G2tjYqKysjGmMwiV1RFD7++GMaGhpYu3YtI0eO9Hg/auewaje/\n/OUv+dM//dOQvuY+tinZpeP+xc+gty34F0Mh7G3LY++X94Isy+Tl5ZGTk8O8efOw2Wxu3/y+fftI\nSUlxp1NmZ2f7b/itOW/vzmCGGMSWQu1T7Ata/7rT6eSzzz5j2bJl96W1DsOU2LXIzMxkwoQJHDx4\nkKeeeiqisbQpjXqhlfMNBC2Ri62qCLZG0gxYVVXa2toi8rHD4KU9Wq1WNm/ejKqqvPnmmx5FN1oM\nqStGi8QsmP8/4ct/F11LexCtdm+I+5uUlMS0adOYNm0aiqK4hcs++eQT7ty5Q0lJCWVlZZSWlnpk\nKHkHRLXV2bG6b6JPscPhCLm/qXd2z8mTJ0lJSbkvs2EEhiWxayvWABYuXMjvfvc7Zs2aRUpKSlhj\nhtsUVwRRrVarO4gaDILgtb0iob+CLpQF0tHRgdlsjkoebqjyp6Gio6OD9evXk5eXxzPPPDMkgVo9\nGPDbp5TCo/8fHPx/huR8gLDcSsGgfYjJskxBQQFjxoxh/vz5dHV1UVtby+XLl9mzZw/p6emUlpZS\nUlJCdna2ew1G2kBDL4TP3WazucndF3zNX62/32q18tVXX/HKK6/cMwV54WBYEruAmJgjRoxg1qxZ\n7N+/n+eeey7kcbQZLOFYq2azmbt37/q1Pv1BkLg2g0BL8nrOJ9LAqff5iPOIttXe3NzMhg0bmDVr\nFvPmzbtnF5VfSYicoS05V1fu4u/+7u/4/ve/T25ubvAvBECgYLkgwdTUVNLS0pg+fTp2u526ujqq\nq6vZtWsXXV1dlJaWUlZWxtixY2Oqsup93iJhIZT+w9rsnkOHDjF27Fj9zVnuUQxbYve2LufOnctv\nf/tbmpqaQrppWr96uJk1Yhtqt9tDJnfon7CAu8hCm26pteS9EU1i1y74aBJ7VVUVW7duZdmyZbpk\nD+7ZjvH+rOZofT4AGhsb3Rkv0YCvlEfhsgA8UhUlSXIHWWVZ5vbt21RVVXHu3Dl27txJdna2OwCb\nk5MT04e2NuPMe034mrPa7J67d+9y/PhxfvzjH8fs/AYLw5rYtdZlXFwcjz/+OPv27eO1117TNbmE\nVICWWMM9F1mWsdlsEZOidrJqSV6bpaD9TKQ57FqI8aPpjjlx4gT79+/npZdeoqCgIKRzGWwE1VBZ\ntdslJrPlKVAH9pkd4BZZtduVLRMFnD9/nsmTJ0ftd/GuNlZV1V2XIdaCv9zzlJQUKisrqaysxOFw\nUFdXh8ViYfPmzdhsNjfJl5SUxKRUXyvxG8x1qpVE2L9/PzNmzLgnmpBHimFL7OJmaom0oqKCU6dO\ncejQIebPnx90jHD96t5QVRWz2Uxvb6+7YCka0LpqvPPixWRtbW1l4sSJUTmeOGY03DGqqvL5559z\n8eLFkIS87llrXUCS4LlP4NCfQMvxvtcMMO9/xsxdoygKFy5c4LXXXovamFpVRWGZ22w2d1W13jiP\n0Wh058QvXbqUmzdvUlVVxcmTJ9m+fTu5ubluos/MzIzKg8k7x92fv13sQIxGI+fPn6empoaf/OQn\nER//XsCwJnZBegIGg4HVq1fzzjvvkJaWFpDwhCUcjpKcN7SNAcRCiaYrQ7hIxPVqrfibN28yatQo\nj1SwSBZPNLJjHA4H27Zto6Ojg7Vr10atFVysobv7lWx0BVO//Hdwp9plvX/zp7Dwf7sCrVFGfX09\niYmJEe/MhIEgrF2bzeZ2UwiyNJlMEQVDhXDZ7Nmzsdls1NbWY
rFYWL/eJdEgSL64uNh/OqUOiF22\n3W53xx+UPrkAACAASURBVMfAM3gq4gXXr19nz549/OhHP7rvxL78YdgSO/jWOBk5ciSrV6/m/fff\nJyUlxW+gSRBhNCL6ghASEhLo6uqKqtXuDe+0ye7ubpKTk91Wl/iMNrsmVKKPRIK4u7ubjRs3MnLk\nSH70ox9FtSLYjaHUbBEwJfXnuPfcAEcPfPYHA88nCjh37lxYshlaIvc1P4xGIyaTyU2QImAfLZjN\nZsrKyigrK3On5VosFo4ePcqWLVvIz8/3EC4LZ56KHaa3ESKuu6Ojg02bNrFy5Uqys7Ojdm1DjWFN\n7Fp3jHZCjh49mu9973ts3LjRZyd7Mcn1Sp0Gg1ZOQCsOFuvih+7ubhITE90BW+0iFhY9hE704RYr\n3bx5k/Xr11NeXs7ixYvDzssX5+wXMaroDLlfbUKGi9z3rfF/PhEGUJ1OJxcvXtQV8NMSuTYQCn1Z\nVrueR3J2uRtZq04Dkike9bmdMZeTkCSJzMxMMjMzmTt3Llar1V0cdfjwYYxGo4dwmd61aTAYsNvt\n7h2muGZFUejp6eHDDz9k/vz5lJWVxezahgLDmtgFUfnyy06YMIFbt26xYcOGAYUwkVSw+YKWAM1m\nMw6Hg97e3pi7IDo7O0lOTnb/27uaVbvAtfod4nfzR/TidV+WkD/U19fz4YcfsmjRokHRyL9nMKo4\n8PsR7iCqq6vJyMjwG/DTWuMDiLwvXdZ9D5UutL32JEB1dEdFxTFUxMXFUV5eTnl5Oaqq0tLSQlVV\nFQcOHGDTpk0UFha63TaBmulorXYtD9jtdrZv305RUZGrQ9Uww7AmdgisJT5nzhza2tr46KOPWL16\ntYeLIdouAm9JX1GNGhNXRB+6uroCapaLBa0lenH9vohe+xeK1X7+/Hn27NnDc889R2lp9H3MHgjU\nvSdCDFW/2kAQ2TDCGtf++STynSuRHN0MuARfbiFJRVEl6LPWh+q6JUkiJyeHnJwc5s+fT09Pj9ua\n//rrr0lISHCTfGFh4YAHkNZqB9cuZ+/evciyzLJly+6p+xktDHtiD6QlLkkSy5cvZ926dezdu5el\nS5dG1bcu4F3UItwxvb29HhZ1tOFtsQeD1nrztW3XWjwiYwLwWy6uqiqHDh3i22+/jbqQl9/FOISl\n+IMFcT9sNhuXL19m0aJF2PrEtaA/mK6VonD/Xs5ufHbA9vG7SYBDlZC9ugoNNRISEpg0aRKTJk1C\nVVWampqwWCx88cUXtLW1UVxc7Cb6kSNHeljtAEePHuXatWusXbv2vtWCCYYHgtjBv5a4wWDgBz/4\nAe+88w7Hjh1j2rRpUbeifT1U4uPj3cqPkUT/A6GzszPsLkNay1zA2yIUeti+vqeqKnv27KGpqcmn\nkFe4GMp0x5gIoNXshJLv9R1AgcZDoNghfxEqDLDAtdd/6dIl8vLySE5OHrCrigZUFRyqTMIQWuvB\nIEkSubm55ObmsmDBArq6uqiurqaqqorPP/+cESNGMG7cOMaOHcuoUaO4evUqhw8f5o033hg2GTC+\nMOyJPVB5tEBCQgIvv/wy77zzDvHx8VGV6vRHRFrlx2ikVPpCV1eXh9RqpPAmjfj4eJxOpzuQJYin\nt7eXrVu3IkkSL7/8MvHx8e5CL+9xBtW1EWoWild2jarILmPXHB+abzxAY2z1xG8gLh01cwbqib+E\na1+5CH1GN2pBv2id9kEr/v/y5ctMnTo1akF+byiq677cS9Z6MCQlJTF16lSmTp2Koihcv37drU7Z\n3NyMqqq89tpr4TdAv08wPPchXtD6zv0hJSWFF198kc8++4wDBw4MimUYHx/v3lLHAl1dXTF19YgF\nr+2A093dzfvvv09qaio//OEPSUpK8tg1CVEz0bdSCKSJqkZRVCKULb1T8SJCqIFKbzJWJSRU1+ub\nF7n+fPj0tRa20+nEsWI7j
lX7sK/ci+3ZT7E9sw3riPHYFAN2RcJ++L/j+OLf46z/GuEVl69/7U43\nNJvNmM1md+qhwWCgt7eX2tpaJkyYEN5v4QuaB5+qgoKEbNQnXHcvQpZlxowZwxNPPMHs2bMxGo1M\nnDiR3NzcqF7T3/zN3yBJEn/913/t8/3Lly+7K98HC8PeYgd96XmKopCTk8Nbb73Fxo0buXnzJitW\nrIhpcFMEUmOV/hiJK0YPvHdDTU1NbNiwgdmzZzN37ly/v7UgaUGA3v8diMi1jU68zwUAOclNyO6j\nS4AxEclPEN07uO4+ttJ//ooKdkXCKEngoncXbL3Q547SXoM3PHYo5iSk+f8N9v8CqacFiR64ewVJ\naxi3nwWcIJt95uWbpTiKi98M3Z0QSAVS8+Bz2O1IDgdyn1vtfiV3VVX54osvuHDhAi+//DIJCQlR\nv5Z58+YB8M033/h8/4/+6I9wOp389re/jepxA+GBIXYI3MhWWJ0jR47kjTfeYNu2bfzbv/0bL774\nYkzJMZbpj6EGT8OBaNJ95coVtm/fzjPPPMOkSZMCfkdLcoHgi/h9kYwHoa7Y4t+699Mr1W8PVVUb\nX3BZsIokaWndIw4p5pYuV5MxE6b/BL75pe9jO61w6zvInO6TiA2qNbxevjp2LWKXJAS17ldit9ls\nbNu2ja6uLtasWYPdbo/JtVRUVJCQkMDRo0cHvLdp0yb27dvHL37xi0HtxvRAEDsE1hL3Fto3m838\n4Ac/4IsvvuCdd97hhz/8YcTl2v4mUyzTHweD2GVZ5uTJkxw6dIjVq1czZsyYqI0tbV+B5E1qioRk\nSML0wk5dY/hSKPSG1kftcZ9kUcDlstglBUyy013AoxlA17l4oOkofPu/gAAuphsnXcTuB9oesNGE\n0BoSD+37ER0dHWzcuJGsrCyef/55t6Cf2WyO+sPKZDIxa9Ysvv76aw/12K6uLv74j/+YrKwsfvWr\nX0XlWHrxQPjYAQ8dFW+Ixa615iVJYvHixSxYsID33nuPqqqqsI6rxzcs3DC9vb1hHcMXnE4nVqs1\nplrYqupq+Hv8+HFee+21qJI64Dd1UXL06B7CV/69twKmdwOT/j80+d6S5n/DxLbl/b75Q/8JnEHu\n942TAd+ORdBUa61rU1/vJzQ2NvLOO+8wceJEVq5c6TbqtGnM0W4YI9wxR44ccb/2q1/9ioaGBv78\nz/980BUjHxhiFwUWIpdVi0Cl4tOmTeOll15i+/btHDt2LGbnF+1AqpATiFWert1uZ/PmzVy/fp03\n33yTlJSUmHZX6scgugS8gokRI9Qc+1uXwD64eflaZdD70f1y8eJF1q1bx9KlS5k/fz6SJLnrWESR\nlr9q9EggiF24Yy5dusSvf/1r5syZw+uvvx7VY+nBA+OKgX5/sPc2TNvI1hcKCgpYs2YNGzZsoLW1\nNSZNbqOd/hjLwGlXVxcbN24kJSWF1157zX3eoUgMAL7FugZTqCsYNOehblkB9l59FZvRguqEtjO+\nA54xOK5W0VQbI7gfLHZVVTl48CDHjx/nlVdecYv7iaQJ7e7M3849EohkARFA/fnPf47T6eTv//7v\nh+QB+UARu1bjROvL9q4M9YXU1FTWrFnD5s2bWbduHStXroxa0Y1AfHw8nZ2dUVF/jJV/va2tjfXr\n1zN58mQWLVrkIZUQsjCYLwtWj1UrDQHRPLvNlVUTo2Iyv7j6sccDRoiozZgxI/KxvR6sjr48fYNX\nnv69Tuw9PT3s2bOHtrY2j2I4IY3hvfsIVI0eLlJTUykvL+fEiROsX7+ezz//nLfffjs69ykMPFDE\nrtU40W7J9N7g+Ph4Xn75Zb7++mt+//vfs2DBAmbNmhU1612b/igs+HARC2Kvq6tj06ZNPPHEE1RU\nVHi8J2SCvR+a0cZQUYxuEggkGRwIc/8bVO+Elm/xuMrGgx5dll4wGpAn79F30sGgOU9FlVB
VCaOs\n9OfpA5LSN7dDLcoaBKiqyoULF/j000+ZMGECb7zxhkcVtzA0RNMN8IyjRTtDZv78+Xz33Xf8+Mc/\nJiMjgz/7sz+L2tih4oEidhhIQGJLppecZVlm4cKFTJo0iV27dnHu3DlWrFgRVAdF7wQSvnaR/hju\nQyOYAFioOHfuHJ988gnPP/88Y8eOHfC+eGhGdYvrL+faMPiNOXSTQCDJ4EASvYf/i+v9pe+DZTPU\n7PDZXi9OdoaXhRMEDlVCklQMPnZDKtxzGjzt7e18/PHHdHR08OKLLw4I3AtrXRhwvtZ5LPzs//RP\n/0RnZye//vWvA6pOxhoPHLFrCSiSG5uZmckbb7zBqVOn+Ld/+zemT5/OwoULB2QqhHMMoSMTSW57\ntOQEVFXlwIEDnDx5kh/96EcBmxF4a19HDF8Wot0+mOFTD0Rs3QXrcerohuRcmPELmPZTV+/UQYCz\nz1o3yQMfyhIqqnrv5FgoisKRI0c4dOgQc+fOZc6cOT4lD3yJ+cVa8Ku42CXRPGvWLNauXRvTYwXD\nA0fs4ElAkSxWSZKoqKigrKyMTz/9lH/4h39g+fLlEUvTanPbwxUJi0ZOvNPpZNeuXTQ3N7N27dqg\nD4qQddoHKSgYKYakSEcenKWpquBUZGRJRR6K2EUIuH79Ojt37iQpKYm33nqLtLQ0n5/zFQTWGhux\nihn85V/+JbIsD1nAVIsHkti1Mp7R8AcnJyfzwgsvUFVVxe7du8nPz2fJkiUR+bi1FanhZMlEajX3\n9vayadMmDAYDb775pu6Hi7/MI5+4x3y2gG8fuSERntkyNOcTSxgTcdp6UAGj5N+FNtR0b7Va3bIA\nTz/9NFOmTAk4t3xZ69HuM+yN9evXs3PnTn72s58xa1ZsmpaHggeS2GGg+H40LLLS0lJ++tOf8uWX\nX/K73/2OxYsXR1RGLFwyom9pKIhkIt+5c4f169dTUFAQcmqn+Gysg6gxgy8VRqE9cx/mdQeC8uxO\nnHa7qxhJ3KtArqIhwKVLl9izZw8lJSX89Kc/Deqa1AZMvRFtYq+vr2f9+vVUV1fzr//6r0yaNIm/\n+Iu/iOoxwsV9uPKiA63bIJowmUw89dRTTJkyhZ07d3L69GmWLFnit2l2sHPU+ttDSYEMt0elEPKa\nM2cOjz76aMhkJiylcJtd34tQ6c/pDopAIluhIsauKp9NZbyOKWlfH0R0dHSwZ88eWltbee655ygq\nKgr6HW16o5bEhWsm2sT+ySef8Cd/8iekpKSwcuVKfvOb38S83aVePLDEDi6rPVaSuTk5Oaxdu5Zj\nx46xbt06SkpKeOyxx0LuhG40GsNKgQxnIgshr+XLlzNx4sSQvquFcHPFogHykDws+nwRuo67ardv\nd46j2/V6oMwYb/JctRur1cpvfvMb3n777ajWTWgtW4/r8naPOZ3gcICmJ3As0d7ezqFDh7hw4QKP\nPPIIL7zwgu45r+3opUWomW968Yd/+If84R/+YVTHjBYeaGIXVnushI5kWWbWrFlMnDiRs2fP8v77\n75Obm8tjjz1Gfn6+7nHi4+M9FCD1TNBQq0CPHTvGgQMH+OEPfxjSufmC+F1j3dl+sKDiVZUcrGI2\nUMrj9/eHdOxz585RVFQUVVIX3a+8Ldtg34nlA7W1tZWDBw9isViYOXMmP//5z0NK13U4HCiKMvBB\nReyI/V7GA03s0Up9DHaMuLg45s6dy+zZszl9+jSbN28mLS2N+fPnU1xcrGvBJCYm0tnZqTsFUi+p\nqqrK3r17sVgsrFmzJmq5tyKIGost8GBb7Kr3McOtmAVPH3YQ+QRVVTl+/DhPPRXdtEfhgtFlCW9f\nCfZel9Kl+AmiKPvQ2NjIwYMHqa+vZ/bs2SxbtizkqmtRl6IVLtMi1IfYcMADTeyAOyXK3hdEija0\nhCDkPSsqKjh37hwff/wx8fHxPPbYY5SVlQUkLOFv15s
CqYdQ7XY7W7dupbu7m7Vr10ZVCVJrtcdk\nQQWq8IyEdHy4SlRDYmxy54M8DBoaGrDb7ZSUlETtkL5SAQPC2c0ArcAIi5VUVaW+vp4DBw7Q2trK\n3Llzee6558JSq9T61f09qBwOh8d6CST6N1zwwBO7mBCiFVusXAfaHYHBYGD69OlMnTqVS5cu8eWX\nX/LFF18wf/58Jk2a5JcIRQqkHqGwYK6Yrq4uNmzYQFpaGq+++mrUM1h8yTdEBTtfcJGNj2IaIPIK\nSV8PBZttSEjg6NGjVFZWhndsHw8+VQWHYkCWVAyyMuiCa6qqUlVVxYEDB+jq6mLevHlMmzYt7DUn\nNNZhoF9dwJffXdSvDGcL/oEndnBZ0qI/ZbTlSsVYvlw9siwzceJEysvLqa6u5sCBA+zfv9894X1N\nVr1VqYFcMW1tbaxbt46pU6eycOHCmJFWTPRjHD2DKgIWVnFSoACpTty8eZOrV6/y7LPPhjeAj+M7\nVdkzZ32QZAIUReHixYscPHgQVVWZP38+EydOjJhY/QaANRDE7p0lM5ytdXhI7ABuneZgW7pwEWwS\nSZJEaWkppaWl1NXVcfDgQb766iseeeQRpkyZ4iHSrzcF0p8LpLa2ls2bN7N48eKYK8/d16mPwuJV\nAcWAJCuuB4oxMXgaoi8rOMT88IMHDzJr1qywqo59QVFd0gEGSRnYASpG6O7u5vz58xw7doyEhAQW\nLVrEuHHjojIPtAZDoAeEcDtpq05VVR0WQf1AeEjsePal9JUHGw3oDc4WFhZSWFhIU1MTx48f5x//\n8R/Jzs5m6tSplJeXEx8f75ECKeQHvOGL2M+ePcunn37KCy+8EFW/bSAMlupj1NFH3Krbu672vx5i\nZkuouH37NpcvX+aP/uiPojamQ5X9inwFhDHR1bDb+zU/sNvtXLlyhbNnz1JXV0dZWRkrVqygsLAw\nag92bUPzQAQtPqddHw9Khsx9tNJiB62/TaSCBdrehTN+qBg9ejTf+973WLZsGRaLxU3KpaWlTJky\nhdLSUrcKpK9dhtbHrqoqX3/9NadOneL1118nKysrKtelB96ZR/eV1U5/56SIzzqEYqPDhw8zY8aM\nqAWznUq/yFeoP7+0coerCbjZjL8vq6pKbW0tZ8+e5dKlS+Tm5vJy4joMpVZQPoXjuP6i4NMX61NP\nVo8gcV/+9fttHoaKh8TeByG+rw2kRsvC1MqGhgqj0Uh5eTnl5eX09PRw4cIFDh8+zI4dOygvL6es\nrIzRo0eTlJQ0wI8orOVdu3bR0tKiS8grFtDKN0S8BTYmgLMrwPvRrfwT9q0HD2xbHjpB6fx8Z2cn\n586d42c/+1lo4/uBqrqsdb8iX94uIi35blsO9h5QZJ/pjjdu3ODs2bOcO3eOxMREpkyZwhNPPOGa\nY5vfHXisKPj0hQ6RHsPLV+BUT1Od4YCHxN4HbSswrb/9XpoECQkJVFZWUllZSXt7u1sjXVVVysvL\nqaioICMjA3ARu91uZ926dZhMpgFNCAYTWtG1SIldXbEZDAYYJLeOqzjJixBjGHQ8cuQIU6ZMibxJ\nSt8OwaG6uiIFEvnygPbaHN0INlfR7Foc3fz+97+np6eHKVOm8MorrwzKLlBUM+sVxfNWOBW7xntp\nTccKD4m9D95t83z558JFqJ2a9CA1NZXHH3+cxx57jPr6ek6fPs0777xDWloa06dPx2q1smXLFsrK\nyliyZMmQT2ZhtccypTSq0LhOBmvT3tPTw6lTp/jxj38c+WCrdruIsI/cJF+/eQSCX0uWLKGoqGjQ\nXBp6/erazyuK4tO/PtzdMPCQ2D0grEpwbd/uByKSJInCwkLy8vJYsGAB165d4/Lly3R0dJCcnExC\nQgINDQ3k5eUN6XVE02ofFPQ1xVBVadB0yo8ePcr48eM9sqDCRahE6P3d5uZmXLJ1vq9dNJUYDITi\nVxfwDpKKBtbRTme
+V/GQ2DUQqXkieCr87pFOBm0ue6wmldFoJDExkcLCQsaNG0dLSwuzZs3izp07\n7Nmzh/b2dgoKCigpKaGkpITMzMxBn+CDbbV7ZyL5+7d3/MP9u6iuNMHBIHar1cq3337LmjVrIh5L\nFxFuW+73+3/1V39FUlISP8sGsV8JeaZESZlSW4QUSkKDWLfi+gPJ+Q5HPCR2DYR/XWicCKtdEH0k\n4w4GzGYziqJgs9lITEwkIyODRx55BHBVmtbW1lJTU8OxY8ew2WyUlJRQXFxMSUlJVKzEYIiqr73P\ntaX9b+9/64UgjgHHcBqwKQYUJBQUN7lJqEh9OzuRYRHpPT5+/DjFxcWkp6dHNA7oDDAGiBP85Cc/\ncYmObd4cfpMNXyqXokm2zuwY78rSUH5jQexinKhXQN/jeEjsXtDmXZtMJjfRR4uMYg3RDNtkMtHZ\n2el+PSkpiUmTJjFp0iTAJY9aU1NDdXU1n332GQkJCW6SLyoqiqpujBbh9EUV22ix0P3dC0GwYlzt\nIvZe0Np/m81mn9a8Yo7HaLNilJzISKi4HBOqIQnVSxFUS/Def8Fgt9v55ptvePXVV4N+Nhj8BRht\nNht1dXXU1NRw9epVfpLhfwy3kqQxEWw9Az+g1/IOpHIZBOJeiwdUKDEiYZgJ/7pP3flhjofE7gNa\nq13rnglXX0Is8MEgdnCRe3x8PJ2dnX4JNDU1lZkzZzJz5kxUVaWlpYWamhpOnDjBtm3byMjIoLi4\nmLy8PLKzs0lNTY2KtaOVSvYVmNaSuD/VTbHF1hJnpK4yn99fuR2Dw4HJbB7wvq/dgq9z1j5s/Ln0\nTp06RW5ubsha/d7QztPOzk5aWlpobGyktraWxsZGcnNzKSkpYfny5XD0g+ADrtrteoA5nUiDpMcO\nkZE64FG4J4KoD5K1Dg+J3SeE1S7Ix9slE84EiSSXPVTIskxycjI9PT10d3cH1XCXJImcnBxycnKY\nO3cuDoeDhoYGrl69yunTp2lpaaGnp4esrCyys7M9/uLCWPDaB6d44IkFqFXe8yZE8bsbDIbYWV8a\n94GqSKDKSAZnv/ug7/1+t0wfNO4FX0QvcqrFNWkLtw4dOsQPfvCDsE7Xbrdz48YNmpubaWpq4saN\nG9y4cQOTyeS+R/Pnz6egoMDzQXpU/zGkXd8HtdPzxRgJiInfKlxS985mczqdD5y1Dg+J3S+8s2JM\nJhN2ux273R4WuQtiH6zqy8TERKxWK4AuctfCaDRSVFREkaYdWU9PDzdu3KClpYWmpibOnDnDjRs3\nSEpKGkD2aWlpQSWIxYNTBLcEcQtN7SGzrjRuAo8cdvG6DveC90MI8Hh4aR9gZ8+eJS0tLWjrRFVV\nuXPnDi0tLR5/d+7cISMjg8zMTDIzM5kwYYK7YC0gQmnh5+weoNwbi1x+QerClRjO7lh0RNNa66H6\n54cDHhK7H2gDfYJotJZ7qJNFjDVYxB4fH8/du3d1q0EGQ0JCglvHRkBRFNrb22lubqalpYUzZ87Q\n0tJCd3e3h3Wfk5NDSkoKCQkJA4KcIh1vqPPs/SFad0o8tLREb7VaOXjwIM8++yx2u91txff09Pgk\nca0VPn78eBYsWOAOtoo5qdsy1Wltq6o6KHn80SB1rbUuyzK2PrnlB81ah4fEHhAi0KdNfxSWe6iZ\nMv9/e2cfI8Vd//H3zO7ePtwjB8dTaSkPR4sKJAicPCntjwPa8uNoeyipGsESY9QmzS81jTEmNRol\nqWnUxBijpiG01T54cu0BBZQHAUsjlIOmtFW4giX2KHfYe9znmd8fe5/hO3MzuzOzs7t3y+eVXEp3\nb2dn53be38/385itfW8hCIfDiMVi8Pv92oCO4eFhhEIhz0RUlmVMnDgREydO1IKyABCLxXDt2jV0\nd3eju7sbnZ2d6OvrQzQaRWVlJWpqalBTU4PKykpUV1djwoQJqK6uRk1NDaqrq3MWh
RXL+vI6hz2V\nSmFgYAD9/f0YGBjAuXPn4PP5cObMGRw9ehT9/f0YHBxERUUFamtrMXnyZEydOhV33303pkyZYmqF\nq6qq+ZTHnIDZ3BV4IeoAdNlsojvnVoSFPQviEA6yiMweswNtz7P1S1m3bh0OHTqEV155BQ8//LD2\nuKqq2L59O3bt2oUnn3wSO3fuzPl+oVAI0Wgmo4GEMhaLaa1+C2khB4NB3HbbbZg2bZq2Q/H5fFBV\nFYODg5q4ffLJJ+jv78fHH3+sezwQCOiEnv5dU1ODYDCISCSidbmkH1Oxz2PKkqpSGb19Ye/u7sbA\nwIBOvMX/xuNx7bOEw2FcvnwZTU1NmDJlivZZq6qqtIA9xWSsYgoUZHRSuOOUvCx2mymNXog6AG0A\nDQXnx/JOsNCwsOeABEkMwpg9ZgcKllnx9NNPY/HixfjBD36AzZs3a8d94oknsGvXLnzjG9+wJeoA\nNCudKIa40zWh6l3f3ochp4d0/b/rANQJwipmP1AgNRqNjhLG//znP3j//ffR19eHRCKBdDqt7Zyo\nDYTf78fjsw4hKKezn2hqGAcPHtSsuUOHDmnHorEWZs2/nnnmGfzfHdaHbWtr0y1I06ZNw7x587T/\nr6ys1Bagjo4OLFmyBGvXrjU9lvgdox+j28rOoAkvKOT+SLSy8/k+iimOxuKkW5Fb95M7wO/366r5\n6EtjfCwXuXrGLFq0CF/96lexa9cu7N69G9u2bcNPfvITPPPMM/jiF7+IX//617bPmVwxIuR7FId0\neCHuNH2Kslxo8ZOUIXNVEKxov9+vCTVZW5FIBJFIBFOnTh310ng8Dp/PN6q5E+2ggvtet3XOlZWV\nWvFLOBxGTU0N/H4/0peD8Klx3Ky4HKlO9YXw6KOPQj38F0hp89zub33rW7be++OPP8a7776L73zn\nO1l/jyxxsuBpMaO/Ge3+CmmVqqqa6aipGDpqetBFk+oZHMUGLEgI4wsLuYMZL9zan94BZimP5JKh\nTJlcN5jxhjTjRz/6EV588UX88Ic/xODgIL7//e9j/fr12L17t6MbWHTFGD9HJBLB8PCw42wZI+I2\nmq6H04wWsZum21xjyll20rBt5cqVAIBjx45h1apVN5/4bGZhUNNpIJXS+pDLAGoB4MF9js/PyKFD\nh7B69WrbRWBithB17aTh64X0IWuppy3tmY6aHuKlqNM9SMcp9A5mPMDCbhNJknSBU7IuneS42ylU\nybEzIwAAHSJJREFUuv322/H4449j586deOyxx7BixQq0tbU57jJJrhiz3UG+4i66CESr0i3GHj1j\nAbpuXgvEpUuXcOPGDWzdutXxa8W8flpESdBKXoBjM54hGgNeiDqgd8PciqmNZtyakQWXkIiRCNFj\nJEbkL851jFyFSg0NDdq/f//737tKUxRvfKvn6bjDw8O2i6fIHSDm9+d7c5JFKgYMs/1uMShEWqqi\nKDh48CDWrl3r6pqJvVNCoRAqKiq03Q79TQqBretgI7+fdhsUF/BC1BVFQTQa1XZtt2qw1AhfBYeQ\ntWQUd9Hnnk3cZVnO2qTqhRdewBNPPKH5l3/xi1+4PlczP7sIpUICucWdRIViCoFAwFPriCzOQomT\nUwoh7J2dnQiFQrj77rtdvZ6+W2I7BZp/S5kgiUTCs5RaL1NzafEBkHf2i0g0GkU6nUY4HB576Z4l\nhIXdBbT1pepJYLTgW90U2fLZ9+3bh23btuEzn/kMzp8/j7vuugu/+93v8P7777s6z6qqKvT392f9\nHTviLlpafr/f/o1pFWCzeFwMErrGo9F4Xgt7PB7HkSNHsH79elfHJZ+02bWnhZZ2jhSM9oSOVkh/\n+p9MV0b6ydLy1wwKbFMdiFfXNZlMIhaLIRAIlGw62FiFfewuMfZ5Jn8hWU5WrQeshP3EiRNobW3F\njBkzcODAATQ0NODHP/4xtmzZgieffBJ79uxxf
I4NDQ3o6enBjBkzcn4WK587pROSeDi6KR32EhGF\n3XVbAeN7upgSVIgispMnT2L27Nk5WweYYTclkISThJS+l25RFAVIRSEZDeHUsK25rzRvVRlx23mZ\nqZJKpTA0NARZlksyx3esw8KeB2IxhFkBUzKZHHUzis2faOvY2dmJjRs3ora2FocOHcK0adMAAK2t\nrViyZAna29tx/PhxrF692tH5TZo0CdevX7f1u7Is68SdmnvReRYrfczTwdcuERuReUFfXx9Onz7t\nauQdBal1RUpZApXS5r2auItGh+3PomuCJsPyZdRbfeR9jSgqkFJuZql45Xohd2csFoOiKKiqqmK/\nugks7HlCVrrYIExsPUDiLooUTWZSVRWXLl3Chg0bIEkSDhw4gDlz5uiO/9Of/hTNzc347ne/i1On\nTjk6t4aGBrz11lu2f5/EfWhoCAMDAwgGgwiFQkUVWLMePcVGVdWMC0IZNM/Dd9jZ8PDhw/jsZz/r\neJiJON5Ot7DaCFSSQWFlYFiia4JmszgpNaxrH5BWJaQUGZKkIhAIQfJQ1ClIrCiKFkBmRsPC7gHi\nFli8icRtMXCz059YOTh37lx0d3dbHnvt2rWuXQMNDQ22LXZCTOukharYlrOxaMmIo+vhYkSbqqqQ\nzFwQhIPOhlevXkVXV1fOYiQjFNeg75FtBNeT7I8g0NKhS9F1ltYqQZZstpoeWehopyD2zPcCMSOI\ndjAUG2JGw8LuEVZWupnPU3THFJIJEyZgcHAQiUTClmVDN48sy6itrUU8HkcsFkMqlSp4fxkRsWiJ\nLHjXuOgZngmc5u9nT6VSePXVV7F+/XpHfevdDG82P4HhUfUXduMktHba1WUxP91r152YgUbtj4v5\nfRyPsLB7CN1ERivdKtDqdEScU6j7Yk9Pj61+3+J8SXLLJBIJrTNkPlWqThEzjIpdSaiqKmT30z41\n/va3v6G+vl7X+VKHia9c9UWQfODPACwqKB1mpADmu7Bc11M1tFTI+rsqdFlTXu7wKPOMFvtoNKql\neDLW8JLnMXQT0ZQg8QtPYkUd+YqRt23HHWMm6kRFRYWukIkWrGJAtQHiNSq0wN/s5ZPfcbq7u3Hm\nzBk88MAD1udsFHUVSCVj2QOeLgdcOC2k0yz2HMdNqxISQpDUK1GnHYCYJhmPxzVrnckOC3uBMAo5\nbVHFm0ucqlQo7GTGiG4iM4vcWKVKU2oKDXUzpGBZMdAyYvxZ+rjk8NGn02m0t7ejubnZUSpeSpWh\nqJKnWSQiorhbLtAjn00ZmR5ltSapKpBUZKQUGXIg7Ok50z0jVjeT5c4uGHuwK6aAUOMmstwpoETb\nYnFUWqEClA0NDTh//rzl8+L4sFw50pFIRGv7W6ybrNh9ZDRh39xh38Fs4O9//zsqKyuxaNEim+95\nU9T9cuFcc8DN+AUFOUd972huK3VLNLnmonvE7+H8WWMPIlosFEXRhsawC8YevPQVGLH8XrTe6Uub\nTqe12aSFIJsrhra7dqfvkLiHQiGkUilHPWbcIrZrKEa7gXybf12/fh2nTp3Cxo0bbQcpRVH3eTix\nKVuFr5hya35eoytvFUVBIpHQhlh46XoxWuli35dEIsEuGIewxV4kxJtJFFPyHcZisYI0Maqvr0df\nX58WNBNxOz6MzlMct+dJFoRF4Y3sj8C3sV0T9kL62fNpJaAoCl599VWsWbMGdXV1Nt5LcL/ISv5j\n+FqPjH7M4pr6R4K0yWRylBVsLNCysqS9wNj62Xhs6n/DLhhnsLAXEfri0k1CaZGUwlWIFqw+nw/1\n9fXo7e3VDa7Id4K7sQ2BJ8UiWQpvRJdMIatgacC2G9588034fD4sWbLE1vsk5Sqoqahe1HP1urGa\nIwo4ap8gpYe1ltNGl4wo7GLvI6+/m3QfWGXT0K6QXTDOYWEvAUbrnaxEcs14MSpMhNwxorA7Hetn\nhjElspB+d
2Pv+0Lc6NmmW+Xixo0bOH78OHbs2JE7lXAkC0nd+Ipz69cqL99FTxyxytco7GKn0kJa\n6VaNwahtgCzL7IJxAQt7iaCbhfyWNGne7/dr1rxXFtLUqVNx9epVLFiwAMDNIg8vLF+68QrimjF5\nr0Lm/1v2iMkxREJVVbz22mtYvXo16uvrc76Hp+1rrc7NJmRgiNdTXDy9zEun7x251KyOTcFSAEWt\nnSgn+IqVGFmWEQwGEQgEkEgktDRIsqRIxPKhsbERFy9e1P6fbiwvbxhjvjs1afIaEoJC5NNbCnuO\n3ixnzpxBMplEU1NTzuNb1Qu4Jg9RB/R98On8EomEp8FRstCN/fytRH14OPOZWNTdw1dtDCBJEkKh\nkBbEJP83jdGjG8JtvvuUKVOQSCTQ29urWU2FGKXm9/tRVVWFiooKJBIJ5wVNNvq3mw068Qo3GTGf\nfPIJjhw5gk2bNmUVIc8t9XwRrqkkSUgkEojH41p/nmAwmPf3QxR0cv1VVFRYVr6yqHsHu2LGCORD\nTqfTmouGLGvjnEinX3hJkjB37lxcvHgRS5cuzStAaAcKpFJglYZ55HxPm31djB0gvfT9OhWzl19+\nGStXrsTkyZMtf0dchIo+aDlLJ0rKdqHvmjiMPJ9rKh6Xvmu5Prco6pwBkz989cYQ4raYMgHoMRJ3\nyiN2ar03NjbiX//6l+e9xq2QZRlVVVW6nHcvK1bFoSZejoJzel1qa2uxfPlyy+fHoqgbLWlyB/p8\nvrwWffE7SsfNZqET5FOnXPVi9f4vZ/gKjiHMqgIpwESpYfQcPW7XpTJ79my0t7cjHo8XdZI7BeC8\nqlil8zbrWpgPbjNiNm3aZPkasS9QwUTdqi2xhaDT9wiA7vtDsRxKa3RCtuPmgkQ9lUohEomwqHsE\nX8Uxhtl4OHLT0A1D1hblwYtj+awIhUKYPn06Ll++jMbGxiJ+In1aJPneKyoq8kpZJMtSTIHMRxSy\n7mSy5I6HOu4zFdJC9SUfhQ33lR3hpd2PqWvLIvNG8UWg/G+7K0EnWNQLA7tixiC0JTaW0JPA02Qj\nKnaKRqMYHh7O2bWP/OylmEoE3MycEVMj882cERuF5dNyIKuwb95rXtVJiFOHhCk/lFlSquttdI1Q\nqb7ZQiN2Gs2WFaSqQFqRkFBkJBPxnMe1gnzqtINjUfcWvppjEFGszCwgEnj6obTIaDSqBarMMi/m\nzp2Lf/zjH8X8KKMwWu+Dg4OOqlbNhIOqd8nidCOklIWUjwiLVZpe9yV3glvXCF0DMwtdUSWkVQmK\nmjmGLKnwyQrkfQ9BetDZMBMSdR5vVzhY2Mcodrsa0k1L1julrcXjcS1wRQIzefJkKIqCnp4eXRVq\nKSALj6pWqR+IXcvNuDMhl4zdQRJmx8tH1MWOh6VKZzT2XXHq6wZG6gRGRF1RAWWklw3NP/VJmUZl\n2iHTzvLo6e9NCzxb6oWBr+oYRQyk2qmyFK14suCp2IRaBfv9fsyZMweXLl0qubADN6tWKbg6PDys\ne8wJxpYDToKpFDjNZmF/8MEHmGXxXEqRkRaGThd72pPY/tmpoBPijiWtSEhDgqpKkJCxzmVJgZzH\nxxIrrG2nvzKu4Ss7hnGb0keDfindUFVVRKNRDA0N4Y477sA///nPgg73cAoVNlFPEMp/NytAEjsO\nGhGLl5z423OlgPb396Otrc3kdZlhE2lVKro/nXZzYvGauIA7OQ9VVbU02mQyiZQqQwLglxUE5HSm\nR3yeok7prqFQiIuPigBf3TEOWa5u8rWp0i8SiSAcDsPn82H69On46KOPcP36dcTjcW1HMBaEvqKi\nQhN4EgMzgc8mWmKDNbuB2WzCnk6n8fLLL2PZsmW6ak1FBZKKD6oqIVARLIpLgcScWk+ITbTEmgen\nx6LMFFoYK+Q0ArLB5eISiqMAmWpS9qcXB3bFjHGMKX1u8rXFYR+yLGP+/Pk
4ffo0Pv/5z2uTnShg\n60XlYb5QKqSYHkkFWySg2RYi4/Wy023RLHCqqipef/11RCIRrFq1CpAyQUKjP73Q/eFpB0Kfmb4T\nTv5GRpcNQX9v2uX5fD4gkKU1sE3Y9VJaWNjHAXQjU5tft9YhCdGKFSvw7LPP4gtf+IKuPzz18yCh\nF0W+FCl7VgJP/nQrxOIlO8FULRvEwLFjx/Dhhx9i27ZtWvUvXadC+tPNRNi48No5hngccVdiPA5l\nX2kxBmNufLaWwCb9fTjrpfSwsI8TxNz2fPqoS5KEuro6zJ49G+fPn8fy5ct1IiIKAYkKWbOi2BdT\n6I0CTy2Oq6qqLAWexJ/8xtnE3SxweurUKbz99tvYvn275hpylMpoVtSTpW8LAM0yF6+72L8lF9n+\ndlaLArms6HlTHFS3ctbL2ICv+jhC9LcDcCXuJNBNTU1oa2vDsmXLdDe1UdyBm24PMTdaFHv6d6HF\nnlwxYoGT0UUjYlaZanWO4uNnz57FqVOnsH37dlRWVmr+bEepjGaujNSwTvAVFVBVCYo/AnVjm+YS\nypXVQta4aJWLrilxN6Gdq8lCo6pAWq6CdP9L2RcPG9WttOBSozp2vZQWFvZxhliM49YXLssypk6d\nigkTJuCdd97BwoULdc9ZibyZD1oMbIoCXygXDjWWIgEkFw09bvQ9U3AxV4yCzvPChQs4fPgwvva1\nr6Gqqkqr5s234EhVARWAkohChawV+gCAlIrq3F/61+nFm37E85Y6HoKcGoYsARKEgKdoVZssNGlV\ngpqM5hVTMRN0ttJLD/8FxiFGK9TpTUmC29TUhGPHjmHBggWmApxN5AFYWurGVEOjVZ+vdU9CRAFh\nij3Q1B2jFS/GKMyGetP5XLx4EXv37sVXvvIV1NbWapki7gqeABVSxiqHpBNyWVLhG8kL14RYSNM0\nWuTGzz3qOipD5vltWQKgKhUfQYLPhWuPrjdl5rDbZWzBf4lxiBgctJv5YXy9z+fDrFmzcPjwYXR1\ndWHOnDlZXyOKvGhBGrMsRP8/Wflm7gI6X6PQG/+dC7LUKyoqtEwMahOspe+NpAICMBV3WZbx73//\nG3/+85/R2tqK+vp6x1b6qGClcvN1UsZG1yxpCRmxT4+IPhQJajyuuzZGEffaraGMvL8E1VGaJAv6\n+ID/IuMUY+aHU8udWhY0NTXh5MmTOYXd+N5mw4/NsjAA/aIgFhjR71jl0VuJvfhe4nGpahWAlqMt\nptzRNaJAKH2Ga9eu4cUXX8SmTZswffp0Sx+3GHMwukaM7irJFwbSw5AzNnlGwH1hIBXNPA8AkgoJ\ngCSpkEZ84sUKTKfVzDn5JXtteo2CztkuYxsW9nGMMfPDqR/Y5/Nh/vz5OHbsGLq7u123GRDFSLTW\njZa9sRDI6FIwE3cz8ac+52ZDIURRJCFPp9OIx+OIxWK69yNL8/nnn0dzczNuu+02LfOIMmCMQWSr\nXYcxNVRqaRt1bSRJgtT+v5CM/VX8EaCIDcMUVUJKySw5Ptk81ZMQh7uwoI8fWNjHOeQDJj8zBbGc\nuDGWLFmCv/71r3jkkUc8sxbNXAhGSxewttaNx6FjkVVMWSqie8govPRvn8+HcDista+lXU5PTw8A\n4HOf+xxmzZqltdwVrwHtTsxiBEaXkS1r22EnRM8YSVnMtEHI+PyDPgW+QHjUr9J3SexTxII+vmBh\nLwPILUOWphPXjN/vx7Jly3DhwgWcPn0aS5cuLeh5Wu0ojKmVokibBRMte4cb3k8kGAxqFvmVK1fQ\n0dEBALjrrru049Kup5R91B1jNQhELB4ayY5JJZNIxGKZwHMwCIwskqKQk5iLraGZ8YWUo0dI6RuI\nMI5w0xNcVVV89NFH2L17N77+9a+joaGhCGfqHmpaBcCWFUluHir+uXDhAg4cOID77rsPjY2NugCw\nWLYvlu6Xg7hRRagkSQgGgzpBB8BiPv6
wtDxY2MsQsS+33dJ3RVHw5ptv4vz589ixY0fJhkTYhXzs\nuYRdFHRFUfDGG2+gs7MTX/rSlzBx4kQEAgFdXQBV+IoWLKAXejcppqVGVVUMDQ0hHo9rog6wmI9z\nWNhvRcRmVXbEKJVK4Y9//CMmT56MdevWFeks3UGiayXsxilCqqpi37596O3txdatWxEOh3WvJ+sV\ngO5a0YJgJfTGH3qulJi1iBgaGkIikUAkEtG6QY7HBYrRwcJ+qyK6ZsRGUlb09fXht7/9LR588EHM\nnj17zPqZadGiKlRC3K0AmcBpLBbDSy+9hOrqamzevBmBQACJREKLTYivzVVpStdTXAiM7YGNIl8I\n4TcTb2NNAZBxQ9EM1nA4zL3QywsW9lsZY+tXsR+JGe+99x7279+PRx99FNXV1WNS3CnlUbS4xSlC\ntIj19PTgD3/4Az796U/j3nvv1fztlNtuvAbiwuDz+Wy5J0RBzSW0gF746TVuPr/ZMY2dOVOplDYm\nkXL8mbKBhZ3JQO4JUeDNerp0dHQgGo2ipaVlTG7ZKTddFGbj57l06RLa2tqwbt06LFq0SPdaM2tf\nJJVKedae1yjyVoJvhtV1N9sJiNDuI5lMwufzIRgMjskFmskLFnZGj9giVrRw6eZPJpP4zW9+g/nz\n52PlypW6odilQqxspcHdgUBA67hIApdKpXD8+HGcOXMGW7ZswcyZM3XHsRt4dRqjGCvQeVN1bUVF\nxbg5d8YRLOyMOcYe4CTusixjaGgIzz//PCZPnoz169e7mqfpxfkZ2xSQiJNwie6Srq4u7N27F1Om\nTMGGDRtQU1Mz6pjUz92um4ViFG6GRBcTciPR35OK10q9IDMFg4WdyY6YFiiW/ieTSezZswfpdBqb\nN29GMBi07Xt2+v70X7M+40bfMaC3vIeGhnDw4EFcuXIF999/P+bNm2f6PuSbt91XfQQSzFzxiVIh\nutjE+ABb6mUNCztjH2OPl3Q6jYMHD+Lq1atobW1FVVWV1o4gW68Wq2Nb/RiPQzsHq77u5G6gHuoL\nFy7EmjVrsrpYKDDqrhWv8/qAQmPMAgIw7lxHjGtY2Bn3kMCfPHkSp0+fRmtrKyZMmKClUNqdTETH\nEp/L1oMlF9euXcPevXuRSqWwadMmW03MnLhhrBCt40LsXuwg5unTNROnPJV6wWGKAgs74w1vv/02\nXn/9dbS0tODOO+/UBM4o8FbfKzcCbmR4eBinTp3C6dOnsWrVKixdutRyMpKIWzeMGUZhdTJo2qv3\nBW5mzYylXQRTNCz/0LxXYxyxYMECbNmyBfv27cMLL7yAq1evakIpBu3EMnXxx60AqqqKrq4uvPLK\nK/jlL3+JgYEBfPOb38SSJUtGHeupp56CJEm4fPmy7nFjR8hcnDhxApIk4Ve/+tWo58jdQQsKNV+j\nQKvXkMuFOlRSFpPoU2dLnSG4OQTjmDvvvBOPPfYY3nnnHfzlL3+BJElYsWIF5s+frxX/2KlytcPA\nwAA6Oztx9uxZVFRUYPHixXjggQcQDmfazdKgaTv86U9/wokTJ9DZ2Ylz585hYGAAX/7yl/Hcc8+Z\n/v6KFSvQ0NCA9vZ2fPvb3zb9HYo1iGmYJLzZ4gN2MWYtGadYiY3KGIZgYWdc4fP5sHDhQixYsAAX\nL17EiRMncOTIESxfvhwLFy7U9TbPVkhjhqIouHjxIt566y1cuXIFn/rUp/Dwww9j+vTpo0SS0h7F\n6UVWx3z66adx9uxZRCIRzJgxA++9917W85BlGRs3bsRzzz2Hvr4+1NbWZv1dWsxIjMWB4+KYu1wY\nM5TEXYaYljqWUy+Z0sLCzuSFJElobGxEY2MjPvzwQ5w8eRJHjx7FlClTUFdXp/upr6/X9SoZHh7G\njRs30Nvbixs3bmg/vb29aGhowOLFi/HQQw9lzXIRBS9bCmIqlcLOnTsxc+ZMzJ07F8eOHcM999yT\n8/O
1tLTg2Wefxf79+7F161Zb14NEV8zBF/vHWw3qEC1+o6AbW0GwoDPZYGFnPOP222/H1q1b0dfX\nh56eHty4cQP//e9/8e6772r/DgQCqKysRH9/PyRJQn19PSZNmoT6+nrMmzcPEydORH19veZqyQWJ\nXzqdthR2snzvuecex/nnzc3NCIfDaG9vtyXsImSpm6V2WvWUIfGmIKhxghQLOmMHFnbGc2pra1Fb\nWztqQLaqqhgcHERfXx+qq6u1plR2ctazkcsdI/Zad0okEkFzczP279+vZdQ4hT4fCToAnUUuzksV\nx/AxjFs44sIUDUmSUF1djRkzZqC2tlbXE1zM+qDhycZKWCvIXUHuDhF6fT6Voi0tLejr68PRo0cd\nvU70tSeTSe1zUXaO3+9HMBhEKBRCMBjUsotY1Jl8YYudKRlGK9roojArZhKDkKKPmtwxxuHZ+Vjr\nxMaNGyHLMtrb29Hc3DzqeTM3i9m5k3uFM1iYQsPCzowZjH1YzMSShluIrzHONAUyVjz1knHjPhGF\nedKkSVi+fDk6Ojrw85//fNS5mZ2P00wYhvESFnZmzCKKopnYi43DgIyY05DrSCSi+d2TyaTumEDG\nbUMFPfQa8VhGhoaGUFdXpyvhJ8vbi2pahvESFnZmXJFLPCk1MhqNan5rwugeERcE43uIfPDBBzh3\n7hy+973vIRgM5v0ZGKbQsLAzZYUYSM3WXlfMpMnVxOu1116DqqpoaWnx/HwZphBwFIdhctDe3o5p\n06Zh6dKlpT4VhrEFW+zMLcOePXuwZ88eAEB3dzcA4I033sC2bdsAZIKkP/vZz3Sv6e3txYkTJ7Bj\nxw72nzPjBhZ25pahs7MTu3bt0j3W1dWFrq4uAMDMmTNHCfvevXuRTqfZDcOMK9gVw9wyPPXUU1kn\nOBnb/AIZN0xVVRXuvffe4p8ww7iEhZ1hLIjFYjhw4AA2bNjA2TDMuIKFnWEsOHfuHObOnYtHHnmk\n1KfCMI5gHzvDWNDU1ITOzs5SnwbDOIaFnSk71qxZAwCoq6sr7YkwTIngYdYMwzDjEx5mzTAMc6vA\nws4wDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4w\nDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNm\nsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAz\nDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAzDMOUGSzsDMMwZQYLO8MwTJnBws4wDFNmsLAzDMOU\nGf4cz0tFOQuGYRjGM9hiZxiGKTNY2BmGYcoMFnaGYZgyg4WdYRimzGBhZxiGKTNY2BmGYcqM/wey\nAC/46LLvSQAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "aAd2wxWq7JV2", + "colab_type": "text" + }, + "source": [ + "## Model\n", + "\n", + "We will use a parameterized rotation about the Y axis followed by a Z-axis measurement as the quantum portion of our model. For the classical portion, we will use a two-unit SoftMax which should learn to distinguish the measurement statistics of the two datasources. Finally, we compile the model with [standard optimizer settings](https://www.tensorflow.org/tutorials/keras/classification) for classification. Note that the classical NN outputs represent the network's predicted probability that the given datapoint is a member of each category." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BRwBx06m8TAU", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Build the quantum model layer\n", + "theta = sympy.Symbol('theta')\n", + "q_model = cirq.Circuit(cirq.ry(theta)(qubit))\n", + "q_data_input = tf.keras.Input(\n", + " shape=(), dtype=tf.dtypes.string)\n", + "expectation = tfq.layers.PQC(q_model, cirq.Z(qubit))\n", + "expectation_output = expectation(q_data_input)\n", + "\n", + "# Attach the classical SoftMax classifier\n", + "classifier = tf.keras.layers.Dense(2, activation=tf.keras.activations.softmax)\n", + "classifier_output = classifier(expectation_output)\n", + "model = tf.keras.Model(inputs=q_data_input, outputs=classifier_output)\n", + "\n", + "# Standard compilation for classification\n", + "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.1),\n", + " loss=tf.keras.losses.CategoricalCrossentropy())\n", + "tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XwrqFXytI9Xn", + "colab_type": "text" + }, + "source": [ + "## Training\n", + "\n", + "The model is trained on our quantum data and label inputs:" + ] + }, + { + 
"cell_type": "code", + "metadata": { + "id": "xIRk7vvzJByB", + "colab_type": "code", + "colab": {} + }, + "source": [ + "history = model.fit(x=q_data, y=labels, epochs=50, verbose=0)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kn0aBzBZKkDw", + "colab_type": "text" + }, + "source": [ + "We can view the loss history to see that the model has been correctly trained:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "XRztm4ilJF05", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(history.history['loss'])\n", + "plt.title(\"Learning to classify quantum data\")\n", + "plt.xlabel(\"Iterations\")\n", + "plt.ylabel(\"Error in classification\")\n", + "plt.show()\n", + "print(\"Final loss value:\")\n", + "print(history.history[\"loss\"][-1])" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9b8Y5KM2iFto", + "colab_type": "text" + }, + "source": [ + "Now we test how well our model performs on a sample. Notice that the network has high probability for predicting the correct state, even though the variation in the data was significant." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "fG4YpLF_iAet", + "colab_type": "code", + "colab": {} + }, + "source": [ + "test_data, _, _ = generate_dataset(qubit, theta_a, theta_b, 1)\n", + "p = model.predict(test_data)[0]\n", + "print(f\"prob(a)={p[0]:.4f}, prob(b)={p[1]:.4f}\")" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/configure.sh b/configure.sh deleted file mode 100755 index 798da6021..000000000 --- a/configure.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -function write_to_bazelrc() { - echo "$1" >> .bazelrc -} - -function write_action_env_to_bazelrc() { - write_to_bazelrc "build --action_env $1=\"$2\"" -} - -# Function to write the SHARED_LIBRARY_DIR as a linkopt. This is required to -# get bazel tests that require the libtensorflow shared object to compile. This -# function is necessary because ${SHARED_LIBRARY_DIR} is space delimited and -# using bash arguments is a hacky way to split it. -function write_linkopt_dir_to_bazelrc() { - write_to_bazelrc "build --linkopt -Wl,-rpath,$1" >> .bazelrc -} - -# Remove .bazelrc if it already exist -[ -e .bazelrc ] && rm .bazelrc - -# Check if we are building GPU or CPU ops, default CPU -while [[ "$TF_NEED_CUDA" == "" ]]; do - read -p "Do you want to build ops again TensorFlow CPU pip package?"\ -" Y or enter for CPU (tensorflow), N for GPU (tensorflow-gpu). [Y/n] " INPUT - case $INPUT in - [Yy]* ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;; - [Nn]* ) echo "Build with GPU pip package."; TF_NEED_CUDA=1;; - "" ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;; - * ) echo "Invalid selection: " $INPUT;; - esac -done - - - -# CPU -if [[ "$TF_NEED_CUDA" == "0" ]]; then - - # Check if it's installed - if [[ $(python3 -m pip show tensorflow) == *tensorflow* ]] || [[ $(python3 -m pip show tf-nightly) == *tf-nightly* ]] ; then - echo 'Using installed tensorflow' - else - # Uninstall GPU version if it is installed. 
- if [[ $(python3 -m pip show tensorflow-gpu) == *tensorflow-gpu* ]]; then - echo 'Already have gpu version of tensorflow installed. Uninstalling......\n' - python3 -m pip uninstall tensorflow-gpu - elif [[ $(python3 -m pip show tf-nightly-gpu) == *tf-nightly-gpu* ]]; then - echo 'Already have gpu version of tensorflow installed. Uninstalling......\n' - python3 -m pip uninstall tf-nightly-gpu - fi - # Install CPU version - echo 'Installing tensorflow......\n' - python3 -m pip install tensorflow - fi - -else - - # Check if it's installed - if [[ $(python3 -m pip show tensorflow-gpu) == *tensorflow-gpu* ]] || [[ $(python3 -m pip show tf-nightly-gpu) == *tf-nightly-gpu* ]]; then - echo 'Using installed tensorflow-gpu' - else - # Uninstall CPU version if it is installed. - if [[ $(python3 -m pip show tensorflow) == *tensorflow* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - python3 -m pip uninstall tensorflow - elif [[ $(python3 -m pip show tf-nightly) == *tf-nightly* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - python3 -m pip uninstall tf-nightly - fi - # Install CPU version - echo 'Installing tensorflow-gpu .....\n' - python3 -m pip install tensorflow-gpu - fi -fi - - -TF_CFLAGS=( $(python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS="$(python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')" - -write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" -write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" -write_to_bazelrc "build --spawn_strategy=standalone" -write_to_bazelrc "build --strategy=Genrule=standalone" -write_to_bazelrc "build -c opt" - - -write_action_env_to_bazelrc "TF_HEADER_DIR" ${TF_CFLAGS:2} -SHARED_LIBRARY_DIR=${TF_LFLAGS:2} -SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev) -if ! 
[[ $TF_LFLAGS =~ .*:.* ]]; then - if [[ "$(uname)" == "Darwin" ]]; then - SHARED_LIBRARY_NAME="libtensorflow_framework.dylib" - else - SHARED_LIBRARY_NAME="libtensorflow_framework.so" - fi -fi -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME} -write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA} -write_linkopt_dir_to_bazelrc ${SHARED_LIBRARY_DIR} - -# TODO(yifeif): do not hardcode path -if [[ "$TF_NEED_CUDA" == "1" ]]; then - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "/usr/lib/x86_64-linux-gnu" - write_action_env_to_bazelrc "TF_CUDA_VERSION" "10.0" - write_action_env_to_bazelrc "TF_CUDNN_VERSION" "7" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "/usr/local/cuda" - write_to_bazelrc "build --config=cuda" - write_to_bazelrc "test --config=cuda" -fi diff --git a/control/control.ipynb b/control/control.ipynb new file mode 100644 index 000000000..df695f958 --- /dev/null +++ b/control/control.ipynb @@ -0,0 +1,836 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TFQ_Example_DNN_LSTM_Qcontrol_application.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "f0_gMI4Blbe8", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "bbL8NoC4IoNd", + "colab_type": "text" + }, + "source": [ + "" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "bogCr-sSkXM1", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wppQ3TJ23mWC", + "colab_type": "text" + }, + "source": [ + "# A Hybrid Quantum-Classical Optimization for Quantum Control Optimization\n", + "\n", + "Author : Murphy Yuezhen Niu\n", + "\n", + "Created : 2020-Feb-01\n", + "\n", + "Last updated : 2020-Feb-27" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ngDCx3sUlmlA", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/control/control.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "5AUS_ZuopGz5", + "colab_type": "text" + }, + "source": [ + "\n", + "Now that the basics are understood, let's show how one might use TFQ to construct a **hybrid quantum-classical neural net** for quantum control.\n", + "\n", + "In the first problem, we train a classical deep neural net to control a single qubit for realizing an arbitrary unitary; the output of the classical neural 
network determines the parameters of the quantum circuit to be applied to the qubit, which is then measured to produce the expectation values of different measurement operators.\n", + "\n", + "In the second problem, we train a recurrent neural network, to learn to predict the future quantum dynamics based on past obervations of a noisy implementation of quantum circuit." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AJa9sPPckfqS", + "colab_type": "text" + }, + "source": [ + "## Installation and imports" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1eVDbG_2ZhMe", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade cirq==0.7.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "rFqxhKypZoSJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xcDb1zbSdXKi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "enZ300Bflq80", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import tensorflow as tf\n", + "import tensorflow_quantum as tfq\n", + "import cirq\n", + "import sympy\n", + "import cmath\n", + "import numpy as np\n", + "from scipy import linalg\n", + "\n", + "# visualization tools\n", + "%matplotlib inline\n", + "import matplotlib.pyplot as plt\n", + "from cirq.contrib.svg import SVGCircuit" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8J6c2-DVXQdr", + "colab_type": "text" + }, + "source": [ + "## Problem 1: Gate decomposition with DNN." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wzfSYDfAcJTB", + "colab_type": "text" + }, + "source": [ + "### 1.1 Problem definition.\n", + "\n", + "More specifically, we provide an example of training a classical neural network to learn the Bloch Theorem: any single qubit unitary transformation can be realized by three single qubit rotations around two different angles.\n", + "\n", + "\n", + "Given the specification of the single qubit unitary in regard to three parameters $\\phi, \\theta_1, \\theta_2$ that specifies the block sphere as the input to the classical neural network: the neural network will output rotation angles along two different axis ( $Ry$, and $Rz$) that realizes the given rotation. If we include a penalty term on the number of non-zero rotations, the training of a classical neural network should be able to find the most optimal.\n", + "\n", + "\n", + "\n", + "This idealized version of a real quantum control problem where a classical neural network is learning a physical law of optimal decomposition of single-qubit unitaries." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P2TQJMt1HOhF", + "colab_type": "text" + }, + "source": [ + "Up to a global phase, an arbitrary single-qubit unitary can be specified in terms of three angles $\\phi$, $\\theta_1$, and $\\theta_2$ as $U=\\exp(-i \\phi (\\cos(\\theta_1)Z + \\sin(\\theta_1)(\\cos(\\theta_2)X + \\sin(\\theta_2)Y))$.\n", + "\n", + "It is possible to realize an arbitrary unitary of this form using three rotations about only two axes, $U = R_z(\\beta)R_y(\\gamma)R_z(\\delta)$. There exists an analytic solution mapping $\\{\\phi, \\theta_1, \\theta_2\\}$ to $\\{\\beta, \\gamma, \\delta\\}$; however, for more sophisticated control problems, such an analytic mapping may not be available. Therefore we investigate training neural networks to perform the control. 
First we consider training a purely classical neural network to perform the mapping from unitary parameters to rotation angles. Then, we consider training a hybrid quantum-classical network directly on expectation value data.\n", + "\n", + "First, we define the map from $\\{\\phi, \\theta_1, \\theta_2\\}$ to the associated unitary:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PU_dl3iPVylT", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_unitary_from_params(phi, theta_1, theta_2):\n", + " return linalg.expm(-1j*phi*(\n", + " np.cos(theta_1)*cirq.Z._unitary_()\n", + " + np.sin(theta_1)*(\n", + " np.cos(theta_2)*cirq.X._unitary_()\n", + " + np.sin(theta_2)*cirq.Y._unitary_()\n", + " )\n", + " ))\n", + "\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HjQXFVuM4Kim", + "colab_type": "text" + }, + "source": [ + "### 1.2 Physics model construction\n", + "\n", + "#### Following lines are analytic solution to the Bloch theorem, where batch input is the input to a unitary parameterized by $U=\\exp(-i \\phi (cos(\\theta_1)Z + \\sin(\\theta_1)(\\cos(\\theta_2)X + \\sin(\\theta_2)Y))$, the output is the angle $\\beta, \\gamma, \\delta$ for the Z Y Z rotations that realizes the target unitary." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6sJ7NKALZ8oJ", + "colab_type": "text" + }, + "source": [ + "Next, we write down the known mapping between the angles $\\{\\phi, \\theta_1, \\theta_2\\}$ and the two-axis control angles $\\{\\beta, \\gamma, \\delta\\}$." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gPQkoK93HTFx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def map_unitary_params(phi, theta_1, theta_2):\n", + " \"\"\"Convert unitary angles into two-axis control angles.\"\"\"\n", + " #### Below works ONLY when all input angles are less than pi\n", + "\n", + " gamma = -2*np.arccos(-np.sqrt(3 + np.cos(2 * theta_1) + 2 * np.sin(theta_1) ** 2 * np.cos(phi* 2) )/2.0)\n", + "\n", + " delta = 2*np.real(- 1j * np.log(-(-1.0) ** (1/4) *np.sqrt(- np.exp(1j * phi) * (-1 + np.exp(2 * 1j * phi)) * \n", + " np.sqrt(3 + np.cos(2 * theta_1) + 2 * np.sin(theta_1) ** 2 * np.cos(phi* 2) )) /\n", + " 2 / np.sqrt(-np.exp(1j * ( 2 *phi+ theta_2)) * np.sin(phi) ** 2 \n", + " * (np.cos(theta_1) + 1j / np.tan(phi))) ))\n", + "\n", + " beta = 2*np.real(- 1j * np.log(- np.exp(1j * ( phi+ theta_2)) * \n", + " np.sqrt(-1j * np.exp(1j * phi)*(-1 + np.exp(2 * 1j * phi))\n", + " *np.sqrt(3 + np.cos(2 * theta_1) + 2 * np.sin(theta_1) ** 2 * np.cos(phi* 2) ))\n", + " * np.sin(phi)/(-1 + np.exp(2 * 1j * phi)) /\n", + " np.sqrt(- np.exp(1j * (2 * phi+ theta_2)) * np.sin(phi) *( np.sin(phi) * np.cos(theta_1) + 1j * np.cos(phi) )) ))\n", + "\n", + " return beta, gamma, delta" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zh0vypmLpLWe", + "colab_type": "text" + }, + "source": [ + "Build a function to generate training data:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "PPxwDSubpOcm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_angles_training_data(batch_size):\n", + " data = []\n", + " labels = []\n", + " for _ in range(batch_size):\n", + " random_unitary_params = np.random.uniform(0, np.pi, (3)).tolist()\n", + " beta, gamma, delta = map_unitary_params(*random_unitary_params)\n", + " data.append(random_unitary_params)\n", + " labels.append([beta, gamma, delta])\n", + " return data, labels" + ], + "execution_count": 0, + 
"outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6KnTDJ138e5m", + "colab_type": "text" + }, + "source": [ + "Using this function, set up the training and validation data:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "unUaBFTo8hRi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train_size = 10000\n", + "validation_size = 10000\n", + "all_commands, all_expectations = get_angles_training_data(train_size + validation_size)\n", + "\n", + "commands_train = all_commands[:train_size]\n", + "expectations_train = all_expectations[:train_size]\n", + "commands_val = all_commands[-validation_size:]\n", + "expectations_val = all_expectations[-validation_size:]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zkcRVIXTjLx1", + "colab_type": "text" + }, + "source": [ + "Run a test to confirm that all training data is correct. We do this by checking the Hilbert-Schmidt inner product $\\langle U_i, U_o\\rangle = \\text{Tr}\\left(U_o^\\dagger U_i\\right)$ between the two resulting matrices:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "TYJ8MHPLjLU8", + "colab_type": "code", + "colab": {} + }, + "source": [ + "q = cirq.GridQubit(0, 0)\n", + "for data_angles, label_angles in zip(commands_train, all_expectations):\n", + " test_phi, test_theta_1, test_theta_2 = data_angles\n", + " beta = label_angles[0]\n", + " gamma = label_angles[1]\n", + " delta = label_angles[2]\n", + " u_i = get_unitary_from_params(test_phi, test_theta_1, test_theta_2)\n", + " u_o = np.matmul(cirq.rz(beta)._unitary_(),\n", + " np.matmul(cirq.ry(gamma)._unitary_(), cirq.rz(delta)._unitary_()))\n", + "\n", + " circuit = cirq.Circuit(cirq.rz(delta)(q), cirq.ry(gamma)(q), cirq.rz(beta)(q))\n", + " \n", + " check1= np.trace(np.matmul(u_o.conj().T, circuit.unitary())) ** 2 / 4.0\n", + "\n", + " check = np.trace(np.matmul(u_o.conj().T, u_i))** 2 / 4.0\n", + "\n", + " if (abs(abs(check) - 1) > 
1e-5) and (abs(abs(check1) - 1) > 1e-5):\n", + " print(\"Inner product value:\")\n", + " print(check)\n", + " print(\"Input angles quadrant check:\")\n", + " print([int(test_phi>np.pi), int(test_theta_1>np.pi), int(test_theta_2>np.pi)])" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iV85ahKKCuQw", + "colab_type": "text" + }, + "source": [ + "### 1.3 Prepare the training data set based on input and expectation values." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BFIbFhOgalLe", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_expectation_training_data(batch_size):\n", + " q = cirq.GridQubit(0, 0)\n", + " beta_s, gamma_s, delta_s = sympy.symbols(\"beta gamma delta\")\n", + " circuit = cirq.Circuit(cirq.rz(delta_s)(q), cirq.ry(gamma_s)(q), cirq.rz(beta_s)(q))\n", + " ops = [cirq.X(q), cirq.Y(q), cirq.Z(q)]\n", + "\n", + " params = []\n", + " outputs = []\n", + " for _ in range(batch_size):\n", + " random_unitary_params = np.random.uniform(0, np.pi, (3)).tolist()\n", + " beta, gamma, delta = map_unitary_params(*random_unitary_params)\n", + " expectations = tfq.layers.Expectation()(\n", + " circuit,\n", + " symbol_names=[beta_s, gamma_s, delta_s],\n", + " symbol_values=[[beta, gamma, delta]],\n", + " operators=ops \n", + " ).numpy().tolist()[0]\n", + " params.append(random_unitary_params)\n", + " outputs.append(expectations)\n", + " return params, outputs" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8OLc2MB-jOuD", + "colab_type": "text" + }, + "source": [ + "We now define the hybrid network that will be trained to perform the qubit control. Note that we restrict the gate set of the quantum portion of the net to alternating $R_z$ and $R_y$ gates. 
By adding a term to the loss that induces sparsity on the controls of these gates, the hope is that the hybrid network will learn the optimal two-axis control (which requires only three angles)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wUG8Hct01KUg", + "colab_type": "text" + }, + "source": [ + "### 1.4 Build a quantum-classical hybrid neural network to control the rotations." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ua2iaypA9YYz", + "colab_type": "text" + }, + "source": [ + "#### quantum part of the network:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eyuqEa9C9bCS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# parameters that the classical NN will feed values into\n", + "control_params = sympy.symbols(['theta_{}'.format(n) for n in range(7)])\n", + "\n", + "# create the parameterized circuit\n", + "qubit = cirq.GridQubit(0, 0)\n", + "two_axis_control_circuit = cirq.Circuit(\n", + " cirq.rz(control_params[2])(qubit),\n", + " cirq.ry(control_params[1])(qubit),\n", + " cirq.rz(control_params[0])(qubit)\n", + ")\n", + "\n", + "# Measurement will be three-axis\n", + "pauli_x = cirq.PauliString(cirq.X(qubit))\n", + "pauli_y = cirq.PauliString(cirq.Y(qubit))\n", + "pauli_z = cirq.PauliString(cirq.Z(qubit))\n", + "measure_list = [pauli_x, pauli_y, pauli_z]\n", + "\n", + "# Display the circuit\n", + "SVGCircuit(two_axis_control_circuit)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "YEwrVek41MZZ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def loss(y_true,y_pred ):\n", + " return -tf.math.log(tf.reduce_mean(tf.square(y_pred - y_true), axis=-1)) #+ 0.1 * tf.reduce_sum(tf.square(tf.math.tanh(dense_3)), axis=-1)\n", + "\n", + "\n", + "\n", + "\n", + "circuits_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string, name='circuits_input')\n", + "commands_input = tf.keras.Input((3,), name='commands_input')\n", + 
"dense_layer_1 = tf.keras.layers.Dense(128, activation='relu', name='dense_layer_1')(commands_input)\n", + "dense_layer_2 = tf.keras.layers.Dense(128, name='dense_layer_2')(dense_layer_1)\n", + "dense_layer_3 = tf.keras.layers.Dense(64, activation='relu', name='dense_layer_3')(dense_layer_2) \n", + "angles_layer = tf.keras.layers.Dense(3, activation='linear', name='angles_layer')(dense_layer_3)\n", + "\n", + "measured_expectations = tfq.layers.ControlledPQC(\n", + " two_axis_control_circuit, measure_list)([circuits_input, angles_layer])\n", + "two_axis_control_model = tf.keras.Model(inputs=[circuits_input, commands_input], outputs=measured_expectations)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dL0wVKys4kYw", + "colab_type": "text" + }, + "source": [ + "Set up data" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "mRqW1lmkYEMA", + "colab_type": "code", + "colab": {} + }, + "source": [ + "train_size = 10000\n", + "validation_size = 10000\n", + "all_commands, all_expectations = get_expectation_training_data(train_size + validation_size)\n", + "\n", + "commands_train = all_commands[:train_size]\n", + "expectations_train = all_expectations[:train_size]\n", + "commands_val = all_commands[-validation_size:]\n", + "expectations_val = all_expectations[-validation_size:]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ziAd64oNYH3D", + "colab_type": "text" + }, + "source": [ + "Perform optimization" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Ig-95OfT2AYG", + "colab_type": "code", + "colab": {} + }, + "source": [ + "epochs = 200\n", + "batch_size = 1000\n", + "lr=0.010 \n", + "\n", + "two_axis_control_model.compile(tf.keras.optimizers.Adam(learning_rate=lr, decay=lr / epochs), \n", + " loss='mse')\n", + "history_two_axis = two_axis_control_model.fit(\n", + " x=[tfq.convert_to_tensor([cirq.Circuit()]*train_size), 
tf.convert_to_tensor(commands_train)],\n", + " y=tf.convert_to_tensor(expectations_train), batch_size=batch_size, epochs=epochs,\n", + " validation_data=(\n", + " [tfq.convert_to_tensor([cirq.Circuit()]*validation_size), tf.convert_to_tensor(commands_val)],\n", + " tf.convert_to_tensor(expectations_val)))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "FcVcyv3V-rAJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(history_two_axis.history['loss'])\n", + "plt.title(\"Loss\")\n", + "plt.xlabel(\"Epochs\")\n", + "plt.ylabel(\"Error in Control\")\n", + "plt.show()\n", + "\n", + "plt.plot(history_two_axis.history['val_loss'])\n", + "plt.title(\"Validation loss\")\n", + "plt.xlabel(\"Epochs\")\n", + "plt.ylabel(\"Error in Control\")\n", + "plt.show()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zBVLQZRBd4M4", + "colab_type": "text" + }, + "source": [ + "## 2. LSTM for learning time-dependent quantum noise" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "X4hiTZTheWaq", + "colab_type": "text" + }, + "source": [ + "## 2.0 Model definition." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gMsuJCzDGPqR", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def build_model(batch_size, rnn_units, stateful=False): \n", + " model = tf.keras.Sequential([\n", + " tf.keras.layers.LSTM(\n", + " rnn_units,\n", + " return_sequences=True,\n", + " stateful=stateful,\n", + " recurrent_initializer='glorot_uniform',\n", + " batch_input_shape=[batch_size, None, 1]),\n", + " tf.keras.layers.Dense(1)\n", + " ])\n", + " return model \n", + "\n", + "def train_lstm(data, batch_size, rnn_units, epochs, learning_rate):\n", + " print('Start training.')\n", + " model = build_model(batch_size, rnn_units)\n", + "\n", + " def loss(labels, logits):\n", + " return tf.keras.losses.binary_crossentropy(labels, logits, True)\n", + "\n", + " \n", + " optimizer = tf.keras.optimizers.Adamax( learning_rate) \n", + " model.compile(optimizer=optimizer, loss=loss)\n", + " model.summary()\n", + " \n", + " \n", + " model.fit(data, epochs= epochs)\n", + " eval_loss = model.evaluate(data)\n", + " print( \"final loss = \", eval_loss)\n", + " return model\n", + "\n", + "\n", + "def sample_eval(weights, eval_samples, epoch, rnn_units, input_lenth):\n", + " model = build_model( batch_size, rnn_units, True)\n", + " model.build(tf.TensorShape([ batch_size, None, 1]))\n", + " model.set_weights(weights)\n", + " # Whole sequence sampling and fidelity\n", + " eval_samples = eval_samples // batch_size * batch_size\n", + " sample_data = np.zeros(((eval_samples, input_length)), np.float64)\n", + " sample_n = 0\n", + " model.summary()\n", + "\n", + " while sample_n < eval_samples:\n", + " model.reset_states()\n", + " input_eval = tf.zeros([ batch_size, 1, 1])\n", + " output_eval = tf.reshape(model(input_eval), [ batch_size])\n", + " output_prob = 1 / (1 + np.exp(-output_eval.numpy()))\n", + " sample_data[sample_n:sample_n + batch_size,\n", + " 0] =output_prob\n", + " for i in range( input_length - 1):\n", + " input_eval = tf.cast(\n", + " 
tf.reshape(sample_data[sample_n:sample_n + batch_size, i],\n", + " [ batch_size, 1, 1]), tf.float32)\n", + " output_eval = tf.reshape(model(input_eval), [ batch_size])\n", + " output_prob = 1 / (1 + np.exp(-output_eval.numpy()))\n", + " sample_data[sample_n:sample_n + batch_size,\n", + " i + 1] = np.random.binomial(1, output_prob)\n", + " sample_n += batch_size\n", + " \n", + "\n", + "\n", + "def generate_data(data_time, data_length, omega_0, exponent, alpha):\n", + "\n", + " timesteps = np.linspace(0.02, data_time, data_length)\n", + " q = cirq.GridQubit(0, 0)\n", + " phase_s = sympy.symbols(\"phaseshift\")\n", + " circuit = cirq.Circuit(cirq.H(q), cirq.Rz(phase_s)(q))\n", + " ops = [cirq.X(q)]\n", + "\n", + " params = []\n", + " outputs = np.zeros(data_length)\n", + "\n", + " for i in range(data_length):\n", + " phaseshift = timesteps[i] * omega_0 + alpha * timesteps[i] ** (exponent +1) / (exponent +1)\n", + " expectations = tfq.layers.Expectation()(\n", + " circuit,\n", + " symbol_names=[phase_s],\n", + " symbol_values=[[phaseshift]],\n", + " operators=ops \n", + " ).numpy().tolist()[0]\n", + " \n", + " outputs[i]= expectations[0] \n", + " return outputs\n", + " \n", + "def load_data(data_size, data_time, data_length, alpha_min, alpha_max, omega_0, exponent):\n", + " alpha_list = np.linspace(alpha_min, alpha_max, data_size)\n", + " train_data = []\n", + " for k in range(data_size):\n", + " data1 = generate_data(data_time, data_length, omega_0, exponent, alpha_list[k])\n", + " train_data.append(data1)\n", + " return np.array(train_data)\n", + "\n", + " \n", + " " + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dCbdEZ4qeDty", + "colab_type": "text" + }, + "source": [ + "### 2.1 Generate training data" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "iFLQEJ4r9uP6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "epoch = 30\n", + "batch_size = 60\n", + "learning_rate = 0.001\n", + 
"\n", + "rnn_units = 256\n", + "data_size = 500\n", + "alpha_min = 0.031\n", + "alpha_max = 0.2\n", + "exponent = 0.5\n", + "omega_0 = 0.7 \n", + "data_time = 0.5 / alpha_min\n", + "data_length = 40\n", + " \n", + "\n", + "train_data = load_data(data_size, data_time, data_length, alpha_min, alpha_max, omega_0, exponent) # this should be a numpy array" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WBNZYAO6edop", + "colab_type": "text" + }, + "source": [ + "## 2.2 Training the LSTM model on the time-dependent expectation values." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ikQ6MXZaec4B", + "colab_type": "code", + "colab": {} + }, + "source": [ + "data = tf.data.Dataset.from_tensor_slices(train_data)\n", + "\n", + "def build_example(chunk):\n", + " input_seq = tf.cast(tf.concat([[0], chunk], 0), tf.float32)\n", + " target = tf.concat([chunk, [0]], 0)\n", + " input_seq = input_seq[:-1]\n", + " target = target[:-1]\n", + " return tf.expand_dims(input_seq, 1), tf.expand_dims(target, 1)\n", + "BUFFER_SIZE = 100\n", + "data = data.map(build_example).shuffle(BUFFER_SIZE).batch(\n", + " batch_size, drop_remainder=True)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "89_k81SYxKFt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "model = train_lstm(data, batch_size, rnn_units, epoch, learning_rate)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "V3a12DaDnKBg", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(model.history.history['loss']) \n", + "plt.xlabel(\"Training Epochs\", fontsize='14')\n", + "plt.ylabel(\"Error in LSTM samples\", fontsize='14')\n", + "plt.show()" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/docs/_book.yaml b/docs/_book.yaml deleted file mode 100644 index eefd661fe..000000000 --- 
a/docs/_book.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -upper_tabs: -# Tabs left of dropdown menu -- include: /_upper_tabs_left.yaml -- include: /api_docs/_upper_tabs_api.yaml -# Dropdown menu -- name: Resources - path: /resources - is_default: true - menu: - - include: /resources/_menu_toc.yaml - lower_tabs: - # Subsite tabs - other: - - name: Guide & Tutorials - contents: - - title: "Overview" - path: /quantum/overview - - title: "Install" - path: /quantum/install - - title: "Design and concepts" - path: /quantum/design - - heading: Tutorials - - title: "Hello, many worlds" - path: /quantum/tutorials/hello_many_worlds - - title: "MNIST classification" - path: /quantum/tutorials/mnist - - title: "Calculate gradients" - path: /quantum/tutorials/gradients - - title: "Barren plateaus" - path: /quantum/tutorials/barren_plateaus - - title: "Quantum CNN" - path: /quantum/tutorials/qcnn - - title: "Quantum sensing" - path: /quantum/tutorials/sensing - - - name: API - skip_translation: true - contents: - - include: /quantum/api_docs/python/_toc.yaml - -- include: /_upper_tabs_right.yaml diff --git a/docs/_index.yaml b/docs/_index.yaml deleted file mode 100644 index cc9788d89..000000000 --- a/docs/_index.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -book_path: /quantum/_book.yaml -project_path: /quantum/_project.yaml -description: -landing_page: - custom_css_path: /site-assets/css/style.css - rows: - - heading: "TensorFlow Quantum is a library for hybrid quantum-classical machine learning." - items: - - classname: devsite-landing-row-50 - description: > -

TensorFlow Quantum (TFQ) is a Python framework for hybrid - quantum-classical machine learning. As an application framework, TFQ - allows quantum algorithm researchers and ML application researchers to - leverage Google’s quantum computing frameworks, all from within - TensorFlow.

-

TensorFlow Quantum focuses on modeling quantum data. It provides - tools to interleave quantum algorithms and logic designed in - Cirq - with TensorFlow. A basic understanding of quantum computing is required - to effectively use TensorFlow Quantum.

-

To get started, read the overview and - design and concepts guide, then run the - tutorials.

- - code_block: | -
-        # A hybrid quantum-classical model.
-        model = tf.keras.Sequential([
-            # Quantum circuit data comes in inside of tensors.
-            tf.keras.Input(shape=(), dtype=tf.dtypes.string),
-
-            # Parametrized Quantum Circuit (PQC) provides output
-            # data from the input circuits run on a quantum computer.
-            tfq.layers.PQC(my_circuit, [cirq.Z(q1), cirq.X(q0)]),
-
-            # Output data from quantum computer passed through model.
-            tf.keras.layers.Dense(50)
-        ])
-        
- - - classname: devsite-landing-row-cards - items: - - heading: "Quantum supremacy using a programmable superconducting processor" - image_path: /resources/images/tf-logo-card-16x9.png - path: https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html - buttons: - - label: "Read on the Google AI blog" - path: https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html - - heading: "Programming a quantum computer
with Cirq" - youtube_id: 16ZfkPRVf2w - buttons: - - label: Watch the video - path: https://www.youtube.com/watch?v=16ZfkPRVf2w - - heading: "TensorFlow Quantum on GitHub" - image_path: /resources/images/github-card-16x9.png - path: https://github.com/tensorflow/quantum - buttons: - - label: "View on GitHub" - path: https://github.com/tensorflow/quantum diff --git a/docs/api_docs/python/_toc.yaml b/docs/api_docs/python/_toc.yaml deleted file mode 100644 index a801aa6fd..000000000 --- a/docs/api_docs/python/_toc.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -toc: -- title: tfq - section: - - title: Overview - path: /quantum/api_docs/python/tfq - - title: convert_to_tensor - path: /quantum/api_docs/python/tfq/convert_to_tensor - - title: from_tensor - path: /quantum/api_docs/python/tfq/from_tensor - - title: get_expectation_op - path: /quantum/api_docs/python/tfq/get_expectation_op - - title: get_sampled_expectation_op - path: /quantum/api_docs/python/tfq/get_sampled_expectation_op - - title: get_sampling_op - path: /quantum/api_docs/python/tfq/get_sampling_op - - title: get_state_op - path: /quantum/api_docs/python/tfq/get_state_op - - title: get_supported_gates - path: /quantum/api_docs/python/tfq/get_supported_gates - - title: padded_to_ragged - path: /quantum/api_docs/python/tfq/padded_to_ragged -- title: tfq.datasets - section: - - title: Overview - path: /quantum/api_docs/python/tfq/datasets - - title: excited_cluster_states - path: /quantum/api_docs/python/tfq/datasets/excited_cluster_states -- title: tfq.differentiators - section: - - title: Overview - path: /quantum/api_docs/python/tfq/differentiators - - title: CentralDifference - path: /quantum/api_docs/python/tfq/differentiators/CentralDifference - - title: Differentiator - path: /quantum/api_docs/python/tfq/differentiators/Differentiator - - title: ForwardDifference - path: /quantum/api_docs/python/tfq/differentiators/ForwardDifference - - title: LinearCombination - path: /quantum/api_docs/python/tfq/differentiators/LinearCombination - - title: ParameterShift - path: /quantum/api_docs/python/tfq/differentiators/ParameterShift - - title: SGDifferentiator - path: /quantum/api_docs/python/tfq/differentiators/SGDifferentiator -- title: tfq.layers - section: - - title: Overview - path: /quantum/api_docs/python/tfq/layers - - title: AddCircuit - path: /quantum/api_docs/python/tfq/layers/AddCircuit - - title: ControlledPQC - path: /quantum/api_docs/python/tfq/layers/ControlledPQC - 
- title: Expectation - path: /quantum/api_docs/python/tfq/layers/Expectation - - title: PQC - path: /quantum/api_docs/python/tfq/layers/PQC - - title: Sample - path: /quantum/api_docs/python/tfq/layers/Sample - - title: SampledExpectation - path: /quantum/api_docs/python/tfq/layers/SampledExpectation - - title: State - path: /quantum/api_docs/python/tfq/layers/State diff --git a/docs/api_docs/python/index.md b/docs/api_docs/python/index.md deleted file mode 100644 index 2673ec069..000000000 --- a/docs/api_docs/python/index.md +++ /dev/null @@ -1,29 +0,0 @@ -# All symbols in TensorFlow Quantum - -## Primary symbols -* tfq -* tfq.convert_to_tensor -* tfq.datasets -* tfq.datasets.excited_cluster_states -* tfq.differentiators -* tfq.differentiators.CentralDifference -* tfq.differentiators.Differentiator -* tfq.differentiators.ForwardDifference -* tfq.differentiators.LinearCombination -* tfq.differentiators.ParameterShift -* tfq.differentiators.SGDifferentiator -* tfq.from_tensor -* tfq.get_expectation_op -* tfq.get_sampled_expectation_op -* tfq.get_sampling_op -* tfq.get_state_op -* tfq.get_supported_gates -* tfq.layers -* tfq.layers.AddCircuit -* tfq.layers.ControlledPQC -* tfq.layers.Expectation -* tfq.layers.PQC -* tfq.layers.Sample -* tfq.layers.SampledExpectation -* tfq.layers.State -* tfq.padded_to_ragged \ No newline at end of file diff --git a/docs/api_docs/python/tfq.md b/docs/api_docs/python/tfq.md deleted file mode 100644 index 6f45eed9c..000000000 --- a/docs/api_docs/python/tfq.md +++ /dev/null @@ -1,49 +0,0 @@ -
- - -
- -# Module: tfq - - - - -
- - - View source on GitHub - -
- - - -Module functions for tensorflow_quantum.* - - - -## Modules - -[`datasets`](./tfq/datasets.md) module: Interesting quantum datasets. - -[`differentiators`](./tfq/differentiators.md) module: Module functions for tfq.differentiators.* - -[`layers`](./tfq/layers.md) module: Module definitions for tensorflow_quantum.python.layers.* - -## Functions - -[`convert_to_tensor(...)`](./tfq/convert_to_tensor.md): Convert lists of tfq supported primitives to tensor representations. - -[`from_tensor(...)`](./tfq/from_tensor.md): Convert a tensor of tfq primitives back to Python objects. - -[`get_expectation_op(...)`](./tfq/get_expectation_op.md): Get a Tensorflow op that will calculate batches of expectation values. - -[`get_sampled_expectation_op(...)`](./tfq/get_sampled_expectation_op.md): Get a TensorFlow op that will calculate sampled expectation values. - -[`get_sampling_op(...)`](./tfq/get_sampling_op.md): Get a Tensorflow op that produces samples from given quantum circuits. - -[`get_state_op(...)`](./tfq/get_state_op.md): Get a tensorflow op that produces states from given quantum circuits. - -[`get_supported_gates(...)`](./tfq/get_supported_gates.md): A helper to get the gates supported by tfq. - -[`padded_to_ragged(...)`](./tfq/padded_to_ragged.md): Utility `tf.function` that converts a padded tensor to ragged. 
- diff --git a/docs/api_docs/python/tfq/_api_cache.json b/docs/api_docs/python/tfq/_api_cache.json deleted file mode 100644 index 9693b9daa..000000000 --- a/docs/api_docs/python/tfq/_api_cache.json +++ /dev/null @@ -1,854 +0,0 @@ -{ - "duplicate_of": { - "tfq.differentiators.CentralDifference.differentiate_analytic": "tfq.differentiators.LinearCombination.differentiate_analytic", - "tfq.differentiators.CentralDifference.differentiate_sampled": "tfq.differentiators.LinearCombination.differentiate_sampled", - "tfq.differentiators.CentralDifference.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.CentralDifference.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.Differentiator.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.Differentiator.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.Differentiator.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.Differentiator.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.Differentiator.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.Differentiator.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.Differentiator.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ForwardDifference.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.ForwardDifference.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.ForwardDifference.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.ForwardDifference.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.ForwardDifference.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.ForwardDifference.__ne__": "tfq.differentiators.CentralDifference.__ne__", - 
"tfq.differentiators.ForwardDifference.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ForwardDifference.differentiate_analytic": "tfq.differentiators.LinearCombination.differentiate_analytic", - "tfq.differentiators.ForwardDifference.differentiate_sampled": "tfq.differentiators.LinearCombination.differentiate_sampled", - "tfq.differentiators.ForwardDifference.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.ForwardDifference.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.LinearCombination.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.LinearCombination.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.LinearCombination.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.LinearCombination.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.LinearCombination.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.LinearCombination.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.LinearCombination.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.LinearCombination.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.LinearCombination.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.ParameterShift.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.ParameterShift.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.ParameterShift.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.ParameterShift.__init__": "tfq.differentiators.Differentiator.__init__", - "tfq.differentiators.ParameterShift.__le__": "tfq.differentiators.CentralDifference.__le__", - 
"tfq.differentiators.ParameterShift.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.ParameterShift.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.ParameterShift.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ParameterShift.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.ParameterShift.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.SGDifferentiator.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.SGDifferentiator.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.SGDifferentiator.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.SGDifferentiator.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.SGDifferentiator.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.SGDifferentiator.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.SGDifferentiator.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.SGDifferentiator.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.SGDifferentiator.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.layers.AddCircuit.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.AddCircuit.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.AddCircuit.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.AddCircuit.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.AddCircuit.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.AddCircuit.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.AddCircuit.__new__": "tfq.differentiators.CentralDifference.__new__", - 
"tfq.layers.ControlledPQC.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.ControlledPQC.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.ControlledPQC.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.ControlledPQC.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.ControlledPQC.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.ControlledPQC.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.ControlledPQC.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.ControlledPQC.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.ControlledPQC.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.ControlledPQC.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.ControlledPQC.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.ControlledPQC.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.ControlledPQC.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.ControlledPQC.build": "tfq.layers.AddCircuit.build", - "tfq.layers.ControlledPQC.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.ControlledPQC.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.ControlledPQC.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.ControlledPQC.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.ControlledPQC.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.ControlledPQC.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.ControlledPQC.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.ControlledPQC.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.ControlledPQC.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.ControlledPQC.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - 
"tfq.layers.ControlledPQC.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.ControlledPQC.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.ControlledPQC.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.ControlledPQC.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.ControlledPQC.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.ControlledPQC.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.ControlledPQC.input": "tfq.layers.AddCircuit.input", - "tfq.layers.ControlledPQC.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.ControlledPQC.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.ControlledPQC.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.ControlledPQC.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.ControlledPQC.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.ControlledPQC.name": "tfq.layers.AddCircuit.name", - "tfq.layers.ControlledPQC.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.ControlledPQC.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.ControlledPQC.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.ControlledPQC.output": "tfq.layers.AddCircuit.output", - "tfq.layers.ControlledPQC.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.ControlledPQC.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.ControlledPQC.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.ControlledPQC.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.ControlledPQC.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.ControlledPQC.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.ControlledPQC.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.ControlledPQC.updates": 
"tfq.layers.AddCircuit.updates", - "tfq.layers.ControlledPQC.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.ControlledPQC.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.Expectation.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.Expectation.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.Expectation.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.Expectation.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.Expectation.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.Expectation.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.Expectation.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.Expectation.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.Expectation.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.Expectation.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.Expectation.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.Expectation.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.Expectation.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.Expectation.build": "tfq.layers.AddCircuit.build", - "tfq.layers.Expectation.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.Expectation.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.Expectation.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.Expectation.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.Expectation.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.Expectation.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.Expectation.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.Expectation.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.Expectation.get_input_mask_at": 
"tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.Expectation.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.Expectation.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.Expectation.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.Expectation.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.Expectation.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.Expectation.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.Expectation.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.Expectation.input": "tfq.layers.AddCircuit.input", - "tfq.layers.Expectation.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.Expectation.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.Expectation.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.Expectation.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.Expectation.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.Expectation.name": "tfq.layers.AddCircuit.name", - "tfq.layers.Expectation.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.Expectation.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.Expectation.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.Expectation.output": "tfq.layers.AddCircuit.output", - "tfq.layers.Expectation.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.Expectation.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.Expectation.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.Expectation.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.Expectation.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.Expectation.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.Expectation.trainable_weights": 
"tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.Expectation.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.Expectation.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.Expectation.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.PQC.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.PQC.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.PQC.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.PQC.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.PQC.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.PQC.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.PQC.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.PQC.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.PQC.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.PQC.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.PQC.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.PQC.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.PQC.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.PQC.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.PQC.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.PQC.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.PQC.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.PQC.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.PQC.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.PQC.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.PQC.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.PQC.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.PQC.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.PQC.get_losses_for": 
"tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.PQC.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.PQC.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.PQC.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.PQC.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.PQC.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.PQC.input": "tfq.layers.AddCircuit.input", - "tfq.layers.PQC.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.PQC.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.PQC.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.PQC.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.PQC.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.PQC.name": "tfq.layers.AddCircuit.name", - "tfq.layers.PQC.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.PQC.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.PQC.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.PQC.output": "tfq.layers.AddCircuit.output", - "tfq.layers.PQC.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.PQC.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.PQC.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.PQC.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.PQC.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.PQC.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.PQC.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.PQC.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.PQC.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.PQC.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.Sample.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.Sample.__eq__": 
"tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.Sample.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.Sample.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.Sample.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.Sample.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.Sample.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.Sample.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.Sample.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.Sample.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.Sample.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.Sample.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.Sample.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.Sample.build": "tfq.layers.AddCircuit.build", - "tfq.layers.Sample.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.Sample.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.Sample.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.Sample.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.Sample.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.Sample.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.Sample.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.Sample.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.Sample.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.Sample.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.Sample.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.Sample.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.Sample.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - 
"tfq.layers.Sample.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.Sample.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.Sample.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.Sample.input": "tfq.layers.AddCircuit.input", - "tfq.layers.Sample.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.Sample.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.Sample.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.Sample.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.Sample.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.Sample.name": "tfq.layers.AddCircuit.name", - "tfq.layers.Sample.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.Sample.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.Sample.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.Sample.output": "tfq.layers.AddCircuit.output", - "tfq.layers.Sample.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.Sample.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.Sample.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.Sample.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.Sample.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.Sample.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.Sample.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.Sample.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.Sample.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.Sample.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.SampledExpectation.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.SampledExpectation.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.SampledExpectation.__ge__": "tfq.differentiators.CentralDifference.__ge__", - 
"tfq.layers.SampledExpectation.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.SampledExpectation.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.SampledExpectation.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.SampledExpectation.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.SampledExpectation.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.SampledExpectation.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.SampledExpectation.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.SampledExpectation.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.SampledExpectation.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.SampledExpectation.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.SampledExpectation.build": "tfq.layers.AddCircuit.build", - "tfq.layers.SampledExpectation.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.SampledExpectation.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.SampledExpectation.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.SampledExpectation.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.SampledExpectation.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.SampledExpectation.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.SampledExpectation.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.SampledExpectation.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.SampledExpectation.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.SampledExpectation.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.SampledExpectation.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.SampledExpectation.get_output_at": 
"tfq.layers.AddCircuit.get_output_at", - "tfq.layers.SampledExpectation.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.SampledExpectation.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.SampledExpectation.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.SampledExpectation.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.SampledExpectation.input": "tfq.layers.AddCircuit.input", - "tfq.layers.SampledExpectation.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.SampledExpectation.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.SampledExpectation.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.SampledExpectation.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.SampledExpectation.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.SampledExpectation.name": "tfq.layers.AddCircuit.name", - "tfq.layers.SampledExpectation.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.SampledExpectation.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.SampledExpectation.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.SampledExpectation.output": "tfq.layers.AddCircuit.output", - "tfq.layers.SampledExpectation.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.SampledExpectation.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.SampledExpectation.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.SampledExpectation.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.SampledExpectation.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.SampledExpectation.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.SampledExpectation.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.SampledExpectation.updates": 
"tfq.layers.AddCircuit.updates", - "tfq.layers.SampledExpectation.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.SampledExpectation.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.State.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.State.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.State.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.State.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.State.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.State.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.State.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.State.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.State.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.State.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.State.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.State.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.State.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.State.build": "tfq.layers.AddCircuit.build", - "tfq.layers.State.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.State.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.State.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.State.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.State.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.State.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.State.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.State.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.State.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.State.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - 
"tfq.layers.State.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.State.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.State.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.State.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.State.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.State.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.State.input": "tfq.layers.AddCircuit.input", - "tfq.layers.State.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.State.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.State.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.State.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.State.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.State.name": "tfq.layers.AddCircuit.name", - "tfq.layers.State.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.State.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.State.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.State.output": "tfq.layers.AddCircuit.output", - "tfq.layers.State.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.State.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.State.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.State.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.State.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.State.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.State.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.State.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.State.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.State.weights": "tfq.layers.AddCircuit.weights" - }, - "is_fragment": { - "tfq": false, - 
"tfq.convert_to_tensor": false, - "tfq.datasets": false, - "tfq.datasets.excited_cluster_states": false, - "tfq.differentiators": false, - "tfq.differentiators.CentralDifference": false, - "tfq.differentiators.CentralDifference.__eq__": true, - "tfq.differentiators.CentralDifference.__ge__": true, - "tfq.differentiators.CentralDifference.__gt__": true, - "tfq.differentiators.CentralDifference.__init__": true, - "tfq.differentiators.CentralDifference.__le__": true, - "tfq.differentiators.CentralDifference.__lt__": true, - "tfq.differentiators.CentralDifference.__ne__": true, - "tfq.differentiators.CentralDifference.__new__": true, - "tfq.differentiators.CentralDifference.differentiate_analytic": true, - "tfq.differentiators.CentralDifference.differentiate_sampled": true, - "tfq.differentiators.CentralDifference.generate_differentiable_op": true, - "tfq.differentiators.CentralDifference.refresh": true, - "tfq.differentiators.Differentiator": false, - "tfq.differentiators.Differentiator.__eq__": true, - "tfq.differentiators.Differentiator.__ge__": true, - "tfq.differentiators.Differentiator.__gt__": true, - "tfq.differentiators.Differentiator.__init__": true, - "tfq.differentiators.Differentiator.__le__": true, - "tfq.differentiators.Differentiator.__lt__": true, - "tfq.differentiators.Differentiator.__ne__": true, - "tfq.differentiators.Differentiator.__new__": true, - "tfq.differentiators.Differentiator.differentiate_analytic": true, - "tfq.differentiators.Differentiator.differentiate_sampled": true, - "tfq.differentiators.Differentiator.generate_differentiable_op": true, - "tfq.differentiators.Differentiator.refresh": true, - "tfq.differentiators.ForwardDifference": false, - "tfq.differentiators.ForwardDifference.__eq__": true, - "tfq.differentiators.ForwardDifference.__ge__": true, - "tfq.differentiators.ForwardDifference.__gt__": true, - "tfq.differentiators.ForwardDifference.__init__": true, - "tfq.differentiators.ForwardDifference.__le__": true, - 
"tfq.differentiators.ForwardDifference.__lt__": true, - "tfq.differentiators.ForwardDifference.__ne__": true, - "tfq.differentiators.ForwardDifference.__new__": true, - "tfq.differentiators.ForwardDifference.differentiate_analytic": true, - "tfq.differentiators.ForwardDifference.differentiate_sampled": true, - "tfq.differentiators.ForwardDifference.generate_differentiable_op": true, - "tfq.differentiators.ForwardDifference.refresh": true, - "tfq.differentiators.LinearCombination": false, - "tfq.differentiators.LinearCombination.__eq__": true, - "tfq.differentiators.LinearCombination.__ge__": true, - "tfq.differentiators.LinearCombination.__gt__": true, - "tfq.differentiators.LinearCombination.__init__": true, - "tfq.differentiators.LinearCombination.__le__": true, - "tfq.differentiators.LinearCombination.__lt__": true, - "tfq.differentiators.LinearCombination.__ne__": true, - "tfq.differentiators.LinearCombination.__new__": true, - "tfq.differentiators.LinearCombination.differentiate_analytic": true, - "tfq.differentiators.LinearCombination.differentiate_sampled": true, - "tfq.differentiators.LinearCombination.generate_differentiable_op": true, - "tfq.differentiators.LinearCombination.refresh": true, - "tfq.differentiators.ParameterShift": false, - "tfq.differentiators.ParameterShift.__eq__": true, - "tfq.differentiators.ParameterShift.__ge__": true, - "tfq.differentiators.ParameterShift.__gt__": true, - "tfq.differentiators.ParameterShift.__init__": true, - "tfq.differentiators.ParameterShift.__le__": true, - "tfq.differentiators.ParameterShift.__lt__": true, - "tfq.differentiators.ParameterShift.__ne__": true, - "tfq.differentiators.ParameterShift.__new__": true, - "tfq.differentiators.ParameterShift.differentiate_analytic": true, - "tfq.differentiators.ParameterShift.differentiate_sampled": true, - "tfq.differentiators.ParameterShift.generate_differentiable_op": true, - "tfq.differentiators.ParameterShift.refresh": true, - "tfq.differentiators.SGDifferentiator": 
false, - "tfq.differentiators.SGDifferentiator.__eq__": true, - "tfq.differentiators.SGDifferentiator.__ge__": true, - "tfq.differentiators.SGDifferentiator.__gt__": true, - "tfq.differentiators.SGDifferentiator.__init__": true, - "tfq.differentiators.SGDifferentiator.__le__": true, - "tfq.differentiators.SGDifferentiator.__lt__": true, - "tfq.differentiators.SGDifferentiator.__ne__": true, - "tfq.differentiators.SGDifferentiator.__new__": true, - "tfq.differentiators.SGDifferentiator.differentiate_analytic": true, - "tfq.differentiators.SGDifferentiator.differentiate_sampled": true, - "tfq.differentiators.SGDifferentiator.generate_differentiable_op": true, - "tfq.differentiators.SGDifferentiator.refresh": true, - "tfq.from_tensor": false, - "tfq.get_expectation_op": false, - "tfq.get_sampled_expectation_op": false, - "tfq.get_sampling_op": false, - "tfq.get_state_op": false, - "tfq.get_supported_gates": false, - "tfq.layers": false, - "tfq.layers.AddCircuit": false, - "tfq.layers.AddCircuit.__call__": true, - "tfq.layers.AddCircuit.__eq__": true, - "tfq.layers.AddCircuit.__ge__": true, - "tfq.layers.AddCircuit.__gt__": true, - "tfq.layers.AddCircuit.__init__": true, - "tfq.layers.AddCircuit.__le__": true, - "tfq.layers.AddCircuit.__lt__": true, - "tfq.layers.AddCircuit.__ne__": true, - "tfq.layers.AddCircuit.__new__": true, - "tfq.layers.AddCircuit.activity_regularizer": true, - "tfq.layers.AddCircuit.add_loss": true, - "tfq.layers.AddCircuit.add_metric": true, - "tfq.layers.AddCircuit.add_update": true, - "tfq.layers.AddCircuit.add_weight": true, - "tfq.layers.AddCircuit.build": true, - "tfq.layers.AddCircuit.call": true, - "tfq.layers.AddCircuit.compute_mask": true, - "tfq.layers.AddCircuit.compute_output_shape": true, - "tfq.layers.AddCircuit.compute_output_signature": true, - "tfq.layers.AddCircuit.count_params": true, - "tfq.layers.AddCircuit.dtype": true, - "tfq.layers.AddCircuit.dynamic": true, - "tfq.layers.AddCircuit.from_config": true, - 
"tfq.layers.AddCircuit.get_config": true, - "tfq.layers.AddCircuit.get_input_at": true, - "tfq.layers.AddCircuit.get_input_mask_at": true, - "tfq.layers.AddCircuit.get_input_shape_at": true, - "tfq.layers.AddCircuit.get_losses_for": true, - "tfq.layers.AddCircuit.get_output_at": true, - "tfq.layers.AddCircuit.get_output_mask_at": true, - "tfq.layers.AddCircuit.get_output_shape_at": true, - "tfq.layers.AddCircuit.get_updates_for": true, - "tfq.layers.AddCircuit.get_weights": true, - "tfq.layers.AddCircuit.input": true, - "tfq.layers.AddCircuit.input_mask": true, - "tfq.layers.AddCircuit.input_shape": true, - "tfq.layers.AddCircuit.input_spec": true, - "tfq.layers.AddCircuit.losses": true, - "tfq.layers.AddCircuit.metrics": true, - "tfq.layers.AddCircuit.name": true, - "tfq.layers.AddCircuit.name_scope": true, - "tfq.layers.AddCircuit.non_trainable_variables": true, - "tfq.layers.AddCircuit.non_trainable_weights": true, - "tfq.layers.AddCircuit.output": true, - "tfq.layers.AddCircuit.output_mask": true, - "tfq.layers.AddCircuit.output_shape": true, - "tfq.layers.AddCircuit.set_weights": true, - "tfq.layers.AddCircuit.submodules": true, - "tfq.layers.AddCircuit.trainable": true, - "tfq.layers.AddCircuit.trainable_variables": true, - "tfq.layers.AddCircuit.trainable_weights": true, - "tfq.layers.AddCircuit.updates": true, - "tfq.layers.AddCircuit.variables": true, - "tfq.layers.AddCircuit.weights": true, - "tfq.layers.AddCircuit.with_name_scope": true, - "tfq.layers.ControlledPQC": false, - "tfq.layers.ControlledPQC.__call__": true, - "tfq.layers.ControlledPQC.__eq__": true, - "tfq.layers.ControlledPQC.__ge__": true, - "tfq.layers.ControlledPQC.__gt__": true, - "tfq.layers.ControlledPQC.__init__": true, - "tfq.layers.ControlledPQC.__le__": true, - "tfq.layers.ControlledPQC.__lt__": true, - "tfq.layers.ControlledPQC.__ne__": true, - "tfq.layers.ControlledPQC.__new__": true, - "tfq.layers.ControlledPQC.activity_regularizer": true, - "tfq.layers.ControlledPQC.add_loss": 
true, - "tfq.layers.ControlledPQC.add_metric": true, - "tfq.layers.ControlledPQC.add_update": true, - "tfq.layers.ControlledPQC.add_weight": true, - "tfq.layers.ControlledPQC.build": true, - "tfq.layers.ControlledPQC.call": true, - "tfq.layers.ControlledPQC.compute_mask": true, - "tfq.layers.ControlledPQC.compute_output_shape": true, - "tfq.layers.ControlledPQC.compute_output_signature": true, - "tfq.layers.ControlledPQC.count_params": true, - "tfq.layers.ControlledPQC.dtype": true, - "tfq.layers.ControlledPQC.dynamic": true, - "tfq.layers.ControlledPQC.from_config": true, - "tfq.layers.ControlledPQC.get_config": true, - "tfq.layers.ControlledPQC.get_input_at": true, - "tfq.layers.ControlledPQC.get_input_mask_at": true, - "tfq.layers.ControlledPQC.get_input_shape_at": true, - "tfq.layers.ControlledPQC.get_losses_for": true, - "tfq.layers.ControlledPQC.get_output_at": true, - "tfq.layers.ControlledPQC.get_output_mask_at": true, - "tfq.layers.ControlledPQC.get_output_shape_at": true, - "tfq.layers.ControlledPQC.get_updates_for": true, - "tfq.layers.ControlledPQC.get_weights": true, - "tfq.layers.ControlledPQC.input": true, - "tfq.layers.ControlledPQC.input_mask": true, - "tfq.layers.ControlledPQC.input_shape": true, - "tfq.layers.ControlledPQC.input_spec": true, - "tfq.layers.ControlledPQC.losses": true, - "tfq.layers.ControlledPQC.metrics": true, - "tfq.layers.ControlledPQC.name": true, - "tfq.layers.ControlledPQC.name_scope": true, - "tfq.layers.ControlledPQC.non_trainable_variables": true, - "tfq.layers.ControlledPQC.non_trainable_weights": true, - "tfq.layers.ControlledPQC.output": true, - "tfq.layers.ControlledPQC.output_mask": true, - "tfq.layers.ControlledPQC.output_shape": true, - "tfq.layers.ControlledPQC.set_weights": true, - "tfq.layers.ControlledPQC.submodules": true, - "tfq.layers.ControlledPQC.trainable": true, - "tfq.layers.ControlledPQC.trainable_variables": true, - "tfq.layers.ControlledPQC.trainable_weights": true, - 
"tfq.layers.ControlledPQC.updates": true, - "tfq.layers.ControlledPQC.variables": true, - "tfq.layers.ControlledPQC.weights": true, - "tfq.layers.ControlledPQC.with_name_scope": true, - "tfq.layers.Expectation": false, - "tfq.layers.Expectation.__call__": true, - "tfq.layers.Expectation.__eq__": true, - "tfq.layers.Expectation.__ge__": true, - "tfq.layers.Expectation.__gt__": true, - "tfq.layers.Expectation.__init__": true, - "tfq.layers.Expectation.__le__": true, - "tfq.layers.Expectation.__lt__": true, - "tfq.layers.Expectation.__ne__": true, - "tfq.layers.Expectation.__new__": true, - "tfq.layers.Expectation.activity_regularizer": true, - "tfq.layers.Expectation.add_loss": true, - "tfq.layers.Expectation.add_metric": true, - "tfq.layers.Expectation.add_update": true, - "tfq.layers.Expectation.add_weight": true, - "tfq.layers.Expectation.build": true, - "tfq.layers.Expectation.call": true, - "tfq.layers.Expectation.compute_mask": true, - "tfq.layers.Expectation.compute_output_shape": true, - "tfq.layers.Expectation.compute_output_signature": true, - "tfq.layers.Expectation.count_params": true, - "tfq.layers.Expectation.dtype": true, - "tfq.layers.Expectation.dynamic": true, - "tfq.layers.Expectation.from_config": true, - "tfq.layers.Expectation.get_config": true, - "tfq.layers.Expectation.get_input_at": true, - "tfq.layers.Expectation.get_input_mask_at": true, - "tfq.layers.Expectation.get_input_shape_at": true, - "tfq.layers.Expectation.get_losses_for": true, - "tfq.layers.Expectation.get_output_at": true, - "tfq.layers.Expectation.get_output_mask_at": true, - "tfq.layers.Expectation.get_output_shape_at": true, - "tfq.layers.Expectation.get_updates_for": true, - "tfq.layers.Expectation.get_weights": true, - "tfq.layers.Expectation.input": true, - "tfq.layers.Expectation.input_mask": true, - "tfq.layers.Expectation.input_shape": true, - "tfq.layers.Expectation.input_spec": true, - "tfq.layers.Expectation.losses": true, - "tfq.layers.Expectation.metrics": true, - 
"tfq.layers.Expectation.name": true, - "tfq.layers.Expectation.name_scope": true, - "tfq.layers.Expectation.non_trainable_variables": true, - "tfq.layers.Expectation.non_trainable_weights": true, - "tfq.layers.Expectation.output": true, - "tfq.layers.Expectation.output_mask": true, - "tfq.layers.Expectation.output_shape": true, - "tfq.layers.Expectation.set_weights": true, - "tfq.layers.Expectation.submodules": true, - "tfq.layers.Expectation.trainable": true, - "tfq.layers.Expectation.trainable_variables": true, - "tfq.layers.Expectation.trainable_weights": true, - "tfq.layers.Expectation.updates": true, - "tfq.layers.Expectation.variables": true, - "tfq.layers.Expectation.weights": true, - "tfq.layers.Expectation.with_name_scope": true, - "tfq.layers.PQC": false, - "tfq.layers.PQC.__call__": true, - "tfq.layers.PQC.__eq__": true, - "tfq.layers.PQC.__ge__": true, - "tfq.layers.PQC.__gt__": true, - "tfq.layers.PQC.__init__": true, - "tfq.layers.PQC.__le__": true, - "tfq.layers.PQC.__lt__": true, - "tfq.layers.PQC.__ne__": true, - "tfq.layers.PQC.__new__": true, - "tfq.layers.PQC.activity_regularizer": true, - "tfq.layers.PQC.add_loss": true, - "tfq.layers.PQC.add_metric": true, - "tfq.layers.PQC.add_update": true, - "tfq.layers.PQC.add_weight": true, - "tfq.layers.PQC.build": true, - "tfq.layers.PQC.call": true, - "tfq.layers.PQC.compute_mask": true, - "tfq.layers.PQC.compute_output_shape": true, - "tfq.layers.PQC.compute_output_signature": true, - "tfq.layers.PQC.count_params": true, - "tfq.layers.PQC.dtype": true, - "tfq.layers.PQC.dynamic": true, - "tfq.layers.PQC.from_config": true, - "tfq.layers.PQC.get_config": true, - "tfq.layers.PQC.get_input_at": true, - "tfq.layers.PQC.get_input_mask_at": true, - "tfq.layers.PQC.get_input_shape_at": true, - "tfq.layers.PQC.get_losses_for": true, - "tfq.layers.PQC.get_output_at": true, - "tfq.layers.PQC.get_output_mask_at": true, - "tfq.layers.PQC.get_output_shape_at": true, - "tfq.layers.PQC.get_updates_for": true, - 
"tfq.layers.PQC.get_weights": true, - "tfq.layers.PQC.input": true, - "tfq.layers.PQC.input_mask": true, - "tfq.layers.PQC.input_shape": true, - "tfq.layers.PQC.input_spec": true, - "tfq.layers.PQC.losses": true, - "tfq.layers.PQC.metrics": true, - "tfq.layers.PQC.name": true, - "tfq.layers.PQC.name_scope": true, - "tfq.layers.PQC.non_trainable_variables": true, - "tfq.layers.PQC.non_trainable_weights": true, - "tfq.layers.PQC.output": true, - "tfq.layers.PQC.output_mask": true, - "tfq.layers.PQC.output_shape": true, - "tfq.layers.PQC.set_weights": true, - "tfq.layers.PQC.submodules": true, - "tfq.layers.PQC.trainable": true, - "tfq.layers.PQC.trainable_variables": true, - "tfq.layers.PQC.trainable_weights": true, - "tfq.layers.PQC.updates": true, - "tfq.layers.PQC.variables": true, - "tfq.layers.PQC.weights": true, - "tfq.layers.PQC.with_name_scope": true, - "tfq.layers.Sample": false, - "tfq.layers.Sample.__call__": true, - "tfq.layers.Sample.__eq__": true, - "tfq.layers.Sample.__ge__": true, - "tfq.layers.Sample.__gt__": true, - "tfq.layers.Sample.__init__": true, - "tfq.layers.Sample.__le__": true, - "tfq.layers.Sample.__lt__": true, - "tfq.layers.Sample.__ne__": true, - "tfq.layers.Sample.__new__": true, - "tfq.layers.Sample.activity_regularizer": true, - "tfq.layers.Sample.add_loss": true, - "tfq.layers.Sample.add_metric": true, - "tfq.layers.Sample.add_update": true, - "tfq.layers.Sample.add_weight": true, - "tfq.layers.Sample.build": true, - "tfq.layers.Sample.call": true, - "tfq.layers.Sample.compute_mask": true, - "tfq.layers.Sample.compute_output_shape": true, - "tfq.layers.Sample.compute_output_signature": true, - "tfq.layers.Sample.count_params": true, - "tfq.layers.Sample.dtype": true, - "tfq.layers.Sample.dynamic": true, - "tfq.layers.Sample.from_config": true, - "tfq.layers.Sample.get_config": true, - "tfq.layers.Sample.get_input_at": true, - "tfq.layers.Sample.get_input_mask_at": true, - "tfq.layers.Sample.get_input_shape_at": true, - 
"tfq.layers.Sample.get_losses_for": true, - "tfq.layers.Sample.get_output_at": true, - "tfq.layers.Sample.get_output_mask_at": true, - "tfq.layers.Sample.get_output_shape_at": true, - "tfq.layers.Sample.get_updates_for": true, - "tfq.layers.Sample.get_weights": true, - "tfq.layers.Sample.input": true, - "tfq.layers.Sample.input_mask": true, - "tfq.layers.Sample.input_shape": true, - "tfq.layers.Sample.input_spec": true, - "tfq.layers.Sample.losses": true, - "tfq.layers.Sample.metrics": true, - "tfq.layers.Sample.name": true, - "tfq.layers.Sample.name_scope": true, - "tfq.layers.Sample.non_trainable_variables": true, - "tfq.layers.Sample.non_trainable_weights": true, - "tfq.layers.Sample.output": true, - "tfq.layers.Sample.output_mask": true, - "tfq.layers.Sample.output_shape": true, - "tfq.layers.Sample.set_weights": true, - "tfq.layers.Sample.submodules": true, - "tfq.layers.Sample.trainable": true, - "tfq.layers.Sample.trainable_variables": true, - "tfq.layers.Sample.trainable_weights": true, - "tfq.layers.Sample.updates": true, - "tfq.layers.Sample.variables": true, - "tfq.layers.Sample.weights": true, - "tfq.layers.Sample.with_name_scope": true, - "tfq.layers.SampledExpectation": false, - "tfq.layers.SampledExpectation.__call__": true, - "tfq.layers.SampledExpectation.__eq__": true, - "tfq.layers.SampledExpectation.__ge__": true, - "tfq.layers.SampledExpectation.__gt__": true, - "tfq.layers.SampledExpectation.__init__": true, - "tfq.layers.SampledExpectation.__le__": true, - "tfq.layers.SampledExpectation.__lt__": true, - "tfq.layers.SampledExpectation.__ne__": true, - "tfq.layers.SampledExpectation.__new__": true, - "tfq.layers.SampledExpectation.activity_regularizer": true, - "tfq.layers.SampledExpectation.add_loss": true, - "tfq.layers.SampledExpectation.add_metric": true, - "tfq.layers.SampledExpectation.add_update": true, - "tfq.layers.SampledExpectation.add_weight": true, - "tfq.layers.SampledExpectation.build": true, - 
"tfq.layers.SampledExpectation.call": true, - "tfq.layers.SampledExpectation.compute_mask": true, - "tfq.layers.SampledExpectation.compute_output_shape": true, - "tfq.layers.SampledExpectation.compute_output_signature": true, - "tfq.layers.SampledExpectation.count_params": true, - "tfq.layers.SampledExpectation.dtype": true, - "tfq.layers.SampledExpectation.dynamic": true, - "tfq.layers.SampledExpectation.from_config": true, - "tfq.layers.SampledExpectation.get_config": true, - "tfq.layers.SampledExpectation.get_input_at": true, - "tfq.layers.SampledExpectation.get_input_mask_at": true, - "tfq.layers.SampledExpectation.get_input_shape_at": true, - "tfq.layers.SampledExpectation.get_losses_for": true, - "tfq.layers.SampledExpectation.get_output_at": true, - "tfq.layers.SampledExpectation.get_output_mask_at": true, - "tfq.layers.SampledExpectation.get_output_shape_at": true, - "tfq.layers.SampledExpectation.get_updates_for": true, - "tfq.layers.SampledExpectation.get_weights": true, - "tfq.layers.SampledExpectation.input": true, - "tfq.layers.SampledExpectation.input_mask": true, - "tfq.layers.SampledExpectation.input_shape": true, - "tfq.layers.SampledExpectation.input_spec": true, - "tfq.layers.SampledExpectation.losses": true, - "tfq.layers.SampledExpectation.metrics": true, - "tfq.layers.SampledExpectation.name": true, - "tfq.layers.SampledExpectation.name_scope": true, - "tfq.layers.SampledExpectation.non_trainable_variables": true, - "tfq.layers.SampledExpectation.non_trainable_weights": true, - "tfq.layers.SampledExpectation.output": true, - "tfq.layers.SampledExpectation.output_mask": true, - "tfq.layers.SampledExpectation.output_shape": true, - "tfq.layers.SampledExpectation.set_weights": true, - "tfq.layers.SampledExpectation.submodules": true, - "tfq.layers.SampledExpectation.trainable": true, - "tfq.layers.SampledExpectation.trainable_variables": true, - "tfq.layers.SampledExpectation.trainable_weights": true, - "tfq.layers.SampledExpectation.updates": 
true, - "tfq.layers.SampledExpectation.variables": true, - "tfq.layers.SampledExpectation.weights": true, - "tfq.layers.SampledExpectation.with_name_scope": true, - "tfq.layers.State": false, - "tfq.layers.State.__call__": true, - "tfq.layers.State.__eq__": true, - "tfq.layers.State.__ge__": true, - "tfq.layers.State.__gt__": true, - "tfq.layers.State.__init__": true, - "tfq.layers.State.__le__": true, - "tfq.layers.State.__lt__": true, - "tfq.layers.State.__ne__": true, - "tfq.layers.State.__new__": true, - "tfq.layers.State.activity_regularizer": true, - "tfq.layers.State.add_loss": true, - "tfq.layers.State.add_metric": true, - "tfq.layers.State.add_update": true, - "tfq.layers.State.add_weight": true, - "tfq.layers.State.build": true, - "tfq.layers.State.call": true, - "tfq.layers.State.compute_mask": true, - "tfq.layers.State.compute_output_shape": true, - "tfq.layers.State.compute_output_signature": true, - "tfq.layers.State.count_params": true, - "tfq.layers.State.dtype": true, - "tfq.layers.State.dynamic": true, - "tfq.layers.State.from_config": true, - "tfq.layers.State.get_config": true, - "tfq.layers.State.get_input_at": true, - "tfq.layers.State.get_input_mask_at": true, - "tfq.layers.State.get_input_shape_at": true, - "tfq.layers.State.get_losses_for": true, - "tfq.layers.State.get_output_at": true, - "tfq.layers.State.get_output_mask_at": true, - "tfq.layers.State.get_output_shape_at": true, - "tfq.layers.State.get_updates_for": true, - "tfq.layers.State.get_weights": true, - "tfq.layers.State.input": true, - "tfq.layers.State.input_mask": true, - "tfq.layers.State.input_shape": true, - "tfq.layers.State.input_spec": true, - "tfq.layers.State.losses": true, - "tfq.layers.State.metrics": true, - "tfq.layers.State.name": true, - "tfq.layers.State.name_scope": true, - "tfq.layers.State.non_trainable_variables": true, - "tfq.layers.State.non_trainable_weights": true, - "tfq.layers.State.output": true, - "tfq.layers.State.output_mask": true, - 
"tfq.layers.State.output_shape": true, - "tfq.layers.State.set_weights": true, - "tfq.layers.State.submodules": true, - "tfq.layers.State.trainable": true, - "tfq.layers.State.trainable_variables": true, - "tfq.layers.State.trainable_weights": true, - "tfq.layers.State.updates": true, - "tfq.layers.State.variables": true, - "tfq.layers.State.weights": true, - "tfq.layers.State.with_name_scope": true, - "tfq.padded_to_ragged": false - }, - "py_module_names": [ - "tfq" - ] -} diff --git a/docs/api_docs/python/tfq/convert_to_tensor.md b/docs/api_docs/python/tfq/convert_to_tensor.md deleted file mode 100644 index c3d7e1dee..000000000 --- a/docs/api_docs/python/tfq/convert_to_tensor.md +++ /dev/null @@ -1,71 +0,0 @@ -
- - -
- -# tfq.convert_to_tensor - - - - - -
- - - View source on GitHub - -
- - - -Convert lists of tfq supported primitives to tensor representations. - -``` python -tfq.convert_to_tensor(items_to_convert) -``` - - - - - -Recursively convert a nested lists of `cirq.PauliSum` or `cirq.Circuit` -objects to a `tf.Tensor` representation. Note that cirq serialization only -supports `cirq.GridQubit`s so we also require that input circuits and -pauli sums are defined only on `cirq.GridQubit`s. - - -``` - ->>> my_qubits = cirq.GridQubit.rect(1, 2) ->>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), -... cirq.Circuit(cirq.Z(my_qubits[0])) -... ] ->>> tensor_input = tfq.convert_to_tensor(my_circuits) ->>> # Now tensor_input can be used as model input etc. ->>> same_circuits = tfq.from_tensor(tensor_input) ->>> # same_circuits now holds cirq.Circuit objects once more. ->>> same_circuits -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -#### Args: - - -* `items_to_convert`: Python `list` or nested `list` of `cirq.Circuit` - or `cirq.Paulisum` objects. Should be rectangular, or this function - will error. - - -#### Returns: - -`tf.Tensor` that represents the input items. diff --git a/docs/api_docs/python/tfq/datasets.md b/docs/api_docs/python/tfq/datasets.md deleted file mode 100644 index 752f7d9c7..000000000 --- a/docs/api_docs/python/tfq/datasets.md +++ /dev/null @@ -1,27 +0,0 @@ -
- - -
- -# Module: tfq.datasets - - - - -
- - - View source on GitHub - -
- - - -Interesting quantum datasets. - - - -## Functions - -[`excited_cluster_states(...)`](../tfq/datasets/excited_cluster_states.md): Return a tuple of potentially excited cluster states and their labels. - diff --git a/docs/api_docs/python/tfq/datasets/excited_cluster_states.md b/docs/api_docs/python/tfq/datasets/excited_cluster_states.md deleted file mode 100644 index 5e433a62e..000000000 --- a/docs/api_docs/python/tfq/datasets/excited_cluster_states.md +++ /dev/null @@ -1,76 +0,0 @@ -
- - -
- -# tfq.datasets.excited_cluster_states - - - - - -
- - - View source on GitHub - -
- - - -Return a tuple of potentially excited cluster states and their labels. - -``` python -tfq.datasets.excited_cluster_states(qubits) -``` - - - - - -For every qubit in `qubits` this method will create a cluster state circuit -on `qubits`, apply a `cirq.X` on that qubit along with a label of 1 and add -it to the return dataset. Finally a cluster state circuit on `qubits` that -doesn't contain any `cirq.X` gates with a label of -1 will be added to the -returned dataset. - - -``` - ->>> circuits, labels = tfq.datasets.excited_cluster_states( -... cirq.GridQubit.rect(1, 3) -... ) ->>> print(circuits[0]) -(0, 0): ───H───@───────@───X─── - │ │ -(0, 1): ───H───@───@───┼─────── - │ │ -(0, 2): ───H───────@───@─────── ->>> labels[0] -1 ->>> print(circuits[-1]) -(0, 0): ───H───@───────@─── - │ │ -(0, 1): ───H───@───@───┼─── - │ │ -(0, 2): ───H───────@───@─── ->>> labels[-1] --1 - -``` - - -Circuits that feature a `cirq.X` gate on one of the qubits are labeled 1, -while the circuit that doesn't feature a `cirq.X` anywhere has the label -1. - - -#### Args: - - -* `qubits`: Python `list` of `cirq.GridQubit`s on which the excited cluster - state dataset will be created. - - -#### Returns: - -A `tuple` of `cirq.Circuit`s and Python `int` labels. diff --git a/docs/api_docs/python/tfq/differentiators.md b/docs/api_docs/python/tfq/differentiators.md deleted file mode 100644 index c78bd64e5..000000000 --- a/docs/api_docs/python/tfq/differentiators.md +++ /dev/null @@ -1,37 +0,0 @@ -
- - -
- -# Module: tfq.differentiators - - - - -
- - - View source on GitHub - -
- - - -Module functions for tfq.differentiators.* - - - -## Classes - -[`class CentralDifference`](../tfq/differentiators/CentralDifference.md): Differentiates a circuit using Central Differencing. - -[`class Differentiator`](../tfq/differentiators/Differentiator.md): Interface that defines how to specify gradients for a quantum circuit. - -[`class ForwardDifference`](../tfq/differentiators/ForwardDifference.md): Differentiate a circuit using forward differencing. - -[`class LinearCombination`](../tfq/differentiators/LinearCombination.md): Differentiate a circuit with respect to its inputs by - -[`class ParameterShift`](../tfq/differentiators/ParameterShift.md): Calculate the general version of parameter-shift rule based gradients. - -[`class SGDifferentiator`](../tfq/differentiators/SGDifferentiator.md): Stochastic generator based differentiator class. - diff --git a/docs/api_docs/python/tfq/differentiators/CentralDifference.md b/docs/api_docs/python/tfq/differentiators/CentralDifference.md deleted file mode 100644 index 74676100c..000000000 --- a/docs/api_docs/python/tfq/differentiators/CentralDifference.md +++ /dev/null @@ -1,188 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.CentralDifference - - - - - -
- - - View source on GitHub - -
- - - -## Class `CentralDifference` - -Differentiates a circuit using Central Differencing. - -Inherits From: [`LinearCombination`](../../tfq/differentiators/LinearCombination.md) - - - -Central differencing computes a derivative at point x using an equal -number of points before and after x. A closed form for -the coefficients of this derivative for an arbitrary positive error order -is used here, which is described in the following article: -https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.CentralDifference(2, 0.01) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.1837807]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - error_order=2, - grid_spacing=0.001 -) -``` - -Instantiate a CentralDifference. - -Create a CentralDifference differentaitor, passing along an error order -and grid spacing to be used to contstruct differentiator coeffecients. - -#### Args: - - -* `error_order`: A positive, even `int` specifying the error order - of this differentiator. This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. -* `grid_spacing`: A positive `float` specifying how large of a - grid to use in calculating this finite difference. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/Differentiator.md b/docs/api_docs/python/tfq/differentiators/Differentiator.md deleted file mode 100644 index a71f59008..000000000 --- a/docs/api_docs/python/tfq/differentiators/Differentiator.md +++ /dev/null @@ -1,204 +0,0 @@ -
- - - - - - -
- -# tfq.differentiators.Differentiator - - - - - -
- - - View source on GitHub - -
- - - -## Class `Differentiator` - -Interface that defines how to specify gradients for a quantum circuit. - - - - - -This abstract class allows for the creation of gradient calculation -procedures for (expectation values from) quantum circuits, with -respect to a set of input parameter values. This allows one -to backpropagate through a quantum circuit. - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Specify how to differentiate a circuit with analytical expectation. - -This is called at graph runtime by TensorFlow. `differentiate_analytic` -should calculate the gradient of a batch of circuits and return it -formatted as indicated below. See -tfq.differentiators.ForwardDifference for an example. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -A `tf.Tensor` with the same shape as `symbol_values` representing -the gradient backpropageted to the `symbol_values` input of the op -you are differentiating through. - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Specify how to differentiate a circuit with sampled expectation. - -This is called at graph runtime by TensorFlow. `differentiate_sampled` -should calculate the gradient of a batch of circuits and return it -formatted as indicated below. See -tfq.differentiators.ForwardDifference for an example. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -A `tf.Tensor` with the same shape as `symbol_values` representing -the gradient backpropageted to the `symbol_values` input of the op -you are differentiating through. - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/ForwardDifference.md b/docs/api_docs/python/tfq/differentiators/ForwardDifference.md deleted file mode 100644 index babc3da99..000000000 --- a/docs/api_docs/python/tfq/differentiators/ForwardDifference.md +++ /dev/null @@ -1,188 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.ForwardDifference - - - - - -
- - - View source on GitHub - -
- - - -## Class `ForwardDifference` - -Differentiate a circuit using forward differencing. - -Inherits From: [`LinearCombination`](../../tfq/differentiators/LinearCombination.md) - - - -Forward differencing computes a derivative at a point x using only -points larger than x (in this way, it is 'one sided'). A closed form for -the coefficients of this derivative for an arbitrary positive error order -is used here, which is described in the following article: -https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.ForwardDifference(2, 0.01) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.184372]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - error_order=1, - grid_spacing=0.001 -) -``` - -Instantiate a ForwardDifference. - -Create a ForwardDifference differentiator, passing along an error order -and grid spacing to be used to contstruct differentiator coeffecients. - -#### Args: - - -* `error_order`: A positive `int` specifying the error order of this - differentiator. This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. -* `grid_spacing`: A positive `float` specifying how large of a - grid to use in calculating this finite difference. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/LinearCombination.md b/docs/api_docs/python/tfq/differentiators/LinearCombination.md deleted file mode 100644 index 9e7dfead0..000000000 --- a/docs/api_docs/python/tfq/differentiators/LinearCombination.md +++ /dev/null @@ -1,191 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.LinearCombination - - - - - -
- - - View source on GitHub - -
- - - -## Class `LinearCombination` - -Differentiate a circuit with respect to its inputs by - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - -linearly combining values obtained by evaluating the op using parameter -values perturbed about their forward-pass values. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> weights = [5, 6, 7] ->>> perturbations = [0, 0.5, 0.25] ->>> linear_differentiator = tfq.differentiators.LinearCombination( -... weights, perturbations -... ) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums -... ) ->>> # Gradient would be: 5 * f(x+0) + 6 * f(x+0.5) + 7 * f(x+0.25) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> # Note: this gradient visn't correct in value, but showcases ->>> # the principle of how gradients can be defined in a very flexible ->>> # fashion. ->>> grads -tf.Tensor([[5.089467]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - weights, - perturbations -) -``` - -Instantiate this differentiator. - -Create a LinearComobinationDifferentiator. Pass in weights and -perturbations as described below. - -#### Args: - - -* `weights`: Python `list` of real numbers representing linear - combination coeffecients for each perturbed function - evaluation. -* `perturbations`: Python `list` of real numbers representing - perturbation values. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/ParameterShift.md b/docs/api_docs/python/tfq/differentiators/ParameterShift.md deleted file mode 100644 index eb94800c3..000000000 --- a/docs/api_docs/python/tfq/differentiators/ParameterShift.md +++ /dev/null @@ -1,254 +0,0 @@ -
- - - - - - -
- -# tfq.differentiators.ParameterShift - - - - - -
- - - View source on GitHub - -
- - - -## Class `ParameterShift` - -Calculate the general version of parameter-shift rule based gradients. - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - - -This ParameterShift is the gradient estimator of the following paper: - -[arXiv:1905.13311](https://arxiv.org/abs/1905.13311), Gavin E. Crooks. - -This ParameterShift is used for any programs with parameterized gates. -It internally decompose any programs into array of gates with at most -two distinct eigenvalues by using `cirq.decompose`. - -``` ->>> non_diff_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.ParameterShift() ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=non_diff_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # This value is now computed via the ParameterShift rule. ->>> # https://arxiv.org/abs/1905.13311 ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.1839752]], shape=(1, 1), dtype=float32) -``` - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Calculate the gradient. - -The gradient calculations follows the following steps: - -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. -3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - -**CAUTION** -Analytic gradient measurements based on this ParameterShift generally -run at least K(=2) times SLOW than the original circuit. -On top of it, since all parameters of gates are shifted individually, -the time complexity is linear in the number of parameterized gates L. -So, you will see O(KL) slower time & space complexity than the original -forward pass measurements. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. 
-* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -Backward gradient values for each program & each pauli sum. It has -the shape of [batch_size, n_symbols]. - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Calculate the gradient. - -The gradient calculations follows the following steps: - -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. -3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - -**CAUTION** -Analytic gradient measurements based on this ParameterShift generally -run at least K(=2) times SLOW than the original circuit. -On top of it, since all parameters of gates are shifted individually, -the time complexity is linear in the number of parameterized gates L. -So, you will see O(KL) slower time & space complexity than the original -forward pass measurements. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` of positiver integers indicating the number - of samples used per term to calculate the expectation value - in the forward pass. 
-* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -Backward gradient values for each program & each pauli sum. It has -the shape of [batch_size, n_symbols]. - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md b/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md deleted file mode 100644 index 46be1970a..000000000 --- a/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md +++ /dev/null @@ -1,268 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.SGDifferentiator - - - - - -
- - - View source on GitHub - -
- - - -## Class `SGDifferentiator` - -Stochastic generator based differentiator class. - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - -SGDifferentiator allows you to get the sampled gradient value from three -different stochastic processes: -- parameter coordinate sampling - Choose one of the symbols of the given programs and perform coordinate - descent optimization. - e.g. if a program has parameters ['a','b','c'], choose 'a' w.r.t given - probability and get the partial derivative of the direction 'a' only -- parameter-shift rule generators sampling - e.g. Given symbols, there could be many operators sharing the same - symbol, X**'a', Y**'a', Z**'a'. Choose Y**'a' w.r.t given - probability and get the partial derivative of the generator. -- cost Hamiltonian sampling - e.g. if there are cost Hamiltonians such as ['Z1',Z2',Z3'], then choose - 'Z2' w.r.t given probability and get the partial derivative of the - Hamiltonian observable only. -and the expectation value of the sampled gradient value converges into -the true ground truth gradient value. -This Stochastic Generator Differentiator is the modified gradient estimator -of the following two papers: -- [arXiv:1901.05374](https://arxiv.org/abs/1901.05374), Harrow et al. -- [arXiv:1910.01155](https://arxiv.org/abs/1910.01155), Sweke et al. - -``` ->>> # Get an expectation op. ->>> my_op = tfq.get_expectation_op() ->>> # Attach a differentiator. ->>> my_dif = tfq.differentiators.SGDifferentiator() ->>> op = my_dif.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... 
g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # This value is now computed via the stochastic processes described in: ->>> # https://arxiv.org/abs/1901.05374 ->>> # https://arxiv.org/abs/1910.01155 ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> # the result is non-deterministic in general, but in this special case, ->>> # it has only one result. ->>> grads - -``` - -

__init__

- -View source - -``` python -__init__( - stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=True, - uniform_sampling=False -) -``` - -Instantiate this differentiator. -Create a SGDifferentiator. -Args: - stochastic_coordinate: Python `bool` to determine if - sampling on coordinate is performed or not. Default to True. - stochastic_generator: Python `bool` to determine if - sampling on generator is performed or not. Default to True. - stochastic_cost: Python `bool` to determine if sampling on - cost Hamiltonian is performed or not. Default to True. - uniform_sampling: Python `bool` to determine the - probabilistic distributions on the sampling targets. - Default to False. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Compute the sampled gradient with cascaded stochastic processes. -The gradient calculations follows the following steps: -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums -3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. -Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] -Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Compute the sampled gradient with cascaded stochastic processes. -The gradient calculations follows the following steps: -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums -3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. -Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. 
- forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] -Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/from_tensor.md b/docs/api_docs/python/tfq/from_tensor.md deleted file mode 100644 index 7cd9eca69..000000000 --- a/docs/api_docs/python/tfq/from_tensor.md +++ /dev/null @@ -1,69 +0,0 @@ -
- - -
- -# tfq.from_tensor - - - - - -
- - - View source on GitHub - -
- - - -Convert a tensor of tfq primitives back to Python objects. - -``` python -tfq.from_tensor(tensor_to_convert) -``` - - - - - -Convert a tensor representing `cirq.PauliSum` or `cirq.Circuit` -objects back to Python objects. - - -``` - ->>> my_qubits = cirq.GridQubit.rect(1, 2) ->>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), -... cirq.Circuit(cirq.Z(my_qubits[0])) -... ] ->>> tensor_input = tfq.convert_to_tensor(my_circuits) ->>> # Now tensor_input can be used as model input etc. ->>> same_circuits = tfq.from_tensor(tensor_input) ->>> # same_circuits now holds cirq.Circuit objects once more. ->>> same_circuits -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -#### Args: - - -* `tensor_to_convert`: `tf.Tensor` or `np.ndarray` representation to - convert back into python objects. - - -#### Returns: - -Python `list` of items converted to their python representation stored - in a (potentially nested) `list`. diff --git a/docs/api_docs/python/tfq/get_expectation_op.md b/docs/api_docs/python/tfq/get_expectation_op.md deleted file mode 100644 index 155344276..000000000 --- a/docs/api_docs/python/tfq/get_expectation_op.md +++ /dev/null @@ -1,103 +0,0 @@ -
- - -
- -# tfq.get_expectation_op - - - - - -
- - - View source on GitHub - -
- - - -Get a Tensorflow op that will calculate batches of expectation values. - -``` python -tfq.get_expectation_op(backend=None) -``` - - - - - -This function produces a non-differentiable TF op that will calculate -batches of expectation values given tensor batches of `cirq.Circuit`s, -parameter values, and `cirq.PauliSum` operators to measure. - - -``` - ->>> # Simulate circuits with C++. ->>> my_op = tfq.get_expectation_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.H(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.123]]) ->>> my_paulis = tfq.convert_to_tensor([[ -... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) -... ]]) ->>> # This op can now be run with: ->>> output = my_op( -... my_circuit_tensor, ['alpha'], my_values, my_paulis) ->>> output -tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - -``` - - -In order to make the op differentiable, a `tfq.differentiator` object is -needed. see tfq.differentiators for more details. Below is a simple -example of how to make my_op from the above code block differentiable: - -``` ->>> diff = tfq.differentiators.ForwardDifference() ->>> my_differentiable_op = diff.generate_differentiable_op( -... analytic_op=my_op -... ) -``` - - -#### Args: - - -* `backend`: Optional python `object` that specifies what backend this op -should use when evaluating circuits. Can be any -`cirq.SimulatesFinalState`. If not provided the default C++ analytical -expectation calculation op is returned. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, pauli_sums)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
-* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - -* `Returns`: `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_sampled_expectation_op.md b/docs/api_docs/python/tfq/get_sampled_expectation_op.md deleted file mode 100644 index 759bca794..000000000 --- a/docs/api_docs/python/tfq/get_sampled_expectation_op.md +++ /dev/null @@ -1,112 +0,0 @@ -
- - -
- -# tfq.get_sampled_expectation_op - - - - - -
- - - View source on GitHub - -
- - - -Get a TensorFlow op that will calculate sampled expectation values. - -``` python -tfq.get_sampled_expectation_op(backend=None) -``` - - - - - -This function produces a non-differentiable TF op that will calculate -batches of expectation values given tensor batches of `cirq.Circuit`s, -parameter values, and `cirq.PauliSum` operators to measure. -Expectation is estimated by taking num_samples shots per term in the -corresponding PauliSum. - - -``` - ->>> # Simulate circuits with C++. ->>> my_op = tfq.get_sampled_expectation_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.H(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.123]]) ->>> my_paulis = tfq.convert_to_tensor([[ -... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) -... ]]) ->>> my_num_samples = np.array([[100]]) ->>> # This op can now be run with: ->>> output = my_op( -... my_circuit_tensor, ['alpha'], my_values, my_paulis, my_num_samples) ->>> output -tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - -``` - - -In order to make the op differentiable, a `tfq.differentiator` object is -needed. see tfq.differentiators for more details. Below is a simple -example of how to make my_op from the above code block differentiable: - - -``` - ->>> diff = tfq.differentiators.ForwardDifference() ->>> my_differentiable_op = diff.generate_differentiable_op( -... analytic_op=my_op -... ) - -``` - -#### Args: - - -* `backend`: Python `object` that specifies what backend this op should use - when evaluating circuits. It only accepts `cirq.Sampler`. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, pauli_sums, num_samples)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
-* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` with `n_samples[i][j]` is equal to the - number of samples to draw in each term of `pauli_sums[i][j]` - when estimating the expectation. It can also be tiled up to the - shape of pauli_sums by broadcasting if tf.shape(num_samples)[0] - or tf.shape(num_samples)[1] is 1 and the other dimension is the - same with that of pauli_sums. - -* `Returns`: `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_sampling_op.md b/docs/api_docs/python/tfq/get_sampling_op.md deleted file mode 100644 index 6dd84d19e..000000000 --- a/docs/api_docs/python/tfq/get_sampling_op.md +++ /dev/null @@ -1,88 +0,0 @@ -
- - -
- -# tfq.get_sampling_op - - - - - -
- - - View source on GitHub - -
- - - -Get a Tensorflow op that produces samples from given quantum circuits. - -``` python -tfq.get_sampling_op(backend=None) -``` - - - - - -This function produces a non-differentiable op that will calculate -batches of circuit samples given tensor batches of `cirq.Circuit`s, -parameter values, and a scalar telling the op how many samples to take. - - -``` - ->>> # Simulate circuits with cirq. ->>> my_op = tfq.get_sampling_op(backend=cirq.sim.Simulator()) ->>> # Simulate circuits with C++. ->>> my_second_op = tfq.get_sampling_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor( -... [cirq.Circuit(cirq.X(qubit)**my_symbol)]) ->>> my_values = np.array([[2.0]]) ->>> n_samples = np.array([10]) ->>> # This op can now be run to take samples. ->>> output = my_second_op( -... my_circuit_tensor, ['alpha'], my_values, n_samples) ->>> output - - -``` - - -#### Args: - - -* `backend`: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any `cirq.Sampler`. If - not provided the default C++ sampling op is returned. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, num_samples)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `num_samples`: `tf.Tensor` with one element indicating the number of - samples to draw. 
- -* `Returns`: `tf.Tensor` with shape - [batch_size, num_samples, n_qubits] that - holds samples (as boolean values) for each circuit. \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_state_op.md b/docs/api_docs/python/tfq/get_state_op.md deleted file mode 100644 index 885d23c21..000000000 --- a/docs/api_docs/python/tfq/get_state_op.md +++ /dev/null @@ -1,85 +0,0 @@ -
- - -
- -# tfq.get_state_op - - - - - -
- - - View source on GitHub - -
- - - -Get a tensorflow op that produces states from given quantum circuits. - -``` python -tfq.get_state_op(backend=None) -``` - - - - - -This function produces a non-differentiable op that will calculate -batches of state tensors given tensor batches of `cirq.Circuit`s and -parameter values. - - -``` - ->>> # Simulate circuits with cirq. ->>> my_op = tfq.get_state_op(backend=cirq.DensityMatrixSimulator()) ->>> # Simulate circuits with C++. ->>> my_second_op = tfq.get_state_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.Y(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.5]]) ->>> # This op can now be run to calculate the state. ->>> output = my_second_op(my_circuit_tensor, ['alpha'], my_values) ->>> output - - -``` - - -#### Args: - - -* `backend`: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any - `cirq.SimulatesFinalState`. If not provided, the default C++ - wavefunction simulator will be used. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - -* `Returns`: `tf.Tensor` with shape [batch_size, size of state] that - contains the state information of the circuit. 
\ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_supported_gates.md b/docs/api_docs/python/tfq/get_supported_gates.md deleted file mode 100644 index 7a4409939..000000000 --- a/docs/api_docs/python/tfq/get_supported_gates.md +++ /dev/null @@ -1,29 +0,0 @@ -
- - -
- -# tfq.get_supported_gates - - - - - -
- - - View source on GitHub - -
- - - -A helper to get the gates supported by tfq. - -``` python -tfq.get_supported_gates() -``` - - - - diff --git a/docs/api_docs/python/tfq/layers.md b/docs/api_docs/python/tfq/layers.md deleted file mode 100644 index 1cb0edd2d..000000000 --- a/docs/api_docs/python/tfq/layers.md +++ /dev/null @@ -1,39 +0,0 @@ -
- - -
- -# Module: tfq.layers - - - - -
- - - View source on GitHub - -
- - - -Module definitions for tensorflow_quantum.python.layers.* - - - -## Classes - -[`class AddCircuit`](../tfq/layers/AddCircuit.md): A layer that pre/appends a sequence of gates to the input circuit tensor. - -[`class ControlledPQC`](../tfq/layers/ControlledPQC.md): Controlled Parametrized Quantum Circuit (PQC) Layer. - -[`class Expectation`](../tfq/layers/Expectation.md): A Layer that calculates an expectation value. - -[`class PQC`](../tfq/layers/PQC.md): Parametrized Quantum Circuit (PQC) Layer. - -[`class Sample`](../tfq/layers/Sample.md): A Layer that samples from a quantum circuit. - -[`class SampledExpectation`](../tfq/layers/SampledExpectation.md): A layer that calculates a sampled expectation value. - -[`class State`](../tfq/layers/State.md): A Layer that simulates a quantum state. - diff --git a/docs/api_docs/python/tfq/layers/AddCircuit.md b/docs/api_docs/python/tfq/layers/AddCircuit.md deleted file mode 100644 index 7a2ec3980..000000000 --- a/docs/api_docs/python/tfq/layers/AddCircuit.md +++ /dev/null @@ -1,857 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.AddCircuit - - - - - -
- - - View source on GitHub - -
- - - -## Class `AddCircuit` - -A layer that pre/appends a sequence of gates to the input circuit tensor. - - - - - -This layer allows for an arbitrary `cirq.Circuit` (or list of circuits of -equal length to the input) to be appended or prepended to the list of input -circuits. - - -``` - ->>> qubits = cirq.GridQubit.rect(1,4) ->>> add = tfq.layers.AddCircuit() ->>> output = add( -... [cirq.Circuit(cirq.Y(qubits[0])), cirq.Circuit(cirq.Z(qubits[0])) -... append = cirq.Circuit(cirq.Y(qubits[0]))) -... ])) ->>> # Now we have a layer that would append a single Y gate to any inputs. ->>> tfq.from_tensor(output) -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -

__init__

- -View source - -``` python -__init__(**kwargs) -``` - -Instantiate this layer. - - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/CircuitConstruction.md b/docs/api_docs/python/tfq/layers/CircuitConstruction.md deleted file mode 100644 index 3581801c5..000000000 --- a/docs/api_docs/python/tfq/layers/CircuitConstruction.md +++ /dev/null @@ -1,894 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.CircuitConstruction - - - - - -
- - - View source on GitHub - -
- - - -## Class `CircuitConstruction` - -A class that defines the behavior of circuit constructing layers in TFQ. - - - - - -An abstract class that defines all behaviors of a circuit constructing -layer, namely the ability to modify a tensor representation of a -circuit inside the TF graph by prepending or appending to the input circuit. - - -``` - ->>> class AddXLayer(tfq.layers.CircuitConstruction): -... def __init__(self, qubits): -... super().__init__() -... self.qubits = qubits -... def get_circuit(self): -... x_wall = cirq.Circuit() -... for qubit in qubits: -... x_wall.append(cirq.X(qubit)) -... return x_wall ->>> qubits = cirq.GridQubit.rect(1,4) ->>> model = tf.keras.Sequential([ -... tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string), -... AddXLayer(qubits) -... ]) ->>> output = model(tfq.convert_to_tensor([ -... cirq.Circuit(cirq.Y(qubits[0])), -... cirq.Circuit(cirq.Z(qubits[0])) -... ])) ->>> tfq.from_tensor(output) -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - cirq.X.on(cirq.GridQubit(0, 1)), - cirq.X.on(cirq.GridQubit(0, 2)), - cirq.X.on(cirq.GridQubit(0, 3)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - cirq.X.on(cirq.GridQubit(0, 1)), - cirq.X.on(cirq.GridQubit(0, 2)), - cirq.X.on(cirq.GridQubit(0, 3)), - ]), -])] - -``` - -

__init__

- -View source - -``` python -__init__( - prepend=False, - **kwargs -) -``` - -Instantiate a GateLayer object. - -Create a GateLayer, whose main purpose is to append or prepend to -tensors of circuits in the TF graph. - -#### Args: - - -* `prepend`: Python `bool` if set to true, the gates produced by this - layer will be prepended to the input instead of appended. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -View source - -``` python -build(input_shape) -``` - -Keras build function. - - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_circuit

- -View source - -``` python -get_circuit() -``` - -Abstract method that returns a cirq.Circuit - -At runtime, this method will be called, and must produce a -`cirq.Circuit` which will be pre/appended to the input circuit tensor. - -#### Returns: - -`cirq.Circuit` circuit to prepend/append to the input tensor - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/ControlledPQC.md b/docs/api_docs/python/tfq/layers/ControlledPQC.md deleted file mode 100644 index fb5141e64..000000000 --- a/docs/api_docs/python/tfq/layers/ControlledPQC.md +++ /dev/null @@ -1,954 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.ControlledPQC - - - - - -
- - - View source on GitHub - -
- - - -## Class `ControlledPQC` - -Controlled Parametrized Quantum Circuit (PQC) Layer. - - - - - -The `ControlledPQC` layer is very similar to the regular `PQC` layer, but -with one major difference. The `ControlledPQC` layer requires the caller -of the layer to provide the control parameter inputs for `model_circuit`. -You can see how this works through a simple example: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC(model, cirq.Z(bit)) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> res = outputs([quantum_data, model_params]) ->>> res -tf.Tensor( -[[-1.4901161e-08] - [-7.0710683e-01]], shape=(2, 1), dtype=float32) - -``` - - -Just like with the `PQC` it is *very important* that the quantum datapoint -circuits do not contain any `sympy.Symbols` themselves (This can be -supported with advanced usage of the tfq.layers.Expectation layer). Just -like `PQC` it is possible to specify multiple readout operations and -switch to sample based expectation calculation: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC( -... model, -... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], -... repetitions=5000) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> res = outputs([quantum_data, model_params]) ->>> res -tf.Tensor( -[[-0.0028 1. 
-0.0028] - [-0.6956 -0.498 -0.498 ]], shape=(2, 3), dtype=float32) - -``` - - -A value for `backend` can also be supplied in the layer constructor -arguments to indicate which supported backend you would like to use. -A value for `differentiator` can also be supplied in the constructor -to indicate the differentiation scheme this `ControlledPQC` layer -should use. Here's how you would take the gradients of the -above example using a `cirq.Simulator` backend (which is slower -than `backend=None` which uses C++): - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC( -... model, -... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], -... repetitions=5000, -... backend=cirq.Simulator(), -... differentiator=tfq.differentiators.ParameterShift()) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> with tf.GradientTape() as g: -... g.watch(model_params) -... res = outputs([quantum_data, model_params]) ->>> grads = g.gradient(res, model_params) ->>> grads -tf.Tensor( -[[-3.1415927 3.1415927 ] - [-0.9211149 0.02764606]], shape=(2, 2), dtype=float32)] - -``` - - -Lastly, like all layers in TensorFlow the `ControlledPQC` layer can be -called on any `tf.Tensor` as long as it is the right shape. This means -you could replace `model_params` in the above example with the outputs -from a `tf.keras.Dense` layer or replace `quantum_data` with values fed -in from a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - model_circuit, - operators, - **kwargs -) -``` - -Instantiate this layer. - -Create a layer that will output expectation values of the given -operators when fed quantum data to it's input layer. This layer will -take two input tensors, one representing a quantum data source (these -circuits must not contain any symbols) and the other representing -control parameters for the model circuit that gets appended to the -datapoints. - -model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. -operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. -repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. -backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit `cirq.SimulatesFinalState` - if `sampled_based` is True or it must inherit `cirq.Sampler` if - `sample_based` is False. -differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/Expectation.md b/docs/api_docs/python/tfq/layers/Expectation.md deleted file mode 100644 index cc38dc21f..000000000 --- a/docs/api_docs/python/tfq/layers/Expectation.md +++ /dev/null @@ -1,1035 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.Expectation - - - - - -
- - - View source on GitHub - -
- - - -## Class `Expectation` - -A Layer that calculates an expectation value. - - - - - -Given an input circuit and set of parameter values, prepare a quantum state -and output expectation values taken on that state with respect to some -observables to the tensorflow graph. - - -First define a simple helper function for generating a parametrized -quantum circuit that we will use throughout: - -``` ->>> def _gen_single_bit_rotation_problem(bit, symbols): -... """Generate a toy problem on 1 qubit.""" -... starting_state = [0.123, 0.456, 0.789] -... circuit = cirq.Circuit( -... cirq.Rx(starting_state[0])(bit), -... cirq.Ry(starting_state[1])(bit), -... cirq.Rz(starting_state[2])(bit), -... cirq.Rz(symbols[2])(bit), -... cirq.Ry(symbols[1])(bit), -... cirq.Rx(symbols[0])(bit) -... ) -... return circuit -``` - - -In quantum machine learning there are two very common use cases that -align with keras layer constructs. The first is where the circuits -represent the input data points (see the note at the bottom about -using compiled models): - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> circuit_list = [ -... _gen_single_bit_rotation_problem(bit, symbols), -... cirq.Circuit( -... cirq.Z(bit) ** symbols[0], -... cirq.X(bit) ** symbols[1], -... cirq.Z(bit) ** symbols[2] -... ), -... cirq.Circuit( -... cirq.X(bit) ** symbols[0], -... cirq.Z(bit) ** symbols[1], -... cirq.X(bit) ** symbols[2] -... ) -... ] ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... circuit_list, symbol_names=symbols, operators = ops) ->>> # Here output[i][j] corresponds to the expectation of all the ops ->>> # in ops w.r.t circuits[i] where keras managed variables are ->>> # placed in the symbols 'x', 'y', 'z'. 
->>> tf.shape(output) -tf.Tensor([3 2], shape=(2,), dtype=int32) - -``` - - -Here, different `cirq.Circuit` instances sharing the common symbols 'x', -'y' and 'z' are used as input. Keras uses the `symbol_names` -argument to map Keras managed variables to these circuits constructed -with `sympy.Symbol`s. Note that you used a Python `list` containing your -circuits, you could also specify a `tf.keras.Input` layer or any -tensorlike object to specify the circuits you would like fed to the layer -at runtime. - - -Another common use case is where there is a fixed circuit and the -expectation operators vary: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... fixed_circuit, -... symbol_names=symbols, -... operators=ops, -... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) ->>> # Here output[i][j] corresponds to ->>> # the expectation of operators[i][j] w.r.t the circuit where ->>> # variable values are managed by keras and store numbers in ->>> # the symbols 'x', 'y', 'z'. ->>> tf.shape(output) -tf.Tensor([1 2], shape=(2,), dtype=int32) - -``` - - -Note that in the above examples you used a `cirq.Circuit` object and a list -of `cirq.PauliSum` objects as inputs to your layer. To allow for varying -inputs you could change the line in the above code to: -`expectation_layer(circuit_inputs, symbol_names=symbols, operators=ops)` -where `circuit_inputs` is `tf.keras.Input(shape=(), dtype=tf.dtypes.string)` -to allow you to pass in different circuits in a compiled model. Lastly -you also supplied a `tf.keras.initializer` to the `initializer` argument. -This argument is optional in the case that the layer itself will be managing -the symbols of the circuit and not have them fed in from somewhere else in -the model.
- - -There are also some more complex use cases. Notably these use cases all -make use of the `symbol_values` parameter that causes the -`Expectation` layer to stop managing the `sympy.Symbol`s in the quantum -circuits for the user and instead require them to supply input -values themselves. Lets look at the case where there -is a single fixed circuit, some fixed operators and symbols that must be -common to all circuits: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [cirq.Z(bit), cirq.X(bit)] ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> values = [[1,1,1], [2,2,2], [3,3,3]] ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... circuit, -... symbol_names=symbols, -... symbol_values=values, -... operators=ops) ->>> # output[i][j] = The expectation of operators[j] with ->>> # values[i] placed into the symbols of the circuit ->>> # with the order specified by symbol_names. ->>> # so output[1][2] = The expectation of your circuit with parameter ->>> # values [2,2,2] w.r.t Pauli X. ->>> output -tf.Tensor( -[[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - -``` - - -Here is a simple model that uses this particular input signature of -tfq.layers.Expectation, that learns to undo the random rotation -of the qubit: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> control_input = tf.keras.Input(shape=(1,)) ->>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) ->>> d1 = tf.keras.layers.Dense(10)(control_input) ->>> d2 = tf.keras.layers.Dense(3)(d1) ->>> expectation = tfq.layers.Expectation()( -... circuit_inputs, # See note below! -... symbol_names=symbols, -... symbol_values=d2, -... 
operators=cirq.Z(bit)) ->>> data_in = np.array([[1], [0]], dtype=np.float32) ->>> data_out = np.array([[1], [-1]], dtype=np.float32) ->>> model = tf.keras.Model( -... inputs=[circuit_inputs, control_input], outputs=expectation) ->>> model.compile( -... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), -... loss=tf.keras.losses.mean_squared_error) ->>> history = model.fit( -... x=[tfq.convert_to_tensor([circuit] * 2), data_in], -... y=data_out, -... epochs=100) - -``` - - -For an example featuring this layer, please check out `Quantum sensing` -in our dev website http://www.tensorflow.org/quantum/tutorials. - -Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed -Python `list` objects. In addition to this they can also be fed `tf.Tensor` -inputs, meaning that you can input all of these things from other Tensor -objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). - -Note: When specifying a new layer for a *compiled* `tf.keras.Model` using -something like `tfq.layers.Expectation()(cirq.Circuit(...), ...)` please -be sure to instead use `tfq.layers.Expectation()(circuit_input_tensor, ...)` -where `circuit_input_tensor` is filled with -`tfq.convert_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This -is because compiled keras models require layer `call` inputs to be -traceable back to a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - backend=None, - differentiator=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output expectation values gained from -simulating a quantum circuit. - -#### Args: - - -* `backend`: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. -* `differentiator`: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit tfq.differentiators.Differentiator and - implements `differentiate_analytic` method. Defaults to None, - which uses `linear_combination.ForwardDifference()`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and its submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support). - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/PQC.md b/docs/api_docs/python/tfq/layers/PQC.md deleted file mode 100644 index a145a050c..000000000 --- a/docs/api_docs/python/tfq/layers/PQC.md +++ /dev/null @@ -1,963 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.PQC - - - - - -
- - - View source on GitHub - -
- - - -## Class `PQC` - -Parametrized Quantum Circuit (PQC) Layer. - - - - - -This layer is for training parameterized quantum models. -Given a parameterized circuit, this layer initializes the parameters -and manages them in a Keras native way. - -We start by defining a simple quantum circuit on one qubit. -This circuit parameterizes an arbitrary rotation on the Bloch sphere in -terms of the three angles a, b, and c: - - -``` - ->>> q = cirq.GridQubit(0, 0) ->>> (a, b, c) = sympy.symbols("a b c") ->>> circuit = cirq.Circuit( -... cirq.Rz(a)(q), -... cirq.Rx(b)(q), -... cirq.Rz(c)(q), -... cirq.Rx(-b)(q), -... cirq.Rz(-a)(q) -... ) - -``` - - -In order to extract information from our circuit, we must apply measurement -operators. For now we choose to make a Z measurement. In order to observe -an output, we must also feed our model quantum data (NOTE: quantum data -means quantum circuits with no free parameters). Though the output values -will depend on the default random initialization of the angles in our model, -one will be the negative of the other since `cirq.X(q)` causes a bit flip: - - -``` - ->>> outputs = tfq.layers.PQC(circuit, cirq.Z(q)) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... ]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -We can also choose to measure the three pauli matrices, sufficient to -fully characterize the operation of our model, or choose to simulate -sampled expectation values by specifying a number of measurement shots -(repetitions) to average over. Notice that using only 200 repetitions -introduces variation between the two rows of data, due to the -probabilistic nature of measurement. - - -``` - ->>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] ->>> outputs = tfq.layers.PQC(circuit, measurement, repetitions=200) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... 
]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -A value for `backend` can also be supplied in the layer constructor -arguments to indicate which supported backend you would like to use. -A value for `differentiator` can also be supplied in the constructor -to indicate the differentiation scheme this `PQC` layer should use. -Here's how you would take the gradients of the above example using a -`cirq.Simulator` backend (which is slower than the default -`backend=None` which uses C++): - - -``` - ->>> q = cirq.GridQubit(0, 0) ->>> (a, b, c) = sympy.symbols("a b c") ->>> circuit = cirq.Circuit( -... cirq.Rz(a)(q), -... cirq.Rx(b)(q), -... cirq.Rz(c)(q), -... cirq.Rx(-b)(q), -... cirq.Rz(-a)(q) -... ) ->>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] ->>> outputs = tfq.layers.PQC( -... circuit, -... measurement, -... repetitions=5000, -... backend=cirq.Simulator(), -... differentiator=tfq.differentiators.ParameterShift()) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... ]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -Lastly, like all layers in TensorFlow the `PQC` layer can be called on any -`tf.Tensor` as long as it is the right shape. This means you could replace -`quantum_data` with values fed in from a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - model_circuit, - operators, - **kwargs -) -``` - -Instantiate this layer. - -Create a layer that will output expectation values of the given -operators when fed quantum data to it's input layer. This layer will -accept one input tensor representing a quantum data source (these -circuits must not contain any symbols) and append the model_circuit to -them, execute them and then finally output the expectation values. - - -model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. -operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. -repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. -backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit either - `cirq.SimulatesFinalState` if analytic expectations are desired or - `cirq.Sampler` if sampled expectations are desired. -differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. -initializer: Optional `tf.keras.initializer` object to specify how the - symbols in `model_circuit` should be initialized when creating - the managed variables. -regularizer: Optional `tf.keras.regularizer` object applied to the - managed variables parameterizing `model_circuit`. -constraint: Optional `tf.keras.constraint` object applied to the - managed variables parameterizing `model_circuit`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -View source - -``` python -build(input_shape) -``` - -Keras build function. - - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/Sample.md b/docs/api_docs/python/tfq/layers/Sample.md deleted file mode 100644 index 815332a5c..000000000 --- a/docs/api_docs/python/tfq/layers/Sample.md +++ /dev/null @@ -1,960 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.Sample - - - - - -
- - - View source on GitHub - -
- - - -## Class `Sample` - -A Layer that samples from a quantum circuit. - - - - - -Given an input circuit and set of parameter values, output samples -taken from the end of the circuit. - -First lets define a simple circuit to sample from: - -``` ->>> def get_circuit(): -... q0 = cirq.GridQubit(0, 0) -... q1 = cirq.GridQubit(1, 0) -... circuit = cirq.Circuit( -... cirq.X(q0), -... cirq.CNOT(q1) -... ) -... -... return circuit -``` - -#### When printed: - - - -``` ->>> get_circuit() -(0, 0): ───X───@─── - │ -(1, 0): ───────X─── -``` - -Using tfq.layers.Sample, it's possible to sample outputs from a given -circuit. The circuit above will put both qubits in the |1> state. - -To retrieve samples of the output state: - -``` ->>> sample_layer = tfq.layers.Sample() ->>> output = sample_layer(get_circuit(), repetitions=4) ->>> output - -``` - -Notice above that there were no parameters passed as input into the -layer, because the circuit wasn't parameterized. If instead the circuit -had parameters, e.g. - -``` ->>> def get_parameterized_circuit(symbols): -... q0 = cirq.GridQubit(0, 0) -... q1 = cirq.GridQubit(1, 0) -... circuit = cirq.Circuit( -... cirq.X(q0) ** symbols[0], -... cirq.CNOT(q1) -... ) -... -... return circuit -``` - -Then it becomes necessary to provide a value for the symbol using -`symbol_names` and `symbol_values`. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() ->>> output = sample_layer(get_parameterized_circuit(), -... symbol_names=symbols, symbol_values=[[0.5]], repetitions=4) ->>> tf.shape(output.to_tensor()) -tf.Tensor([1 4 2], shape=(3,), dtype=int32) -``` - -Note that using multiple sets of parameters returns multiple -independent samples on the same circuit. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() ->>> params = tf.convert_to_tensor([[0.5], [0.4]], -... dtype=tf.dtypes.float32) ->>> output = sample_layer(get_parameterized_circuit(), -... 
symbol_names=symbols, symbol_values=params, repetitions=4) ->>> tf.shape(output.to_tensor()) -tf.Tensor([2 4 2], shape=(3,), dtype=int32) -``` - -The sample layer can also be used without explicitly passing in a -circuit, but instead using the layer with a batch of circuits. This layer -will then sample the circuits provided in the batch with multiple sets of -parameters, at the same time. Note that the parameters will not be -crossed along all circuits, the circuit at index i will be run with the -parameters at index i. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() -``` - -With the sample layer defined, just define both the circuit and -parameter inputs. - -``` ->>> q0 = cirq.GridQubit(0, 0) ->>> q1 = cirq.GridQubit(1, 0) ->>> circuits = tfq.convert_to_tensor([ -... cirq.Circuit( -... cirq.X(q0) ** s[0], -... cirq.CNOT(q0, q1), -... ), -... cirq.Circuit( -... cirq.Y(q0) ** s[0], -... cirq.CNOT(q0, q1), -... ) -... ]) ->>> params = tf.convert_to_tensor([[0.5], [0.4]], -... dtype=tf.dtypes.float32) -``` - -The layer can be used as usual: - -``` ->>> output = sample_layer(circuits, -... symbol_names=symbols, symbol_values = params, repetitions=4) ->>> tf.shape(output.to_tensor()) - tf.Tensor([2 4 2], shape=(3,), dtype=int32) -``` - -

__init__

- -View source - -``` python -__init__( - backend=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output bitstring samples taken from either a -simulated quantum state or a real quantum computer - -#### Args: - - -* `backend`: Optional Backend to use to simulate this state. Defaults - to the native Tensorflow simulator (None), however users may - also specify a preconfigured cirq execution object to use - instead, which must inherit `cirq.SimulatesSamples` or a - `cirq.Sampler`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/SampledExpectation.md b/docs/api_docs/python/tfq/layers/SampledExpectation.md deleted file mode 100644 index e712f12be..000000000 --- a/docs/api_docs/python/tfq/layers/SampledExpectation.md +++ /dev/null @@ -1,1039 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.SampledExpectation - - - - - -
- - - View source on GitHub - -
- - - -## Class `SampledExpectation` - -A layer that calculates a sampled expectation value. - - - - - -Given an input circuit and set of parameter values, output expectation -values of observables computed using measurement results sampled from -the input circuit. - - -First define a simple helper function for generating a parametrized -quantum circuit that we will use throughout: - -``` ->>> def _gen_single_bit_rotation_problem(bit, symbols): -... """Generate a toy problem on 1 qubit.""" -... starting_state = [0.123, 0.456, 0.789] -... circuit = cirq.Circuit( -... cirq.Rx(starting_state[0])(bit), -... cirq.Ry(starting_state[1])(bit), -... cirq.Rz(starting_state[2])(bit), -... cirq.Rz(symbols[2])(bit), -... cirq.Ry(symbols[1])(bit), -... cirq.Rx(symbols[0])(bit) -... ) -... return circuit -``` - - -In quantum machine learning there are two very common use cases that -align with keras layer constructs. The first is where the circuits -represent the input data points: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x y z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> num_samples = [100, 200] ->>> circuit_list = [ -... _gen_single_bit_rotation_problem(bit, symbols), -... cirq.Circuit( -... cirq.Z(bit) ** symbols[0], -... cirq.X(bit) ** symbols[1], -... cirq.Z(bit) ** symbols[2] -... ), -... cirq.Circuit( -... cirq.X(bit) ** symbols[0], -... cirq.Z(bit) ** symbols[1], -... cirq.X(bit) ** symbols[2] -... ) -... ] ->>> sampled_expectation_layer = tfq.layers.SampledExpectation() ->>> output = sampled_expectation_layer( -... circuit_list, -... symbol_names=symbols, -... operators=ops, -... repetitions=num_samples) ->>> # Here output[i][j] corresponds to the sampled expectation ->>> # of all the ops in ops w.r.t circuits[i] where Keras managed ->>> # variables are placed in the symbols 'x', 'y', 'z'. 
->>> tf.shape(output) -tf.Tensor([3 2], shape=(2,), dtype=int32) - -``` - - -Here, different `cirq.Circuit` instances sharing the common symbols 'x', -'y' and 'z' are used as input. Keras uses the `symbol_names` -argument to map Keras managed variables to these circuits constructed -with `sympy.Symbol`s. The shape of `num_samples` is equal to that of `ops`. - - -The second most common use case is where there is a fixed circuit and -the expectation operators vary: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> expectation_layer = tfq.layers.SampledExpectation() ->>> output = expectation_layer( -... fixed_circuit, -... symbol_names=symbols, -... operators=ops, -... repetitions=5000, -... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) ->>> # Here output[i][j] corresponds to ->>> # the sampled expectation of operators[i][j] using 5000 samples w.r.t ->>> # the circuit where variable values are managed by keras and store ->>> # numbers in the symbols 'x', 'y', 'z'. ->>> tf.shape(output) -tf.Tensor([1 2], shape=(2,), dtype=int32) - -``` - - -Here different `cirq.PauliSum` or `cirq.PauliString` instances can be -used as input to calculate the expectation on the fixed circuit that -the layer was initially constructed with. - - -There are also some more complex use cases that provide greater flexibility. -Notably these configurations all make use of the `symbol_values` parameter -that causes the `SampledExpectation` layer to stop managing the -`sympy.Symbol`s in the quantum circuits and instead requires the user to -supply inputs themselves. 
Lets look at the case where there -is a single fixed circuit, some fixed operators and symbols that must be -common to all circuits: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x y z') ->>> ops = [cirq.Z(bit), cirq.X(bit)] ->>> num_samples = [100, 200] ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> values = [[1,1,1], [2,2,2], [3,3,3]] ->>> sampled_expectation_layer = tfq.layers.SampledExpectation() ->>> output = sampled_expectation_layer( -... circuit, -... symbol_names=symbols, -... symbol_values=values, -... operators=ops, -... repetitions=num_samples) ->>> # output[i][j] = The sampled expectation of ops[j] with ->>> # values_tensor[i] placed into the symbols of the circuit ->>> # with the order specified by feed_in_params. ->>> # so output[1][2] = The sampled expectation of a circuit with parameter ->>> # values [2,2,2] w.r.t Pauli X, estimated using 200 samples per term. ->>> output # Non-deterministic result. It can vary every time. -tf.Tensor( -[[0.52, 0.72], - [0.34, 1. ], - [0.78, 0.48]], shape=(3, 2), dtype=float32) - -``` - - -Tip: you can compare the above result with that of `Expectation`: -tf.Tensor( -[[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - - -Here is a simple model that uses this particular input signature of -tfq.layers.SampledExpectation, that learns to undo the random rotation -of the qubit: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> control_input = tf.keras.Input(shape=(1,)) ->>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) ->>> d1 = tf.keras.layers.Dense(10)(control_input) ->>> d2 = tf.keras.layers.Dense(3)(d1) ->>> expectation = tfq.layers.SampledExpectation()( -... circuit_inputs, # See note below! -... symbol_names=symbols, -... symbol_values=d2, -... operators=cirq.Z(bit), -... 
repetitions=5000) ->>> data_in = np.array([[1], [0]], dtype=np.float32) ->>> data_out = np.array([[1], [-1]], dtype=np.float32) ->>> model = tf.keras.Model( -... inputs=[circuit_inputs, control_input], outputs=expectation) ->>> model.compile( -... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), -... loss=tf.keras.losses.mean_squared_error) ->>> history = model.fit( -... x=[tfq.convert_to_tensor([circuit] * 2), data_in], -... y=data_out, -... epochs=100) - -``` - - -For an example featuring this layer, please check out `Taking gradients` -in our dev website http://www.tensorflow.org/quantum/tutorials. - -Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed -Python `list` objects. In addition to this they can also be fed `tf.Tensor` -inputs, meaning that you can input all of these things from other Tensor -objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). - -Note: When specifying a new layer for a *compiled* `tf.keras.Model` using -something like `tfq.layers.SampledExpectation()(cirq.Circuit(...), ...)` -please be sure to instead use -`tfq.layers.SampledExpectation()(circuit_input_tensor, ...)` where -`circuit_input_tensor` is filled with -`tfq.convert_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This -is because compiled keras models require layer `call` inputs to be -traceable back to a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - backend=None, - differentiator=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output expectation values gained from -simulating a quantum circuit. - -#### Args: - - -* `backend`: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. -* `differentiator`: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit tfq.differentiators.Differentiator. - Defaults to None, which uses `parameter_shift.ParameterShift()`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and its submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/State.md b/docs/api_docs/python/tfq/layers/State.md deleted file mode 100644 index 408a96a61..000000000 --- a/docs/api_docs/python/tfq/layers/State.md +++ /dev/null @@ -1,928 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.State - - - - - -
- - - View source on GitHub - -
- - - -## Class `State` - -A Layer that simulates a quantum state. - - - - - -Given an input circuit and set of parameter values, Simulate a quantum state -and output it to the Tensorflow graph. - - -A more common application is for determining the set of states produced -by a parametrized circuit where the values of the parameters vary. Suppose -we want to generate a family of states with varying degrees of entanglement -ranging from separable to maximally entangled. We first define a -parametrized circuit that can accomplish this - -``` ->>> q0, q1 = cirq.GridQubit.rect(1, 2) ->>> alpha = sympy.Symbol('alpha') # degree of entanglement between q0, q1 ->>> parametrized_bell_circuit = cirq.Circuit( -... cirq.H(q0), cirq.CNOT(q0, q1) ** alpha) -``` - -Now pass all of the alpha values desired to tfq.layers.State to compute -a tensor of states corresponding to these preparation angles. - -``` ->>> state_layer = tfq.layers.State() ->>> alphas = tf.reshape(tf.range(0, 1.1, delta=0.5), (3, 1)) # FIXME: #805 ->>> state_layer(parametrized_bell_circuit, -... symbol_names=[alpha], symbol_values=alphas) #FIXME: cl/285479498 - -``` - - -This use case can be simplified to compute the wavefunction produced by a -fixed circuit where the values of the parameters vary. For example, this -layer produces a Bell state. - -``` ->>> q0, q1 = cirq.GridQubit.rect(1, 2) ->>> bell_circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1)) ->>> state_layer = tfq.layers.State() ->>> state_layer(bell_circuit) #FIXME: cl/285479498 - -``` - -Not specifying `symbol_names` or `symbol_values` indicates that the -circuit(s) does not contain any `sympy.Symbols` inside of it and tfq won't -look for any symbols to resolve. - - -tfq.layers.State also allows for a more complicated input signature -wherein a different (possibly parametrized) circuit is used to prepare -a state for each batch of input parameters. 
This might be useful when -the State layer is being used to generate entirely different families -of states. Suppose we want to generate a stream of states that are -either computational basis states or 'diagonal' basis states (as in the -BB84 QKD protocol). The circuits to prepare these states are: - -``` ->>> q0 = cirq.GridQubit(0, 0) ->>> bitval = sympy.Symbol('bitval') ->>> computational_circuit = cirq.Circuit(cirq.X(q0) ** bitval) ->>> diagonal_circuit = cirq.Circuit(cirq.X(q0) ** bitval, cirq.H(q0)) -``` - -Now a stream of random classical bit values can be encoded into one of -these bases by preparing a state layer and passing in the bit values -accompanied by their preparation circuits - -``` ->>> qkd_layer = tfq.layers.State() ->>> bits = [[1], [1], [0], [0]] ->>> states_to_send = [computational_circuit, -... diagonal_circuit, -... diagonal_circuit, -... computational_circuit] ->>> qkd_states = qkd_layer( -... states_to_send, symbol_names=[bitval], symbol_values=bits) ->>> # The third state was a '0' prepared in the diagonal basis: ->>> qkd_states #FIXME: cl/285479498 - -``` - -

__init__

- -View source - -``` python -__init__( - backend=None, - **kwargs -) -``` - -Instantiate a State Layer. - -Create a layer that will simulate a quantum state and output it into -the TensorFlow graph given a correct set of inputs. - -#### Args: - - -* `backend`: Optional Backend to use to simulate this state. Defaults - to the native TensorFlow Quantum state vector simulator, - however users may also specify a preconfigured cirq execution - object to use instead, which must inherit - `cirq.SimulatesFinalState`. Note that C++ Density Matrix - simulation is not yet supported so to do Density Matrix - simulation please use `cirq.DensityMatrixSimulator`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and its submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/padded_to_ragged.md b/docs/api_docs/python/tfq/padded_to_ragged.md deleted file mode 100644 index 67d6c763c..000000000 --- a/docs/api_docs/python/tfq/padded_to_ragged.md +++ /dev/null @@ -1,42 +0,0 @@ -
- - -
- -# tfq.padded_to_ragged - - - - - -
- - - View source on GitHub - -
- - - -Utility `tf.function` that converts a padded tensor to ragged. - -``` python -tfq.padded_to_ragged(masked_state) -``` - - - - - -Convert a state `tf.Tensor` padded with the value -2 to a `tf.RaggedTensor` -using efficient boolean masking. - -#### Args: - - -* `masked_state`: `tf.State` tensor with -2 padding. - -#### Returns: - - -* `state_ragged`: State tensor without padding as a `tf.RaggedTensor`. \ No newline at end of file diff --git a/docs/design.md b/docs/design.md deleted file mode 100644 index 1e0dfd918..000000000 --- a/docs/design.md +++ /dev/null @@ -1,175 +0,0 @@ -# TensorFlow Quantum design and concepts - -In October 2019, -Google announced -they achieved -quantum supremacy. -Using 53 *noisy* qubits, this demonstration was a critical first step to unlock -the full potential of quantum computing and marks the beginning of the -Noisy Intermediate-Scale Quantum (NISQ) -computing era. In the coming years, quantum devices with tens-to-hundreds of -noisy qubits are expected to become a reality. So what is possible with these -devices? - -There are many ideas for leveraging NISQ quantum computing including -optimization, quantum simulation, cryptography, and machine learning. -TensorFlow Quantum (TFQ) is designed to help researchers experiment -with these ideas. Researchers create and run *quantum circuits*. It integrates -with TensorFlow, an established machine learning framework used for research and -production. TensorFlow Quantum provides flexible and performant tools and -constructs used by quantum machine learning researchers. TensorFlow Quantum -hopes to bridge the quantum and classical machine learning communities—and -enrich both with new perspectives and ideas. - -## NISQ quantum machine learning - -During the NISQ-era, quantum algorithms with known speedups over classical -algorithms—like -Shor's factoring algorithm or -Grover's search algorithm—are -not yet possible at a meaningful scale. 
- -A goal of TensorFlow Quantum is to help discover algorithms for the -NISQ-era, with particular interest in: - -1. *Use classical machine learning to enhance NISQ algorithms.* The hope is that - techniques from classical machine learning can enhance our understanding of - quantum computing. For example, - this paper - shows a recurrent neural network (RNN) used to discover that optimization of - the control parameters for algorithms like the QAOA and VQE are more efficient - than simple off the shelf optimizers. And - this paper - uses reinforcement learning to help mitigate errors and produce higher - quality quantum gates. -2. *Model quantum data with quantum circuits.* Classically modeling quantum data - is possible if you have an exact description of the datasource—but sometimes - this isn’t possible. To solve this problem, you can try modeling on the - quantum computer itself and measure/observe the important statistics. - This paper - shows a quantum circuit designed with a structure analogous to a - convolutional neural network (CNN) to detect different topological phases of - matter. The quantum computer holds the data and the model. The classical - processor sees only measurement samples from the model output and never the - data itself. In - this paper - the authors learn to compress information about quantum many-body systems - using a DMERA model. - -Other areas of interest in quantum machine learning include: - -1. Modeling purely classical data on quantum computers. -2. Quantum-inspired classical algorithms. TFQ does not contain any purely - classical algorithms that are quantum-inspired. - -While these last two areas did not inform the design of TensorFlow Quantum, -you can still use TFQ for research here. For example, in -this paper -the authors use a quantum computer to solve some purely classical data problems— -which could be implemented in TFQ. 
- - -## Software components - -TensorFlow Quantum is designed for the problems of NISQ-era quantum machine -learning. Integration with [TensorFlow](https://www.tensorflow.org/overview) and -[Keras](https://www.tensorflow.org/guide/keras/overview) is seamless and -performant. And the `tfq.datasets` module allows researchers to experiment and -converse about new and interesting quantum datasets. - -### Primitives - -TensorFlow Quantum implements the components needed to integrate TensorFlow with -quantum computing hardware. To that end, TFQ introduces two datatype primitives: - -- *Quantum circuit*: This represents - Cirq-defined - quantum circuits (`cirq.Circuit`) within TensorFlow. Create batches of - circuits of varying size, similar to batches of different real-valued - datapoints. -- *Pauli sum*: Represent linear combinations of tensor products of Pauli - operators defined in Cirq (`cirq.PauliSum`). Like circuits, create batches of - operators of varying size. - -With these primitives, TFQ can build the functionality to merge quantum -computing with TensorFlow. - -### Fundamental ops - -Using the quantum circuit primitives within a `tf.Tensor`, TensorFlow Quantum -implements ops that process these circuits and produce meaningful outputs. - -The TensorFlow ops are written in optimized C++. These ops sample from -circuits, calculate expectation values, and output the state produced by the -given circuits. Writing ops that are flexible and performant has some -challenges: - -1. Circuits are not the same size. For simulated circuits, you are unable to - create static operations (like `tf.matmul` or `tf.add`) and then substitute - different numbers for circuits of different sizes. These ops must allow for - dynamic sizes that the statically sized TensorFlow compute graph doesn't - allow. -2. Quantum data can induce a different circuit structure altogether. This is - another reason to support dynamic sizes in the TFQ ops. 
Quantum data can - represent a structural change to the underlying quantum state that is - represented by modifications to the original circuit. As new datapoints are - swapped in and out at runtime, the TensorFlow compute graph can not be - modified after it is built, so support for these varying structures is - required. -3. `cirq.Circuits` are similar to compute graphs in that they are a series of - operations—and some might contain symbols/placeholders. It is important to - make this as compatible with TensorFlow as possible. - -For performance reasons, Eigen (the C++ library used in many TensorFlow ops) is -not well suited for quantum circuit simulation. Instead, the circuit simulators -used in the quantum supremacy experiment were used as verifiers and extended for -the foundation of TFQ ops (all written with AVX2 and SSE instructions). Ops with -identical functional signatures were created that use a physical quantum -computer. Switching between a simulated and physical quantum computer is as easy -as changing a single line of code. These ops are located in the -`circuit_execution_ops.py` in `tensorflow_quantum/core/ops/`. - -### Layers - -TensorFlow Quantum layers expose sampling, expectation, and state calculation to -developers using the `tf.keras.layers.Layer` interface. It's convenient to -create a circuit layer for classical control parameters or for readout -operations. Additionally, you can create a layer with a high degree of -complexity supporting batch circuit, batch control parameter value, and perform -batch readout operations. See `tfq.layers.Sample` for an example. - -### Differentiators - -Unlike many TensorFlow operations, observables in quantum circuits do not have -formulas for gradients that are relatively easy to calculate. This is because a -classical computer can only read samples from the circuits that are run on a -quantum computer. 
- -To solve this problem, the `tfq.differentiators` module provides several -standard differentiation techniques. Users can also define their own method to -compute gradients—in both the “real world” setting of sample-based expectation -calculation, and the analytic exact world. Methods like finite difference are -often the fastest (wall clock time) in an analytic/exact environment. While -slower (wall clock time), more practical methods like -parameter shift or -stochastic methods -are often more effective. A `tfq.differentiators.Differentiator` is instantiated -and attached to an existing op with `generate_differentiable_op`, or passed to -the constructor of `tfq.layers.Expectation` or `tfq.layers.SampledExpectation`. -To implement a custom differentiator, inherit from the -`tfq.differentiators.Differentiator` class. To define a gradient operation for -sampling or state vector calculation, use `tf.custom_gradient`. - -### Datasets - -As the field of quantum computing grows, more and more quantum data and model -combinations will arise, making structured comparison more difficult. The -`tfq.datasets` module is used as the data source for quantum machine learning -tasks. It ensures structured comparisons for the model and performance. - -It is hoped that with large community contributions, the `tfq.datasets` module -will grow to enable research that is more transparent and reproducible. -Carefully curated problems in: quantum control, fermionic simulation, -classification near phase transitions, quantum sensing, etc are all great -candidates for addition to `tfq.datasets`. To propose a new dataset open -a GitHub issue. 
diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index 6bf6eded6..000000000 --- a/docs/install.md +++ /dev/null @@ -1,194 +0,0 @@ -# Install TensorFlow Quantum - -There are a few ways to set up your environment to use TensorFlow Quantum (TFQ): - -* The easiest way to learn and use TFQ requires no installation—run the - [TensorFlow Quantum tutorials](./tutorials/hello_many_worlds.ipynb) directly - in your browser using - [Google Colab](https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb). -* To use TensorFlow Quantum on a local machine, install the TFQ package using - Python's pip package manager. -* Or build TensorFlow Quantum from source. - -## Pip package - -### Requirements - -* pip 19.0 or later (requires `manylinux2010` support) -* [TensorFlow 2.1](https://www.tensorflow.org/install/pip) -* [Cirq 0.6](https://cirq.readthedocs.io/en/stable/install.html) - -See the [TensorFlow install guide](https://www.tensorflow.org/install/pip) to -set up your Python development environment and an (optional) virtual environment. - -Upgrade `pip` and install TensorFlow and Cirq (these are not included as -dependencies): - - -
-  pip3 install --upgrade pip
-  pip3 install tensorflow==2.1.0
-  pip3 install cirq==0.7.0
-
- - -### Install the package - -Install the latest stable release of TensorFlow Quantum: - - -
-  pip3 install -U tensorflow-quantum
-
- - -Success: TensorFlow Quantum is now installed. - - -## Build from source - -The following steps are tested for Ubuntu-like systems. - -### 1. Set up a Python 3 development environment - - -
-  sudo apt update
-  sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3
-  sudo apt install python3 python3-dev python3-venv python3-pip
-  python3 -m pip install --upgrade pip
-
- - -### 2. Create a virtual environment - - -
-  python3 -m venv tfq_env
-  source tfq_env/bin/activate
-
- - -### 3. Install Bazel - -See the TensorFlow -[build from source](https://www.tensorflow.org/install/source#install_bazel) -guide to install the Bazel -build system. - -To ensure compatibility with TensorFlow, `bazel` version 0.26.1 or lower is -required. To remove any existing version of Bazel: - - -
-  sudo apt-get remove bazel
-
- - -Then install Bazel version 0.26.0: - - -
-  wget https://github.com/bazelbuild/bazel/releases/download/0.26.0/bazel_0.26.0-linux-x86_64.deb
-  sudo dpkg -i bazel_0.26.0-linux-x86_64.deb
-
- - - -### 4. Build TensorFlow from source - -Read the TensorFlow [build from source](https://www.tensorflow.org/install/source) -guide for details. TensorFlow Quantum is compatible with TensorFlow version 2.1. - -Download the -TensorFlow source code: - - -
-  git clone https://github.com/tensorflow/tensorflow.git
-  cd tensorflow
-  git checkout v2.1.0
-
- -Install the TensorFlow dependencies: - - -
-  python3 -m pip install -U pip six numpy wheel setuptools mock 'future>=0.17.1'
-  python3 -m pip install -U keras_applications --no-deps
-  python3 -m pip install -U keras_preprocessing --no-deps
-
- - -Configure the TensorFlow build. The default Python location and Python library -paths should point inside the virtual environment. The default options are -recommended: - - -
-  ./configure
-
- - -Verify that your Bazel version is correct: - - -
-  bazel version
-
- - -Build the TensorFlow package: - - -
-  bazel build -c opt --cxxopt="-O3" --cxxopt="-march=native" --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" //tensorflow/tools/pip_package:build_pip_package
-
- - -Note: It may take over an hour to build the package. - -After the build is complete, install the package: - - -
-  ./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
-  pip install /tmp/tensorflow_pkg/name_of_generated_wheel.whl
-
- - -### 5. Download TensorFlow Quantum - -Download the TensorFlow Quantum source code and install the requirements: - - -
-  cd ..
-  git clone https://github.com/tensorflow/quantum.git
-  cd quantum
-  python3 -m pip install -r requirements.txt
-
- - -Verify your Bazel version (since it can auto-update): - - -
-  bazel version
-
- - -### 6. Build the TensorFlow Quantum pip package - -Build the TensorFlow Quantum pip package and install: - - -
-  ./configure.sh
-  bazel build -c opt --cxxopt="-O3" --cxxopt="-march=native" --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" release:build_pip_package
-  bazel-bin/release/build_pip_package /tmp/tfquantum/
-  python3 -m pip install /tmp/tfquantum/name_of_generated_wheel.whl
-
- - -Success: TensorFlow Quantum is now installed. diff --git a/docs/overview.md b/docs/overview.md deleted file mode 100644 index 66a67fb59..000000000 --- a/docs/overview.md +++ /dev/null @@ -1,56 +0,0 @@ -# TensorFlow Quantum - -TensorFlow Quantum (TFQ) is a Python framework for hybrid quantum-classical -machine learning. As an application framework, TFQ allows quantum algorithm -researchers and ML application researchers to leverage Google’s quantum -computing frameworks, all from within TensorFlow. - -TensorFlow Quantum focuses on modeling quantum data. It provides tools to -interleave quantum algorithms and logic designed in -Cirq with -TensorFlow. A basic understanding of quantum computing is required to -effectively use TensorFlow Quantum. - -After Google's -quantum supremacy -milestone, the -Google Quantum AI team -is focused on developing and implementing new algorithms to run on a quantum -computer—that have -real world applications. - -To get started with TensorFlow Quantum, see the [install guide](install.md) and -read through some of the runnable -[notebook tutorials](./tutorials/hello_many_worlds.ipynb). - -## Design - -TensorFlow Quantum implements the components needed to smoothly integrate -TensorFlow with quantum computing hardware. To that end, TensorFlow Quantum -introduces two datatype primitives: - -- *Quantum circuit*: This represents Cirq-defined quantum circuits within - TensorFlow. Create batches of circuits of varying size, similar to batches of - different real-valued datapoints. -- *Pauli sum*: Represent linear combinations of tensor products of Pauli - operators defined in Cirq. Like circuits, create batches of operators of - varying size. - -Using these primitives to represent quantum circuits, TensorFlow Quantum -provides the following operations to developers: - -- Sample from output distributions of batches of circuits. -- Calculate the expectation value of batches of Pauli sums on batches of - circuits. 
TFQ implements backpropagation-compatible gradient calculation. -- Simulate batches of circuits and states. While inspecting all quantum state - amplitudes directly throughout a quantum circuit is inefficient at scale in - the real world, state simulation can help researchers understand how a quantum - circuit maps states to a near exact level of precision. - -For more details about TFQ design choices and implementation, read the -[Design and concepts](design.md) guide. - -## Report issues - -Report bugs or feature requests using the -TensorFlow Quantum issue tracker. diff --git a/docs/tutorials/barren_plateaus.ipynb b/docs/tutorials/barren_plateaus.ipynb deleted file mode 100644 index 72f24a3be..000000000 --- a/docs/tutorials/barren_plateaus.ipynb +++ /dev/null @@ -1,547 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "barren_plateaus.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Barren plateaus" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DyEcfFapraq6", - "colab_type": "text" - }, - "source": [ - "In this example you will explore the result of McClean, 2019 that says not just any quantum neural network structure will do well when it comes to learning. In particular you will see that a certain large family of random quantum circuits do not serve as good quantum neural networks, because they have gradients that vanish almost everywhere. In this example you won't be training any models for a specific learning problem, but instead focusing on the simpler problem of understanding the behaviors of gradients." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zB_Xw0Y9rVNi", - "colab_type": "text" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1PaclXeSrrMW", - "colab_type": "text" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit\n", - "\n", - "np.random.seed(1234)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. Summary\n", - "\n", - "The following images are from McClean, 2019.\n", - "\n", - "Random quantum circuits with many blocks that look like this ($R_{P}(\\theta)$ is a random Pauli rotation):
\n", - "\n", - "\n", - "Where the $f(x)$ is the expectation value w.r.t. $Z_{a}Z_{b}$ for any qubits $a$ and $b$:
\n", - "\n", - "\n", - "Has the problem that $f'(x)$ has a mean very close to 0 and does not vary much. You will see this below:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "y31qSRCczI-L" - }, - "source": [ - "## 2. Generating random circuits\n", - "\n", - "The construction from the paper is straightforward to follow. The following implements a simple function that generates a random quantum circuit—sometimes referred to as a *quantum neural network* (QNN)—with the given depth on a set of qubits:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Nh9vrgPBks7O", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def generate_random_qnn(qubits, symbol, depth):\n", - " \"\"\"Generate random QNN's with the same structure from McClean et al.\"\"\"\n", - " circuit = cirq.Circuit()\n", - " for qubit in qubits:\n", - " circuit += cirq.Ry(np.pi / 4.0)(qubit)\n", - "\n", - " for d in range(depth):\n", - " # Add a series of single qubit rotations.\n", - " for i, qubit in enumerate(qubits):\n", - " random_n = np.random.uniform()\n", - " random_rot = np.random.uniform(\n", - " ) * 2.0 * np.pi if i != 0 or d != 0 else symbol\n", - " if random_n > 2. / 3.:\n", - " # Add a Z.\n", - " circuit += cirq.Rz(random_rot)(qubit)\n", - " elif random_n > 1. / 3.:\n", - " # Add a Y.\n", - " circuit += cirq.Ry(random_rot)(qubit)\n", - " else:\n", - " # Add a X.\n", - " circuit += cirq.Rx(random_rot)(qubit)\n", - "\n", - " # Add CZ ladder.\n", - " for src, dest in zip(qubits, qubits[1:]):\n", - " circuit += cirq.CZ(src, dest)\n", - "\n", - " return circuit\n", - "\n", - "\n", - "generate_random_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gUuQfOyrj_Hu", - "colab_type": "text" - }, - "source": [ - "The authors investigate the gradient of a single parameter $\\theta_{1,1}$. 
Let's follow along by placing a `sympy.Symbol` in the circuit where $\\theta_{1,1}$ would be. Since the authors do not analyze the statistics for any other symbols in the circuit, let's replace them with random values now instead of later." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lAVDRQ87k3md", - "colab_type": "text" - }, - "source": [ - "## 3. Running the circuits\n", - "\n", - "Generate a few of these circuits along with an observable to test the claim that the gradients don't vary much. First, generate a batch of random circuits. Choose a random *ZZ* observable and batch calculate the gradients and variance using TensorFlow Quantum." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qoDDaHgwj_Hz", - "colab_type": "text" - }, - "source": [ - "### 3.1 Batch variance computation\n", - "\n", - "Let's write a helper function that computes the variance of the gradient of a given observable over a batch of circuits:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "OkdndnBKk8B8", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def process_batch(circuits, symbol, op):\n", - " \"\"\"Compute the variance of a batch of expectations w.r.t. op on each circuit that \n", - " contains `symbol`. 
Note that this method sets up a new compute graph every time it is\n", - " called so it isn't as performant as possible.\"\"\"\n", - "\n", - " # Setup a simple layer to batch compute the expectation gradients.\n", - " expectation = tfq.layers.Expectation()\n", - " \n", - " # Prep the inputs as tensors\n", - " circuit_tensor = tfq.convert_to_tensor(circuits)\n", - " values_tensor = tf.convert_to_tensor(\n", - " np.random.uniform(0, 2 * np.pi, (n_circuits, 1)).astype(np.float32))\n", - "\n", - " # Use TensorFlow GradientTape to track gradients.\n", - " with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " forward = expectation(\n", - " circuit_tensor,\n", - " operators=op,\n", - " symbol_names=[symbol],\n", - " symbol_values=values_tensor)\n", - "\n", - " # Return variance of gradients across all circuits.\n", - " grads = g.gradient(forward, values_tensor)\n", - " grad_var = tf.math.reduce_std(grads, axis=0)\n", - " return grad_var.numpy()[0]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JINYTIjDj_H1", - "colab_type": "text" - }, - "source": [ - "### 3.1 Set up and run\n", - "\n", - "Choose the number of random circuits to generate along with their depth and the amount of qubits they should act on. Then plot the results." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "xAGBcq9Bj_H3", - "colab_type": "code", - "colab": {} - }, - "source": [ - "n_qubits = [2 * i for i in range(2, 7)\n", - " ] # Ranges studied in paper are between 2 and 24.\n", - "depth = 50 # Ranges studied in paper are between 50 and 500.\n", - "n_circuits = 200\n", - "theta_var = []\n", - "\n", - "for n in n_qubits:\n", - " # Generate the random circuits and observable for the given n.\n", - " qubits = cirq.GridQubit.rect(1, n)\n", - " symbol = sympy.Symbol('theta')\n", - " circuits = [\n", - " generate_random_qnn(qubits, symbol, depth) for _ in range(n_circuits)\n", - " ]\n", - " op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])\n", - " theta_var.append(process_batch(circuits, symbol, op))\n", - "\n", - "plt.semilogy(n_qubits, theta_var)\n", - "plt.title('Gradient Variance in QNNs')\n", - "plt.xlabel('n_qubits')\n", - "plt.ylabel('$\\\\partial \\\\theta$ variance')\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qY2E0CFjxRE9", - "colab_type": "text" - }, - "source": [ - "This plot shows that for quantum machine learning problems, you can't simply guess a random QNN ansatz and hope for the best. Some structure must be present in the model circuit in order for gradients to vary to the point where learning can happen." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4RE_idhmj_H6", - "colab_type": "text" - }, - "source": [ - "## 4. Heuristics\n", - "\n", - "An interesting heuristic by Grant, 2019 allows one to start very close to random, but not quite. Using the same circuits as McClean et al., the authors propose a different initialization technique for the classical control parameters to avoid barren plateaus. 
The initialization technique starts some layers with totally random control parameters—but, in the layers immediately following, choose parameters such that the initial transformation made by the first few layers is undone. The authors call this an *identity block*.\n", - "\n", - "The advantage of this heuristic is that by changing just a single parameter, all other blocks outside of the current block will remain the identity—and the gradient signal comes through much stronger than before. This allows the user to pick and choose which variables and blocks to modify to get a strong gradient signal. This heuristic does not prevent the user from falling in to a barren plateau during the training phase (and restricts a fully simultaneous update), it just guarantees that you can start outside of a plateau." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Fofv9hgyj_IB", - "colab_type": "text" - }, - "source": [ - "### 4.1 New QNN construction\n", - "\n", - "Now construct a function to generate identity block QNNs. This implementation is slightly different than the one from the paper. For now, look at the behavior of the gradient of a single parameter so it is consistent with McClean et al, so some simplifications can be made.\n", - "\n", - "To generate an identity block and train the model, generally you need $U1(\\theta_{1a}) U1(\\theta_{1b})^{\\dagger}$ and not $U1(\\theta_1) U1(\\theta_1)^{\\dagger}$. Initially $\\theta_{1a}$ and $\\theta_{1b}$ are the same angles but they are learned independently. Otherwise, you will always get the identity even after training. The choice for the number of identity blocks is empirical. The deeper the block, the smaller the variance in the middle of the block. But at the start and end of the block, the variance of the parameter gradients should be large. 
" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "PL7mTHEVj_IC", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def generate_identity_qnn(qubits, symbol, block_depth, total_depth):\n", - " \"\"\"Generate random QNN's with the same structure from Grant et al.\"\"\"\n", - " circuit = cirq.Circuit()\n", - "\n", - " # Generate initial block with symbol.\n", - " prep_and_U = generate_random_qnn(qubits, symbol, block_depth)\n", - " circuit += prep_and_U\n", - "\n", - " # Generate dagger of initial block without symbol.\n", - " U_dagger = (prep_and_U[1:])**-1\n", - " circuit += cirq.resolve_parameters(\n", - " U_dagger, param_resolver={symbol: np.random.uniform() * 2 * np.pi})\n", - "\n", - " for d in range(total_depth - 1):\n", - " # Get a random QNN.\n", - " prep_and_U_circuit = generate_random_qnn(\n", - " qubits,\n", - " np.random.uniform() * 2 * np.pi, block_depth)\n", - "\n", - " # Remove the state-prep component\n", - " U_circuit = prep_and_U_circuit[1:]\n", - "\n", - " # Add U\n", - " circuit += U_circuit\n", - "\n", - " # Add U^dagger\n", - " circuit += U_circuit**-1\n", - "\n", - " return circuit\n", - "\n", - "\n", - "generate_identity_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2, 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ifWrl19kj_IG", - "colab_type": "text" - }, - "source": [ - "### 4.2 Comparison\n", - "\n", - "Here you can see that the heuristic does help to keep the variance of the gradient from vanishing as quickly:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "62kmsVAXj_IH", - "colab_type": "code", - "colab": {} - }, - "source": [ - "block_depth = 10\n", - "total_depth = 5\n", - "\n", - "heuristic_theta_var = []\n", - "\n", - "for n in n_qubits:\n", - " # Generate the identity block circuits and observable for the given n.\n", - " qubits = cirq.GridQubit.rect(1, n)\n", - " symbol = sympy.Symbol('theta')\n", - " circuits = [\n", - " 
generate_identity_qnn(qubits, symbol, block_depth, total_depth)\n", - " for _ in range(n_circuits)\n", - " ]\n", - " op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])\n", - " heuristic_theta_var.append(process_batch(circuits, symbol, op))\n", - "\n", - "plt.semilogy(n_qubits, theta_var)\n", - "plt.semilogy(n_qubits, heuristic_theta_var)\n", - "plt.title('Heuristic vs. Random')\n", - "plt.xlabel('n_qubits')\n", - "plt.ylabel('$\\\\partial \\\\theta$ variance')\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "E0XNSoblj_IK", - "colab_type": "text" - }, - "source": [ - "This is a great improvement in getting stronger gradient signals from (near) random QNNs.\n", - "\n", - "### 4.3 Summary\n", - "\n", - "When used in practice, Grant et al. has great success in avoiding barren plateaus in QNN classification and Variational Quantum Eigensolver (VQE) circuits. In the figure below (from the paper), each colored line represents the variance of the gradients of the first three parameters of their model used in the QNN classification task as it was trained over time. As you can see, the initial variances are well above the barren plateau line, but do gradually slope downwards. 
All of the instances run here did avoid barren plateaus.\n", - "\n", - "" - ] - } - ] -} diff --git a/docs/tutorials/gradients.ipynb b/docs/tutorials/gradients.ipynb deleted file mode 100644 index c2a92b1d9..000000000 --- a/docs/tutorials/gradients.ipynb +++ /dev/null @@ -1,894 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "gradients.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." 
- ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Calculate gradients" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "This tutorial explores gradient calculation algorithms for the expectation values of quantum circuits.\n", - "\n", - "Calculating the gradient of the expectation value of a certain observable in a quantum circuit is an involved process. Expectation values of observables do not have the luxury of having analytic gradient formulas that are always easy to write down—unlike traditional machine learning transformations such as matrix multiplication or vector addition that have analytic gradient formulas which are easy to write down. As a result, there are different quantum gradient calculation methods that come in handy for different scenarios. This tutorial compares and contrasts two different differentiation schemes." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pvG0gAJqGYJo", - "colab_type": "text" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OIbP5hklC338", - "colab_type": "text" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MkTqyoSxGUfB", - "colab_type": "text" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. Preliminary\n", - "\n", - "Let's make the notion of gradient calculation for quantum circuits a little more concrete. 
Suppose you have a parameterized circuit like this one:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YkPYJ_Ak-GKu", - "colab_type": "code", - "colab": {} - }, - "source": [ - "qubit = cirq.GridQubit(0, 0)\n", - "my_circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol('alpha'))\n", - "SVGCircuit(my_circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wgQIlCWy-MVr", - "colab_type": "text" - }, - "source": [ - "Along with an observable:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "xurmJdFy-Jae", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pauli_x = cirq.X(qubit)\n", - "pauli_x" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "j3OzKYe5NT_W", - "colab_type": "text" - }, - "source": [ - "Looking at this operator you know that $⟨Y(\\alpha)| X | Y(\\alpha)⟩ = \\sin(\\pi \\alpha)$" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Ps-pd2mndXs7", - "colab": {} - }, - "source": [ - "def my_expectation(op, alpha):\n", - " \"\"\"Compute ⟨Y(alpha)| `op` | Y(alpha)⟩\"\"\"\n", - " params = {'alpha': alpha}\n", - " sim = cirq.Simulator()\n", - " final_state = sim.simulate(my_circuit, params).final_state\n", - " return op.expectation_from_wavefunction(final_state, {qubit: 0}).real\n", - "\n", - "\n", - "my_alpha = 0.3\n", - "print(\"Expectation=\", my_expectation(pauli_x, my_alpha))\n", - "print(\"Sin Formula=\", np.sin(np.pi * my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "zcCX109cJUaz" - }, - "source": [ - " and if you define $f_{1}(\\alpha) = ⟨Y(\\alpha)| X | Y(\\alpha)⟩$ then $f_{1}^{'}(\\alpha) = \\pi \\cos(\\pi \\alpha)$. 
Let's check this:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "VMq7EayNRyQb", - "colab": {} - }, - "source": [ - "def my_grad(obs, alpha, eps=0.01):\n", - " grad = 0\n", - " f_x = my_expectation(obs, alpha)\n", - " f_x_prime = my_expectation(obs, alpha + eps)\n", - " return ((f_x_prime - f_x) / eps).real\n", - "\n", - "\n", - "print('Finite difference:', my_grad(pauli_x, my_alpha))\n", - "print('Cosine formula: ', np.pi * np.cos(np.pi * my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-SUlLpXBeicF" - }, - "source": [ - "## 2. The need for a differentiator\n", - "\n", - "With larger circuits, you won't always be so lucky to have a formula that precisely calculates the gradients of a given quantum circuit. In the event that a simple formula isn't enough to calculate the gradient, the `tfq.differentiators.Differentiator` class allows you to define algorithms for computing the gradients of your circuits. For instance you can recreate the above example in TensorFlow Quantum (TFQ) with:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Om76ZLu8NT_i", - "colab_type": "code", - "colab": {} - }, - "source": [ - "expectation_calculation = tfq.layers.Expectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[my_alpha]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lx3y2DX9NT_k", - "colab_type": "text" - }, - "source": [ - "However, if you switch to estimating expectation based on sampling (what would happen on a true device) the values can change a little bit. 
This means you now have an imperfect estimate:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "v27rRyAHNT_l", - "colab_type": "code", - "colab": {} - }, - "source": [ - "sampled_expectation_calculation = tfq.layers.SampledExpectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[my_alpha]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Igwa3EnzNT_p", - "colab_type": "text" - }, - "source": [ - "This can quickly compound into a serious accuracy problem when it comes to gradients:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "StljXH38NT_q", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Make input_points = [batch_size, 1] array.\n", - "input_points = np.linspace(0, 5, 200)[:, np.newaxis].astype(np.float32)\n", - "exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=input_points)\n", - "imperfect_outputs = sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=input_points)\n", - "plt.title('Forward Pass Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f(x)$')\n", - "plt.plot(input_points, exact_outputs, label='Analytic')\n", - "plt.plot(input_points, imperfect_outputs, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "dfXObk7KNT_t", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Gradients are a much different story.\n", - "values_tensor = tf.convert_to_tensor(input_points)\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " 
exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " imperfect_outputs = sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "sampled_finite_diff_gradients = g.gradient(imperfect_outputs, values_tensor)\n", - "\n", - "plt.title('Gradient Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f^{\\'}(x)$')\n", - "plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\n", - "plt.plot(input_points, sampled_finite_diff_gradients, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Ld34TJvTNT_w", - "colab_type": "text" - }, - "source": [ - "Here you can see that although the finite difference formula is fast to compute the gradients themselves in the analytical case, when it came to the sampling based methods it was far too noisy. More careful techniques must be used to ensure a good gradient can be calculated. 
Next you will look at a much slower technique that wouldn't be as well suited for analytical expectation gradient calculations, but does perform much better in the real-world sample based case:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "JsBxH_RaNT_x", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# A smarter differentiation scheme.\n", - "gradient_safe_sampled_expectation = tfq.layers.SampledExpectation(\n", - " differentiator=tfq.differentiators.ParameterShift())\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " imperfect_outputs = gradient_safe_sampled_expectation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values = values_tensor)\n", - "\n", - "sampled_param_shift_gradients = g.gradient(imperfect_outputs, values_tensor)\n", - "\n", - "plt.title('Gradient Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f^{\\'}(x)$')\n", - "plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\n", - "plt.plot(input_points, sampled_param_shift_gradients, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0xlUlh8wNT_z", - "colab_type": "text" - }, - "source": [ - "From the above you can see that certain differentiators are best used for particular research scenarios. In general, the slower sample-based methods that are robust to device noise, etc., are great differentiators when testing or implementing algorithms in a more \"real world\" setting. Faster methods like finite difference are great for analytical calculations and you want higher throughput, but aren't yet concerned with the device viability of your algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FaijzZ4MNT_0", - "colab_type": "text" - }, - "source": [ - "## 3. 
Multiple observables\n", - "\n", - "Let's introduce a second observable and see how TensorFlow Quantum supports multiple observables for a single circuit." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ytgB_DqDNT_3", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pauli_z = cirq.Z(qubit)\n", - "pauli_z" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "r51TZls4NT_6", - "colab_type": "text" - }, - "source": [ - "If this observable is used with the same circuit as before, then you have $f_{2}(\\alpha) = ⟨Y(\\alpha)| Z | Y(\\alpha)⟩ = \\cos(\\pi \\alpha)$ and $f_{2}^{'}(\\alpha) = -\\pi \\sin(\\pi \\alpha)$. Perform a quick check:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "19FKgu0ANT_7", - "colab_type": "code", - "colab": {} - }, - "source": [ - "test_value = 0.\n", - "\n", - "print('Finite difference:', my_grad(pauli_z, test_value))\n", - "print('Sin formula: ', -np.pi * np.sin(np.pi * test_value))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_33Y5mL0NT_-", - "colab_type": "text" - }, - "source": [ - "It's a match (close enough).\n", - "\n", - "Now if you define $g(\\alpha) = f_{1}(\\alpha) + f_{2}(\\alpha)$ then $g'(\\alpha) = f_{1}^{'}(\\alpha) + f^{'}_{2}(\\alpha)$. Defining more than one observable in TensorFlow Quantum to use along with a circuit is equivalent to adding on more terms to $g$.\n", - "\n", - "This means that the gradient of a particular symbol in a circuit is equal to the sum of the gradients with regards to each observable for that symbol applied to that circuit. This is compatible with TensorFlow gradient taking and backpropagation (where you give the sum of the gradients over all observables as the gradient for a particular symbol)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "3WFJfFEbNT_-", - "colab_type": "code", - "colab": {} - }, - "source": [ - "sum_of_outputs = tfq.layers.Expectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "sum_of_outputs(\n", - " my_circuit,\n", - " operators=[pauli_x, pauli_z],\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[test_value]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-ujQUu3WNUAB", - "colab_type": "text" - }, - "source": [ - "Here you see the first entry is the expectation w.r.t Pauli X, and the second is the expectation w.r.t Pauli Z. Now when you take the gradient:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jcAQa9l0NUAB", - "colab_type": "code", - "colab": {} - }, - "source": [ - "test_value_tensor = tf.convert_to_tensor([[test_value]])\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(test_value_tensor)\n", - " outputs = sum_of_outputs(my_circuit,\n", - " operators=[pauli_x, pauli_z],\n", - " symbol_names=['alpha'],\n", - " symbol_values=test_value_tensor)\n", - "\n", - "sum_of_gradients = g.gradient(outputs, test_value_tensor)\n", - "\n", - "print(my_grad(pauli_x, test_value) + my_grad(pauli_z, test_value))\n", - "print(sum_of_gradients.numpy())" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-fZmbYGANUAE", - "colab_type": "text" - }, - "source": [ - "Here you have verified that the sum of the gradients for each observable is indeed the gradient of $\\alpha$. This behavior is supported by all TensorFlow Quantum differentiators and plays a crucial role in the compatibility with the rest of TensorFlow." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lZsGG7lWNUAF", - "colab_type": "text" - }, - "source": [ - "## 4. 
Advanced usage\n", - "Here you will learn how to define your own custom differentiation routines for quantum circuits.\n", - "All differentiators that exist inside of TensorFlow Quantum subclass `tfq.differentiators.Differentiator`. A differentiator must implement `differentiate_analytic` and `differentiate_sampled`.\n", - "\n", - "The following uses TensorFlow Quantum constructs to implement the closed form solution from the first part of this tutorial." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "5iY4q6FKNUAG", - "colab_type": "code", - "colab": {} - }, - "source": [ - "class MyDifferentiator(tfq.differentiators.Differentiator):\n", - " \"\"\"A Toy differentiator for .\"\"\"\n", - "\n", - " def __init__(self):\n", - " pass\n", - "\n", - " @tf.function\n", - " def _compute_gradient(self, symbol_values):\n", - " \"\"\"Compute the gradient based on symbol_values.\"\"\"\n", - "\n", - " # f(x) = sin(pi * x)\n", - " # f'(x) = pi * cos(pi * x)\n", - " return tf.cast(tf.cos(symbol_values * np.pi) * np.pi, tf.float32)\n", - "\n", - " @tf.function\n", - " def differentiate_analytic(self, programs, symbol_names, symbol_values,\n", - " pauli_sums, forward_pass_vals, grad):\n", - " \"\"\"Specify how to differentiate a circuit with analytical expectation.\n", - "\n", - " This is called at graph runtime by TensorFlow. `differentiate_analytic`\n", - " should calculate the gradient of a batch of circuits and return it\n", - " formatted as indicated below. 
See\n", - " `tfq.differentiators.ForwardDifference` for an example.\n", - "\n", - " Args:\n", - " programs: `tf.Tensor` of strings with shape [batch_size] containing\n", - " the string representations of the circuits to be executed.\n", - " symbol_names: `tf.Tensor` of strings with shape [n_params], which\n", - " is used to specify the order in which the values in\n", - " `symbol_values` should be placed inside of the circuits in\n", - " `programs`.\n", - " symbol_values: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_params] specifying parameter values to resolve\n", - " into the circuits specified by programs, following the ordering\n", - " dictated by `symbol_names`.\n", - " pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n", - " containing the string representation of the operators that will\n", - " be used on all of the circuits in the expectation calculations.\n", - " forward_pass_vals: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_ops] containing the output of the forward pass\n", - " through the op you are differentiating.\n", - " grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n", - " representing the gradient backpropagated to the output of the\n", - " op you are differentiating through.\n", - "\n", - " Returns:\n", - " A `tf.Tensor` with the same shape as `symbol_values` representing\n", - " the gradient backpropagated to the `symbol_values` input of the op\n", - " you are differentiating through.\n", - " \"\"\"\n", - "\n", - " # Computing gradients just based off of symbol_values.\n", - " return self._compute_gradient(symbol_values) * grad\n", - "\n", - " @tf.function\n", - " def differentiate_sampled(self, programs, symbol_names, symbol_values,\n", - " pauli_sums, num_samples, forward_pass_vals, grad):\n", - " \"\"\"Specify how to differentiate a circuit with sampled expectation.\n", - "\n", - " This is called at graph runtime by TensorFlow. 
`differentiate_sampled`\n", - " should calculate the gradient of a batch of circuits and return it\n", - " formatted as indicated below. See\n", - " `tfq.differentiators.ForwardDifference` for an example.\n", - "\n", - " Args:\n", - " programs: `tf.Tensor` of strings with shape [batch_size] containing\n", - " the string representations of the circuits to be executed.\n", - " symbol_names: `tf.Tensor` of strings with shape [n_params], which\n", - " is used to specify the order in which the values in\n", - " `symbol_values` should be placed inside of the circuits in\n", - " `programs`.\n", - " symbol_values: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_params] specifying parameter values to resolve\n", - " into the circuits specified by programs, following the ordering\n", - " dictated by `symbol_names`.\n", - " pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n", - " containing the string representation of the operators that will\n", - " be used on all of the circuits in the expectation calculations.\n", - " num_samples: `tf.Tensor` of positive integers representing the\n", - " number of samples per term in each term of pauli_sums used\n", - " during the forward pass.\n", - " forward_pass_vals: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_ops] containing the output of the forward pass\n", - " through the op you are differentiating.\n", - " grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n", - " representing the gradient backpropagated to the output of the\n", - " op you are differentiating through.\n", - "\n", - " Returns:\n", - " A `tf.Tensor` with the same shape as `symbol_values` representing\n", - " the gradient backpropagated to the `symbol_values` input of the op\n", - " you are differentiating through.\n", - " \"\"\"\n", - " return self._compute_gradient(symbol_values) * grad" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": 
"bvEgw2m6NUAI", - "colab_type": "text" - }, - "source": [ - "This new differentiator can now be used with existing `tfq.layer` objects:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "QrKnkWswNUAJ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "custom_dif = MyDifferentiator()\n", - "custom_grad_expectation = tfq.layers.Expectation(differentiator=custom_dif)\n", - "\n", - "# Now let's get the gradients with finite diff.\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=[pauli_x],\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "\n", - "analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n", - "\n", - "# Now let's get the gradients with custom diff.\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " my_outputs = custom_grad_expectation(\n", - " my_circuit,\n", - " operators=[pauli_x],\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "\n", - "my_gradients = g.gradient(my_outputs, values_tensor)\n", - "\n", - "plt.subplot(1, 2, 1)\n", - "plt.title('Exact Gradient')\n", - "plt.plot(input_points, analytic_finite_diff_gradients.numpy())\n", - "plt.xlabel('x')\n", - "plt.ylabel('f(x)')\n", - "plt.subplot(1, 2, 2)\n", - "plt.title('My Gradient')\n", - "plt.plot(input_points, my_gradients.numpy())\n", - "plt.xlabel('x')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oXqcJWigNUAL", - "colab_type": "text" - }, - "source": [ - "This new differentiator can now be used to generate differentiable ops.\n", - "\n", - "Key Point: A differentiator that has been previously attached to an op must be refreshed before attaching to a new op, because a differentiator may only be attached to one op at a time." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "F_WHcj3bNUAM", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Create a noisy sample based expectation op.\n", - "expectation_sampled = tfq.get_sampled_expectation_op(\n", - " cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.01)))\n", - "\n", - "# Make it differentiable with your differentiator:\n", - "# Remember to refresh the differentiator before attaching the new op\n", - "custom_dif.refresh()\n", - "differentiable_op = custom_dif.generate_differentiable_op(\n", - " sampled_op=expectation_sampled)\n", - "\n", - "# Prep op inputs.\n", - "circuit_tensor = tfq.convert_to_tensor([my_circuit])\n", - "op_tensor = tfq.convert_to_tensor([[pauli_x]])\n", - "single_value = tf.convert_to_tensor([[my_alpha]])\n", - "num_samples_tensor = tf.convert_to_tensor([[1000]])\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(single_value)\n", - " forward_output = differentiable_op(circuit_tensor, ['alpha'], single_value,\n", - " op_tensor, num_samples_tensor)\n", - "\n", - "my_gradients = g.gradient(forward_output, single_value)\n", - "\n", - "print('---TFQ---')\n", - "print('Foward: ', forward_output.numpy())\n", - "print('Gradient:', my_gradients.numpy())\n", - "print('---Original---')\n", - "print('Forward: ', my_expectation(pauli_x, my_alpha))\n", - "print('Gradient:', my_grad(pauli_x, my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OGWcpqzDNUAP", - "colab_type": "text" - }, - "source": [ - "Success: Now you can use all the differentiators that TensorFlow Quantum has to offer—and define your own." 
- ] - } - ] -} diff --git a/docs/tutorials/hello_many_worlds.ipynb b/docs/tutorials/hello_many_worlds.ipynb deleted file mode 100644 index 845e736c4..000000000 --- a/docs/tutorials/hello_many_worlds.ipynb +++ /dev/null @@ -1,793 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "hello_many_worlds.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." 
- ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Hello, many worlds" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "6tYn2HaAUgH0" - }, - "source": [ - "This tutorial shows how a classical neural network can learn to correct qubit calibration errors. It introduces Cirq, a Python framework to create, edit, and invoke Noisy Intermediate Scale Quantum (NISQ) circuits, and demonstrates how Cirq interfaces with TensorFlow Quantum." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "sPZoNKvpUaqa" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-manylinux1_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "F1L8h1YKUvIO" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. The Basics" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "y31qSRCczI-L" - }, - "source": [ - "### 1.1 Cirq and parameterized quantum circuits\n", - "\n", - "Before exploring TensorFlow Quantum (TFQ), let's look at some Cirq basics. Cirq is a Python library for quantum computing from Google. You use it to define circuits, including static and parameterized gates. 
Cirq uses SymPy symbols to represent free parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "2yQdmhQLCrzQ", - "colab": {} - }, - "source": [ - "a, b = sympy.symbols('a b')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "itUlpbKmDYNW" - }, - "source": [ - "The following code creates a two-qubit circuit using your parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Ps-pd2mndXs7", - "colab": {} - }, - "source": [ - "# Create two qubits.\n", - "q0, q1 = cirq.GridQubit.rect(1, 2)\n", - "\n", - "# Create a circuit on these qubits using the parameters you created above.\n", - "circuit = cirq.Circuit(\n", - " cirq.rx(a).on(q0),\n", - " cirq.ry(b).on(q1),\n", - " cirq.CNOT(control=q0, target=q1))\n", - "\n", - "SVGCircuit(circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "zcCX109cJUaz" - }, - "source": [ - "To evaluate circuits, you can use the `cirq.Simulator` interface. You replace free parameters in a circuit with specific numbers by passing in a `cirq.ParamResolver` object. The following code calculates the raw state vector output of your parameterized circuit:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "VMq7EayNRyQb", - "colab": {} - }, - "source": [ - "# Calculate a state vector with a=0.5 and b=-0.5.\n", - "resolver = cirq.ParamResolver({a: 0.5, b: -0.5})\n", - "output_state_vector = cirq.Simulator().simulate(circuit, resolver).final_state\n", - "output_state_vector" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-SUlLpXBeicF" - }, - "source": [ - "State vectors are not directly accessible outside of simulation (notice the complex numbers in the output above). 
To be physically realistic, you must specify a measurement, which converts a state vector into a real number that classical computers can understand. Cirq specifies measurements using combinations of the Pauli operators $\\hat{X}$, $\\hat{Y}$, and $\\hat{Z}$. As illustration, the following code measures $\\hat{Z}_0$ and $\\frac{1}{2}\\hat{Z}_0 + \\hat{X}_1$ on the state vector you just simulated:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "hrSnOCi3ehr_", - "colab": {} - }, - "source": [ - "z0 = cirq.Z(q0)\n", - "z0x1 = 0.5 * z0 + cirq.X(q1)\n", - "\n", - "print(z0.expectation_from_wavefunction(output_state_vector,\n", - " qubit_map={q0: 0, q1: 1}).real)\n", - "print(z0x1.expectation_from_wavefunction(output_state_vector,\n", - " qubit_map={q0: 0, q1: 1}).real)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "bkC-yjIolDNr" - }, - "source": [ - "### 1.2 Quantum circuits as tensors\n", - "\n", - "TensorFlow Quantum (TFQ) provides `tfq.convert_to_tensor`, a function that converts Cirq objects into tensors. This allows you to send Cirq objects to our quantum layers and quantum ops. 
The function can be called on lists or arrays of Cirq Circuits and Cirq Paulis:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "1gLQjA02mIyy", - "colab": {} - }, - "source": [ - "# Rank 1 tensor containing 1 circuit.\n", - "circuit_tensor = tfq.convert_to_tensor([circuit])\n", - "circuit_tensor.shape" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "aX_vEmCKmpQS", - "colab": {} - }, - "source": [ - "# Rank 1 tensor containing 2 Pauli operators.\n", - "pauli_tensor = tfq.convert_to_tensor([z0, z0x1])\n", - "pauli_tensor.shape" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FI1JLWe6m8JF" - }, - "source": [ - "### 1.3 Batching circuit simulation\n", - "\n", - "TFQ provides methods for computing expectation values, samples, and state vectors. For now, let's focus on *expectation values*.\n", - "\n", - "The highest-level interface for calculating expectation values is the `tfq.layers.Expectation` layer, which is a `tf.keras.Layer`. 
In its simplest form, this layer is equivalent to simulating a parameterized circuit over many `cirq.ParamResolvers`; however, in TFQ the batching follows TensorFlow semantics and circuits are simulated using efficient C++ code.\n", - "\n", - "Create a batch of values to substitute for our `a` and `b` parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "1fsVZhF5lIXp", - "colab": {} - }, - "source": [ - "batch_vals = np.array(np.random.uniform(0, 2*np.pi, (5, 2)), dtype=np.float32)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ip7jlGXIf22u" - }, - "source": [ - "Batching circuit execution over parameter values in Cirq requires a loop:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "RsfF53UCJtr9", - "colab": {} - }, - "source": [ - "cirq_results = []\n", - "cirq_simulator = cirq.Simulator()\n", - "\n", - "for vals in batch_vals:\n", - " resolver = cirq.ParamResolver({a: vals[0], b: vals[1]})\n", - " final_state = cirq_simulator.simulate(circuit, resolver).final_state\n", - " cirq_results.append([\n", - " z0.expectation_from_wavefunction(final_state, {q0: 0, q1: 1}).real\n", - " ])\n", - "\n", - "print('cirq batch results: \\n {}'.format(np.array(cirq_results)))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "W0JlZEu-f9Ac" - }, - "source": [ - "The same operation is simplified in TFQ:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "kGZVdcZ6y9lC", - "colab": {} - }, - "source": [ - "tfq.layers.Expectation()(\n", - " circuit, symbol_names=[a, b], symbol_values=batch_vals, operators=z0)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wppQ3TJ23mWC" - }, - "source": [ - "## 2. 
Hybrid quantum-classical optimization\n", - "\n", - "Now that you've seen the basics, let's use TensorFlow Quantum to construct a *hybrid quantum-classical neural net*. You will train a classical neural net to control a single qubit. The control will be optimized to correctly prepare the qubit in the `0` or `1` state, overcoming a simulated systematic calibration error. This figure shows the architecture:\n", - "\n", - "\n", - "\n", - "Even without a neural network this is a straightforward problem to solve, but the theme is similar to the real quantum control problems you might solve using TFQ. It demonstrates an end-to-end example of a quantum-classical computation using the `tfq.layers.ControlledPQC` layer inside of a `tf.keras.Model`." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "VjDf-nTM6ZSs" - }, - "source": [ - "### 2.1 Model definition\n", - "\n", - "Define a learnable single bit rotation, as indicated in the figure above. This will correspond to our model control circuit." - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "N-j7SCl-51-q", - "colab": {} - }, - "source": [ - "# Parameters that the classical NN will feed values into.\n", - "control_params = sympy.symbols('theta_1 theta_2 theta_3')\n", - "\n", - "# Create the parameterized circuit.\n", - "qubit = cirq.GridQubit(0, 0)\n", - "model_circuit = cirq.Circuit(\n", - " cirq.rz(control_params[0])(qubit),\n", - " cirq.ry(control_params[1])(qubit),\n", - " cirq.rx(control_params[2])(qubit))\n", - "\n", - "SVGCircuit(model_circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wfjSbsvb7g9f" - }, - "source": [ - "Now define your model. The network architecture is indicated by the plot of the model below, which is compared to the figure above to verify correctness." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Lod5uhHo7gXH", - "colab": {} - }, - "source": [ - "# This is the simulated miscalibration that the model will learn to correct.\n", - "circuits_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string, name='circuits_input')\n", - "\n", - "# Commands will be either `0` or `1`, specifying the state to set the qubit to.\n", - "commands_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float32,\n", - " name='commands_input')\n", - "\n", - "# The classical neural network layers.\n", - "d1 = tf.keras.layers.Dense(10)(commands_input)\n", - "d2 = tf.keras.layers.Dense(3)(d1)\n", - "\n", - "# TFQ layer for classically controlled models.\n", - "expectation_layer = tfq.layers.ControlledPQC(model_circuit, z0)\n", - "expectation = expectation_layer([circuits_input, d2])\n", - "\n", - "# The full Keras model is built from our layers.\n", - "model = tf.keras.Model(inputs=[circuits_input, commands_input],\n", - " outputs=expectation)\n", - "tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "bbiVHvSYVW4H" - }, - "source": [ - "### 2.2 Data definition\n", - "\n", - "The simple model is given two datapoints. The inputs are the commands for the qubit state; the outputs are the correct measurement value of $\\hat{Z}$ for each command. Below you also define the random miscalibration the model will learn to correct." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "_VYfzHffWo7n", - "colab": {} - }, - "source": [ - "# Input values to the classical NN.\n", - "commands = np.array([[0], [1]], dtype=np.float32)\n", - "\n", - "# Desired Z expectation value at output of quantum circuit.\n", - "expected_outputs = np.array([[1], [-1]], dtype=np.float32)\n", - "\n", - "# Circuits producing the random initial miscalibration.\n", - "# Note that in this example the circuits are the same, meaning that the model\n", - "# assumes the miscalibration is constant, independent of the state requested.\n", - "random_rotations = np.random.uniform(0, 2 * np.pi, 3)\n", - "datapoint_circuits = tfq.convert_to_tensor([cirq.Circuit(\n", - " cirq.rx(random_rotations[0])(qubit),\n", - " cirq.ry(random_rotations[1])(qubit),\n", - " cirq.rz(random_rotations[2])(qubit))\n", - "] * 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "gB--UhZZYgVY" - }, - "source": [ - "### 2.3 Training" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "dtPYqbNi8zeZ", - "colab": {} - }, - "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n", - "loss = tf.keras.losses.mean_squared_error\n", - "model.compile(optimizer=optimizer, loss=loss)\n", - "history = model.fit(x=[datapoint_circuits, commands], y=expected_outputs,\n", - " epochs=30, verbose=0)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "azE-qV0OaC1o", - "colab": {} - }, - "source": [ - "plt.plot(history.history['loss'])\n", - "plt.title(\"Learning to Control a Qubit\")\n", - "plt.xlabel(\"Iterations\")\n", - "plt.ylabel(\"Error in Control\")\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "GTd5DGcRmmOK" - }, - "source": [ - 
"From this plot you can see that the neural network has learned to overcome the systematic miscalibration." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "jNrW0NXR-lDC" - }, - "source": [ - "## 3 Learning to prepare eigenstates of different operators\n", - "\n", - "The choice of the $\\pm \\hat{Z}$ eigenstates corresponding to 1 and 0 was arbitrary. You could have just as easily wanted 1 to correspond to the $+ \\hat{Z}$ eigenstate and 0 to correspond to the $-\\hat{X}$ eigenstate. One way to accomplish this is by specifying a different measurement operator for each command, as indicated in the figure below:\n", - "\n", - "\n", - "\n", - "This requires more sophisticated use of tfq.layers.Expectation. Now your input has grown to include three objects: circuit, command, and operator. The output is still the expectation value." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ci3WMZ9CjEM1" - }, - "source": [ - "### 3.1 New model definition\n", - "\n", - "Lets take a look at the model to accomplish this task:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "n_aTG4g3-y0F", - "colab": {} - }, - "source": [ - "# Define inputs.\n", - "commands_input = tf.keras.layers.Input(shape=(1), dtype=tf.dtypes.float32, name='commands_input')\n", - "circuits_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string, name='circuits_input')\n", - "operators_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string, name='operators_input')\n", - "\n", - "# Define classical NN.\n", - "dense_1 = tf.keras.layers.Dense(10)(commands_input)\n", - "dense_2 = tf.keras.layers.Dense(3)(dense_1)\n", - "\n", - "# Since you aren't using a PQC or ControlledPQC you must append\n", - "# your model circuit onto the datapoint circuit tensor manually.\n", - "full_circuit = tfq.layers.AddCircuit()(circuits_input, append=model_circuit)\n", - "expectation_output = 
tfq.layers.Expectation()(full_circuit,\n", - " symbol_names=control_params,\n", - " symbol_values=dense_2,\n", - " operators=operators_input)\n", - "\n", - "# Contruct your Keras model.\n", - "two_axis_control_model = tf.keras.Model(\n", - " inputs=[circuits_input, commands_input, operators_input], outputs=[expectation_output])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "VQTM6CCiD4gU" - }, - "source": [ - "### 3.2 Adding to datapoints\n", - "\n", - "Now you will include the operators you wish to measure for each datapoint you supply for `model_circuit`:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "4gw_L3JG0_G0", - "colab": {} - }, - "source": [ - "operator_data = tfq.convert_to_tensor([[cirq.X(qubit)], [cirq.Z(qubit)]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ALCKSvwh0_G2", - "colab_type": "text" - }, - "source": [ - "### 3.3 Training\n", - "\n", - "Now that you have your new input outputs pairs you can train once again using keras." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "nFuGA73MAA4p", - "colab": {} - }, - "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n", - "loss = tf.keras.losses.mean_squared_error\n", - "\n", - "two_axis_control_model.compile(optimizer=optimizer, loss=loss)\n", - "\n", - "history = two_axis_control_model.fit(\n", - " x=[datapoint_circuits, commands, operator_data],\n", - " y=expected_outputs,\n", - " epochs=30,\n", - " verbose=1)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Cf_G-GdturLL", - "colab": {} - }, - "source": [ - "plt.plot(history.history['loss'])\n", - "plt.title(\"Learning to Control a Qubit\")\n", - "plt.xlabel(\"Iterations\")\n", - "plt.ylabel(\"Error in Control\")\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "sdCPDH9NlJBl" - }, - "source": [ - "The loss function has dropped to zero.\n", - "\n", - "If you inspect the parameters of the model, you'll see that the parameters have been recovered to control the qubit correctly with these new measurement operators." 
- ] - } - ] -} diff --git a/docs/tutorials/images/barren_1.png b/docs/tutorials/images/barren_1.png deleted file mode 100644 index 72b3a8957..000000000 Binary files a/docs/tutorials/images/barren_1.png and /dev/null differ diff --git a/docs/tutorials/images/barren_2.png b/docs/tutorials/images/barren_2.png deleted file mode 100644 index 2042f894f..000000000 Binary files a/docs/tutorials/images/barren_2.png and /dev/null differ diff --git a/docs/tutorials/images/barren_3.png b/docs/tutorials/images/barren_3.png deleted file mode 100644 index 1e273ae4e..000000000 Binary files a/docs/tutorials/images/barren_3.png and /dev/null differ diff --git a/docs/tutorials/images/nn_control1.png b/docs/tutorials/images/nn_control1.png deleted file mode 100644 index 75c1ba346..000000000 Binary files a/docs/tutorials/images/nn_control1.png and /dev/null differ diff --git a/docs/tutorials/images/nn_control2.png b/docs/tutorials/images/nn_control2.png deleted file mode 100644 index 055914746..000000000 Binary files a/docs/tutorials/images/nn_control2.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_1.png b/docs/tutorials/images/qcnn_1.png deleted file mode 100644 index 6417a0bd1..000000000 Binary files a/docs/tutorials/images/qcnn_1.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_2.png b/docs/tutorials/images/qcnn_2.png deleted file mode 100644 index 6603a9a05..000000000 Binary files a/docs/tutorials/images/qcnn_2.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_3.png b/docs/tutorials/images/qcnn_3.png deleted file mode 100644 index f9c9d973a..000000000 Binary files a/docs/tutorials/images/qcnn_3.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_4.png b/docs/tutorials/images/qcnn_4.png deleted file mode 100644 index a7f9691d9..000000000 Binary files a/docs/tutorials/images/qcnn_4.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_5.png b/docs/tutorials/images/qcnn_5.png deleted file mode 100644 index 
31b0e9304..000000000 Binary files a/docs/tutorials/images/qcnn_5.png and /dev/null differ diff --git a/docs/tutorials/images/qcnn_6.png b/docs/tutorials/images/qcnn_6.png deleted file mode 100644 index 97522dfce..000000000 Binary files a/docs/tutorials/images/qcnn_6.png and /dev/null differ diff --git a/docs/tutorials/images/sensing_1.png b/docs/tutorials/images/sensing_1.png deleted file mode 100644 index 5999c75dc..000000000 Binary files a/docs/tutorials/images/sensing_1.png and /dev/null differ diff --git a/docs/tutorials/images/sensing_2.png b/docs/tutorials/images/sensing_2.png deleted file mode 100644 index b7a2af5dc..000000000 Binary files a/docs/tutorials/images/sensing_2.png and /dev/null differ diff --git a/docs/tutorials/mnist.ipynb b/docs/tutorials/mnist.ipynb deleted file mode 100644 index f3ae4839d..000000000 --- a/docs/tutorials/mnist.ipynb +++ /dev/null @@ -1,642 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "mnist.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# MNIST classification" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "udLObUVeGfTs", - "colab_type": "text" - }, - "source": [ - "This tutorial builds a quantum neural network (QNN) to classify a simplified version of MNIST, similar to the approach used in Farhi et al. The performance of the quantum neural network on this classical data problem is compared with a classical neural network." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "X35qHdh5Gzqg", - "colab_type": "text" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.0.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "hdgMMZEBGqyl", - "colab_type": "text" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "import seaborn\n", - "import collections\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. Load the data\n", - "\n", - "Load the MNIST data distributed with Keras. Since this tutorial demonstrates a binary classification problem for the numbers 3 and 6, remove the other numbers. 
Then display the first training example:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "p-XEU8egGL6q", - "colab_type": "code", - "colab": {} - }, - "source": [ - "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n", - "\n", - "print(\"Number of original training examples:\", len(x_train))\n", - "print(\"Number of original test examples:\", len(x_train))\n", - "\n", - "# Keep the 3s and 6s and remove the other numbers.\n", - "x_train, y_train = zip(*((x, y) for x, y in zip(x_train, y_train) if y in [3, 6]))\n", - "x_test, y_test = zip(*((x, y) for x, y in zip(x_test, y_test) if y in [3, 6]))\n", - "\n", - "print(\"Number of filtered training examples:\", len(x_train))\n", - "print(\"Number of filtered test examples:\", len(x_test))\n", - "\n", - "print(y_train[0])\n", - "seaborn.heatmap(x_train[0])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "fmmtplIFGL6t", - "colab_type": "text" - }, - "source": [ - "But an image size of 28x28 is much too large for current quantum computers. 
Resize the image down to 4x4 and scale the numbers between 0 and 1:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "lR4tKxLOGL6u", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def reduce_image(x):\n", - " x = tf.reshape(x, [1, 28, 28, 1])\n", - " x = tf.image.resize(x, [4, 4])\n", - " x = tf.reshape(x, [4, 4])\n", - " x = x / 255\n", - " return x.numpy()\n", - "\n", - "\n", - "x_train = [reduce_image(x) for x in x_train]\n", - "x_test = [reduce_image(x) for x in x_test]\n", - "\n", - "\n", - "# Remove examples where the same input has multiple labels.\n", - "def remove_contradicting(xs, ys):\n", - " mapping = collections.defaultdict(set)\n", - " for x, y in zip(xs, ys):\n", - " mapping[str(x)].add(y)\n", - " return zip(*((x, y) for x, y in zip(xs, ys) if len(mapping[str(x)]) == 1))\n", - "\n", - "\n", - "x_train, y_train = remove_contradicting(x_train, y_train)\n", - "x_test, y_test = remove_contradicting(x_test, y_test)\n", - "\n", - "print(\"Number of non-contradicting training examples: \", len(x_train))\n", - "print(\"Number of non-contradicting test examples: \", len(x_test))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pOMd7zIjGL6x", - "colab_type": "text" - }, - "source": [ - "Again, display the first training example—after resize: " - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YIYOtCRIGL6y", - "colab_type": "code", - "colab": {} - }, - "source": [ - "print(y_train[0])\n", - "seaborn.heatmap(x_train[0])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_3kBLeodGL61", - "colab_type": "text" - }, - "source": [ - "To process images using a quantum computer, Farhi et al. proposed representing each pixel with a qubit, with the state depending on the value of the pixel.\n", - "\n", - "To classify these images, Farhi et al. proposed taking the expectation of a readout qubit in a parameterized circuit. 
The expectation returns a value between 1 and -1, so choosing 1 and -1 as the targets is natural." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "aOu_3-3ZGL61", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def convert_to_circuit(image):\n", - " \"\"\"Encode truncated classical image into quantum datapoint.\"\"\"\n", - " values = np.ndarray.flatten(image)\n", - " qubits = cirq.GridQubit.rect(4, 4)\n", - " circuit = cirq.Circuit()\n", - " for i, value in enumerate(values):\n", - " if value > 0.5:\n", - " circuit.append(cirq.X(qubits[i]))\n", - " return circuit\n", - "\n", - "\n", - "x_train = [convert_to_circuit(x) for x in x_train]\n", - "x_test = [convert_to_circuit(x) for x in x_test]\n", - "\n", - "\n", - "def convert_label(y):\n", - " if y == 3:\n", - " return 1.0\n", - " else:\n", - " return -1.0\n", - "\n", - "\n", - "y_train = [convert_label(y) for y in y_train]\n", - "y_test = [convert_label(y) for y in y_test]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Zkr6jVDgGL64", - "colab_type": "text" - }, - "source": [ - "And display the circuit diagram for the first example:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "qP-wmm-0GL65", - "colab_type": "code", - "colab": {} - }, - "source": [ - "print(y_train[0])\n", - "print(x_train[0])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4USiqeOqGL67", - "colab_type": "text" - }, - "source": [ - "## 2. Quantum neural network\n", - "\n", - "There is little guidance for a quantum circuit structure that classifies images. Since the classification is based on the expectation of the readout qubit, Farhi et al. propose using two qubit gates, with the readout qubit always acted upon.\n", - "\n", - "This following example shows this layer approach. 
It uses *n* instances of the same gate, with each of the data qubits acting on the readout qubit:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "JiALbpwRGL69", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def create_quantum_model():\n", - " \"\"\"Create a QNN model circuit and readout operation to go along with it.\"\"\"\n", - " data_qubits = cirq.GridQubit.rect(4, 4)\n", - " readout = cirq.GridQubit(16, 0)\n", - "\n", - " symbols = []\n", - " circuit = cirq.Circuit()\n", - "\n", - " # Generates a layer of the gate type.\n", - " def layer(gate, prefix):\n", - " for i, qubit in enumerate(data_qubits):\n", - " symbol = sympy.Symbol(prefix + '-' + str(i))\n", - " circuit.append(gate(qubit, readout)**symbol)\n", - " symbols.append(symbol)\n", - "\n", - " # Prepare the readout qubit.\n", - " circuit.append(cirq.X(readout))\n", - " circuit.append(cirq.H(readout))\n", - "\n", - " # Then add layers (experiment by adding more).\n", - " layer(cirq.XX, \"xx-1\")\n", - " layer(cirq.ZZ, \"zz-1\")\n", - "\n", - " # Finally, prepare the readout qubit.\n", - " circuit.append(cirq.H(readout))\n", - "\n", - " return circuit, cirq.Z(readout)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "LY7vbY6yfABE", - "colab_type": "text" - }, - "source": [ - "Build the Keras model with the quantum components. This model is fed the \"quantum data\" that encodes the classical data." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "x2xDsYpFGL7A", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Get the quantum components.\n", - "model_circuit, model_readout = create_quantum_model()\n", - "\n", - "# Build the Keras model.\n", - "model = tf.keras.Sequential()\n", - "model.add(tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string))\n", - "model.add(tfq.layers.PQC(model_circuit, model_readout))\n", - "\n", - "# Define a custom accuracy that equals the sign of the output.\n", - "@tf.function\n", - "def custom_accuracy(y_true, y_pred):\n", - " y_true = tf.squeeze(y_true)\n", - " y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)\n", - " return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))\n", - "\n", - "\n", - "print(model.summary())\n", - "\n", - "model.compile(loss=tf.keras.losses.hinge,\n", - " optimizer=tf.keras.optimizers.Adam(),\n", - " metrics=[custom_accuracy])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lsuOzDYblA9s", - "colab_type": "text" - }, - "source": [ - "Reduce the dataset size for faster training. For better results, ignore this code cell:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "n8vuQpSLlBV2", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Comment out for increased accuracy.\n", - "NUM_EXAMPLES = 500\n", - "x_train = x_train[:NUM_EXAMPLES]\n", - "y_train = y_train[:NUM_EXAMPLES]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "QMSdgGC1GL7D", - "colab_type": "text" - }, - "source": [ - "Using the full dataset, training this model should achieve >85% accuracy on the test set. Here, only `NUM_EXAMPLES` datapoints are used to save time." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "nl0aQ-w7GL7H", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Wrap the training and test sets so Keras can handle them.\n", - "x_train = tfq.convert_to_tensor(x_train)\n", - "x_test = tfq.convert_to_tensor(x_test)\n", - "y_train = np.array(y_train)\n", - "y_test = np.array(y_test)\n", - "\n", - "model.fit(x_train,\n", - " y_train,\n", - " batch_size=32,\n", - " epochs=3,\n", - " verbose=1,\n", - " validation_data=(x_test, y_test))\n", - "\n", - "qnn_results = model.evaluate(x_test, y_test)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "8952YvuWGL7J", - "colab_type": "text" - }, - "source": [ - "## 3. Classical neural network\n", - "\n", - "While the quantum neural network works for this simplified MNIST problem, a basic classical neural network can easily outperform a QNN on this task. After a single epoch, a classical neural network can achieve >98% accuracy on the holdout set.\n", - "\n", - "In the following example, a classical neural network is used for a 10-class classification problem (instead of the 2-class problem for the QNN), and uses the entire 28x28 image instead of subsampling the image." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "pZofEHhLGL7L", - "colab_type": "code", - "colab": {} - }, - "source": [ - "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n", - "\n", - "y_train = tf.keras.utils.to_categorical(y_train, 10)\n", - "y_test = tf.keras.utils.to_categorical(y_test, 10)\n", - "\n", - "x_train = x_train.astype('float32') / 255\n", - "x_test = x_test.astype('float32') / 255\n", - "\n", - "\n", - "def create_classical_model():\n", - " # A simple model based off LeNet from https://keras.io/examples/mnist_cnn/\n", - " model = tf.keras.Sequential()\n", - " model.add(tf.keras.layers.Reshape([28, 28, 1]))\n", - " model.add(tf.keras.layers.Conv2D(32, [3, 3], activation='relu'))\n", - " model.add(tf.keras.layers.Conv2D(64, [3, 3], activation='relu'))\n", - " model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))\n", - " model.add(tf.keras.layers.Dropout(0.25))\n", - " model.add(tf.keras.layers.Flatten())\n", - " model.add(tf.keras.layers.Dense(128, activation='relu'))\n", - " model.add(tf.keras.layers.Dropout(0.5))\n", - " model.add(tf.keras.layers.Dense(10, activation='softmax'))\n", - " return model\n", - "\n", - "\n", - "model = create_classical_model()\n", - "model.compile(loss=tf.keras.losses.categorical_crossentropy,\n", - " optimizer=tf.keras.optimizers.Adam(),\n", - " metrics=['accuracy'])\n", - "\n", - "model.fit(x_train,\n", - " y_train,\n", - " batch_size=128,\n", - " epochs=1,\n", - " verbose=1,\n", - " validation_data=(x_test, y_test))\n", - "\n", - "cnn_results = model.evaluate(x_test, y_test)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "RH3mam7EGL7N", - "colab_type": "text" - }, - "source": [ - "## 4. Comparison\n", - "\n", - "Despite a more difficult problem, the classical neural network easily outperforms the quantum neural network. For classical data, it is difficult to beat a classical neural network." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "NOMeN7pMGL7P", - "colab_type": "code", - "colab": {} - }, - "source": [ - "qnn_accuracy = qnn_results[1]\n", - "cnn_accuracy = cnn_results[1]\n", - "\n", - "seaborn.barplot([\"quantum\", \"classical\"], [qnn_accuracy, cnn_accuracy])" - ], - "execution_count": 0, - "outputs": [] - } - ] -} diff --git a/docs/tutorials/model.png b/docs/tutorials/model.png deleted file mode 100644 index c8d88bb83..000000000 Binary files a/docs/tutorials/model.png and /dev/null differ diff --git a/docs/tutorials/qcnn.ipynb b/docs/tutorials/qcnn.ipynb deleted file mode 100644 index 212b2187b..000000000 --- a/docs/tutorials/qcnn.ipynb +++ /dev/null @@ -1,1135 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": {}, - "colab_type": "code", - "id": "iiQkM5ZgQ8r2" - }, - "outputs": [], - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." 
- ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "uLeF5Nmdef0V" - }, - "source": [ - "# Quantum Convolutional Neural Network" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "4D3xaWBHOIVg" - }, - "source": [ - "This tutorial implements a simplified Quantum Convolutional Neural Network (QCNN), a proposed quantum analogue to a classical convolutional neural network that is also *translationally invariant*.\n", - "\n", - "This example demonstrates how to detect certain properties of a quantum data source, such as a quantum sensor or a complex simulation from a device. The quantum data source being a cluster state that may or may not have an excitation—what the QCNN will learn to detect (The dataset used in the paper was SPT phase classification)." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FnjolLuz8o5C" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "evObd2BXr5hs" - }, - "outputs": [], - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "Aquwcz-0aHqz" - }, - "outputs": [], - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "e_ZuLN_N8yhT" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "3Pl5PW-ACO9J" - }, - "outputs": [], - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "TL_LvHXzPNjW" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "QytLEAtoejW5" - }, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "## 1. Build a QCNN" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Qg85u3G--CGq" - }, - "source": [ - "### 1.1 Assemble circuits in a TensorFlow graph\n", - "\n", - "TensorFlow Quantum (TFQ) provides layer classes designed for in-graph circuit construction. One example is the `tfq.layers.AddCircuit` layer that inherits from `tf.keras.Layer`. 
This layer can either prepend or append to the input batch of circuits, as shown in the following figure.\n", - "\n", - "\n", - "\n", - "The following snippet uses this layer:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "FhNf0G_OPLqZ" - }, - "outputs": [], - "source": [ - "qubit = cirq.GridQubit(0, 0)\n", - "\n", - "# Define some circuits.\n", - "circuit1 = cirq.Circuit(cirq.X(qubit))\n", - "circuit2 = cirq.Circuit(cirq.H(qubit))\n", - "\n", - "# Convert to a tensor.\n", - "input_circuit_tensor = tfq.convert_to_tensor([circuit1, circuit2])\n", - "\n", - "# Define a circuit that we want to append\n", - "y_circuit = cirq.Circuit(cirq.Y(qubit))\n", - "\n", - "# Instantiate our layer\n", - "y_appender = tfq.layers.AddCircuit()\n", - "\n", - "# Run our circuit tensor through the layer and save the output.\n", - "output_circuit_tensor = y_appender(input_circuit_tensor, append=y_circuit)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "ShZbRZCXkvk5" - }, - "source": [ - "Examine the input tensor:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "ImRynsUN4BSG" - }, - "outputs": [], - "source": [ - "print(tfq.from_tensor(input_circuit_tensor))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xkGU4ZTUk4gf" - }, - "source": [ - "And examine the output tensor:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "tfff6dJp39Fg" - }, - "outputs": [], - "source": [ - "print(tfq.from_tensor(output_circuit_tensor))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "23JeZ7Ns5qy5" - }, - "source": [ - "While it is possible to run the examples below without using `tfq.layers.AddCircuit`, it's a good opportunity to understand how 
complex functionality can be embedded into TensorFlow compute graphs." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "GcVplt9455Hi" - }, - "source": [ - "### 1.2 Problem overview\n", - "\n", - "You will prepare a *cluster state* and train a quantum classifier to detect if it is \"excited\" or not. The cluster state is highly entangled but not necessarily difficult for a classical computer. For clarity, this is a simpler dataset than the one used in the paper.\n", - "\n", - "For this classification task you will implement a deep MERA-like QCNN architecture since:\n", - "\n", - "1. Like the QCNN, the cluster state on a ring is translationally invariant.\n", - "2. The cluster state is highly entangled.\n", - "\n", - "This architecture should be effective at reducing entanglement, obtaining the classification by reading out a single qubit.\n", - "\n", - "\n", - "\n", - "An \"excited\" cluster state is defined as a cluster state that had a `cirq.Rx` gate applied to any of its qubits. Qconv and QPool are discussed later in this tutorial." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "jpqtsGJH_I1d" - }, - "source": [ - "### 1.3 Building blocks for TensorFlow\n", - "\n", - "\n", - "\n", - "One way to solve this problem with TensorFlow Quantum is to implement the following:\n", - "\n", - "1. The input to the model is a circuit tensor—either an empty circuit or an X gate on a particular qubit indicating an excitation.\n", - "2. The rest of the model's quantum components are constructed with `tfq.layers.AddCircuit` layers.\n", - "3. For inference a `tfq.layers.PQC` layer is used. This reads $\\langle \\hat{Z} \\rangle$ and compares it to a label of 1 for an excited state, or -1 for a non-excited state." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### 1.4 Data\n", - "Before building your model, you can generate your data. 
In this case it's going to be excitations to the cluster state (The original paper uses a more complicated dataset). Excitations are represented with `cirq.Rx` gates. A large enough rotation is deemed an excitation and is labeled `1` and a rotation that isn't large enough is labeled `-1` and deemed not an excitation." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def generate_data(qubits):\n", - " \"\"\"Generate training and testing data.\"\"\"\n", - " n_rounds = 20 # Produces n_rounds * n_qubits datapoints.\n", - " excitations = []\n", - " labels = []\n", - " for n in range(n_rounds):\n", - " for bit in qubits:\n", - " rng = np.random.uniform(-np.pi, np.pi)\n", - " excitations.append(cirq.Circuit(cirq.Rx(rng)(bit)))\n", - " labels.append(1 if (-np.pi / 2) <= rng <= (np.pi / 2) else -1)\n", - "\n", - " split_ind = int(len(excitations) * 0.7)\n", - " train_excitations = excitations[:split_ind]\n", - " test_excitations = excitations[split_ind:]\n", - "\n", - " train_labels = labels[:split_ind]\n", - " test_labels = labels[split_ind:]\n", - "\n", - " return tfq.convert_to_tensor(train_excitations), np.array(train_labels), \\\n", - " tfq.convert_to_tensor(test_excitations), np.array(test_labels)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You can see that just like with regular machine learning you create a training and testing set to use to benchmark the model. 
You can quikly look at some datapoints with:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sample_points, sample_labels, _, __ = generate_data(cirq.GridQubit.rect(1,4))\n", - "print('Input:', tfq.from_tensor(sample_points)[0], 'Output:', sample_labels[0])\n", - "print('Input:', tfq.from_tensor(sample_points)[1], 'Output:', sample_labels[1])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "sFiRlDt_0-DL" - }, - "source": [ - "### 1.5 Define layers\n", - "\n", - "Now define the layers shown in the figure above in TensorFlow." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "s2B9geIqLWHK" - }, - "source": [ - "#### 1.5.1 Cluster state\n", - "\n", - "The first step is to define the cluster state using Cirq, a Google-provided framework for programming quantum circuits. Since this is a static part of the model, embed it using the `tfq.layers.AddCircuit` functionality." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "qpQwVWKazU8g" - }, - "outputs": [], - "source": [ - "def cluster_state_circuit(bits):\n", - " \"\"\"Return a cluster state on the qubits in `bits`.\"\"\"\n", - " circuit = cirq.Circuit()\n", - " circuit.append(cirq.H.on_each(bits))\n", - " for this_bit, next_bit in zip(bits, bits[1:] + [bits[0]]):\n", - " circuit.append(cirq.CZ(this_bit, next_bit))\n", - " return circuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "e9qX1uN740vJ" - }, - "source": [ - "Display a cluster state circuit for a rectangle of cirq.GridQubits:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "9tZt0aAO4r4F" - }, - "outputs": [], - "source": [ - "SVGCircuit(cluster_state_circuit(cirq.GridQubit.rect(1, 4)))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "4xElWnRf1ZC7" - }, - "source": [ - "#### 1.5.2 QCNN layers\n", - "\n", - "Define the layers that make up the model using the Cong and Lukin QCNN paper. There are a few prerequisites:\n", - "\n", - "* The one- and two-qubit parameterized unitary matrices from the Tucci paper.\n", - "* A general parameterized two-qubit pooling operation." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "oNRGOqky2exY" - }, - "outputs": [], - "source": [ - "def one_qubit_unitary(bit, symbols):\n", - " \"\"\"Make a Cirq circuit enacting a rotation of the bloch sphere about the X,\n", - " Y and Z axis, that depends on the values in `symbols`.\n", - " \"\"\"\n", - " return cirq.Circuit(\n", - " cirq.X(bit)**symbols[0],\n", - " cirq.Y(bit)**symbols[1],\n", - " cirq.Z(bit)**symbols[2])\n", - "\n", - "\n", - "def two_qubit_unitary(bits, symbols):\n", - " \"\"\"Make a Cirq circuit that creates an arbitrary two qubit unitary.\"\"\"\n", - " circuit = cirq.Circuit()\n", - " circuit += one_qubit_unitary(bits[0], symbols[0:3])\n", - " circuit += one_qubit_unitary(bits[1], symbols[3:6])\n", - " circuit += [cirq.ZZ(*bits)**symbols[7]]\n", - " circuit += [cirq.YY(*bits)**symbols[8]]\n", - " circuit += [cirq.XX(*bits)**symbols[9]]\n", - " circuit += one_qubit_unitary(bits[0], symbols[9:12])\n", - " circuit += one_qubit_unitary(bits[1], symbols[12:])\n", - " return circuit\n", - "\n", - "\n", - "def two_qubit_pool(source_qubit, sink_qubit, symbols):\n", - " \"\"\"Make a Cirq circuit to do a parameterized 'pooling' operation, which\n", - " attempts to reduce entanglement down from two qubits to just one.\"\"\"\n", - " pool_circuit = cirq.Circuit()\n", - " sink_basis_selector = one_qubit_unitary(sink_qubit, symbols[0:3])\n", - " source_basis_selector = one_qubit_unitary(source_qubit, symbols[3:6])\n", - " pool_circuit.append(sink_basis_selector)\n", - " pool_circuit.append(source_basis_selector)\n", - " pool_circuit.append(cirq.CNOT(control=source_qubit, target=sink_qubit))\n", - " pool_circuit.append(sink_basis_selector**-1)\n", - " return pool_circuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "LoG0a3U_2qGA" - }, - "source": [ - "To see what you created, print out the one-qubit unitary circuit:" - ] - }, - { - 
"cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "T5uhvF-g2rpZ" - }, - "outputs": [], - "source": [ - "SVGCircuit(one_qubit_unitary(cirq.GridQubit(0, 0), sympy.symbols('x0:3')))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "NWuMb_Us8ar2" - }, - "source": [ - "And the two-qubit unitary circuit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "aJTdRrfS2uIo" - }, - "outputs": [], - "source": [ - "SVGCircuit(two_qubit_unitary(cirq.GridQubit.rect(1, 2), sympy.symbols('x0:15')))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "EXQD1R_V8jyk" - }, - "source": [ - "And the two-qubit pooling circuit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "DOHRbkvH2xGK" - }, - "outputs": [], - "source": [ - "SVGCircuit(two_qubit_pool(*cirq.GridQubit.rect(1, 2), sympy.symbols('x0:6')))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "AzVauXWD3v8C" - }, - "source": [ - "##### 1.5.2.1 Quantum convolution\n", - "\n", - "As in the Cong and Lukin paper, define the 1D quantum convolution as the application of a two-qubit parameterized unitary to every pair of adjacent qubits with a stride of one.\n", - "\n", - "" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "1Fa19Lzb3wnR" - }, - "outputs": [], - "source": [ - "def quantum_conv_circuit(bits, symbols):\n", - " \"\"\"Quantum Convolution Layer following the above diagram.\n", - " Return a Cirq circuit with the cascade of `two_qubit_unitary` applied\n", - " to all pairs of qubits in `bits` as in the diagram above.\n", - " \"\"\"\n", - " circuit = cirq.Circuit()\n", - " for first, second in zip(bits[0::2], bits[1::2]):\n", - " 
circuit += two_qubit_unitary([first, second], symbols)\n", - " for first, second in zip(bits[1::2], bits[2::2] + [bits[0]]):\n", - " circuit += two_qubit_unitary([first, second], symbols)\n", - " return circuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "fTzOm_t394Gj" - }, - "source": [ - "Display the (very horizontal) circuit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "Bi6q2nmY3z_U" - }, - "outputs": [], - "source": [ - "SVGCircuit(\n", - " quantum_conv_circuit(cirq.GridQubit.rect(1, 8), sympy.symbols('x0:15')))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "3svBAfap4xhP" - }, - "source": [ - "##### 1.5.2.2 Quantum pooling\n", - "\n", - "A quantum pooling layer pools from $N$ qubits to $\\frac{N}{2}$ qubits using the two-qubit pool defined above." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "jD3fgcWO4yEU" - }, - "outputs": [], - "source": [ - "def quantum_pool_circuit(source_bits, sink_bits, symbols):\n", - " \"\"\"A layer that specifies a quantum pooling operation.\n", - " A Quantum pool tries to learn to pool the relevant information from two\n", - " qubits onto 1.\n", - " \"\"\"\n", - " circuit = cirq.Circuit()\n", - " for source, sink in zip(source_bits, sink_bits):\n", - " circuit += two_qubit_pool(source, sink, symbols)\n", - " return circuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "NX83NHDP_Q_Z" - }, - "source": [ - "Examine a pooling component circuit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "pFXow2OX47O5" - }, - "outputs": [], - "source": [ - "test_bits = cirq.GridQubit.rect(1, 8)\n", - "\n", - "SVGCircuit(\n", - " quantum_pool_circuit(test_bits[:4], test_bits[4:], 
sympy.symbols('x0:6')))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "23VcPLT45Lg7" - }, - "source": [ - "### 1.6 Model definition\n", - "\n", - "Now use the defined layers to construct a purely quantum CNN. Start with eight qubits, pool down to one, then measure $\\langle \\hat{Z} \\rangle$." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "vzEsY6-n5NR0" - }, - "outputs": [], - "source": [ - "def create_model_circuit(qubits):\n", - " \"\"\"Create sequence of alternating convolution and pooling operators \n", - " which gradually shrink over time.\"\"\"\n", - " model_circuit = cirq.Circuit()\n", - " symbols = sympy.symbols('qconv0:63')\n", - " # Cirq uses sympy.Symbols to map learnable variables. TensorFlow Quantum\n", - " # scans incoming circuits and replaces these with TensorFlow variables.\n", - " model_circuit += quantum_conv_circuit(qubits, symbols[0:15])\n", - " model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],\n", - " symbols[15:21])\n", - " model_circuit += quantum_conv_circuit(qubits[4:], symbols[21:36])\n", - " model_circuit += quantum_pool_circuit(qubits[4:6], qubits[6:],\n", - " symbols[36:42])\n", - " model_circuit += quantum_conv_circuit(qubits[6:], symbols[42:57])\n", - " model_circuit += quantum_pool_circuit([qubits[6]], [qubits[7]],\n", - " symbols[57:63])\n", - " return model_circuit\n", - "\n", - "\n", - "# Create our qubits and readout operators in Cirq.\n", - "cluster_state_bits = cirq.GridQubit.rect(1, 8)\n", - "readout_operators = cirq.Z(cluster_state_bits[-1])\n", - "\n", - "# Build a sequential model enacting the logic in 1.3 of this notebook.\n", - "# Here you are making the static cluster state prep as a part of the AddCircuit and the\n", - "# \"quantum datapoints\" are coming in the form of excitation\n", - "excitation_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n", - "cluster_state = 
tfq.layers.AddCircuit()(\n", - " excitation_input, prepend=cluster_state_circuit(cluster_state_bits))\n", - "\n", - "quantum_model = tfq.layers.PQC(create_model_circuit(cluster_state_bits),\n", - " readout_operators)(cluster_state)\n", - "\n", - "qcnn_model = tf.keras.Model(inputs=[excitation_input], outputs=[quantum_model])\n", - "\n", - "# Show the keras plot of the model\n", - "tf.keras.utils.plot_model(qcnn_model,\n", - " show_shapes=True,\n", - " show_layer_names=False,\n", - " dpi=70)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "9jqTEe5VSbug" - }, - "source": [ - "### 1.7 Train the model\n", - "\n", - "Train the model over the full batch to simplify this example." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "_TFkAm1sQZEN" - }, - "outputs": [], - "source": [ - "# Generate some training data.\n", - "train_excitations, train_labels, test_excitations, test_labels = generate_data(cluster_state_bits)\n", - "\n", - "\n", - "# Custom accuracy metric.\n", - "@tf.function\n", - "def custom_accuracy(y_true, y_pred):\n", - " y_true = tf.squeeze(y_true)\n", - " y_pred = tf.map_fn(lambda x: 1.0 if x >= 0 else -1.0, y_pred)\n", - " return tf.keras.backend.mean(tf.keras.backend.equal(y_true, y_pred))\n", - "\n", - "\n", - "qcnn_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),\n", - " loss=tf.losses.mse,\n", - " metrics=[custom_accuracy])\n", - "\n", - "history = qcnn_model.fit(x=train_excitations,\n", - " y=train_labels,\n", - " batch_size=16,\n", - " epochs=25,\n", - " verbose=1,\n", - " validation_data=(test_excitations, test_labels))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "2tiCJOb5Qzcr" - }, - "outputs": [], - "source": [ - "plt.plot(history.history['loss'][1:], label='Training')\n", - "plt.plot(history.history['val_loss'][1:], 
label='Validation')\n", - "plt.title('Training a Quantum CNN to Detect Excited Cluster States')\n", - "plt.xlabel('Epochs')\n", - "plt.ylabel('Loss')\n", - "plt.legend()\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "GyrkcEReQ5Bc" - }, - "source": [ - "## 2. Hybrid models\n", - "\n", - "You don't have to go from eight qubits to one qubit using quantum convolution—you could have done one or two rounds of quantum convolution and fed the results into a classical neural network. This section explores quantum-classical hybrid models." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "A2tOK22t7Kjm" - }, - "source": [ - "### 2.1 Hybrid model with a single quantum filter\n", - "\n", - "Apply one layer of quantum convolution, reading out $\\langle \\hat{Z}_n \\rangle$ on all bits, followed by a densely-connected neural network.\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "lKXuOApgWYFa" - }, - "source": [ - "#### 2.1.1 Model definition" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "Ut-U1hBkQ8Fs" - }, - "outputs": [], - "source": [ - "# 1-local operators to read out\n", - "readouts = [cirq.Z(bit) for bit in cluster_state_bits[4:]]\n", - "\n", - "\n", - "def multi_readout_model_circuit(qubits):\n", - " \"\"\"Make a model circuit with less quantum pool and conv operations.\"\"\"\n", - " model_circuit = cirq.Circuit()\n", - " symbols = sympy.symbols('qconv0:21')\n", - " model_circuit += quantum_conv_circuit(qubits, symbols[0:15])\n", - " model_circuit += quantum_pool_circuit(qubits[:4], qubits[4:],\n", - " symbols[15:21])\n", - " return model_circuit\n", - "\n", - "\n", - "# Build a model enacting the logic in 2.1 of this notebook.\n", - "excitation_input_dual = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n", - "\n", - 
"cluster_state_dual = tfq.layers.AddCircuit()(\n", - " excitation_input_dual, prepend=cluster_state_circuit(cluster_state_bits))\n", - "\n", - "quantum_model_dual = tfq.layers.PQC(\n", - " multi_readout_model_circuit(cluster_state_bits),\n", - " readouts)(cluster_state_dual)\n", - "\n", - "d1_dual = tf.keras.layers.Dense(8)(quantum_model_dual)\n", - "\n", - "d2_dual = tf.keras.layers.Dense(1)(d1_dual)\n", - "\n", - "hybrid_model = tf.keras.Model(inputs=[excitation_input_dual], outputs=[d2_dual])\n", - "\n", - "# Display the model architecture\n", - "tf.keras.utils.plot_model(hybrid_model,\n", - " show_shapes=True,\n", - " show_layer_names=False,\n", - " dpi=70)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "qDqoLZJuWcgH" - }, - "source": [ - "#### 2.1.2 Train the model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "EyYw9kYIRCE7" - }, - "outputs": [], - "source": [ - "hybrid_model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),\n", - " loss=tf.losses.mse,\n", - " metrics=[custom_accuracy])\n", - "\n", - "hybrid_history = hybrid_model.fit(x=train_excitations,\n", - " y=train_labels,\n", - " batch_size=16,\n", - " epochs=25,\n", - " verbose=1,\n", - " validation_data=(test_excitations,\n", - " test_labels))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "yL3jhGiBRJHt" - }, - "outputs": [], - "source": [ - "plt.plot(history.history['val_custom_accuracy'], label='QCNN')\n", - "plt.plot(hybrid_history.history['val_custom_accuracy'], label='Hybrid CNN')\n", - "plt.title('Quantum vs Hybrid CNN performance')\n", - "plt.xlabel('Epochs')\n", - "plt.legend()\n", - "plt.ylabel('Validation Accuracy')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "NCNiNvheRNzq" - }, - "source": [ - "As you can see, with 
very modest classical assistance, the hybrid model will usually converge faster than the purely quantum version." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "nVUtWLZnRRDE" - }, - "source": [ - "### 2.2 Hybrid convolution with multiple quantum filters\n", - "\n", - "Now let's try an architecture that uses multiple quantum convolutions and a classical neural network to combine them.\n", - "\n", - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ldo_m5P3YBV7" - }, - "source": [ - "#### 2.2.1 Model definition" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "W3TkNVm9RTBj" - }, - "outputs": [], - "source": [ - "excitation_input_multi = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n", - "\n", - "cluster_state_multi = tfq.layers.AddCircuit()(\n", - " excitation_input_multi, prepend=cluster_state_circuit(cluster_state_bits))\n", - "\n", - "# apply 3 different filters and measure expectation values\n", - "\n", - "quantum_model_multi1 = tfq.layers.PQC(\n", - " multi_readout_model_circuit(cluster_state_bits),\n", - " readouts)(cluster_state_multi)\n", - "\n", - "quantum_model_multi2 = tfq.layers.PQC(\n", - " multi_readout_model_circuit(cluster_state_bits),\n", - " readouts)(cluster_state_multi)\n", - "\n", - "quantum_model_multi3 = tfq.layers.PQC(\n", - " multi_readout_model_circuit(cluster_state_bits),\n", - " readouts)(cluster_state_multi)\n", - "\n", - "# concatenate outputs and feed into a small classical NN\n", - "concat_out = tf.keras.layers.concatenate(\n", - " [quantum_model_multi1, quantum_model_multi2, quantum_model_multi3])\n", - "\n", - "dense_1 = tf.keras.layers.Dense(8)(concat_out)\n", - "\n", - "dense_2 = tf.keras.layers.Dense(1)(dense_1)\n", - "\n", - "multi_qconv_model = tf.keras.Model(inputs=[excitation_input_multi],\n", - " outputs=[dense_2])\n", - "\n", - "# Display the model 
architecture\n", - "tf.keras.utils.plot_model(multi_qconv_model,\n", - " show_shapes=True,\n", - " show_layer_names=True,\n", - " dpi=70)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "2eNhDWwKY9N4" - }, - "source": [ - "#### 2.2.2 Train the model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "suRvxcAKRZK6" - }, - "outputs": [], - "source": [ - "multi_qconv_model.compile(\n", - " optimizer=tf.keras.optimizers.Adam(learning_rate=0.02),\n", - " loss=tf.losses.mse,\n", - " metrics=[custom_accuracy])\n", - "\n", - "multi_qconv_history = multi_qconv_model.fit(x=train_excitations,\n", - " y=train_labels,\n", - " batch_size=16,\n", - " epochs=25,\n", - " verbose=1,\n", - " validation_data=(test_excitations,\n", - " test_labels))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "-6NR7yAQRmOU" - }, - "outputs": [], - "source": [ - "plt.plot(history.history['val_custom_accuracy'][:25], label='QCNN')\n", - "plt.plot(hybrid_history.history['val_custom_accuracy'][:25], label='Hybrid CNN')\n", - "plt.plot(multi_qconv_history.history['val_custom_accuracy'][:25],\n", - " label='Hybrid CNN \\n Multiple Quantum Filters')\n", - "plt.title('Quantum vs Hybrid CNN performance')\n", - "plt.xlabel('Epochs')\n", - "plt.legend()\n", - "plt.ylabel('Validation Accuracy')\n", - "plt.show()" - ] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "qcnn.ipynb", - "private_outputs": true, - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": 
"3.7.5rc1" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/docs/tutorials/sensing.ipynb b/docs/tutorials/sensing.ipynb deleted file mode 100644 index 9c2ad38d9..000000000 --- a/docs/tutorials/sensing.ipynb +++ /dev/null @@ -1,645 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "cellView": "form", - "colab": {}, - "colab_type": "code", - "id": "iiQkM5ZgQ8r2" - }, - "outputs": [], - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Quantum sensing" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Sb8xt8c9habl" - }, - "source": [ - "Quantum sensing applications measure the quantum properties or quantum phenomena of physical systems. This includes motion, gravity, electric and magnetic fields, and imaging.\n", - "\n", - "This tutorial shows a simple sensing problem and uses TensorFlow Quantum to learn to amplify a weak quantum signal. You will use a Greenberger–Horne–Zeilinger (GHZ) state that interacts with a \"signal cavity\" to amplify the signal." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "bK3mhgcgg56v" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "fAVlg7rxkvUw" - }, - "outputs": [], - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "TorxE5tnkvb2" - }, - "outputs": [], - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Rn6Bj5sVhBck" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "saFHsRDpkvkH" - }, - "outputs": [], - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "AiqayQeNhNDq" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "2NGNsLzqN4Lb" - }, - "outputs": [], - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## Amplify signals for sensing\n", - "\n", - "Start with the following circuit:\n", - "\n", - "\n", - "\n", - "If you follow the state of the `(0,0)` qubit to the end of this circuit, you see it is rotated on the x-axis away from the zero state by an angle of $N \\theta$. This effectively \"amplifies\" the input rotation by a factor of the number of qubits involved in the entangled state.\n", - "\n", - "It turns out this amplification effect is sensitive to the signal injection of exactly one rotation on the z-axis. 
Can a parametric circuit be trained to perform the same task for an arbitrary axis of rotation?\n", - "\n", - "For the correct values of $\\vec{\\phi}_n$, measuring $ \\langle \\hat{Z} \\rangle$ should produce 1 for any $\\theta$:\n", - "\n", - "\n", - "\n", - "But can $\\vec{\\phi}_n$ be found in practice? Let's explore this using TensorFlow Quantum." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "u7hmqfTR9VuD" - }, - "source": [ - "## 1. Create layers for in-graph circuit construction\n", - "\n", - "Use the above diagrams as reference to define the static parts of the circuit. First, build a circuit that prepares a GHZ state:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "b6ir7z3Y-GFQ" - }, - "outputs": [], - "source": [ - "def get_ghz_circuit(qubits):\n", - " # This method will return a Cirq circuit representing the GHZ\n", - " # state preparations circuit to prepend to the input of this layer\n", - " # at runtime.\n", - " circuit = cirq.Circuit()\n", - " circuit.append(cirq.H(qubits[0]))\n", - "\n", - " for control, target in zip(qubits[:-1], qubits[1:]):\n", - " circuit.append(cirq.CNOT(control=control, target=target))\n", - " return circuit" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "zTZvLBCZQPlN" - }, - "outputs": [], - "source": [ - "SVGCircuit(get_ghz_circuit(cirq.GridQubit.rect(1, 4)))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "q3ZgFfaTRJRh" - }, - "source": [ - "Create a layer that applies an arbitrary single qubit rotation to each qubit:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "AJM-MuvGRcrn" - }, - "outputs": [], - "source": [ - "def one_qubit_rotation(bit, parameters):\n", - " return [\n", - " 
cirq.X(bit)**parameters[0],\n", - " cirq.Y(bit)**parameters[1],\n", - " cirq.Z(bit)**parameters[2]\n", - " ]\n", - "\n", - "\n", - "def get_single_qubit_rotation_wall(qubits, params):\n", - " circuit = cirq.Circuit()\n", - " for bit, param in zip(qubits, params):\n", - " circuit.append(one_qubit_rotation(bit, param))\n", - " return circuit" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "9YN4sLasSgIj", - "scrolled": false - }, - "outputs": [], - "source": [ - "SVGCircuit(\n", - " get_single_qubit_rotation_wall(cirq.GridQubit.rect(1, 4),\n", - " np.random.uniform(0, 2 * np.pi, (4, 3))))" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "gG1TSEJWk8mp" - }, - "source": [ - "The GHZ circuit previously defined can be inverted to collect the entanglement:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "1maK-MOElnut", - "scrolled": false - }, - "outputs": [], - "source": [ - "SVGCircuit(get_ghz_circuit(cirq.GridQubit.rect(1, 4))**-1)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## 2. Define the data\n", - "The signal input is $Rz(\\theta)$ on each qubit. This leads to a state rotated on the x-axis by $N \\theta$ at the output of the GHZ un-preparation. 
During the model building stage you will see how to input these datapoints in the middle of your model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# Qubits for this problem.\n", - "sensing_qubits = cirq.GridQubit.rect(1, 6)\n", - "\n", - "inputs = []\n", - "un_rotations = []\n", - "thetas = []\n", - "\n", - "for theta in np.arange(0, 2 * np.pi, step=0.05):\n", - " thetas.append(theta)\n", - " # Signal injection is an Rz on each qubit.\n", - " inputs.append(cirq.Circuit(\n", - " cirq.Rz(theta).on(bit) for bit in sensing_qubits))\n", - "\n", - " # During training you want to undo the x rotation at the end of the circuit to send\n", - " # the qubit back to the zero state.\n", - " un_rotations.append(\n", - " cirq.Circuit(\n", - " cirq.Rx(-1 * len(sensing_qubits) * theta).on(sensing_qubits[0])))\n", - "\n", - "signal_injection_tensor = tfq.convert_to_tensor(inputs)\n", - "un_rotation_tensor = tfq.convert_to_tensor(un_rotations)\n", - "\n", - "SVGCircuit(inputs[5])" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "3W5vCsgTlyc-" - }, - "source": [ - "## 3. Define the model\n", - "\n", - "Now build a model. Signals are simulated with one qubit rotations." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "5stYOW-ll3Tf" - }, - "outputs": [], - "source": [ - "# Some random signal injection angle (this will be the random rotation to undo).\n", - "signal_injection_angles = np.random.uniform(0, 2 * np.pi,\n", - " (len(sensing_qubits), 3))\n", - "\n", - "# Phi parameters that need to be learned.\n", - "phis = [sympy.symbols('x_{}_0:3'.format(i)) for i in range(len(sensing_qubits))]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "RDvejUwLofa-", - "scrolled": false - }, - "outputs": [], - "source": [ - "# Input for the wall of Rz gates that inject the simulated signal.\n", - "signal_input = tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string)\n", - "\n", - "# Input for the Rx gate that should undo the signal injection during training.\n", - "expected_unrotation_input = tf.keras.layers.Input(shape=(),\n", - " dtype=tf.dtypes.string)\n", - "\n", - "# Wall of random 1 qubit rotations that randomly select a signal injection axis.\n", - "injection_angle_randomizer = tfq.layers.AddCircuit()(\n", - " signal_input,\n", - " prepend=get_single_qubit_rotation_wall(sensing_qubits,\n", - " signal_injection_angles))\n", - "\n", - "# Wall of parameterized 1 qubit rotations that will be trained.\n", - "injection_angle_selector = tfq.layers.AddCircuit()(\n", - " injection_angle_randomizer,\n", - " prepend=get_single_qubit_rotation_wall(sensing_qubits, phis))\n", - "\n", - "# GHZ prep and unprep.\n", - "ghz_prep = tfq.layers.AddCircuit()(injection_angle_selector,\n", - " prepend=get_ghz_circuit(sensing_qubits))\n", - "\n", - "ghz_unprep = tfq.layers.AddCircuit()(ghz_prep,\n", - " append=get_ghz_circuit(sensing_qubits)**-1)\n", - "\n", - "# Add the \"unrotation\" to each input using lower level append_circuit op.\n", - "expected_unrotation = tfq.append_circuit(ghz_unprep, 
expected_unrotation_input)\n", - "\n", - "# In this case keras can train all of the circuit parameters, so you can pass\n", - "# circuit parameters to trainable_params instead of feed_in_params.\n", - "expectation_output = tfq.layers.Expectation()(\n", - " expected_unrotation,\n", - " symbol_names=list(np.array(phis).flatten()),\n", - " operators=[cirq.Z(sensing_qubits[0])])\n", - "\n", - "sensing_model = tf.keras.Model(inputs=[signal_input, expected_unrotation_input],\n", - " outputs=[expectation_output])\n", - "\n", - "# Display model architecture\n", - "tf.keras.utils.plot_model(sensing_model,\n", - " show_shapes=True,\n", - " show_layer_names=False,\n", - " dpi=70)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "cDF4EsxI48ZG" - }, - "source": [ - "## 4. Untrained performance\n", - "\n", - "Let's see how the *untrained* circuit performs at amplifying the input signal:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "3g1U-GYd9Dci" - }, - "outputs": [], - "source": [ - "# when evaluating the performance of the model you don't want to un-rotate\n", - "# at the end, so send some empty circuits into that input of the model.\n", - "empty_circuits = tfq.convert_to_tensor([cirq.Circuit()] *\n", - " signal_injection_tensor.shape[0])\n", - "\n", - "untrained_outputs = sensing_model.predict(\n", - " x=[signal_injection_tensor, empty_circuits])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "N_J_QNhc5elN", - "scrolled": false - }, - "outputs": [], - "source": [ - "plt.plot(thetas, np.cos(thetas), label='Un-amplified signal')\n", - "plt.plot(thetas, untrained_outputs, label='Untrained Amplified Output')\n", - "plt.title(\"Untrained Amplification Performance\")\n", - "plt.legend(loc='lower right')\n", - "plt.xlabel(r\"\\theta\")\n", - "plt.ylabel(r\"$-\\langle \\hat{Z} 
\\rangle$\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "vLE6ucT8zqnI" - }, - "source": [ - "## 5. Train the model" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "DaINoOSuzsAE" - }, - "outputs": [], - "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n", - "loss = lambda x, y: -1 * y\n", - "\n", - "sensing_model.compile(optimizer=optimizer, loss=loss)\n", - "\n", - "history = sensing_model.fit(x=[signal_injection_tensor, un_rotation_tensor],\n", - " y=tf.zeros_like(signal_injection_tensor,\n", - " dtype=tf.float32),\n", - " epochs=10,\n", - " batch_size=20,\n", - " verbose=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "hDqB6EXs1TEM", - "scrolled": true - }, - "outputs": [], - "source": [ - "plt.plot(history.history['loss'])\n", - "plt.title(\"Learning to Sense A Randomized Feild\")\n", - "plt.xlabel(\"Epochs\")\n", - "plt.ylabel(r\"$-\\langle \\hat{Z} \\rangle$\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "IwlCaAgvEvSC" - }, - "source": [ - "## 6. 
Trained performance\n", - "\n", - "And here's how the *trained* sensing model performs:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "qXzCn2_B2Y3U" - }, - "outputs": [], - "source": [ - "empty_circuits = tfq.convert_to_tensor([cirq.Circuit()] *\n", - " signal_injection_tensor.shape[0])\n", - "\n", - "trained_outputs = sensing_model.predict(\n", - " x=[signal_injection_tensor, empty_circuits])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": {}, - "colab_type": "code", - "id": "oduATtbj3SAp", - "scrolled": false - }, - "outputs": [], - "source": [ - "plt.plot(thetas, np.cos(thetas), label='Un-amplified signal')\n", - "plt.plot(thetas, trained_outputs, label='Amplified Output')\n", - "plt.title(\"Trained Amplification Performance\")\n", - "plt.legend(loc='lower right')\n", - "plt.xlabel(r\"\\theta\")\n", - "plt.ylabel(r\"$-\\langle \\hat{Z} \\rangle$\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "8z58CjVejOCV" - }, - "source": [ - "From the plot you can see that the trained model has amplified the signal from the cavity using the GHZ state. You can measure this signal (which is easier now since it's amplified) or use it as a component in another quantum algorithm." 
- ] - } - ], - "metadata": { - "colab": { - "collapsed_sections": [], - "name": "sensing.ipynb", - "private_outputs": true, - "provenance": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "nbformat": 4, - "nbformat_minor": 1 -} diff --git a/layerwise_learning/images/layers.png b/layerwise_learning/images/layers.png new file mode 100644 index 000000000..4c0a64197 Binary files /dev/null and b/layerwise_learning/images/layers.png differ diff --git a/layerwise_learning/layerwise_learning.ipynb b/layerwise_learning/layerwise_learning.ipynb new file mode 100644 index 000000000..ab79b7474 --- /dev/null +++ b/layerwise_learning/layerwise_learning.ipynb @@ -0,0 +1,470 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "layerwise_learning_copy.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "mFq2aRw_w3cL", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "eOzjTj_JxBnv", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "CdEtQYqo32Ej", + "colab_type": "text" + }, + "source": [ + "# Layerwise learning for quantum neural networks\n", + "\n", + "Author : Andrea Skolik\n", + "\n", + "Contributors : Masoud Mohseni\n", + "\n", + "Created : 2019\n", + "\n", + "Last updated : 2020-Feb-27" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "O8hXbFbkv_D_", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/layerwise_learning/layerwise_learning.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8nKBqagSxl6N", + "colab_type": "text" + }, + "source": [ + "In this notebook, we will use the technique introduced in [1] to efficiently train a quantum neural network without making any initial guesses about the structure that's neccessary to solve a certain learning task. 
To do this, we successively add layers to a QNN during training, which does not only make training faster, but also ensures a better signal-to-noise ratio compared to training the full circuit when done on real hardware.\n", + "\n", + "It is well known that randomly initialized parametrized quantum circuits suffer from exponentially decaying gradients as circuits grow in size [2]. \n", + "One strategy to avoid this is finding clever initialization schemes for deep circuits. Another approach which we take here instead focuses on the structure of the circuit, and shows how a deep parametrized circuit can be constructed during training. By training individual partitions of the circuit as it grows, we avoid the randomization effect that causes barren plateaus. This is mainly of importance on noisy intermediate-scale quantum (NISQ) devices, as these will suffer most from the unfavorable signal-to-noise ratio when running variational algorithms. As the gradients produced by circuits grow smaller, we need more and more measurements from a quantum device to accurately estimate them. When using layerwise learning (LL), gradients stay larger during training and we therefore need less measurements to get sufficient training signal for the optimizer. 
Additionally, we decrease the overall number of parameter updates, so that LL provides an efficient strategy to run variational algorithms on NISQ devices.\n", + "\n", + "LL works in two phases as shown in the figure below:\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "KwuUOTKdL6sa", + "colab_type": "code", + "outputId": "aed2a77f-4ce4-4592-bc94-37453f87fbdf", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 444 + } + }, + "source": [ + "# Do not rerun, or image will be deleted\n", + "from IPython.display import Image \n", + "Image(filename='layers.png', embed=True, width=700)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAABMAAAALlCAYAAAAi+5opAAAAAXNSR0IArs4c6QAAAARnQU1BAACx\njwv8YQUAAAAJcEhZcwAAFiUAABYlAUlSJPAAAPYKSURBVHhe7N0HdBRV38fxHxASAqEl9NB77yAI\nUqSKIqioYBf0VRR77zz23uvziL1gR0GlSu+9SgmdEAIktPQC79zZ2WQTkpAGZJfv55w9mZ3dJLsz\n99659z+3FDthEQAAAAAAAOCjijs/AQAAAAAAAJ9EAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAA\nAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwA\nAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCn\nEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfFqxExZnGwAAAADSrAqL0Oqt\nEZq9eqf2H47RmWo6tKxfTbUrl1PD0BAN6NTI2QsAQP4RAAMAAACQQUx8ksb9vVyTl2yRipdQsZKl\nVbJUGWuzhPOO0+d4aqpSkxOUnBBnPWLtANidQ7soKNDfeQdQOEw6NwHesPAoO9hrnp8J1YLLqmFo\nsNo0qK62Das7ewGcbgTAAAAAAKQxQYAXv52t5Vv2qnRITQWWr6ziJfycV8+suMMHdDRyh0qVLK7x\nT11NEAyFxgS8Xv5utvYfjlWgfwk1rFrW/nkmRMckKiwyxt42vRyfvbmvqgUH2c8BnD4EwAAAAACk\n+WXOev33z2UqW62BSpUNdvaePcdTU7Q/bKW6NgvVcyP7OnuB/DNp/IvJy1WjQikN71pHwUFnJ7Bq\ngmDfL9hpB8T++8BQOxgG4PRhEnwAAAAAtn3RMfrs7+XyL1u5SAS/DNP7rEKNBpq/bqf9AArCpHET\n/OpYr6Ju7ln/rAW/jIZVg/TUZS0UGlxaT302zdkL4HQhAAYAAADAtnVvlOITkxUUUrTmJTLBuJL+\nAfZcTUBBvPL9bAX4FdeA1tXP2JDHUxnZs74iD8XqgwmLnD0ATgcCYAAAAABsJsDkH1BKJUoGOHuK\njhIBZbRpNwEw5J9r0vt9GtoxtMgEvwzTC61nsyqasnSLswfA6UAADAAAAIAtLDxa8iuaE82bVSjp\nAYaCcKef0Iql7Z9FSY2Kg
WdsFUrgXEUADAAAAADg87bujbZ/ns15v7Jj5gEzCPICpw8BMAAAAAAA\nAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAA4M1iIrV9+25FJTrPAZyEABgAAAAAFLoYRXpL\nQGL6R7rkyjvV79qPNNXZBe+QuPMfvXrvfbro5mf1fw+/rOHX3avbv17nvArA0zkZAPv7lbvVjwL+\nnDfrvYd1kZUOLrr+k/yngx2/6/5rrbR05b2689uNzs6CSFTUrn81f/Jk/faH6zFjGXdyzqgp79vp\nwpzT56c7+4DCtmG8brHT2d1WJbUwyg4AQNGyU9899riuMwGJB7/TamdvkZWUbNVCPX7CS6zTF6//\nomnhSUrx81eFCmUVXEbaumuv8zoAT94XAIv5VzOcwMDJjzlatj1SMc5bs5OSkuraoIDPlcSd8/TF\n2x/q2Vf/px9W+s4RO3Y0VinWz5SExPynA+tvHEoyG8mKSzB/Lb8StWPql7pj1EMa/sD7Gjtuoj78\n2vV4+RWr4nTTw7p/3EL5+qUsceY4jTABxRH36c6zdecq9bidLsw5TbTPLXAapByX60qUaqWzgpQd\nAIDsWfWrxTPsNsK6UzUQCt0xHYtx2hxHjumgawsoXAuX6p99ZiNElz38sn7638v64YtX9Ns9Pe2X\nAWTkdQGw1T98r5edwMDJjx/02MPP6rJrn9aT3/h+sOC0S9ylmeNe03WPfq9v56/X3KWrNHnFFudF\nFKbVX7+mMf9boi1HXRWlgDJlFWzfwSlpP1dKrNZO/kZ3vjnXp4O2G7ft1EETdEpJ0ubl67TetRsA\nACAftmjKN7/abYQf5ji7zpiWGnx1L/Xt1FbDruul7s5eoDDt2B6haLNRpbG6tAuw91ktCQUFubcB\nePLiIZAlVLqcEyQwj3L+8nNeUVKUFv/+je58YSpBsHyKWf+Xxt77ul6cvEOH6ZxwekX9qW/+inAF\ntvyq6LIHntMvX7ysH5w7OP8b01F1nMQds3CxJkS5tgtq1odP6epbH9XVj/1UZLrl1wwpn5aPg+rV\nUgtnG+eqnRr/HyuNWun0xjdmqJCSPs6EHb/rUVO+3PqUXpzCmQNwbqrR/Uo98vCtuq1/ExGOwOlw\nJD7BtVG8uNU6BnAqXhwAq6BB9zhBAvMY95b+/uY5vXZdM1V1BwtWTdKL34W5niB3EjdpwmtPa/jY\nPzX/YKrkH6Kebas7L+J0iJr1r1Y5Qcaafa/QHV2CPSpJAarb8xpd3cG9Z6+2rXE2C+jYoWhFHz6m\n6LAI7XL2nW0hQ8fomzce0n9ffVpf39PN2Ytz1zEdOWilUSud7t0RqT3OXniBo7GKNOXL4Wht2RPh\n7AQA75AYtVvbt+/Q9sisxk2aie2t1+xHYc7Tmv53d+X6j5q5Y/P+WdK+HysGnoPc6Sz7c5+ePqzH\nrujTMPokf+nWLf3znXrqIyAz35oEPyBYbYeM0avXN1GQvSNVmybP0Ax7G7kyY4o+WBJlFUslFdr5\nYr31ybN6sl0550WcDnsOH3W2pJrVWzpbngIUUr6Ms11SpUo5mz7J+q6166pevapOHgYAAMirGRo7\nwiw08pF+tudHkhZ9bp47jxs+8WgfrNMnd5n9/9GnGxK14rvXdN2Yl/V/D7+m/xvztF6a6XqXGR3x\n6iOPa8iIR3Sdec1+uFbcu+m1ydqYRSN+9bin7f930SuZWyNT9ZT5HCPe199WrXvjxI9007Xpf3fU\n7Y9oxDO/aEW2gYEYrfv9I91yvZk71uOz3HSfbnntr+znO4tYqE+eeUxX3O58P/M7Ix/Tkz/+SyDB\ny7gXdXtwstPLet98PZhV+v7r3fQ0GDFLr4550kln1rl/+hftcN5m7J33k5689z4NTUsf1uOBp3RJ\nNtMLRU14w1k4KuvHyek+7+nWnYdu+XKjK/0++bDH53tWV17/H706gxttyD3fCoA5agy6UAO
rOU/i\n92n7Bmf7FAoUTY6JdH7XeuQ5Uu5xJymvv+v5f7O8S5VH/gGqENpSo8c+ry8eGqSWZz0K4XmHYIfy\n9RXTjlFB7nJ5fI7COM4ealZIDzDuichi4vfEJZq9zB7dL/mFqmF71+bZlX48cjocGe4gFfJxyyAx\nWruc/3M6/83JTtdd4Nzw/N/5vwOWdo6yPXCeebCQ7rR5nK/Tli48/0eB7l56fP/Tchc0M4/zemYT\nc+54XHNy30MhjwrtulbEjyWA06iUKgSbaVLKqLQzMiRtflXzKBuQxXCxVB1e+Ime+81M/1FSQfY8\nrCmuBXF2TNATz/+paduOKDWgqtp0aqELrEfn+uUUoGSFL5mo+56fmO3UK2kLcGWWEqP1H7ys+75a\np3AFqn7Teqpv/99kHdzwj557PavpXKI0/+M39NA367QzobhCm7o+ywWtQ1WpeJJ2LvlTDz353cnB\ns8TV+uSV7/XzhqPWtcz6frXqWZ8/WEE6qsU/faTbJzqRQniFwLLl7bRcoZSTkt0rQGaTvlMSI6zz\n/6umRSbLr1SgK18kJivW9bL2/vWu7nxnlhaHWwm+VHk1bW3SlZMes5teqESAyrn/p8cjyN/1csZ0\nn89060iOXKiHHvtGP29KkH9IqNrUL29/h5SE/Zr26X/1iQ8t1IbTq9gJi7PtFUwU2BXpDtGwp57V\nba1d+zOb+MKdeneV2Tr5femvNdHdP92t3uv/0tv/m6b5ZvlY+x0W/3Jqe+Glum9UV9Vwdp0k5l/9\n+cM0/TF3m7bFJjs7Hf7WRazthbprdA5BJOv3f/3oR325Yr/iPOfZsv530849deftA9U0ywkDErVj\n9h/69LcFWu75mS1+paqo27CrdO+QZoXXg8bcOfh8k71Zc+BofT4qq15KGcWsn6NpW+NUrkF39WmR\n109iVuz5Wz/+ukTzdx3JeGzM3G8htTVoxFW6qWdtj6GCJzN36k46r35l1KjLhbrbOrZbXs+YDgbb\nb8jELATwzY/67z/bXZOzOwKqNtGw4VfqpnIzdfNz8+2hWbk9NifZ8pNuf3yWtprtMq316Ce3qY/7\ni1n/f8Lr7+qDVfHWkxKqM3CUPhjVJsfvnaOlX+qaV5fogPM0S361dPNLj+qaus7zDeN1yzNztVNV\ndPV/ntEtFWfp1Rcm2BdQm19FDb3/ed3ZyfU0cecy/TphqiYt26f9CRkrfH7WBbVlr0G687ruqpvV\nl9hgHYvnrGORUkLNhz2gd66u47zgmPK+Lvr0X6X4NdP934/RRVnmoZKq1LyzRt9+jXrkZ/SuR3rv\ncvMHem6QvekhRusm/6HxU1do1e74TAERU6FsoutHXa/LPdP9nt91/0NTtdb6jAGtBuvLpwdaJVM2\nEufqpdvG6x+rRhLQ+UpNeqiX84KLWZX1+29m6Ld1mcoNq+JTp30/3ZttmbNRn977nn4Il+pccpc+\nvbG8pr3zkd6ZZ3p8GiVUvf+N+urWDvaz7NK+/X9anq8bRl6Z5+Nrryj72Z+aaFe+05lyq8OA/rrj\nOld5a+4oXvfttgxl20nKtNUTX9yqjEfHle8//Gqu5mzL+D9cZfrF2ac9u7fAr5pvNXp63va2nuxr\n7lJ+rbd//teqqKWn40IrY9d8l7HsuK5c1mVNxXoafMO1uq17xoM9/72HNXaOlUgy5b+sbPnuRd39\nW7iVb6rrmqee1M3NnRfyIMvy1BJQMVQ9Bl+hewab+W2i9PMz/9EnG7Jp6DmCul+j304a4lyA65q7\njHJ/vwbZlNvmWF576msHcK576rPpWrr9kCrVKXozcR47sEcBSdH68Znhzp5TMb27XL3Asr6mG+nv\nMfwqtdbdj96ki+oEKDEqUjFBVRUSM1ljX9iuJldfrMvPy1iG7J34lkZ/FaY4her61x7XDe76kyWt\n3dJ2iKY90d/Za5geYL9rkb1dQhVa9dNzjwx26v0xWvK
/N/TM1P1WWXhyOyZx5ica/uEaxdjzxj6g\nOzp6lIwR1t99zPq71uWhwZD79PF1DZ0XpCUfP64nZhyxvmCm34tZoS9e+E7fhpm6ppFDvdgH/DJn\nvT6YsEhvXV8k7ihnEH4oXq9P+lf/fWCoGoZmW1PMIC2NVeum19+7Rm2c/Wk86rVSoJoPuUYvXNfe\nup7GKDJSqlrVSgdbftGYp//RJqv+XbXrUD1/54UedSXP9FhCba99Rq8NzeGzpf2tACvPPWHlOdd7\n85tu09v+Fv/qGjx6lO526kSJOyfr+Wcm2r/nZ+WxCVYe4/qOU/HJHmCmIp2ac/3bkaoDv7ylm5//\nU7MzVbiVdFSrJn+ve97JZtW9LRN0z23v6+3Jm04OfhlJ8dqWUyQ7aoaev+cjfbTEacT6B6ZHzK3/\nvXHeXxo/y35nJhGa88HLGv2+K0JvftW+o+WsFmii4LO/eV/XW4VBVl2xz4ioP/Wf53+wV+Z8eey7\n+sqzb+0pJWrGO09p9OvT7LtsGYNfRqriorbr5/ff1fN/Zd/dde9f72d9XlNitWXeRN33yOfaZBWW\nOUpcrU8ff1MvTs4UALAkRm7St++8oYfmHtZxZ1++NbpUV3YNdG3HrtG4T5fYaS5xo1WoP/BmWvCr\nQsdL9GxBgl9G9DEdcjazlZKouDhn20g5bh11I1Wp0eYC9VN68MtIOaR9TkQtceb/dN2jn+uzeeEn\nBb+MlIQjdr66L8s7mpaUZCXaJyxVR2OO2bsySD3uOp/WZ0rcOFEPjXk/PQ+lMXdN5+ulJ/+nGYWe\nB8L03WNP6r5x87X4pOCXkayY3ev00fNvZLwTVbOFWjuR9MS1K/RnDnki6u8lMnENo25oPdeGY++8\nz3WnWZV1lWe5EehKEymuu2f3jXlDP2SZ+VPSysVU65xNfOENvZoW/DJSFbHfqWBYjYHPPdO+c1fR\nvsto/s+qWfpq6kbXW3NrzU+653H3nWcT1ChjfXbXnXlTbi3+far+cnrr7jsSkzHfZiUxUe6qukui\nNv7yhq4fa+7Qe/wP9wIpTpk+2sr7c7IsOqz0bf9TKw3G79KEV5517lJmTMeuMvYj3TtudRbnP59S\ndumT7MqaQ1Z5Z5U1T2Uq7zo2r6fSZsM6l1PnzrX3ZW25/p4d7jqeJSurdj6CX1FTPspQnrp6UbjS\nXeKhcE37bpr+sd95QIePnPrim5DgTNibpoDXNXcZZaXNpLgVOR/LU1w7AJzj/Orppqdvs4NfRkBI\nVYWYzZCBGvvmaI3IFPwyagxurXb2hSZcW3I56sRTUNtL9M7T7uCXEaTON/RUN7tqGKUtW9zXZmOn\nfpi8RqZPa+XuF2mUZxDBqN5fN/R1BQe2Ll7iscjRXM1YcMTeqtzzkozBh6D2umns3bqzrVMXhc+q\n3OsqvWoHv4wgV/DLMmPifG0yF9/gdhqdIfhlWOnx1v/TTU1Nn7JUrVo4K8OwyYwi9MNXs+2/VbpV\nf412gl/5T7ce/Kpo2IMPpQW/jIA6A9N+L2V7uJbZW0DOfDQAtlrb02b1LqcKoc7mScL0/fgwHVYZ\ntepzmV56/xVN+vhRvZQ2kX6qDs/7W58std+c0ZZd2mBVru07yqNu0Teff6BpP7kev710lQbXdiru\n4fP1359Onoh/yU8zNPuoqbIHqssNj2jSt6/bk/n/9u0r+uaRi9WzqtUwzOI6tPeXb/XSLBOBt4qj\nht30+OtvapJZMfCLt63PfrtGtipjN/ZiNkzT6+OzGE53JoQf0UHzAW0Jik+f4ioXtigsLFYpppdJ\n534a+8JzmuQc12lmkYMrG6qCfW7itWj8BE3LqjEUMVnvjf/XWb2ypBr0uUpvWed22udP663R3dSq\nXAnrvCzTlC32u7ORqBVf/aIfdrkCPX7lGmrkI49q/Ddvavwbt2h05xCrAhSvVbPWZ9vdPfcC1GdE\nf3Vxpvk6MGuCHn7
mP7rqPxM12wSa/Kz0OeQmff5I/+x7I+ZWr2v0yatmkvmHNDKts1oDjXT2uR63\na3iWjeREzf56kusuS6Vmum7U1Xp0VH8N69ZJnRu53rFx227ruJdQ6dCWGnnfvdbxcucL67g92U8d\nrWNvxKyarM8KFJ3apS9fnqxVsSVUoUln3fvI0/rNSh/jrLzTrZLrf6QcXaVxny+3twvPNv0bZp0T\n05vIKTPc+X7a5w/o0T7VnWDUfv38zR9WanZrqG7t3BfscC2Yl93iHFGauXKnE/yppwsGePSAi5io\nNz9Ypp3mxTK1NGyMu9x4XZM880bsNn3x8W85ruwZt2SiPjSBVb8K6jb0cj16x+UaObCtLm5X3349\nasIU/Win/RKq03+Ufvv+Lf1klU8/ff2axr9wlYY1KadypfJSUY7Sz7/M1VY7mFZFlz3yiv7++lXr\ns7+q3794Tu+bfFmxtErbER2pxRW3O2nxGl1cxbVPVTrpCc90+so1utB5yUhc+J2eHb/NrlyZ9Dl6\nrPM/zAIpHufG5P23vsp5Rcl1v7yrD5ZZ5ZB1nN3l+6SP79XYofWdSmOqdk7+QR8uLJwQ2J7pf+pn\n63gHVE3PN7+9b5UzHSs4q6Ja5d3X3+oHj7hNQPdm6uScgrhV67IuC42l67TIGUEdfF479XFt5sFy\nffvrOld5WqaJ7nRfc0y6s8rUl65rqdCyAdaVzGiq4Y855+euTmnlVY3e16SfN+vxyS3dnVdcCu+6\nFqPJ739hH0uTBjyP5b3dqmR7LAHArcVl1+rq9PZ1LlVV9UrOZp6FaODgLOp3AdVULdi1GXXYo8BK\n/Ff/bjMbQerUvvNJwTijUYu6rr+372D6IkdrdmuzfdfI+r02Tk9vTwG1NbRdbecJfFL59rr9lqzS\nzBKt2eCqRNTs3EndskpUqq6+bZ30sW2XVmVT59g7cby+22i1b/3q6ZpbB6an6/ymW08tu+q2dif/\nZqPKTl3pyDE51R0gRz4ZANv7yxxNdd3kkF/zZuqbUw9Suxvm03rz9r7qWDVIASG11HHIGD1zsfvq\nd0Qr12RR4e4zQE/cMUbj//ug7h7YTk4A3RbUsKfufrCX3B3Ht276N1Nja6PWbHQ+YNOeunew592k\nIFXtOEhPvv+SHu7h7HJLnKXPft9uNxL8QrvpqbHXqLdzh8oICGmlEU//n0bUd0Xod86Ym32j6HQK\nLa9KrpaGpZQC8zSHfksNvvFqPffyy/r0oaHq1thjRUSzyMFVd+iu852DHb9D61a4Nj0t+X2Oljld\nQ2r2v1nv3N5TLc0JCqqqlhdeozffuUnDnABltqKm6/t/nLMW2FC3PX+fRnSspZAAM0l7O13+0BN6\n/4ZmTjCuEFTvr9FXNHT16LDS3IYNpoePCe5YjcGXn9ObaXdr0sVE5mO+K+sY1q5nJpmvq6C0z+6n\nIGef65HdBPQxOhCdaqW9rvrPa2N048Ae6jNwiG679yYNdgJgbQYN1aMPPqgf3x6tEec3ct01tVnH\nrc1Qjb25jVz1uUSt3ZhTiOZU4hUTa4IzN+nz52/UxR2tz2y+m5V3xt5/gRo47zqwflOOgaC866lr\n77pab33ykl5zyow0QfXV5/ZRutY5Ftq1Uys8Mn6ji9qkdUvfunKlR3DMQ9QiLd7s6kGTsexK1LRv\nZ2q1nfmraNg99+k2z2Fcdt64Ty8ODrUrASnhSzQxhwBjdPQRpdhl32Mae20f9endRyNG3ap7B7m6\nnC/Z5AThyrfTzbd6pj3rPDbuaeWHl/Rm5uGpOVqldc73Cu5+UcY7z9Znb2Ly5X8fSh92a+VVV1os\nr0D3Vaq4vyp6ptPanqulhun7n5e5hvaWaaI7nh6TcQiqfW4e0n1OT8uYZQs1MYcAeExsvJXOO+qp\nlx5NK98DQhqp27UP6NXLXMfY5NOp/8wutF5gQW0H6/030vNNUFWrnHlkjO5w35FP2
a7Jf3lciwJ6\nqfd5zneM36LF87L+JEuWb3KGPJdXt86d7a082bBFa5waZZvB12qoxzXHnKeOQ0bri/+mD0UNquqc\nnwr+aRWM4gHl08+b9aidXjAU8nUt0Tp3qQpqPlBvvT0mw7G8+N5H9aC7p23mYwkAjrJBp4p+xShy\n2Rz99sfPeufVD/Ws/ZiqpU61vvA0VdXKzqanTdFyjdSM16xPH9XVt2bxeH+59tvvOaRId4+0PQed\nlZSrqV7RG/mHMyEwUOU9Lr/pDsuqFtqyXozLJaRGiFOHt9KVe0SlJ9MB4SczFLiEmgwemjGQnN90\nmxvVg61UDeSebwXAYrZpxriXded4V2XaNBSHDu2b/Vw7pvfV9f+XsTHm8Gys7tmXRR+fgCbq1TuH\nOWCqN1Qzd248fCzT8v3pQ5EUl5DraHXU38s13w7sBKjb4CvUPstCrKEu7uxE6K1G0bIFrs0zKuRi\nPfPc9brv+ss19qW7M8yFkBs1OvZQF89GVgYB6tHA3aUvRtEnjedbrkUrnVI8sKWuuSGLIYNB7XXb\nizfrshxKy6i5G7XOTkRSg/6DNfSk+lCA6g4eo3eucQetCiZm2U96fcJ266LhIbCerr0zY2PQbf2X\n/9FlY17TdTebFYucnWeCX6hG3HmdOmeX8Ku3V58shge4BXSvpcbOdvThw85W/pghA89mCM44Gl2o\n3k2d7ezuIuVbgJr26JHD4hDV1byBu8Q5qsPhzqYRcr66NHf1TtOutfpnjWvTU3q6K6GO53mUXVHT\nNXW5q9VfukNP3ZTFHTCj0YBWTuA9UUtXL7G3smYqJ9dmWfYZaZOWJiUorlAiPO7hhVJCfFyhBY3S\nLJ2vqc6JbtB3kAZn2X4JUJ+uLZzKW4SWLd1pb2XJBNEeuTnLOc4aXXG+MyzFOk7rNjtD/wrGDvw8\nODCLucmqa/BV56UFdPesXaf1zrbRrXNzudpHiZq/KKtgnEd5WK2leuYwT1i20oZAW+VU/CkHUOdZ\nYV/X7GP5uOdQIjfr/A/O/lgCwKnsnfed7h/1uK57xUzzMVOTlq7XXPuxVTudG69nTqrijh6z6lJZ\nPI66hpL7lauheu5CL00JM285kG6NOzh6CmUCcmjzROnvr6bbHRD86vfUPVb7KGv5TbdA4fHiANhh\n/fWOZ+T4QV1y8xt6efJuewiMHdy65uZsG4outdV5UDZ3ekKqmo5MLlYjunB7kbRUw3rO59o1V698\nME87ctEiXLXNmcdFNdSobfbfK6RRDdW0txK1dVce5+kpJEENu2jQpX3UrWG2kYLTY8cObXZHFBvW\nU4/sDlNAG9XKIQCWfqyD1bpFdoW4dSbqVHUa1PnlzFv0yiytNUNi/UPUtJbTuo4P0yevZD2HXPqq\nKiXlXxgRuFyqceHFusHdw+ksa9kuuyGhIapZzZ3uDmpvFoGmsyNEg89r6PQeitLsBZmHZ3oMfwxs\nph5pKyFY1uzRZleCVKN6rbINMCqknho56TpuV3j2DfwqnTUy28qJ1KqeE2SOX6cPX5yQ/XLqudZE\njVyjKxW3eFKhL7e+Y0u408spSE0aZP+91DU0LQC7dXd2w1AtjZpmE0SzBHRVS/e/SNmvXYWQvqq1\nap1N4MfSqLFau4eBhu+XPYLArVMH9XTOd8q6tZqYeVynx/DHBud1Pnli3NxoXUuNnSJp65/f6q3Z\nuwo1gFnY17V8H0sAyMma8Xr6g/lWXU2q2rGfxo4102K4p3kYrWE51ClPDzM5vvv/Z/34e9yt6Ysq\npUlVaqHfhYJXa5LLHlSxiRlv1HuImvK9/mtHv6roimuvUPZNhfymW6DweHEALHME2T0htWv+odFj\nx+q5DEML86p0lnNwZSkmUusWzNBvf0zWlx/lrjt0nxF91NZeIjZVO2d9r9G3/EdPjpuuVdkuK79T\n4ZHu16I1f5z7/2Tx+G1LWq+y1BSn1eyVEhW1a
6VmWMf1t5++0yvu7zc3h2WaD8XLPXV6zdCa+Tz/\niToW4z7WlVWrnbN5GkRN+Z8ec89bVK6Z7nzxCb330vUaFurMYxU+X8+N/eWkINjBI+7wQZAq5rGH\nXUEUL+70YMqFxKjdWjbTOnd//K5P3nanzxWu1S5Ps6BSZ+LKGaPI9Yv0l0mf336Zlv++X5t9aCeg\nT+u03kMHVq5Thj5aUQu0yFk9r3TrZhmCtzsiotIqHXuX/ZD2v05+eJQ77gUDslK8uFVSZq/uFX01\nuLLrHWbepftue0x3vTNB88PyG7aqo6uHdlZ1O/oXr1U/va/h//eiXv1xkfL9Jz1s3XfQ2UrW+qlZ\nHRf3Iz39Zbsk/SkFqHZ1d9j7sCI8pmaZ9eFTHjdlMj/e14ScJh7LVk2FpEXZMw8LaKkLz3MidSk7\ntXhuxn8wf8kGJzBYXV3O8wwMztfbd2b1GZ3Hy5M9hu1foBsvr+8qS1P266/3X9dV976rTyavVbaX\nq1w709e1pqpR1dnM6xALAOe0GdOXuObgbHShXn1kqLq1MNNiuF47o0LLOr3DM15/Tsm6brluGkZr\nL2UfPAWUUTmnbronIvvpAaL2Rrmuw35VVNtjVVKzevl/vzc3NkuoTt+hut7zNbf8plvgNPDiAFiQ\nOl86WHdc7/G44xaN+/g1/f726Izzv5wm9pL+LzytS25+Vve99au96uE3/+SyO3T1i/X8U4PVs2r6\nKleLJ/+mh25/RCOe+U4zd2ZuWRxTfFrY/Yj+Tet2ncVj3f60xnKZwLLOljeJ0brf/6c7Rj2k4Q98\nqpet4/rhj/M13f39wnKILEZYF3ZnM/+2KNJZ1fD0MpNLOz1hzNDCR8e45tcJaKPbHrlE3dwTxof9\no+den+zRS3CdwrY7T+qHqpVrq8iIWf+XXn3oYQ29/WU99qF17r6eqp/nu9PnbqdB7sUSd2nmN+/q\npmsf0XVjv9ZbJn1OWJKW/5aH5xAV8Jy3KXqD5i90bRpRs9zzlQWoS4deGYK3R+LTV807EOY+llk9\nPMqd0gFyd2LNs4DOuvupERrWxDX5uGtl2mka+9jjGnLvR/p1fd6jVgFdb9TLd7oWoTDs1QN/+lqj\nb71Pt7z2V4F6mcXFu495onauy+q4uB/p6a90YG7vcJysRPFizlZqhhWHjx2K9rgpk/mxX5GeQ2Jz\nLUSlSzmb5v9liv2kD9dP1arlCzwCV0u0ZKVzUOs3U68Mt2NjFbU/q8/oPMKjMwyHqDF0jF6/oaVC\nnZs2ceGb9PO4jzV85GN6aFzuejBn7Uxf1wJUIi3ye/KxBODb4uPzdRfC5r7O1KhX7+Te54lJSirw\nkuC5lNbTO1Ur185ybv7nQvMqqm1f0KO1Zn0WPaATV+uTqTn0jIYPa65G9VwXxz2r1mQz6ilMExds\nt7f8GtdT+oyiiZrxwe/6x6xeXq2z7rgum9Xq85tugdPAiwNgAardZqAuu9Tj0btdxsl1T6eIqXr+\nme/17aooKxObXmf11Hdgf4+AXE91PkXrM6DpQD35/isa/+RlGta2ikrbF6ZkHdwwXy8+/po+XZld\n8ZB51b7sHk/r5WvyMlF1UZCoFePe0EPfrNIWe0hgOTVt3VnXeQY6s5qYxxvNXKYZTpeG0h3O19We\nDdTq/TX2+UvSVoaMWTVR9704URtNkli6XLOdTnA1GzfRGewAdkqJK7/Tvc//qWk7YpWikqpUv5Eu\nGepx7q5vnzYHj3eK0MTX39WLv29SeJJVCShVRW26ddNIj/Q5onXOwff0eZtiNGvhXHtLitL0Va6K\nhcq3Uo/ers2sdLgqq7x+8uObp64oWNqo3lW3Pf+qfnrpeo3sHqpKacGPdfro+Wc19q+838Kr0f0a\nvTnuRX10Xz/1q1/OVUlKSdLOJX/qoXve18QC3xUM1sV3ZX08Mj4e1Wd3XuD8TuHpPfRqj7Se6TFq\nqAZndVe0o
Dznltu4Vn+61yZfuF6LnHsFbTr3ypQWrPLmjiw+o/O478bemYZLBqjp4NH64rPn9Nqo\nbjov1D8tMLpq8vca8/j4LIdq540vX9cAnD1lFehMFbF+1YJ83yQtHehqX+xdszZjeZe4ST+8+J3+\ncM3efQa01IVdXYuxmCkFnp+Y1bD0GIUt+Fu/L/QI+AX0VI8Oru9ghrN/4tnGiFioT8Z+rZ/D89sz\nGt4tRIN7tXDNqbtvkd47aWqeCM354Fv9YM+1Wl79B6bPUWtW4B630Nx5La9B11yZ/RQE+U23wGng\nW5PgnzFR+vnjSVpkot1mrrEbzKp3D+qRUUM8AnLNVTtXHQzM6nh9ddsTz+jH929PX/Y+KUK/jJ/k\nsVJcdVWo4GyqmAJrpK+olf0ju9X8irCl4/X6ZNdy+Ga1wRc+eUnvPXWjbvQMdDbIYVnJmpWceWKk\n6CP5nbC5pWqkDYaP0SF3g7KQeQ5rC65Y6eQ7JtX767mXhqT3BNswWQ8+/qU+/Hu1qxeLX6h698h+\ntZYzb7k++Xi+a4iAWWHwkef1/Sv36p5rPc7dpbWc4I93iprwnT5c5epiZVbt++jTZ/T6vddohEf6\n7FAj26u/i8e8TXGr1rlWtItaoKXOtEaVO7ZWN9dmmpoV0tN8sVLVs8jrJz88F6gsCDOf34h7Htf3\nnzygRwfWcpUpKbGa/+MfymGhyRwEqeH5Q/XwKy9p/EtXaVhDV0GZcvRfffb93HzdFQyukP5l/ctm\nfTwyPgo2dGXXPnflLEQ13QWOJahFD4+0nukxsH02c9adykaPHqmVVOOkIJrn3HLhWjDPdQd//oqN\nzlCF+urSK205BUeQWvbO4jM6j0Eds7nJYFYbHXiNnn/7ZX3z5MXqVsnpzbdrgT77JT89B870dW2d\n9qaNoM/qWALwPXXUtZWrTEvZOE2jR5mh3g9ryLXv6297b+5079rKVX/Zt1BP3/WinrCHZ7+uW275\nwF6MqE7omatxN7rmZmeF4Hgt+uoVXfF/7s9jPZ56Vlde/7hGvzVJEzZ43lUKUL9rnelXUvbr51cf\n1ZX3v64nHnlcQ+7/Rj+HpajBJV3V0fVmnGMCel+qkR1Nr3/31DyP667nrPT03Iu69vqX9Nws0zYr\naaWRq3VH1/QK1LyFa52e9Uf015v3q9+Vd2Z8mHzmVOzyl26BwkcALF/Sl/RX/a4aVaC5xtIFhLSy\nl713TzKesi3cavq4eU7svU/bVzibPmb9hh1OQRqgHkNyWG0wO2ljzKW4yP3KPnaVmGHoUmbpDeqD\nCnc65mQp9bjy2+u9bIDdpcYWl5A2DigjuyfY5bowraG5RL+tdV1JKnfvm7HX2Nm2YYvWuHu0dbow\n2xUGvdmSTc4k9QrVZddmtWpfbnjM2xS/RYvnJWrHlLVOl/Py6tKhg73lKX3paWnbrsJdkiPXguqr\nz6j7dF8P57zGhissq2Ww8yCoYU/dNvYKDXB6y8Zs3+1R5uVe02rusGq0tm0rhDuHVr7OPhC3RGHu\nMsGvkmo0cbYL4PjxHAqjqC3a6B46Wa2SnLUQM/CcW27rypXaormas9g1/NGvcVP1zhz/KjBz42aQ\nxj59obPqaKo2bc/PlPKFf13L+Vhu1xZ3ACybYwnA9zS6crCut4f0u+cPjlVS+fJp9cV0JTyGSWcU\n0PUavXhrW9UpVcIewr/EHp69XfsCa2vYPQ/orlY5Vwj8/LL5w+Z/uu5gZKuEX+Y3VNfgJx7QU0Oa\nZPo81mNjpA4f91f9tt10dbdMlUTP6VdSknR493Yt2XZESaXratiY+/XOjVVcN1P8ijs3VeAN0tJW\niVOctxxft9LUI0/rteta2mkqJeGINq6x0tOacO1PSFVAxXpOGsk4xLFs+hwNWTueqNi0KS7ymW4d\n2echB+kWuUQALF885g4pU8bKzllIPKZjOc0Blq3qqp3NXaRuzes7y8/GaNb
8Ij5+OjFau7bvUGQe\n5/VJn5i6jMqn9QzIaG9MNsEiw2MVPG1brSlZrtBmVl58X1+udZ5moWM9Vzdd895FC6Z6zKvjIWKh\n3vpySb6703sGNaKti8z87E5o9V567LVrNCBDTS1YbVrWKpTAa6FJOW7lDJcKZbPppRcRl7ZIgTdK\nT5+BKpd1xteRuFPnzEbdm8kVN0nU/EWTNGu1E+Go3U4XdXJtZtC1oeybZpboxctdvcbOigC1qFHI\nffgCGqtWNh2OciukQz3neEqrF/zj0XM2n9ZOz6Z7vnXGZizXLGdooV/jBjq/EDLh3n8mZByO4mHL\nlLVpq3lWbtEk65UcA3qpq3sFxV1r9c93G7TIvv4EqFuP9KEKha56DdXJ90RzLoV9XTPH8sNlWV94\ntvy9Om1uk2yPJQDfE9BGNzz/qiZ8/GjaUOqfPrzeYx6jlrrtPbMC3bsaO8DZdZIA1e1/qz79+jWN\nf8MZkv3Gc/rlvw/qtu7V1WbUs/YKds8Nct7ucO//+5E+zh63/nrOXvXuGd3S3NmVyeAnXKvifXpj\nU2ePp+rqcd3dGT+Pebz/iqZ9+7o+eeIaDWx68gXKNf3K2/rt/fTvMGHcQ7qtp7mZ73ym78foIuf9\nKPpa3PiMnU6mvT3cuSmVyaC7c349TZBaDhl9Upoa9/GbmmTSuZ1GMup8+4uuv53d4/sHNCxDJSTv\n6Tb7PORofY0+t/8X6Ra5QwAsXyoo2F3p/3edJmTuqRmzQl+M/UVTspurfcfvevqR/2U9kXTiEi1Z\n4+zPfIe6dzcNcg+dWjxJT/7oTKB+kkRFrZ6uT76bm3Xg5nRLnKuXbntKox5+Tdfd9laeVj5LH+oV\nrXkLlmRqDCVqx9RP9PBvu53nWUkfY26Gqk747BstyXCQYrTkm7fSVl7MTkCfdurhzL8Vt2qq3s7U\nGE7c+Y9eevZ7/VWQ+RK6dtKF7mBd9DK9/uKELCcCjwmbrU9e/lUzMhzHaE23Lha3f5z/Cajd81lY\nCVKr8zeWLSOP3nd7l604KaDnOmb/pDXmvVF6z8DtmjfppIxvp6235uSUshx1e6mXM29TyrrZ+sHp\nPNOgXbtslo6+QIN6Okc3fp0+zCatGIlRa/XnuF8ypZe8mK+3H3lXX8zOKgAUoemr7EkglKfeT3O+\n0F0v/JTF4h6WiAVa6oye87PKvIzV/PS5W7Rvm1ZmF9myjueAtADQXL1y0vwV6WLCFun7cX9mM8mr\nm+me/64e/mZFhnIicedkPf/1OmfociEGl+zhKM/qpakZj3nMsm/0ysRwp9dhdfXvc3LvQLc+F7Rx\nhhdb5d7E9a7PGNhI53U/uQGUFzt+fk93vZP1IgWJC9enrTpas1qmAZ7lAuQUodqz4d/sg5KFfV2z\njuVvb7yU9bH8051ncz6WAHxTQEitQhhKHaCQ2s6Q7NrBJwUEzjyPz2MeuZz/IKhqUfoOKFoypqnT\nM792/tItUBiKnbA4215h9bin9eBkU/0N0bCnntVt+ZjDY+ILd+rdVWarie7+6W4Ntvdmtk6f3PWR\nfjbDJap10+vvXZPhbvHqL/+jRye55qpSmarq16+zGpWVEg5s1pR/XBNkB5UJVExs/Mm/v+Y73fzc\nfO2xJ8+vrfNbNVLjygE6Gr5Ocxfs0s4EE1QpoTbDH9PrV2TsGmEmGb/zVWeeJUtAxVC1aV5XLesH\nq9SxSK3dFq6tW/drb2xylp/7lHb8rkdfWGg17z0kJyja/D2LX6kyKlcqPW5aqt1gfXlHphmL0r6f\nkcfzFDVDYx/4VfPt+dVKqGqrLrq4rfXdrCbRlrlLNNNMru4XqKCS8YqxDm2Xm0++22avZPPIuPTJ\nPM1E+k1rqXJAnMI37NE28138KujCDmX1z2ITTMs6Hez9613d+fkmpzHmOlcdapRWYtQerdt1RHHW\nOQhq2FpNj67Rsv1WA3DgaH0+Km9zcmU
+n/LzV5XaddQkxAyPTNKBbTsVFpXkNICt8127s0b3SND3\n49co0tnpV6662rdqp6vuvThP5zpxxke66mOnMe9XRo2sdFQt6aBWbjuhi5/wuCPpcT5z/o5R+vuV\nl/TmMlfXR79KDTX0wmaqYl03j25fpkmLInQ4pYSVL/ysfGE1TdsO0bQn+tvvTXOq/2Wdk37WOTGy\nPPeOApUTOf2PNeN1y0tznfMVqPrdu2hgPeuinRit5bOXaHGklbasfB9k5fuYU/zvROv/DLX+j/vc\nmmGV17/2uG7Ibub6LNN1fbW00naVgBjt2rhHYTv3aPv+eKvhn9X/zrlMSzdVT135uxZZW3b50q6x\nOoYGWWXbNi1YvFkbD7nKgsq9btTnd3bOXcU57ZiahRHqqkvrpqpdNlH7N63XrFXhOmiVl/YEqvc/\no/s85pYwtnzzvO743Qlc+IeoTZtqCjB5cE8N3f6tx92+COtzP2Z9brvssNJfqSpq0aquWjWtqnLW\n+dm8fa+2bQrXrqMmP2WV59O/d/o5dI5BwwoKOHZAy8P22/neCLLS7wdW+s3fvF6WtLQepCYN/bQ1\n7LD1uaxyJqSaVZ5XsLLTbq3edtQJ4pRQnYGj9MGobFZXsnmcX0fprsP1+/0Fm+w/LS9ZZVOdhs3V\nqbWT3pau1z/u4+FXT7e8+aCuznC5CtMXD7ylb514aUDVeupc288u07bXu0STPO7kFvi65lFulLbO\nXZJ17uxjmVZm5/VYAue2pz6brqXbD6lSnZz7i5wNxw7sUUBStH58ZrizB8ibX+as1wcTFumt69s7\ne4qO8EPxen3Sv/rvA0PVMPS09d8Gzmn0AMunNjf+n+6wJwu0xEZq2oSJ+vDrifpssgl+lVSDPtfp\ng6uymWGkSQt1q13S2jArqm3X9MlT7d/95p/truCXXxm1GnKTxmYKfhkB7a7R2w/0UitnYnR7/PT8\n+frM+v0PJyyxx2rbjQTTWGlcK21C+FzbZDUUMi+J7wS/jJSE2Ayv7d257+R5tjzu/JvGxqnmNsgg\npI8eu+cCNXBWnItc63y3r2faKwuqXEPd8sz1GpjTsJuANrrtkfR5s8xKZfY49qXb7eCXX7m6uvaB\nx/RYc3fXkqzVGHSbnrrMmfTbOVdmnLqZLyEupaRCu1+ht8Z2U90C5CL7fD55sWs+BiMlSfu3bXGN\nh1+6RRud4JdpzPe8bozGv3GjLh5ym8a9fIX6Ob+TcjTCSgNb5bQzcy2gz1Dd4h5XlxKrLeYYbYxU\njB2MyI8QXXTv9braTtvWnzwYpp9/dOWLb+ZF6LCVKjoOv1NPXeDFd3laD9ez1zdTBTtNx2vbvJn2\n9/vwx/l28MsEKJ96qb9yEwb1nLfJ1rSVLs5p2UaTrp+/Sdfa85hY7HS9yjnGMzXJpBc7+GUa/nWs\nipP9W/nQSp2dss0uX/5xfcfPJq93gl+utP/iLbkMfhntmjqLOSTroJW+J9nl5VT9vMQJfvmHqN+t\nt2eYWNXNzN0yLNSdl6O02p0HM0++V72/nvyPR75I2G+9d4m+cc7PdJP/7eBXSVVqXjvn+Z8a9dcH\nt7dVqFUOpc1RsdEd/CqhCq0G6qUHCxD8yiBArUY8pv8MqWulK6uciXL9vyVpARtzPRmhZ08ZsPGY\nW84WpF5dC77SZdP2zV3lsVmtc6NHenOOh6s8vT1T8MtoqBHXdVMdp/xPjDTlp6tMy9xvtjCva8EX\nXK8XLnOOZVqZnddjCQAAAF/kdT3AEjfO11+bzSxCZdVsQDdlMbz9lPYum6HFe63KtH8VnZftylyJ\n2jhnpv49bG1WqKdBPZpkUWE2QzIWa9KCddq8I0IH/aurft166tn3QnWpY707bJF+22D9gcAa6tGv\ntTLG8V2/O3XZJm3Ze0Bb90k161RQldCW6jnwPLU9ZXdTs1TsYs1fvUk7Dx7W1l0pqtyoksqVraJW\nTRv
o/E7t8rcKXOImzZqyXbldPzGgRpssVgwzx26ipiw6pFKtu+vagc3y3tU8Zpvm/7NI8zbu1bad\nSapUJ1SNO3XWFb3N34rSymlLtSNeqtx8oLo3dH4ns8RorZo5WVPW7NeenUcVUKe6GrXuomG9W7lW\ngItYob+W7rc+bQW1vLRLNkPPXEOmJk5bYZ2ng9qaVE4N6tZVn3591a2h+Vbp6aR07U4a0Da/d2s8\n0kNUesAxICRUHdt00Xkds+quH6N1k//Qn2sOKzGkuW4Z1SsfDXLrb1jHaIZ1HFwjmcqoTpcuGu6Z\n3j3SRMXGvdXrlJnOpM25mrFku7bsilBixepqWL+l+gzpoZbWl4haNVNzdlnN0ZDGuqxbfed3HKf6\nX2nnrKRqduqjTifHiG0FKidy8T/MMMN5k1dovumZcshfDWrXUKuu3TXoPDM/wjbN+2OzDlhbdbv1\nVrscksTGr57VXRMjra0SanvtM3ptaO7Sj0mTcxZv0rLwQzqwc5+SgmsrtKx17prWU4vz2qtjlpk/\nN2VauvT/cUDhW+JU1ipfKofUU/eeFzhpP688yiwrL62PLa0W1cqratNW6nNhF+X4JxN3adGUeZq9\n0Urr5rl/BbXq3luXZblaoWdeOmJ99iPyr11dlctWUKOm9dXeKhubZFm+evQAc/dOjPlXf/4wT8ut\n8t2V92uq2/kXqnubQhgyErVGU+bvlXVk09Jo4s5l+vWvJVrnXE9Ca9TwKGtyIXGmnh31s+aagxTc\nWS98cqPHHDcFYMpSK08vXh2uXRER2mNdzRqEVFSdTu11yflOeZoN853+mr5Sa51yLcBKQxcOuiib\nvJvP61oWPUczH0tzbc5/2gXOLfQAgy+jBxhwbvO6ABgA+AaPIWJ+9XXb+5knCsWZlUUAzMt4Dmuu\n3Gekvrv9HJnnKosAGID8IwAGX0YADDi3MQQSAM6GpfM11Rm3WrpDBw2mnoMCidLEOf86E/QzyTsA\nAACQGQEwADjjEjVt9modsLcD1Kld14IPqcO5bcs/mr7BmV2rdhN1y25MNwAAAHCOIgAGAGda4kIt\nXuOalluBTdS1D+EvFMyWxZu01dlu0qFztnMaAgAAAOcqAmAAcIZF/b1c8+Nd25XP76g+rk0gn5br\n77kRrk2/+uo1oI5rGwAAAEAaJsEHgDMtJlLbD5gIWKAq18tqhU+ceYmK2hWho6lSiXLVVfuUK/EW\nJd782QtDjCK3H7TnPytduW7+VkAGkIZJ8OHLmAQfOLfRAwwAzrSgqqpXr671IPhVdAQopLY5J3W9\nMIDkzZ+9MASpqp2fCH4BAAAgewTAAAAAAAAA4NMIgAEAAACwVQsOUolixZxnRcvx1BQFBbJwDAAg\nfwiAAQAAALAFBforOSHWeVa0pCYnqm71is4zwDcR5AVOHwJgAAAAAGxm8u2UlBQ72FSUmN5fJ5Lj\nVKdKOWcPkHemh6MRHZNk/yxKwvYds3+aIDSA04MAGAAAAABbmwbVVblCkA7tDXP2FA1mBchiJ1LV\nrWUdZw+QdyZ9G2t3H7Z/FiV7D8WrQY2KBMCA04gAGAAAAACbaXw/dk0PJcUe1ZF9O5y9Z1dsdIT9\nGHVRB7uHGpBfJn03qBGsKWsinD1FQ/iheK3bc0TdW9V19gA4HQiAAQAAAEjTtmF13Tm0ix10OrBt\njRJjjzqvnDlmyKOZiyx69yY7EDeke3Nd0aOF8yqQf2/debHik1L12axtzp6zy3yW35ftUbXgslYa\nb+nsBXA6FDthcbYBAAAAwBYWHqUnxk3XgcMx9nP/gFIqUcLP3j7dkpMS7LnISpfy112XddGATo2c\nV4CCm79up576bLpCKwZqRLe69s8zzQS+TM+v7xfsVGLKcT03sp8dfAZw+hAAAwAAAJCtfdExWr01\nwvp5TDHxZ2bycNMbpk2DavZP5kTC6WACvE99Nk2Rh1yrnraqVcH+e
SZExybpUEyS4pJS7HT+yIie\naRP0Azh9CIABAAAAAM5JpjeYCYatCttnPTszTeMypfzVqGaIvagD89oBZw4BMAAAAAAAAPg0JsEH\nAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADw\naQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAA\nAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMA\nBgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwacVOWJztc1pM\nfJL2RR9T5KEYe9s8DLMPAAAAAACcm4IC/Z2fAfZ21YpBqhZc1noE2fvhHc7ZAFhYeJRWb92neWt3\nKiY+UfsOxSjWCXoZpUu5EnjpUiUlQoQAAAAAAJybikkHD8c6TzJq06C6GoYGq23D6va2O1iGouec\nCYCZHl0m6DV/3U5NXrrFDnaZIFftqhVVq0pFhVQoY2+XDvBXSPkyaQEwAAAAAAAAIy4hSXGJydod\neUi7IqMVdSRWm3ZG6qD103AHxK7o0ZIeYkWMzwfA0oJeS7bYwxsrVSijdo1qqV2Tmnbgi0AXAAAA\nAAAoCBMYW7l5jzbtitT8NdvsfSYY1r1VHV3Ro4X9HGeXzwbATI+vL6as0JSlW2S+YbvGNdWvU1PV\nqlrReQcAAAAAAEDhM8GwaUs22gEx45ERPTSgUyN7G2eHzwXA0gJfS7bYU3ddekErdWtVn55eAAAA\nAADgjDJDI/+Yu8buFVaverCu6tWSQNhZ4jMBMBP4Mr29fp69Tses7b6dmtg9vgh8AQAAAACAs8nM\nGTZt6UY7ENagRrCG9SQQdqb5RABsVViEPpiwSFv3RqtJ7aoaObirKpUv47wKAAAAAABw9pkhkZ9N\nXGj3DOvWso7uHNqFyfLPEK8PgH05ZYX1WKmmdapqeN8OzPEFAAAAAACKNNMTzAyNjE9K1nM391Xb\nhtWdV3C6eG0AzAx5fH/CIk1dusWe52vIBa2dVwAAAAAAAIo2s3Lk+z/PsXuFmZ5grBZ5enllAGxf\ndIxe/m62wvZG272+urWu77wCAAAAAADgPcZPW27PD2YCYCYQhtPD6wJgZr6vV76fo9TjJ+y5vsyc\nXwAAAAAAAN7KBMBMIMxMkP/WnRcrKJAF/QqbVwXAfpmzXl9MXmHP83XzJUx0DwAAAAAAfINZKfLV\nb6ereDHpzTsGqWFoiPMKCoPXBMCmLN2i939bpPNb1bfn/CpdimgoAAAAAADwHWZ1yA9+nq3oo7H6\n7smr6QlWiIo7P4u0sPAoO/jVrnFNgl8AAAAAAMAnmZFuD13bV8dPSPd98KezF4WhyAfAzGqPL38/\nRyFWIri6bweCXwAAAAAAwGeZuMfD1/bV1r3Reuqz6c5eFFSRDoDZwa/vZturPo68pCvBLwAAAAAA\n4PPM3OdjhvXU/HU77SmhUHBFeg6w9ycs0uQlW+zglxn+mFdxCUlauXmPtkdEa+POSHsMbWJSsvPq\n2VU1pIIahgbbibpd41pM6F8EmfSzadd+bdyxz/oZqagjsYpLLBrpJ6RcaTWtU1UhFYLUrXWD05J+\nzASMG63vHRkdU+TyT5O6NVS7cjnr+5dRv05Nnb043Uw+MGlhk5Un4hKTtHv/EeeVs69to1DVrhas\n2lWD83W9yA13nti9L1q7rO2iVCa0bVhdlSuWP615wl0mbtsbbV1bdxepMqFCWVeZWKXi6SsTAV/i\nWZ6buWaijsY5r5x97vLc5OnTtdq7N5Tnpo3QrXV9Z+/pZcr33futY2LqvFa62L3/cJE5HqUDSqpx\n7Sp2mmhvtZnMccG5p6jXQepY6
bNONZNnT08d5LNJCzV/zTY9N7KvurWs4+xFfhTZAJhZ8fGDCYs0\nvF+HfFXmzYX9v78v0LH4JJUoGaDifgEKKFPOefXsS0lKVHJirJJij9rPTZDvTF3kcGom/Yz7Y4Fd\nIQwNLq3gMv4KrRioUv4lnHecXYdikxQeHaewyBj7uZkbb8gFre3twmAK2O+mLlPy8RN2/ikZWE5+\n/gHOq2fX8dRUpSYnKDkhznrEqpRVMbpl8PmnLegBV6Xj9zlrNH3ZJgX6+1l5oZSdLypa+aIoSEhK\ntfLCMYUfile8tV2rSgWNubJXoVZAzLLUf1jHwFwyg8uUVMNqZYvU9492ygRzDExjYWQh5wlTJn75\n9zJFRh2Wf2AZ+5paspT1s0TRKBMzX1PzW3cAfJ0pz8dPW6b5a7db5XkJu27TsGrZIlO/yVyeN6ld\n2SrPuhVaeW6+//w1W/XH3LVFtjw33z38UJyiY5Ls69koqzw/nUEfEwycMGe1Vm0Jt9OESQ9Fqc7r\nThPuOq8p200Zj3OHSaOfWO36iIOH7faIX0CZIlUHMW2TxLgjaXWQ05VGX/1muvYcOKS3WBmyQIpk\nAMwMebz1jd90fsv6eU485sL26+w1mrl8k/zLlFPFGg3tBnxRdnhvmOIOH7Dvco0Z1oOhnmfZ91NN\nxXCrffEf0bWOgoOK9vmYsjpCk9dE2JWkh6/rV6D0Y/LPpxMXavWWPSpdobLKVa1rXVz8nFeLnuOp\nKYres8m+4FAhOj1MpeO9n2ZZaSNRA1pXV89mVZxXiibTcPhs1la74WC6jBc0CGR6RoyfulQrrYaB\n+e7mGJgGQlFlGozfL9iptbsPF1qeGD9tuWatClMJq8JZtkodu9JZlB3Zt0Ox0RGqWaWiHrmuL9dU\nwGEC2e//NFsnThzX0I411blB0W5AmYDH9wt2FGp5/r1VnpseTp0bBBf58txcz76fv8P+ebrqOObm\njrnBZQKBA1pXU6taFZxXiqbZ/+7XhGVWHTWgpMZc2fO09RBE0fH73DWavGij5Oev8tUbFPk6iKl/\nmHpI9UoVdNuQwg1em3baa99OV2Jysr5/8mpnL/KqxFiLs11kmKGPBw/H6bah3VXSL28Xpj/mrdfM\nFZutSnptVahev0g33t1KlQ2Wf+ny2r1ru/ZFHVXn5nWdV3Cm2b085q3VJe1qWI9QlQss6bxSdJk7\nl61qV9Ds9RF25dZ0vc2vj36bp3Xb96liaCMFVQpVseJFe50M8/lKV6hi5/MNm8Pshm6D0ErOqygo\nc6H9z7i/VLZUCd3Zr7GahRadXrTZMXnWBKr2Wg2GSYs22cMlygcFOq/m3WcTF2hnxEGNOL+O/XdL\nlijaecJ8vnZ1K9qNur+WbStwnrDLxLlr7Wtq2cq1rfpnKeeVoqtUUAX7uro/Ilxbdu9X9zb5LxMB\nX2GCP699M001KpbSbRc2tOsORZ25AWmCdPuPJhZKeT7OKs93WeX5lefV8ory3FzPzm9cOa08N0P8\nq4cU3nXY1Bm/+nuJWtUspxsuqGf37C7q6lYuY5+7sH1HNWPFVvVq3yjPbUV4DzOV0deTl6h0SKjK\nV6vnFXUQ/8Cydh3kUPRBzV+9RRef38J5peBMWjd1uj8XrFe14LL0AsunIlfyrwqL0NSlW9S3c9M8\n37U1PRWmW5X1wIrVVSa4urPXO5jhmcG1mtgZ3Txw5pnKobkLZi6s5lGU7wpmZnqrjezVwKrMHLAb\nrPlhhj2anl/m7oopuL2Jye/mYXqqmHIAhcMMkzE9Be7s16jI94TMbGSv+na+eO/nWc6evDN5yfT8\nGtoxtMjfFc/MXY4VJE+YMvHXWWvS8pc33FByM3eIK9ZsYgfA8lsmAr7E9HwqVbK4Rvas71XluamL\nmfLcfOaCludmiJ+3lufmM5sbMqZcLgzmBpdJE2ZKA9Mb0JvqvK400cCun5igJnyTSaP/+2OB3SY
p\nW7mm19VBgms3V0Jist7/ebazt3C45g+vaXcYQv4UuQDYl1NW2OP8u7XK+3xYX09ZZnePLBNczdnj\nXUwGN49PKczPClOxCLQqh6ZLvDdqWDUorcGb1wqSuch8O3WZPezR24JfbuWr1bXnJnr3p8K90Jyr\nTCDezBFT1IeI5MRUkKOOxOUrAGLykJnzy93w8EamUeMKAuYvT/w2e7WOFytuVzy9kbmxZAJ3v81e\nU2iNRsAbmRtc7uCPt5bnpheyKc/Nd8krXyjPTS9kE8D8bOJCZ0/BmOviwcOxXhf8cjOf2RwTk67p\nOOCbTO/zlOMnvLYOYgJ27s4tprdlYRrer6Ni45NYFTKfilQAzCzvuXrrPl16Qes89/4yF7ete/Yr\nqJJ3RYgzq1CjgR0tLuyMgpyZAJBZWcTMf+CtlUPDHbwzq6PkhfnuZiWVspVrOXu8k8n/ZlUYcz5R\nMCs37bKHQ5gGg7dyD58x3yWvTK8pswJWj6be+/2NAW2q26ub5ScAZCpt3tbzKzNTcTZlW17LRMCX\nmDLQni7BS4M/hrs8X5GP8tzM+eXt5bmpm5o6amG1D8xCAGYeNHOTxFuZ9BwcFGB/F/ieJf/uUqly\nlYv8nF85MZ0KzKT9ZsXdwmR3FmpdX+//Ri+w/ChSAbAfZ62zJzPMz2qI7iEe3pxJDNPQMA+zJDPO\nHLP0s2FWvvFmpoJkKjNmWe+8OHgkRiWsdFfUF4w4Ff/Srrkx3OcT+Rcbn+DVFWM3s4LrrsjDzrPc\n2xUZbVesvW3oZ2buMi2vwyDN+03gyNvLBHM9Nd8hMtq1ehhwLjJ1Sl8pz80Nu7wydRx7RW8vL8/d\nc3Tld1i7m7lJaHrT1fCBNNGqVnk7wAnfYm7aHYmJs3tyezuzYuW2vXlrl+XG8L4dFGvlZTN6DnlT\nZAJgpgvf2m37NKRHK2dP3piLW8mAUl5fWTfMd/h3B4X5mWQqhyZ45O2VI8Pc5c1rd/B1Vt4rUaro\nT356KnZjlwByodi8+6AaVA1ynnmvGsGBik9MznMPqF37on2iwWjKNfMwAb282Oj0MnAHlb2ZuQO7\nI483BQBfYcq+qKNx53h5HmUHz7yd+zu4y+f8Mr2CDW+/6WuYIJ7p3Qff4isdWwzzHcx8pIXNjJYz\nnYZ+nr1e+7jJlydFJgD28+x1at+4Zr6XszXRf18IfhklS5VOuzjhzIg6HGv39vAFgSXzPoTTDBss\nGeD9FxmjhH9Age+OnutM48JUKE3gxNuFVszfHXPTi9AXAuKG6TWQ1+9vykT/gFJePfzRzXyHiIN5\n7wUI+IKoI66GkS/UcfJfnh/2ifLcdaM2wC6fC8J9k9AXrvHudE29z7eYji2GL7TtTbve9Kg/HdOz\nmF5gJ6yf9ALLmyIRADMrP27dG63zW7NUOVBQpXygQgMAAFBY8nNzEACKMrsXWKv69jzqyL0iEQBb\nvTXCPoFNanv3ZMMAAAAAAACnmxkGGcOKkHly1gNg5oRNXrLFjl7mdeVHAAAAAACAc02tqhXtVSHN\niDrkzlkPgJneX5GHYtSuSU1nDwAAAAAAAHLSrnEtzWMYZK6d9QBYWHiUM/wxf5PfAwAAAAAAnGua\n1Kmq2PgkeoHl0lkPgJkTxdxfAAAAAAAAudeucU27Q5FZVBCndlYDYGb+r9Vb99nd9gAAAAAAAJB7\ntapUpAdYLp3VAJgZ/miYbnsAAAAAAADIvSZ1qmjVVgJguXFWA2Dubnpm5QIAAAAAAADkXu2qwfY8\nYPuiY5w9yM5Z7wFWu2pF5xkAAAAAAAByyz2n+r7oY/ZPZO+szwEWGODvPAMAAAAAAEBumUnwDSbC\nPzV6gAEAAAAAAHgpM61UTHyi8wzZOasBMAAAAAAAAOSf6QXGHGCndnaHQCYkKbBUSecZAAAAAAAA\n8sJMLWWmmELOzmoAzKxUUKl8kPMMAAAAAAAAeVGajkW5ctY
CYEQnAQAAAAAACsYMgSTGcmrMAQYA\nAAAAAODFCICdGgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAA\nAIBPIwAGAAAAAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAA\nAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAAAIBPIwAGAAAAAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8\nGgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAAAIBPIwAGAAAA\nAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiA\nAQAAAAAAwKcRAAMAAAAAAIBPIwAGAAAAAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA\n+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAAAIBPIwAGAAAAAAAAn0YADAAA\nAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcR\nAAMAAAAAAIBPIwAGAAAAAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAA\nAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAAAIBPIwAGAAAAAAAAn0YADAAAAAAAAD6NABgA\nAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAAAAAAADg0wiAAQAAAAAAwKcRAAMAAAAAAIBP\nIwAGAAAAAAAAn0YADAAAAAAAAD6NABgAAAAAAAB8GgEwAAAAAAAA+DQCYAAAAAAAAPBpBMAAADgN\n4g8dUMT+GCU7z88tCUo9GqmUhHPz2wMAAKDoIQAGAEAh2zH9Jz310e9649PxevKntc7ec8Vuxcwb\np4MLf1bU7E91eJezGwAAADiLCIABAApX5FL9983P9eCLn+mJ92dqlbP7XJJ6/LizZW2npm+fG1J1\n4oSzKeu7p20D8ErneJm+b94kPfXqp3rwlS/0/KSNzl4ARdaxhTo042NFTvlI+2dPU4KzGzAIgAEA\nCldcog4lpFobx5V4NF5xrr0AAG90jpfpsXGxik+xNlJTdDgm0bUTLtHbNXfqdH318yxNWbf3HB3y\njyInKcHKrq4y60RCHPfhkAEBMLe903Rw+keKnPKxDixdKHOdA/Js3Uw9//pnevDFz/XUt0u1z9l9\nTiEvAQAA+L6t/+r3ZTu0ZnOYps3dJka8AyjqCIC5Jcc5w1RSdTwhwQzaAPIuPl6Hk1zpKP5oomJd\ne88t5CUAAAAAQBFDAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAMBL\nxcXHO1sA4Jso5wAAQGEhAIYiY8Y/c5wtALkRHxev5154w3kGAL7nuRfeJAgGAAAKBQEwFBl/TJqi\nidYDQO7t3rOXIBgAnxUVFa1nn3+DIBgAACgwAmAoMk6cOKGJf04lCAbkEUEwAL4sOvoQQTAAAFBg\nBMBQ5BAEA/KOIBgAX3bo0GGCYAAAoECKnTDdbs6CmPgkXfrE1xp5SVd1a13f2Zt/7/88W//uPabg\n2s2dPXm083dFbtzj2i7dXBUv6C1/17Mz7vDeMAX7p+g/twxy9hScqTCa+YKKsv88/7oSEhKdZ9Lg\ni/tr8CUDnGen1/hpy7Vh6y49eHFTZ08+Lf1LD07b69qu2FSjR3dXA9ezM2b2v/s1dW2k3nvgKmfP\nqT31vz91JDVQ5avVdfYUwFnOSwe3r1GnRlXtsqWgTCDWBJaKqngrX2/avNV55lKrZg099cQDzrP8\nOXgkVo98MEEje9VXq1o
VnL15sH2eXvl+ow7YT2ro8scH6Xx7+8yLjknSc7+t05hhPdWucU1n76k9\n/MFvalkjSEM75v53PG2d+oM+WnbM9aR+J70+vI1r+yz4YNoWlS1X3j4GuWXKxNlrdqhSg3bOnrzY\noWNz/1RcnOtZQNM7VaGOa/tsOHZgj5KP7tMHeSgTzxULFi7VqtXrnGdFk+fnK1asmCpWrKCnn3xA\npQMDnb3IyaZdkXr1m+l68JJmCq2Yz2NWRMr0gpTnneqU14A21Z09eVOUyvPnfluvDs3qaXi/Ds6e\nvJu/Zps+m7RQL17dRoH+JZy9+XSW671hkTH6YOpmjR01SLWqVnT2wttNW7rRrofUaJ7PunzUTB1c\ntkGp9pOaKjdgiM7WFSPhWLSid2/Se/dfqdKlTm+LyOTrfQeP6H8PDnX2ICsEwNx8PABmJpj/4aff\nnWfe40wFwQiAEQDLyocff17kG4dZKWgQjAAYATACYOcGUy+YOWuejh8/7uwp+giC5Q0BMAJgmREA\nQ1FHACx/CIDlDkMgUaQxHBLIO4ZDAsit4sWKOVvewdy3ZTgkAADIDwJg55Dixb3zdBMEA/KOIBgA\nX0UQDAAA5AcBMHgFgmBA3hEEA+CrCIIB8E6x2rP0b81culPOTAGnkNf3J+rIv7P099z1OpLk7AKQ\nhgAYvIKZ82PajDmKiop29gDIjcaNGzpbAOBbTBCsXdtWzAUGwHvMfVqDB12uUYO66LW5zr6c5PX9\n4V/rjh4X6c7LO+qO7505eQGkIQCGIq9EiRLy9y+pkTeNUEhIsLMXwKn0ubCHrr5yiPMMAHwLZRwA\nr5MYpzz1Wc3r++PjFeNsAjgZATAUaSb45edXQqNuvlZt27R09gI4FRqGRUhqkiIiD5ziceSkCm58\nxBYtXrxIE6cu1eINkXmrABchJ1IilXL0FI+4BOfdbglK2bdKcZtn6tjWZUqIzvw6Csvxs7MYeIFR\nxgHwSj0e0FevvKBHxk3Sbec5+3KS1/c3vFovjLPe/8rfevbK/K1kDfgyAmAosgh+Afnjaw3D+EMH\nFHks2XnmhXau1hvjfj/FY6r+2eW83/LvlB/07OfzNGnRVq1ct04/TZioF8evVpTzujdJCvtZUQtP\n8VixWOlneIuOzf5c0euWK/5wlJJ2LNORpV/r8E6GwJ8Ox48fd7a8B8Ev75Z8LEoRh86xoHZyjCIj\nt2vdhj3efT1Dwfk3VOeR9+u2Szuomr+zLyd5fb+qqOWl1vtH9lKD0s4u5NnxuEilJGSRV1OPKeXY\nMXnnrSMYxU6YCRTOgpj4JF36xNcaeUlXdWtd39mbf+//PFv/7j2m4NrNnT15tPN3RW50xkmXbq6K\nF/RWrsqY0+Dw3jAF+6foP7cMcvYU3IKFS/XHpCk6UYQruocOH3G2znzwa/y05dqwdZcevLipsyef\nlv6lB6ftdW1XbKrRo7urgevZGTP73/2aujZS7z1wlbPn1J763586khqo8tXqOnsK4CznpYPb16hT\no6p22VJQZhL5ojzvXFxcvL74arzzzKUwGoYHj8TqkQ8maGSv+mpVq4KzNw+2z9Mr32/UAedpbtQ6\nf4ju6VXZeeaI36O5f83XX5uOKbl+J70+vI3zQu5FxyTpud/WacywnmrXOPd3Qh/+4De1rBGkoR3z\nd/d069Qf9NGyY86z3CirniOu1uB6rmf/LlioxIYd1bZKSetZstZN+klfrElWu0tu1LWtXe/JrQ+m\nbVHZcuXtY5BbpkycvWaHKjVo5+zJix06NvdPK306T3MjQ1mxRXFhx1WqXhMVL2E9Td2tmMV/KDal\nmSr2uDDP5cmxA3uUfHSfPshDmXiuMOXbwahDzrOi6Y23PnS2XAh+5c2mXZF69ZvpevCSZgqtmM+5\n0gqrTE8+qFUz52vCygOKKZf3OlJByvNOdcprQJvqzp68yXt5Hqz+oy5X/6quZ+FLp+jrm
bt1MMX1\n3KrlqkbnC3VX3zoyJXxePPfbenVoVk/D+3Vw9uTd/DXb9NmkhXrx6jYK9DeFbAGc5XpvWGSMPpi6\nWWNHDVKtqhWdvfB205ZutOshNZrnsy4fNVMHl21QqvM0N0rWv1LBjao4z1bo8BSrHmbVOEq1vEnl\nQz1yqt3OkcoNGKLclKgJx6IVvXuT3rv/SpUudXpbRCZf7zt4RP97cKizB1khAOZWqI32ZB23EvuJ\nUlVVIq9XNsvpCIB5g7vve1wJCYlnpedX0QyAJevo/sNKKVtZwXmos/pGACxBqUf3KyUuWSXKhcqv\ndCln/6kVZgCsqDON18eefMF5VngNw7MRAKvcsb8e6V/beWbZvkQf/L5eB4Krq+qhPdpa1bsCYIre\nrMlztmt/WoPnFPyC1fXiTmqU3TXDKVua97tFIzs5+3LpzAfApNSIuYrZdyTXd0iLlWuucg3qq5jz\nPLP45R/o6NFGqtC7vwKcfblFAMy7/d/oB5wtgl/5cbYCYCeV6Uf/1Y/fLtJK1bDKud3akOzLAbCM\nNzSWTvpD+2qdp75tqiowPlKzfpusSTtK6vxh1+jyxq735FaRDoD5l1fjuuWUc43NXw3P763zazhP\nk/dqycxVmvtvlKKTTyigdAW169lDg1vkru5BAMw3nY0AWInaF6tSM3c7yBUASype3GrPN83YliEA\n5vUYApmVuA06NOUDReb4+K+OhDvv93Di2HodW/yVDi74WYfCdjh7kVtmtUefGfZ4aKM+evFTPZjj\n40t9u8Z5v4fk/Rs18avxeuHT3/W/uR7jorxNPvLS8X2zdWjm5zq4cKIOr56sqLmf68CKFXm6iJ2L\nilTDsGYttasdpHJlAnP5CFLNkEwV16QSatBnsJ64oblqnt76wukR3FgDhw7QDcNy+RiaQ/DLcjTJ\nRNJKqWyQ63lRV6L6BSrf7hJVyOWjfA7BL8WuUOJhq8ISXKMAN6bg7Qh+nUWFUabHp1h5vZ+eHj1A\nTb2kHHOr3aCOGlbI6ntm86gQrCohzi9bOl1yqQab4Jd5ElhVvRqZnnFx8hj44BuSjmjz5t1ak+Nj\nqzZ61Pm2zpyv37eXUMvuXXTDoFZq4h+l2ZOm6c/tzhuA/KhQR6UqllXxgMBcPsqqZBmPTOvwr9FI\nJeI2KnYrUzD4EgJg+ZasE5nu7B8Pn6KoxXOVGFBJBbyfco4qZq/2eG7N+ZWsxERn03F0zT9644uF\nWm8VxE7veR+XMS+lHj2i4nUvUuW+d6pq3xtUoWaQjh9YrKPb83L39dxS5BqGJeuo/3XD9fQ91+by\nMVzXdijr/LKjSQcNbFUpz8NDfNMuzV6zX6pYV+2bObt83bFVOrZykg4v+U4HFi5VSsXOqtCyZfZB\nMvg0gl9nWWGU6VVbqX+XmrnqMVHUlGzQRbffkdX3zOZxRz+dV8755Swk29ORlFRAXruzFkWB/nk8\np8VV3KP1WbvrRXrytgEa0KGRmjRvp6t7N1BI6hGF7cxLf0MgkxL1FdT5BlXuNTKXjxtUvnamMssI\nOk9lKpVQ0o5FSuBOvM8gAOZWqZ5KBWUVEc7uUVl+me5gHU8to9Ltb1KltrUIgOVDmTKlvT/41aCO\nOlbO4m5gto8Q1ajk/K4jLrm0ul09Qo9eEap8DD47+wqYl0o2vlTl69V1zf1ToqwCqtW08tNxpSZ4\n4/Tfpx8NQ1+XrC1TF2ve0fLq2f+8Mz6n4FlTsrz8KlaTf0h1+Zfx1/EDy3Vk/Rp6gp6DKOPgWw5r\n3ub9UqkaauYLNzRa9tXjo4fogVG5fVyhqz1Gb5YsG5RFAK24ypfNNH8ccFaUVWDD5ip5fLtiNjOy\ny1cQAHMr01rlu2UVEc7ucZWCMvWU9KvdXaWDcz9XETK66Ybh3t/zK7iFht+axd3AbB+XaaAzR4Rb\ntQ5ddEEdL05HhZCXMnCmKSzux1I2mYWEBNMw9HFRS
6fpq2XxanJhPw1ucA71hytVT4F1O6p0g94q\n3/U6VaxXQccjFikmi6kH4Nso4+BLopYu0D+7/dT8gk5q7yNFemDFyqpeNbeP8tn0GEvW0fAN+ml6\nmA6HNFa3PC72Apw25c9TmeqllbpnueLOscVrfRUBMBQZTRqfM30bkGvJSty3U6nFQ+Rfxb0yC3Bu\niFo1TZ/8E6mKnXvq+k5e2R+0kJRUyZCqVoUlWalx+519AOBdkrfO02f/7FO5jud6mZ7RgvGuOXGf\n/XKBFsdVUq8Lmqou8x+gyCipgIZtFFB8n+LCNuV6cR8UXQTAABRZJw7OU0xEgvxqdlaZ8s5O4Bxg\nN5Sm7FbJdvlbKt+rHdmohGjP26zJSorco1SVll95AuEAvE/y7mUa9/tGHa3TQSP7n2Nl+imcP/wW\nvf74LXpudB8Na5CkORP+0NvTvXgBKPieUu1VpnaIjkesVGymuZvhfQiAASiaYlfoyOoNOh58nio0\ny2GFOMDnhGvq9I2KTC2mo//O00vvfKtn3Y9vl2qf8y6fFbtdx5Z+rv2zf9DhlZN0aN7nOrw7RiWq\ndVAZ4l8AvE30an390yqF1+ike4e3UU6zPpzLAivWU5dL+6pvjeOKXL9V65z9QFFQsn47lfKPUtzu\nSGcPvBUBMABFT+y/OrpysZICmqt82/YsKnEu2r5c3/88RV/9vEYbY63nB7ZZ29bzaVbacL3Dh1VR\nq24dNLRPWw3o0lwXej7ahPp+46nGRQrp0ltla1axA9/FQ1qpXMfrFNKmNWUB4K2O/qtJdpk+RcvN\nAn+xEZphnk9Yrq2ud/ioA5r+x3JtSCilSsX26U/nGJw717O8KuFaBKlkCfnCIpnwISWaKKhuTSkl\n2dkBb0UADEDRkrpNx1bOUoKaqkLX3vKnxXuOK6mqdWupdXVnEYSUFMW5tnxYSdVu2U7dz8vi0bLG\nOTF0pnj5pgo0E+C3u0Tlm3VVqZCy9AIFfET56laZXrec7OV+rDI9yd7rq0opODRUrRtXVkXqM5mE\na8q3f+jXZdsVHW89TY5R2NxZ+me3VKFuTTVyvQk4C8qqZJX68vdYpd4oXucClatdRwHVq3NDzosR\nACtEx8Nn2cM1Dq8MU4p5HrXCfn5kG8umIveOrpnn3B3cpr3W82Pb19jPv19wbsyHkLB+huJii6u4\nf6zi1pj85DzWLPHxSjIyqNdBI4YN0A2ZHxe1UjXnLQAAL1GumS7Jqkwfdp6aOW/xTWXVvl9W39t6\n9Gumcs67zk1VVD9UWv/PDL341qd68LXx+nhBlMq16KJb+tV33gOcDY1Upt1FKn1Sl/tglWp2iSq0\n7ix/Zw+8DwGw06K0/KrUkX8ZV9Y4cZyuksiP0qrduJYah7j6eyQnnxvpqHhQTQVUCZUfM8QCAAD4\nqJJq1OtSPfWwmQB/iB4YdaWee+QmPTSkpapRBwRwmhAAK0TFQ3upQrtLTn40pBMvcq9c6+5Z3yns\n2cB5h2/zr39R1vmIuy0AAAA+J7BiZVWvWl6BznMAOF0IgAEAAAAAAMCnEQADAAAAAACATyMABgAA\nAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMI\ngAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAA\nAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwA\nAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCn\nEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAA\nAADwaQTAAAAAA
AAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAY\nAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACA\nTyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoBMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAA\nAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAAAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfBoB\nMAAAAAAAAPg0AmAAAAAAAADwaQTAAAAAAAAA4NMIgAEAAAAAAMCnEQADAAAAAACATyMABgAAAAAA\nAJ9GAAwAAAAAAAA+jQAYAAAAAAAAfNpZD4DFJSY5WwAAAAAAAMiLuIQkBQX6O8+QnbMWAOPkAAAA\nAAAAFBwxllM7qz3AylgnyEQqAQAAAAAAkHdxCckEwHLhrAbAOEEAAAAAAAD5F3UkhvhKLpzVAFi1\nimW1O/KQ8wwAAAAAAAB5Usz5iRyd1QBYw9BgAmAAAAAAAAD5cPBIrA4ejlXbhtWdPcjOWR8CGZeY\n7DwDAAAAAABAbpnhj0aDGiH2T2TvLPcAC7EnwTcRSwAAAAAAAOTeLmdUHXOAndpZDYC1aeDqordp\nZ6T9EwAAAAAAALlj4ikNagQTAMuFsz4EslpwkDbtIgAGAAAAAACQF7v3H2L+r1w6qwEwo1vLOlq5\neY/zDAAAAAAAAKdiFhVkAvzcO+sBMHOimAcMAAAAAAAg9zY6o+mYAD93znoAzD0P2MrNu+2fAAAA\nAAAAyNnmnZF2TMVMLYVTO+sBMDMPWP9OjfTH3LXOHgAAAAAAAGTHjKJbsXmPureq4+zBqZz1AJjR\njmGQAAAAAAAAuTJ/zVaVCfTXgE6NnD04lSIRADMT4ZueYH/MXePsAQAAAAAAQFZWbd6jtg2q27EU\n5E6RCICZE3ZFjxaav2YbvcAAAChkpUvlvWKUmpribHm/QH8qhgAAwHdMW7pRuyIP2XEU5F6RCIAZ\nV/RoqaoVg/LdC6xS+SAlxftG8Ox4aqpCypdxnuFMCCxVUtExSc4z77b3ULyVfko7z3KnjNU4Tk1O\ndJ55txPHU1U6gMZuQZQOKGn/9IU8ER3r+g61qla0f+aWuab4Splgvkde84Q5XqkpvhEAS06Itct4\n4FxUq4qr7AuPjrN/erP8l+dlFG7VjXxBfFJqnr9/Zu4bIu7j6c3c6Zp2k28xdTDDF9om5jsE+JfM\n143InJjpo6Yv2WiPpGvb0LWoIHKnyATATC+wmwa2z3cvsCZ1qup4aopvZJSkONWpVrCLG/KmdtVg\nq1KR4hMN3rDIY2kV3txqVreqkuKOOs+8l2noJicm2OUB8s9cpGtVLq+tkTHOHu9lKseBASXtBlBe\nmDLB5CVvZ8q06JjEPOcJ9/tNnvJ25js0rFnFeQacW0x5HlKutA75QP3GlOfmBk1ey/NaVXyjPDdB\nPFNXLWhDukltV3kYts/7j4m56WvqK4UdXMDZ5U6jvlAHSYw9qhqVKzjPCo/p/RWfmKwbB7Rz9iC3\nikwAzDCTt5leYOOnLXP25J6vZBR3A940vnDmuNPP2t2H7Z/eytwZNA3epnXzdifApDdfCCC7P39B\n747CBECq+USDwQTx8tpYMkwAyOQn8/Bm4Ydcd8fzmifcvQC9PTDuLtcahoY4e4BzT6UKQT7RA8qU\n57Wq5r0h2bRuNZ8oz90BK3edNb/soGj50nbwyNuZdF27Gm0mX2PSqOk1lZzg/T1XUxJj1bBG4dZB\n7N5fSzdpQOdG1G/yoUgFwIwxl3XRys17tGlXpLMnd0xGqVmloo7s2+Hs8U7HDuxRKSvDF/Tihrwx\n6cf0mpqzcb+zxzvN+Xe/q7dLhbw1+N3pzZvzj2noxh3apwY1q+Qr4IGM3A0Gbw4
Km4pxmNVgatek\nlrMn99x54vsFO+2f3sicvzkbD1jfpXKe84QpE9s0qmlfk7xZbPQ+uxJNr1Ccy7q1bmCX5aY89Fbu\n8tzcnMkrd3k+YZn3lmemPF+6Ldr+LqZ8Lqj2jWvbacKbRz6Yz296BbZrUtvZA1/SsWktxUVHePXN\n+Vjr86ckJVpptKazp3CY3l/FrJ/DerR07UCeFLkAmBnH2qBGsD6buNDZk3t3XdnTziQmsXmjuMMH\nlHAsWrdcen6hXNyQN2Os9GMqAt5aQTIVw9lWY7dfpyZWBSlvjT2T3sYM62mnP/PwRqahezwxTlf0\nau3sQUG0a1xT7RqF2gEgb6wgm8aCycsmGNyvU1Nnb+6584Q3NxpNQNw0Gof0aOPsyZtbBne1A8vR\nuzc5e7yLGXZw7MBuDTivKUFxnNO6ta6vWlUq6LNZW5093qUwyvPh/TpoydYory3Pp6yJUHRsskb0\n6+jsKZhLL2hlHZcA6xrvnTc+TZow9RNTTzH1Ffie4X072FMkHdob5uzxLiYmEXNgj3p3yHu7LCdm\nqijT+8tMfF8t2DVXGvKmyAXAjDuHdrFPrpkPLC9MBddcGE0vFhNM8iZ2RX3/DnVpWY+C/Cwx6cdU\nkGZbjUbz8CamQmcqArWrVVS/zs2cvXlj0p3p8XEkYqudHr2JCXonHI5QXyv/F+ZF5lw3cvD5Klas\nuF1B9qYgmLuxFH4oQSP6d8r3DQWTJ8zd9s9mbbMDSd7ENPRcAfH85wnPwLi33VgyZdjRfVvtHqH5\naTADvmbMlb3ssvGDqZudPd7BXZ7vLWB57ioLTXm+1evKc3e9dEiP1oU2xYM5jqMuPT+t/uhN7ODX\nwl12/cTUU+CbTBq9fmAnJdk3s7yrc4KZ0ijGatebAN7lPQv3xvwP05ZZaV8a1pPeX/lVYqzF2S4y\nqgWXVVh4tP6Yt07tG9dS+aBA55VTa9mghqKOxGpL2FY78upfupyKFS+ScT6bubt+dP8uHdm3TQ1D\nK+lW62JU0q+E8yrOtAbWOYhPSNZfy7Zp3e7DalqjvAL9i+75MJWAP1fu1STrUaFsGd12WY885ZfM\nWln5Z/Ou/dqza4eVNlNVKqjwJ20sTCb/HArfYjfOe7ZrpEu7tyD/FCJzLBvUrKQFG3Zp8qpwBQf5\nKzQ4byuMnmmmMv/ff7Zq72GrsdSvo9pZ15CCML+/cnO4Jq/crYTkVKtMKOe8UjSZMuHruTs0Y32k\nzmtex+79VZA8UT2knF0mbtgcpoRjh+wyoXgJP+fVosd9TY09uEt1qlbQ/w05v0BlIuArTGOyaZ2q\nmrhwoz3dQ9nAkl5Vng8/B8tzE6j7efFu+3z17djEDoAVJrPSnnlMWrzZrvPWqRykcla6KMrMzZ3/\n/hOmQ7HJdvCrNnO++jRTBzHW/rvFroP4BwaphF/RHiVl2iRH9m5V+UA/XdOvQ6GmUTP0cdrSTbr/\nym5qXofpkvKr2AmLs13k3Pr6b4qIjtEzowblefiC6T327dRlSkxKln+ZcvIrGaCSpcqoWPGiUXE3\nkWEzufCJlEQVL1ZMl/dqzV3qImR35CG99/MsRR2JU2jFQLuSWMP6WVSCYWY1J3vuBqsCYG4DDLmg\nlT3HR37vjGZmCtjx05bb26XKBquEk3+KghPHU+zx9MnxR3U8OVF+xYvZw4bpOXn6mMk2x09fbper\nJg80rFrWzhcVg4pGJcQEfUzlPT75uD0fiJnzauTgbnm+buTEM0+0qlXBDgaaMqEoMN//UGyS3Vg0\nPfWKWWWCaRgUZp4wZeI7P87WoWOxdllQslTpInVNNTe8TE+149Y1tYT1/c3wnp5tC69MBHyFKc8/\nm7hAK7eEU55binp5bhYwMDcyTmcdx5Tv46w0sXv/YdeNroqli1SaMHVesyiPCQia42N68pneyZTv\n5w7POohpkwRYbfsSJUvZ20WBqYMkxh1RSkK
cfSPOjOi6tn/HQk2jpg7+2aSFurxHC40Z2sXZi/wo\n0gGwmPgkOwjm719SY0cNcvbmnrnIz1+7TVGHY7V6a4T2Rx9xXjn7qoZUsMft1qlW0Q5cFOaFHYXH\nFDam0N0ZcUCb90Q5e88+s6S5WQmpdrUQu5fk6Vj10Hzvjbsi9e+OSO2IiNaRmKKxEouZ0Dq4XBk7\n79StHqxurepTCTpDzAIluyKjtWnnPit9HFZcYrLzytllViw089vUqhpsrwZlGgqnI02488SmHfus\n43BIUUeLRp4w3z/EuoaYu4y1rO9/OvOEu0zcuPugdu876Ow9+8w11ZQLjWoGn7YyEfAlaeW5VZ5t\n2l10pg3xLM/NYiynK/BjyrEVm3db5Vh0kSzPzdD1ShWDzmgdx5TvB4/EWGkiwkoTRad8d6cJs5hJ\n+ya1Kd/PYUW1DmIWsKsXWuW01UHMd3712+nq3rKOvWCgGVqJ/CvSATAjLDxK//fGBPtC8PB1fZ29\n+WeCYkUBDXbvdK6nH/IPPBWV9GCQJ/j+APKP8pzyLDOOB4q6cyGNmu/4n3F/qVyZAL1958UEvwpB\nkQ+AGfPX7dRTn023JyhnmCAAAAAAAPBVJvj1+aSF2rR7v966Y5AahoY4r6Agiu7s8B66taxjL/Vp\nxuubro8AAAAAAAC+xgS/zPy7m3bt16MjehD8KkRe0QPM7ZXv52jK0i325LZDLijclVAAAAAAAADO\nlrSeX7v223N+DejUyHkFhcGrAmDGl1NWWI+VhTYnGAAAAAAAwNlkJrx//+fZik9M1qPX9LBHwqFw\neV0AzFgVFqH7P/zLXmHBLIPLCooAAAAAAMAbbdoVqfd/nqNqwUF6fmQ/+ycKn1cGwAyzOqSZGP9Y\nfJLGDOth9wgDAAAAAADwFtOWbtQfc9eqUWiwnhvZj9UeTyOvDYAZMfFJdhBs9dYIVogEAAAAAABe\nwcz39cP05Zq3Zpsu79FCNw1oT/DrNPPqAJjbBxMW6Zc56+2hkJde0FrdWtd3XgEAAAAAACgaTODL\n9Piav3abihWTxgxlsvszxScCYIYZEvnznPWaunSLPTeY6Q1GIAwAAAAAAJxtJvBlhjtOX7pJxazn\nV/RooWE9W9Lr6wzymQCYm3tusMhDMfa8YCMHd2WSfAAAAAAAcFaYSe4/m7jQXuGxTcPqdq8vJro/\n83wuAOY2ZekWfTF5hR0Ia9e4pvWoRY8wAAAAAABw2rl7fK3avEe7Ig+pTYNqunNoFzUMDXHegTPN\nZwNgbmZuMBMMMz3DDDM0sl2TmqwaCQAAAAAACo0Jem3atV/Tl260g15mqKMJeN04oJ3aNqzuehPO\nGp8PgLmtCouwV4s084TFxifZ+0zPMDNfWNM6VRVSPoihkgAAAAAA4JRMsCvqSKwOWo9dkdFpPb2M\nNg2qq3urOurWsg5DHYuQcyYA5skEw7bujda8tTvtoJgnEwQzwbDSpUraz0uX8lfpACalAwAAAADg\nXBSXmGQHvOISkhVvtq3HwcOxzqtS1YpBdk8vE/RqUCOYYY5F1DkZAMtsX3SMtu6Nsn/uiz5m/4yx\ne4mdsH/GWAkdAAAAAACcW4JKuTrEBAUGpK3YaHp1VQsua8/rZX6ykqN3IAAGAAAAAAAAn1bc+QkA\nAAAAAAD4JAJgAAAAAAAA8GkEwAAAAAAAAODTCIABAAAAAADApxEAAwAAAAAAgE8jAAYAAAAAAACf\nRgAMAAAAAAAAPo0AGAAAAAAAAHwaATAAAAAAAAD4NAJgAAAAAAAA8GkEwAAAAAAAAODTCIABAAAA\nAADApxEAAwAAAAAAgE8jAAYAAAAAAACfRgAMAAAAAAAAPo0AGAAAAAAAAHwaATAAAAAAAAD4NAJg\nAAAAAAAA8GkEwAAAAAAAAODTCIABAAAAAADApxEAAwAAAAAAgE8jAAYAAAAAAACfRgAMAAAAAAAA\nPo0AGAA
AAAAAAHwaATAAAAAAAAD4NAJgAAAAAAAA8GkEwAAAAAAAAODTCIABAAAAAADApxEAAwAA\nAAAAgE8jAAYAAAAAAACfRgAMAAAAAAAAPo0AGAAAAAAAAHwaATAAAAAAAAD4NAJgAAAAAAAA8GkE\nwAAAAAAAAODTCIABAAAAAADApxEAAwAAAAAAgE8jAAYAAAAAAACfRgAMAAAAAAAAPo0AGAAAAAAA\nAHwaATAAAAAAAAD4NAJgAAAAAAAA8GkEwAAAAAAAAODTCIABAAAAAADApxEAAwAAAAAAgE8jAAYA\nAAAAAACfRgAMAAAAAAAAPo0AGAAAAAAAAHxasRMWZ/ucMevDp/TRymSpbCvd/+a1Os/ZDwAZ7Phd\nj76wUNtVUk0G36JnL63jvAAAmcz5Qjd+vVEJKq1etz6t0Z2d/YBPi9G637/XJ1M3afv+eCX6+atC\n9Xoadv1turpdgPMeFMSOn9/UI1P2S/6hGv7QXbqsrvMCACDPvLAHWIwit+/Q9iwfkdarp3bsULSi\nDx9T9O4oWZcTZMk6zsvm6Ldvv9Szr36oZ9/+Tt9PXqRNUYnO68A54GisIk1ZcThauw8cc3YCp7b6\nt3d1262P6sZHvtS0CGdnPhXm30Ima/7Uk/c/qqvvfFHvzCjgwY05qr12eRGpvQedfYBPi9Lfr4zV\nfd+s0kYT/DK7UpJ0ePcmTV6xxX4HCu7IkcOudsv+A9p/1NmJHMRo3czJ+u2PyZq1kXYLgIy8LgC2\n/ss3dN3Dr+n/snw8q8uuvFeX3f+Rvl+8y3UhRt4k7tLMca9rxLWP6LpXftCHE5Zo7tL1mjt/vj4b\n97XG3P6IbnrnH+3g4AJANtZp0T+btM1qsOzdtkS/z9zp7M+PwvxbyGz10sVavNs0LMM1aeoC7XD2\nA8iFpZP09bJ413aZ+hp2xy3679jrdcfAFmpRuaxrP3AGxaz/S8+PeVL3fThRH349US/8Mtt5BQBc\nvC4AlpKS6mxlJ1kxu9fps9ff1D1friYIlidh+urp1/Xi5O06mOTsUkkF+DubtmSFz/tF970+VXud\nPQBwblinn9/+0O4V+3GOvYXKKrC0s6kSqlihmrPtIWqu/mt61776P/2wMqcrVS7+FvKtfGApZ0vy\nK1dW1Z3tdFGa8bnrnL/yM3UKnMq5lV7Wb9ihA/ZWgHpeP0a39W6nei266LJRd+hBpgzAmWRu4H/8\nvIaP/VOzI5OdnQBwMi+eBD9Ew576QNN+ch7fPKdxY6/XyM4h1mXYSNbWSV/rpSlR9jPkxjZt2ZZq\ntQLKqFH3fhr70ivWsX1bk779QJM+vl0jW5WRn/POmFWT9dkMmgIAziV7tXr+ertX7C+L1jr7slJH\nV99xs+67frDuuGO0HhmUxTw44bu10PSuXbrqFEOFcvG3kG91r7hOz42yju31V+u1e/o79QdPEQpb\n4Trn02ev10ZnL5C1cyu97NrnrmPXUsvulE04GxK1Y/Z3uv+O1/XijAjrWQlVqFpBafeNACAT31kF\nMiBYtVt00YiHntCzA6s4gZp4zf91kpbY2zi1Dup1WS89/vJz+vCeoerWMMjZbx3ekFYa8fQNGprW\n+SBRS1dzZAEgKwF1OmrQpQN1We9mSi9J86cw/xYyCaitLgOtY3tpD7Xk4AJ5kj4qo4RKEP/C2bDh\nNz3//nytPZoqv3J1NWzMg/rm/1oo2HkZADLznQBYmgC1H3WR+pd3nkaHayOTeuRSiPpcc6V618mu\nFtNSA7qGOttSXOR+5ksBAAAAcOalHFeqXxm1GjhCH334kG7rWTuLnrwAkK7YCYuz7RVWj3taD042\nXa7NEMhndVtr1/7MJr5wp95dZbZOfl/6a0109093a3DMv/p13G/6Zdk+7U8wd7NKKqhWfQ0ZMlQj\ncixIYxS2YLr+/HuNFuyOVnSsM+bcXgI6VN17XqxRQ3K6Y59p6Wizy1k+u
m//frp2YPa/m7hzmX6d\nMFWTV+7XXvf/9Q9UjZpNNOSGEbq8xWm6lf3Xu+r3+SbXdv0L9b9XrlCWqzHHRGr7gXiVrlxXVfPx\nURJ3ztP33/yjPzce1GH7nEgBZYJVr10HXTv0InVxB+l2TNbTr83SpiSpao/r9O71LV37M1n99ct6\ncc5h6xjV003P3qaLQpwXMlinzx/7RpMPSqXaDdaXd3Rz9qeLWT9d305YoOken8uvVBnVbtpJw6+7\nNIfgoZt1zif/ofFTV2ltRKziUsy+EipdrpJa9b5Qt1zZXXWz+ROzPnxKH61MVoUuV+qTUR2sP5U5\n3Vp/J6Saul10me7IMd3lRqJ2LP5bP/66XAv3RCvGPSecncbqq/eAgRp2Yf1M/2OnfnvpU43fZqXH\nss1190s3qFsOh2PxuGf15qI462821egPblIvZ79bfo912nLhNbvqlWeGqPrGyXrt4ymaH56kFCtv\nV2rbT88+cbEaOe/P0ZrvdPNz87XH2qw5cLQ+H5VF+orZpvl/zdVvi/7V1shjacfK/qx1W2jA8Cuy\nz49zvtCNX29UgvsYRCzUJx//oYkbjtrd+EuHdtTDr2Q8jmaC13Hjl2neDvdxMemntEplup1Rqn4v\n/eexgSfnTyvd/PnDNP2xYLt2HTXHxOKUOxfnVObt+F2PvrBQ21VFlz1xv4ZX36QJ747X5yv22+k4\noGIT3fTQ3RqWqwM7X2/fOVELk0qr161Pa3Rn1/f68Kt5mr/riCtfWJ+pSu3muuJU5dkZOP5VJryu\nJ6ebZf1SFHvYKautvBBc2j0o3DreTfvpzQf6WFcclx1/vKdnJoZbf9uzzEnUjPee13/XWHnkeKKO\nOsfffNZyHicwc1mW9d/KQlbn1ilfGnXsqOuu6qO2Idlkysznt64ZVvKHPv19sVbtdn1nv1Ll1bBj\nd902alCOvaVc53KhR9nh+gytzj9fw6/um8ueVh7lSaUOevylK9XGeSWDNT/p7veWK9LK222G3avH\nB2R9cNKvAZnKm7RrSMbfT5w5Tv/33RYl6LgSYpyy2uSToID0O4eZP5fH9bHLzR/ouUHONf7vDQqL\ncp2PgDJV1bZvzmV9jtxptmwr3f/mtTrPXY/w+B+5O0+Jilo9Vz9PXaIFG83Kcu70YtV/qlRR+649\ndH0urkfua+Xeed/pra+WaNUh63xZx6lOr+H64LbznLKkcP+XIlZY9YNJ+nFVpCt9WXmxfvPzMl0X\nMh8XKw2G1Fafywfrtv5NTtlAzuv1J1/pxS1xlxb9PkXfzvaoCzrHpmv/nK7n7nK0pLpe/5zu7RGh\nOeO+1Uf/uOZx9StVRYNGP6q7znd/VneeXupR/3D+T8/+umpIx9ylSY96V1Kcu8y1/k6FUnJPGZtW\nTzHyee3Isp6blma6acTQnvIYqJBR5rJdEVr64wR9+tcmbbP/lvV3sqjnZ74OmbzUsls/jbq5t5rm\n5thk5v7u/qEa/tBduiyLcjXr9JvRSW2fJrs086cJGj99m/N9TPrOTf2voO2W/Keh09duSVRiYoAC\nPP+vR91NbYdo2hP97d0AYJQYa3G2vULkypmaGmZWnCmt5j17q2NV1/7MNs/9S4v3ma2T35f+WiW1\nr79LH4/9UX9vPabYFHcs8LiSjkZp7ZIVWnWihi5qWcXZ7yFqrt54+AO9M32LNh+MVXzycecFy/FU\nJRw9rM1rlujPLSXVo0cDnbQWTuJqffXUu3p11h4djE2Rq3plsX83ShtWrtaBygN0QT1nfxrr4jPx\nY41+a5oW7jimY57/NzVFxw7t07JZs7XgWBX1al89rTJSWHYsnaGJG4/Z234N2mlM9/r2dgaJc/XS\nHR/o3b8W6Ne/N6tcr65qmofB+HtnfKK7Xp2hhXtjlZB2Tqyvlxyvg7u2aWZYsnr0a64KZmeFCC36\nYYU2WDWwg9Y5bXBpO9W23+1poyZ9P
k3LDiQpPvagTlS+UBc2Sm+4plkzXe9OCFNkQpJK1m2vKzun\n93YzlYYl417VXZ9aF/19GT/X8ZRkHdq3Q3P/madtAU3UtUn5tLnSMohZoU8ee0tvzdqh8KPJSj91\nJ5ScGKvwTev09/TtCmrbRk0rnPwX1k79RbN2JOlQUG1dXXaVHnjmh0zp1vo78ce0Lad0lxsmbY59\nRy9P3KiwQ/FKSkucFjuNHdDaZQv158oENb6guaqnfdQKil4zRX9vjFH80X1KKJ/NcbbN1ddvL7bP\nW3zZehoyqJXSp/Yu2LHeufBv/b7usOKLV1P36uv1wsvTtPJwqpWrjeOK21dcDa46T03s56cQuVa/\nz9kts+p4uYadNLR9xrIgymr03PHsr5q4NlyRVqPO81jZn/VguJUf52tLUCvrWGRxNtbP0YeL9lrp\nsqzaXXBE3z/5q6bsSXDKA+t8HktQpRbu8itRK757Xfd+vEzrrDIn/biY9GMdRyvdej6OnaisnhmO\nq3Vkl32nu5/4RX9vitKhRPcxsTjlztoli/TX9mLq3KmhTkqCu1bo26kmf1jHr1OwVr31P32+4Vha\nOk5NiFLJ2hdb39P1PGcb9ecXa7QlIVblm/RVhaWu77XxUGJ6vrA+U6xdnmV//M7U8U9c9K2d9+IT\nPMpqKy9kON4pwericbx3zvtTv5h0aJXvoe3c53Cjpn0zV8ut1mm8x/E3n9Xzbx0MDNUNVv51y/pv\nZbR3xv9013N/aFrmc+uUL5Hbt2ja5MUKr9hcF9TP4lhkOL8NtOerV/SfCVu0+2j6dz6ekmiVwVs0\nY/5eVe/aQfVPKtfT0+jGDGWH6zOEh23U3P3lNKJrbibHrqC9SydpWlic4qMPyq9WP3Wt5bzkYf3k\nn/XNmkPWcYvXnuPldKV13E4uddbp909nuI67Xw318bxO7Fqir/423zteKVWapuXx9f9McB3zBI+y\n2uQTj/MUHx2gpp5lyZbF+nqVa06kmi2ra9eX7+k1c42PTz8fqcmusn7mmmQ169s0Q/7MFXeaPVpa\nLYeU0mJ3PcLjf7jP05TpW1XxPOvznXS61+m7J9/W2N/WaF24VY/IkF6s+k/sMe0016PFR9S8dyuP\nMj6d+3p0rEJ99Y35W/d8ukI74py/Yh2nI3FBam/nh8L9X5eXXqy7n/1d/+yKTU9fVl60rwvzt6h0\ni/PVPGiTfnjxbb02fbf2px0XKw3GW/WyFcs0/1gNDWhfLevrdD6vP/lKL5bEnZP1/MNf6suVERnr\ngs6xMdfzCdaxaditlUJPqtC5y9F4hbRor9hfrfrFnP2Kcf7I8ZRYxVrHzJWmIzTtzdf05K8brTzt\nWf9w/s+GVdp8oqUGtbJrVjlLyzOeZa71dzy+66H4IHUY2FJ2bsrztcOUI2/p/g9nnVzPTUsz6/Xn\n9LVSnU5qk1Wi8SzbB5XUrMc+1HsL9+lQ+slJq+dvKFlf/ZuV0cZf3tM9H1v1kqj065DJS/u2b9C0\nHNJnjtzfPfaE6nRpqkPfus9BxmuJK12tVHyDruqYxT/xbPs0aX1C01//TF+tPODxfUz6NulluWZF\nBKlnlzonz4FV0HZLvtPQ6W63+Mkv8yHzqLupWtMM11MA8NEA2GrN+GG5Nsaa7WB1GHyB2niUx+kB\nsMNatXi7IhNLqELdNhp2zSW6uXtdVT6+X5vC462LU4oObI6Q2li/n/mm8qzf9cy8/Va5W0aNzu+u\n/xtxpW4Y0k2D+3XT+bVTFLE5XAeSpaR9Ydrh31n9M0WAtvz4mV5bEG1dNkqoTq8rNfbBG/TgzZfp\n8p41VM8/Sdu37VfZlhepV0PnFxzmLuOtn67XUev6Yca6Xz7yOj1+540aPbSj2tf3V0zYTu2Osy6m\nYRu0NbC1+p5c8y2ARM394zfr2LkqhI3OG5x1ZWnDfH1mVTztC09qMdXJpsGWpcS5ev+FGVpnbouV\nq
a+R99+mR0aP0G2Xd9V5dcqoxKFwbYuvqt7uipVVvU7YOkvzw62qRGKiKjXN4n9FLdR331sVEPvJ\nCUUHlNPl59c9qfK7Y8ZEfWsH94LU+/KbMjS2tliNukcm7ZW5ZxVQtaX+b8xIPWh9rpv6t1arqkmu\n852YrN2rNyq+0fnqdFIFJkxfPfWJxu8wt8tKKrTzRXr8/ht0362DdWmXWqqZekDrth9VQuJBLV1z\nVC37tD6popWebg9p/rR1CvNIt2MubqFapeK0a0e0Yq20YdLdvymNrPOT11kQIvTzc+/rs43mPrb5\nrk102eWDdOt1A3RFv+ZqUea4IvceUHTiCSVFb9eibRkrSrWL7dXk+eGKs47zvqQSuqh345MrYZbE\nGX/prUX77ePZoM8w3dI6/XMW9FinlRGlj2v33HVW48D8nXrqfV5jmXZ/gn919R7QWp7hzWydIgA2\n47txmh5u8mJ19bnoYt06cpCutsqAwdaxanDcdU5N34M96/aodG+rYZb5YKQ1mP11eNNKrdxn5rCo\nro7dmqtdJX+lxJVX28s72wHkxIVf65HPN1qllvX/Qtvo7jusvHHvVRres77qHY/QqjDzv6QqHS7U\njX1bqlOH5upap1J6Ot/yi+5/cY62mAPrH6KeI67V03dcp7uGd1evRhV0InKnNkcnKWHvZq2MqamL\nOmRqIKYdi0Clhi/X7B1WJrX+TpsurdSpdmkVTyypBt16qmOuDuxWzfxpk+vu7IGV+mNJhBI8ytIr\nOgerbOx+bd5v0qE5fluzLIfP1PHvXrW0qtRtoE5tghS9JsLKgZba7XXHpR2tfU1cj85t1Kt2RfOK\nLetrVRWVL2+V+Y2s99c9oa2bomXeEdy6p2620rf7b/Xs1FENq6V/2FNd9xIXfql7PlquvebcZrom\nXdjSOpZWA3PrPuuadjxB21dZ+SarRlba+S2h2G2LNG1TnI5b5/e8fn2s/N9DXar46cjufdpv5f3j\ncfu0KqqcLutaJ2Ma2fK7XvhktSLM9cn0nnvgFj1w51W6ZVBHNQ8trpRd4doR1FDXZXXjJAuhcds0\nYZkpJ6xGdfk6J+U/q3DXzF8maYXpnGc5fqSkag7qpAaZvpp2zNUXv2233m0d6679dL/nzY1s8ni1\ncoEqW6OOdT6qq9jOnQo316TyjTTiqm7q4T7nVtrv3qBqehnnEQDbt2GNVh9MSk+bw9urdQWrTLKu\n7cesy1VS9E7tzE8Z7fk/Fi/UnJ0e/2Nkb11QraRi9u7V3jjrOp0cpeWbEqxz2DytZ6JtzXS9+etW\nqyyxrkVtu+r6EUN06+U97frLhS3LOPUI6zwf263VR6rpio41nF9Ml3Y98o/Rv/O3KTzFXI8aqm/7\nhqoakKKYkIYa0b2hShfm/7Jy3qIZG7U9uYSqtjpfN147RJe3L2Onq53mwpd8SKvD9yt68iSNtwr+\n4lYd6eKrBmv0Rc0UWuqottjlwQkdCdurYu16qG0WnQXze/3JV3qx6juvPfCr5sRY58oM4ep/mR67\n19QF+2lAi+qqELNLa/cmKPnIbutaW0I9ezbMFJRIL0cDjm7V9PVRSjH5v2U7nd+iggJTElSl5QD1\nalLaqjt+p0d+2ynrcqig5v30n0dvtuofw3RNn8ZqXPG4onbsU2ytdrq0bSX7L+eogr8qlKmiVtZ3\nCj68WVuPmJ3VNeD63hrg/q4dW6uHuzzM47Vj74R3dP+P2+26jHmfqwwaomsGWfXrRgEqfuCAdh1O\n1vHko1qzYkfWQV7PfLJskRbuSZFfpYa6/Aorn1jpoXLyvrR6/r5/IxV1cI4+mbhLscWt89B7gO64\n0VXmRe9w1eVN+tye2kSXeNRVcsWzXN06X39tiPEoV4doQHPr++yL1LajVt3weKw2rIhQ6EUdVT9z\n8Zx2DUiyF1nYaqWZtPqZnb5jtX33YSUcP6HYXVsVXbmnLqiX6Y8
UsN2S3zR0VtotHuU6ATAAmflk\nACxx5u96dVak3RhU7Q66/eoWGSp/6ZUpq9J1PFBth9+u9+/prw71qqtKrfpq2+181TmwSLPMRfr4\nMSWWy+KORlC84hLr6f+evEM392iu+jUqqGJF16NGw7a6qP4RzbQL3xPapyBdbVVc0i9FO/XnN5O1\nyrSiavfUS08OUrMg1z0P/6Dqqt/aqoQPuEAdm/tnuhOyXB++PFnrzNcv00RjXrxf17SuLPtX/YOs\nz95UvfqFKGrWam2JT9Xe3QlqOrht7hr6uRHxt/739RZFmkqJX30Nu/uikxuURvxmTZ/mamyYJfxb\n9eml9pXtJ6c2d7JeXxBpVT4D1HPU47qze7DsUUZ+gapUq5HOu7CHhnZvppr+7qPpp5qHN+lHq6Jz\n3GpKpoacfK4SZ03Re/brLsnRfqp9mVXBcJ67RGnKj3+4GlKBTXXF3R6vR/2pl99eob3m4h3aTc+/\nNlIX1i5vfy6/0uVd5/u841ozLcw6NvEKizo58BM14XM9O8cJeA4cqY/GXKDa5QOtT++n0uWrq3HH\n7upR4l9NWndYx2MidDDw5MpHWrqNidURK902H3K93n3wEnW20m3FKjXVrH1X9a+2T/8sjrADUFE7\n4xR60vfMWdSE/+qpf1zHKqjtYL37n2vVt0VNVbHTtkmbHXRx59LavXi9dljpMGnfLh31rGiFpip8\nxkpttl47fihVIVkFHZSomb/+qFkmaGlVmgfdODS9IVIIxzqtjIg9piirMhfUfKBeff5mDe3SVt17\nXqDLchv8MrJpHLuVTT6slKZD9NLDV6h36zqq4ZQB5liZc9rs2GLXZzl+SMWC+1oVO3e6daRV0mMU\ndeiEHTR47Ol7NLJ3W3WxyqHBl7qCX+aY/f3VV5ppgs+BDTV67Bhd2iTILh/8gyqrfvtOCtnlCgTH\nHi6j3veN0MUNPIJfVvr++d0vNSXSNLSqaNhDj+v+XjVV3j6wgSpfo77O69tKAWvnW3nghI7uPKSy\nmc9d2rGItz6rVVkvU1+3PPGg7r+kg7p06aJBF+c2+GWkN9yOHovV8TK1dO29D+mJy9vaZWkVK693\n7HmeakcutRr4rnI4LO7knkNn6viXrlZPzZo0tB4HtdgduKvXWS/eOsDZbz08gl9GdteqkNrO+4tt\n019O2gppN0iPjjg/7W95Br+MnK971nXh+Ulabm74WOf2sgce01jnOJpjYR/LHuerffI6Tdt4zLrm\nxWpzePLJQZG085ukI8esxqJ1LJ549n7r+tZItatYeb9FWw3onOLkPSvvRxU/qRzdMWWC3bPD5OsR\nj92jy5u60qi5PtWo11w9BnXX0LZN5Z/xwpYtv9BD2vCHdbyt/3f0eLn0nr9uifP0w2eu120pRxRQ\n7eSe01Ez/tInVtkq67rSddDIjK9nl8eDrTLVPh8nFDZtmax2q1SxmW667zL1cs5TM89ghuHR6D5u\nNURN2fPay7dqSDsrbVpldCPr2n5Rjf2atnCvq4xOKn3ydzqVtP8Rb50n63807KWnXhytqzqa9F9Z\ntZu0Ut9eFbR/7hpttZP+XsVn7k1e4bgSD5fVwDvH6L7B7dSsVmUn35j0YtUjupbQlhnWcbWyeVxs\nKbW8uLV1RjNKux4dscrZ4+ZG3gi9+sSV6teprXpe2FNXmuCXeWNh/i/n2mfqbO+M7q2W1t+y01Wf\nMgqfsk7bTaDi4F5tPnJcAbW7aewrt+vyVtb1q0YdterYSY2PLNN0c1Cs/H6oZBYB1YJcf/KRXpZ8\n+onGbTKRskB1ufF+vXJ1S1VxVegUZKWXVt27q55TF03eH6nUen10XoYyNr0cPXTIyttW/h9kHefn\nr++uLp06qf+Avnbwy5j923f6Z491zQ1sqXtfu14XhJj6h/leIVaasfL2wAt0YcsaSqta5cSvkuo6\n3ylqmbs+HaqLn7pWQ9zf1bM
8zMu1wzoHz762ROEmT1v13Dufv0939G5qlUGuNGPOd7f+LVVt92rN\n32Mdu+QorT+cVTA+Uz4xefGFGzXQ1Ges9GDq+VX3zNE8k/COH9aW7TFK9a+uYfc/oicubZFW5l1Y\n55DmzN1jlw+HjpRQm0y9qk8pc7laqbXufeZu3eZ8J/v79GqqkhsWaaV17VVypA5k0Ysq/Rpg2i0m\nv12rN8cOU+8m7vTdTd2LOXVIpSjSr+LJPW0L1G7Jbxo6S+0Wj3KdABiAzHxsEnwzNv1L3fPpGqt6\nYwRaF8oLc5jrxwQjrtfzV2SeDyLA+r12cheXm7Y7c155qt5Lt4/OYX6N1i3UyV23OhCdaSnsY4qP\nczZLl8p6pZKgoJPG4CfOWKIZ0WarhNoOvVaDM9cSjYDOuqync+mI3qD5C12bBZeoGd/P1mqrrmBU\n7n6BBmdx99RW9xKNubWb+loV4WGjrtLw5s7+3IhPtBoGRkmVKZ3VRAIB1qHJdLbOa6CWzgV30+pl\nJ03MP2/jDqs6YF2k/Uu6Lubx27Qm83FJXK51Ya7N0q2bqLtr07bl75VaZX/v8rro2mvUPquPVX2w\nLurkeiFlw7+a7moDOcI0ce42+zMouIP+b1SbTOnNpcYV56lHoNlK1arlC5wAYlZc6fbV69qflEaC\nug/V5c1LuJ7Eb9KSma7N3PH4nH71NHzkwKzncqjeUw9e1dKpyMdrzoLZVupw66CenZ2EkbJTi+dm\n8S0SF2rxGuc36jdTL48MWvBjnUlwRz34+OD8zd2RCzX6XKt7cpgvo037xnL3Z9i3f4uzlQ3rmN/0\nyM3qkVW+1mqFbTcBQ0vDZrropPcEqF9zZ7av+C1atsC1mWbLP5q+wfX7lXteotvaZXlgdfWFLVzn\nNWWbFs3K6cCW16DbxujqwjiwJiB3z326qeNJqVk9rummtk7ejlu1QTNcm2nO3PEvutKvC6Zcvkij\nTjqORoCaXnuVrnbG/aVsW61/1ri2s2Q1VO7I6lhU768BHZxzHh+usEx/4//buw/4qur7/+NvyCID\nAgl77yWEvbeC4Naqv2qHbbW2tdrfr7a2dtmff+2v2061w9FaZytOXAwHyJC9IRHCCCMESAiQvfif\n77nnhku4GTe5Se49eT0fnMc999wbkrO+43O+43ShaRtgtFGs/4zNZG11FzNOowY76dm+NK2omriv\n3q9tduIepRj7OinWll1VZycu1urUDM9q7CCNn+pZbWwmgPKAn7QnZsoMXe7tf7nviLY7q/Vh/44H\nb9bEi26dKfrKFf2dymuxPtn8sb1WKWaUPntXDRPeJM/SOG/r8+M5OuKsVidy6Dw9dPeUynvtAkH9\nXdWU2WJm6MoZ5wsk5rg89PPPVTkuMRr7mQmV428dPnLYJ9/yCHr+U5Pij7Rktd10SpHD5+jbV/ot\n0GnaVeOcLpOntWpdTTNvR2jUTV/TvdP9J2AFVtnKFt1G/otW1r1ZzSkKrprzjvPnwNqfa27W9X6v\nm26ae8/lmmWXl6S89Zu1pOrJ9OH/XozRZXPHqqfzzs6H7vuevl4l/YwZc4UWDHXeHMvS/hp+T61M\nuvrTr+uKqvsU01u3XjtO3ufE/sqxvhJGX+33fut+9XhNc45Jwf5DVqmhigbVW+p3DTVvvQUA/Avj\nAFie1r/2uB76tXf5rb7+5ft156PrlG43/bIKSpf9l75VzYC4HgN1XTXBCA3qqj5ORmIygp3Oat1F\nKKLaoztI3bs6hfrU5frDooyLCmL+bEg94ASHumqkv7b7jr7DejsZY57S9x+01xqq+MN/6tE15hGO\nJX6YvnjLRP/HzWZVti7/nO7//p36eg0D+fvVs6NTIMnTkn8/p3WeSGbNkidrvLcAvW+/1l5QIF2n\nbbs8R/eS6y7TeLtGkGdt22Fv8ypeme6pSFnnLWX4FJ99y9amtEzPamwfXTLBs+rP9EHePpNHt
c+3\nYpi9Wzuc+lfckP6a6Fn1Y6KGeffj4FHZ8zT4VcN1q2TNSfHWroqVnlG1CFODA1u1wfk7NXCI5tYQ\nCIi5bIQmO/dHWdp+rfSs2kbNHOkEj8u1Y/MnFwXyilfu1nrnUho1cbbPIO1BONYXSND8L36uxoH4\nG52VCNQ1kb3khs/rs9Ue81zleOpK9v/pd5diY5ygZLFVULVXKmVvO6h0ey1GI4c4AxP7M7WPvPHq\nPQd3OWsXS5p5jb45JUgHdsSUagJyFt97u/CY9lf/J/kXtOMfulZu3ePkCwmaMLamdHmgZo+rbGah\n7Tuqzxt6zrjUf0XF+t8Hdfd2b8lVpnO7eg3t5m11uF8v/fU9mUbUDeObnh3Rto0Xpiardu3z7PvA\n2friCE+empO698KKX/EabU3zBH8jhwzQzCZKD8ZbeaDfAIp1Hnr18H5wSlmBXtM+qv8d1pGbMViX\nOOsFGUcCLMPEKMIpotSul2775jX+g191Esjvqj7vG2Vde17VHpfkLurhnSH8ogp+sPOfWmzar+1O\nOj1gyIgLW2P66jtQw52gRE7GoeoDI4Mu1XdurD4B693V+Q2nN+sfT2xyHhI3vZrzjoNas92bqPTW\nhNk1FUJma8po5/8pO6AdVR/6+Kj2ekjxljct1eZDyerZ1VuKte5XP8/D66r6dNUyYZgmeK/NjEzt\ndlYvlqwF11xeTbC5n3p7//+TOdbVGaia6i31u4aas94CANUJ4wBYsQ7u2KmP13uX/c5MKJboZM36\nwl36wzcmVl+oqFUHJXkzo3LToLhmxdmHtH//AW348D299qZZdimjSiX0vBhdfuVEJ+Mt1Cf/+q2+\n8L0n9OLqfTVkKNk6fMz7aYWOb/P+Hj/LjpN2H30jv9AzYH2DZC7Rz57xtqpL1g3/c2f1s5A1VMoc\nXTfSUwgpO7JGP/76T/WTp5ZpS3ZNNalkzR3t7d+RoR1rfb67ba+22YGDZA0aOlUjnMr04U/TLihI\nVlYiIwdq4mW+haB0HfWWx6ILddDf8XaWdw7Yg85ZqgQgPj2pQ85qm/xMvz/rWT7UXm+Qo7IlXOCS\nuydXtio8kX3CWasDq/DvCZJYBbW+/Wq5d3qrh7egVVigs76nZ9AYTfa2NLnoCXmxVmz2Huv+mjzb\n97cE4VhfIEZt2/kr0DaWYmVnHLDSgc163/u3bj1e5/PYNqGGwr51vPt4Yxc5p+WvLdOeIyec39VZ\nfapM/Zh6zHsdRCnv8IXH8oJl8RHP+FaWgsJqD6zi4tr6rYQGn2/l47ROeAbyq0ZjHv9Qlar9Gd6b\nr4cGTnFWq9G3W3JlF6yz+aZLYOD6tveej3KVO40SvWIum6VrnP5BeVsW6a5v/ky//s8n2tuA2nby\n7CGVrXZ2pm60zrLXDu1I9fzHPc3YU0OdROfYAW31TdxXpzkB9wiNHzOria7bmnVM9DmGtRUu6ss3\n2GOlGXYvteqYWZut8suO1e9XpgVpTqvC2sUpoFunQb+roeIU532weZFg5z81yz6aLe9ul2Xv9vt7\nPEuaZzwxo6DYSgWrER9XYxBy1JVTNd7e93IdXPKUbrnnT/rbe9tVY9GqEdScd+zVPu9DuK7dNbSW\ncuaArt6gZ7HO5DXejiS0aYpUY4C6e++jsjM6UVMTsGr1UYd2zmpZsOst9bmGmrHeAgA1COMAWIz6\njLhEMyb4LJfO0b33flsvPf2QflLjNL5BYqau/s9f9PUvf1tXf+OX+tr3f6MfPr5Ijz9rluVaV21J\nxfrrx3xB//eN0c6sPuXKPbBFT//+Ed38xf+nnzy30s+T80zlVtZXMrXY/h3VLG/uqSxYRVw0NUqA\nirfqb796S5/Y5b0oDbj6Rt1RXWuNoOim6+//kr44JN7TkqAkW2vfe03f+8b9uvX+J/TqTv81qeRx\n/ZxuAuXalnq+m0D2nqOe8XoS+2lESrImDXOiCBd0pznfS
ixy8ABVzhhu82l5c3qPXvR3vL3LCm/p\nOeLCJ9qnzlaej5xty/3/rLMs9hb+Ils7LSnqId7bEqjmIMZFfAK9ye1rq9H4FLQueirq29Jkv9b7\ndqXz6f4YOXio5lxQwA3CsW4GZmrvF3/zkG74/Hd0y3d/Y6UDT+qX3r/T515smKGaNd45Jxlr9fdX\n0nwCAdbfkPq6/rbEOSZJfZVSpdtxTq73vsnTujd9juNFy6bKIGhkZDMfWMf5yof/ymbTHP9QVXY+\nCJXY1n93el/J579z9HiN0cT6iRmlb/7oZs3rEmW/LTuTqaUvP6u77rxXX/2/l/WhGc8tUMkpSnEG\nGrugtWn2fu2xozoJ1vU+wicPOKJ1q8+3IKhsJRbZR6MmNUVFNlT4BHv8PlDJ0473nrHy1nt1xVce\nsssv9/7+1cq0oDIvCoqm/F311bT5z+Fce3QiW/oKP7+jcvEpS0Y0oFzQbYEevPdSjWzn+YOLrUx7\n4VN/1S23/1Df+uM72tGAIHXw+ASE27c93zqrGueD8XXo4h7yks2IKI4iFZ6/PIKrAfWWwK+hZqq3\nAEAtwjgAlqAJN3xTP/2+z3LXTbpy6iAlN0UZ1wSG7v+tHnh5h6flWXSsuvcf5BOQG3C+C2U1ul92\np/75t+/qwZtHa2gHp8JQdFxr33hRd337b1rqLWNdxMy2c42+WevyWX37Ru/gBfVg7eOTP/qHFtqD\nlZuxN76iP36puq53QWRVom772cN67ic36KbRnT2D4KtUJ/dt0V8efFD/88LWCyr/tr6jNN5pAHB+\nrKDzY7/EDR+oadZr3xF9nUKVT3eaylZi0iUpk6tv+WRmdPJ7nKssd9ymL892fqYKM9ub35+pstz7\n7Wt1qfMzoc+qBFQpr/SdP7Ky1cbWLefHMzvf/TFC4yfNbdRj3RSKN7+gu3/wDz29Lkt5JdalG5+k\noSk+QfkR1vXrfLehBs0YLs/dXKgtL/1JN375Id3368d133fu040PLNVWc1zNOCbfuKWGbrYJmnit\nn+N40fIZ/e8tviPhhaamPP5u02gBzm6z9f1Hf6a/3DtP8/q38+QXZSU6uOUj/fwHD+kX71ebsVWj\nj6aMdIK/PuMaFq/d7+nWF9tfKablW98hGum0Pkvbvc1Jc863EtPAwVUC7i1Zphb934O696l12pJd\norLIaLXv1e/8fWMtw2qNptZVU/6uIGni/GeA6Rbo7/+vsvzgzqsru7XWR8yYG/W7xx/Ub+6Ypkk9\nop2HjGeUuvJt3XvPL/WvzfUIUIcIgiZ1EIR6S/2voSaqtwBAHYRxAKw5Fev9x549HxiafaueePq3\neuZX3/YJyF1+vj9/TRL6a9p/3ak///1XeuK+eZrlfXJ+cpsefXpJZeDAzKYYW1mTa6ch1y7QDbUu\nM6sf7LI2xRl6/bfP6t8ZpltphNqPv1oPVTvuVGOIUfKoufr6j/9Xbzxxj759WV+1t3PaQu167RX9\n86JxNwZqzHCndlM5yP067bDHfolQylAnJJAyRGOcwra3O82BHQc8rcTUTxMu6JJnxJ5/ih7bWeP8\nHucqy4LxFw4eXzk2kxTXfbj/n6myXDmpd/2PdWZOZXeX7p39TJNaB9m5tVVSU5VV2buyg7pUnegg\neaomewfjT92ut+3WdsVa8sluT0uE2GGaeUFXUyMIx7opFX+s3/1xlQ6aJ9Zm9q177tcr/3xYf37A\nJyh/w6DaW+XUSbYWPbfSHrMmsmNnu+VocX6Wtq7fqa2HCu3rOKZDP33+u9/1O45JnHUNesSo9yg/\nx/Gi5TJNrm7Q6iZ29KT3EXKSuvk2TGzS4x8GTp9vaVotn7Sha8f6pQ11k6CBU6/X93/1C73y26/o\n9onJTiAsVx8887IW+TQKrYtBKf2dBxfnB7mvnNxkSD9n0pIRGnOJk+nu3a/VduKepu3ODo8aPbWW\nbt1uk6VMM6ux0Tnpg
lnV9rzwDz2+xdOcMmH4PP3+id/r5d/dd/6+sZZ53qHXGqgpf1fDNG3+kxhb\n2dxHnQb4+b/9LJddEoR+DTFJGr3gc/rZH6zz8OANusbb2j7/kJ59+pWLB05vLrlnnXJZ9Xae8HbY\nl7p1CvegSZUylac5axAFsd5S52uoiestAFBHBMDqZZ3WOQU6dZ2sb909PQiV8Bj1nXS9fvLIV3SD\n8xS7YEuqzo/r2Uc9unh/yQkd2uysNopMLfrtn/SYt9A6+mr98f5qBt1sCgnDdNU3vqc/fs6ZVl3Z\n+mT9hYPYG6MmDHUqSc4g92v2yrMLvTWisuvLOI1L8eSunu40B7ViqzPvVP9+mnRRDclnrKv6zgDU\nr6O8Q+b6m3kq2LYePl7ZlbFTUm2dCHz07WxdZR6HD+z3Cb76UZyhg94eVD06y+mh5CNZc8Z4pyV3\nuiQVL9e6HZ7+WnEpw/wMRh2EY92UVu9yugZLPefeqHtnNSBoWZsDH2nxFnNAEnT5bf+rfz79sB69\n9zOVT0x/8cjDeuXv9/mZSdHj/Fgp2TpQn25ozWaHMsy067ZEJftWlJvy+IesEerd09uS64j21jJ7\n1vm0waoA9RxhrzW2mD7jdev3fqyHLvc+oEjTurWe1TpLSdFkJ1/0DHLvM7nJ0HGV533iKCfg6QyK\nfWB1mjy9s3soZVzLCn9pW5YnOGwkJVam7fZA41aeZ38UOUS3/+j6RqxwNuXvaqimzX98x+PLONI8\nYaeES+bqv392r77ujLuqY6n6pL6D+geFz1iXx44qteZCiPYd9n7h4nEvw45vmSqxvToFPTNrjHpL\nbddQU9ZbAKDuCIDVS+H5sWisiqW3q9eFys2QSoGLGaXxI/w/gpk+tK8TUMjRyrUb7bXgy9SiX/3+\n/BPb0dfpsR83Y/DLR/c5QzTMWfcrZaBSnEN3eNduLd2139Miokpga9rw/p6Cp2kp9sE2bdtnb9aA\nkaN8ZiT06qNRg70/vF+r3g2w6YLh0zVHu3cG3PrhYnv1xlN+uoHaNmr5Ou8vCLDSZx0XbwMK7d2u\nt2sYUuPoW9u0walcDRg/3m+3jOQrxlVOyW2m9d7zfqrzMwmaPW22n2BFEI51U/IZV6dnt2qCCVYi\nUJ9k4CJ7j1dO2lBebp35mCQNmXpZ5RPT8b2Tagz+nO/6K23ZsKzm4GZT27FGf6uu28R663r2Nlmq\nGqBuyuMfwiYO9EYF87RizcfVB9iL1+kDb9oQO0zWLdiEYjR2zACrmlpfIzRiqBM5ObZPm9/3dluv\nksZNGajRdppjWoot19rdzsON3oM0JdwryH5sWPKCNlVzwtet3lHZgmbIsBSf1m9nzbwlHh07qrff\nhKP4ogkO6qcpf1dDNXH+M7WfUjwFOh3dsEXnRy5tat00d1QAD8oa1VANG+SNkGbo48V7nXU/Mpdo\nufNATb2Ha1bVVugh6PDHH2hRNY3rs9/dXlmm8g7ZEVyNWG+p4RpqmnoLAASGAFi9+DSVT0vX0osK\noJla8djrWnTceVtV9sf611PLqhl0dK92feoMSFVlUOOYy8ZoZrxn/cTyt/T4hhpGLc3bp1VrLxwo\nu3bF2vTU363/N99+YpswfIF+cV99gl879I/v36srbr5bV9/zL60K4I/Y+toLenFtht+/u3jtwcqp\nof0P0j5RE8c4haeMzfqHU9lL6t3rwsBWZcEzT+sXrnWmh0/WmBRnisgqRs0cpSH298u15fXn9XoN\nLWiKs7dr1YaqJZwRunRKD08hoGyfnv/re34mOfAqVvbWT7S+xh6I5Tr43lO6+7E1Vaa5NufvLb3r\n9IOKHD5KVwVU6Zto/Z1OBaDsiF58zH/lyoy79NOF+52n+v019wr/x00xUzQpxanx7NutPy7f6/mZ\npOGaVs1sdQ0/1k3Ip2vrtl0fXXzNZq7R759ZV4+pyP0Y2E0DnGt28V8e1LceflwP/dp
a/mDdL5Uz\nKa3Qhowcv/eOUiZqTn9PS6GyXR/qD4v832O24hxtWb0pOH93XZQd18Jf/1K/X1nlXJrxSp7fKE+v\nkAiNnjL7wvu4KY9/JZ/x7jIyG1ZpjWxt/W8exzIO1jsomTw7RZOd/ChvzRv62Tv+7gnTqvc/esdJ\nGzpNnah5NUVM6yVb77/wgt6uZqKSPamH5MkSE5TUwV4JyLSxQ538MFNL/uMEdxKt++KCi2KiRgzx\nHNWcbUv1tlN/7jl8mAZ5Vush8vxA5ycztSeEosdlR1bpx/f/Q5VjsjtMGv335U45wkqjZ88/3/7r\ngm5JJph40YOOYqW+8qie2e68bZCm/F0NF5z8p47XS8wszZ7g3Lg5G/X3JzZZqXt18rR39QalVv/n\n1GKHFj71uj7xuz/FWrPHGyptp/a+fWWbwbQZo5yHNeVKW/S8/4cj9uRMS7XVLlBYecOMSxtwfzeh\n/DT96YeP6N9VT2TmEv3h9X2e8pESddmMGfZacDWw3lLPa6jx6y0AELiIBy3OeljI2vyhluw1jzHi\nNHzWHI2vxzAmn378jtbarQo6atJ/TXJmjqrquDa+u0G7TFqd0FuXXzlS3kY8stZK9q/QysNWdmVV\n3jZYhaDE5CRFFefqyO6P9PdfPa9/7Tyr1pERqqg4d/HP712lPzy9RC+/u1LrD+dKFa0UWZav9C3L\n9Nzjb+qtg6V2q4Wec67Tt8b6PDOP7KMB0elatiVbJRX5Sl21XB/syVFhQb5aRZQpP32bPlq1Sm++\n9Kp+88wyLd0bqbEX/N21WP+8vvvMp04hLEKRxce08t1lWrio+uXj0111xagqz/W3LdOf3jggM3JP\neX6ROl5S9/O0/tW/6q+vfqLXV+/RmbNWNhh3TsVHU/XRolf121d3K8c88Isdos9+Y46GeAvWPnrn\n79Mr67NUamWhnqddMZpy5e2a0c/+2COyvU5s/VCbTkoFBUWeFiJdx+r2r1RzrJIHqOvpzfpob74q\nSrO1/oOV+uTIaZUUFquN9XuO7N6oVSs+1nPP/1t/enaV3s/voNtmDnB+2CN5cDvlr92iXWfPqeTY\np3p32WYdzStQUUmkWhVnatfajfpw6Vt66omF+uuSzcrvepUurVKiO3/dWucmslyn9m3TuyvSdTb/\nlA6lb9GiJ5/TUxtyncBUZ33mjts0o5u3tl43XS9JUqHzd1acPaSPrL9z/8lTyjl8QKlpO7Vi4b/1\n89dSddL+JbGa/KWv655Rbe2fvVikBhQfcM5HnrJPWfeCpdPU+frv8dWEVYNwrIORRlTK2q43VhyS\nmZCp3cAJut73fuxeqAOLd2h/qVR6ZI91biLVtXNrFeda53PpQv30z8u0Kbe1da6sY2ldZBf9vLFn\nrZ617mej55iLz3ml9v3Ur1W61qTmqKi0WCezTijjqLUcOqTN2z7VenvZqfeXfKhXlu3W2Q79NL63\n73lJ0tCOp7RqzSGdrijT4a2f6J31h1VYlK+yaM89tuYTa9t/Xtcfn3pDb6wqUO+qaWNNxyJg6frw\n5TRPEMNKJyPL8pS2do0nPcs9or0bPtSfH31PK096nvBH9piq/7lrki64nJvy+FdK1MntK7TxhHUt\nFx7Tpm3HVZ5nvX7wuv7yzgmNnD1U7Z1v1noddirWgfc361PrKxUnDmjNwTydy96jFW++qie2ttY1\nE87396zx/4oboKExTr5g3f2Ht1jHcXuWTlt/1/60vUq1juXjj76mt52oe2SPaXrge/MvPJZGXc9v\ntcdsn5b+/VU9/dZyvbvxkPLLS61T68mX3nz5Wf3lw0wVmcS26wR9/WsB5EtevfK09y3rfFtpT4GV\nHhhxY2fq+1N8gzuR6nhipxbusHKfwiLl2Yl7si6/9bPVpwO17ndHlR1YpQ9Mn7iKU9qxPkOlZdna\nvXKJ/v7cTsXNH30+MFvH66lBaZTP7zATGZSdPqI
V76/Vp7lndNpKp9e995J++e9dyrLT6Aj1ufwm\n3Tutq+cBjK294k5s1eLUs1bel6fd6z9Veaf2SrTKILnpG/Ts40/q0eXHVWTuS6v8UlFNOalu5agm\n/F11OvY1lessQch/6n69RKq/tbJnhZUOlp7T6fTNet3Oz/Os39TaKjvt16aP12npu2/pr4+9qudX\nHlT8RWUpn3S061A/f4vXFr34i3f07AcfakVajnVvlinB3p81eu1fL+hfG8/KpLRxo2fp3vkDrKuy\n7up0bgLJO7oMVu+zzjmwyrm71ljnYP8Jncm28gUrPduy8h398U/va80pT96QMPpaPWylJxeVQup0\nPdTt+DXofvXZd3O/VhTnWOfVua6OZ9j788jfVml3kefrCaMX6Hu3DLhof+r6N1R/PhpYb6nvNdTY\n9RbbDi38w0ItXLFey1c5y/ZD2nc83yp7WkoKdGD3tvOffVqkEWP6BHSdA3AXWoDVS4wuu/uLuqmH\n5zFfccZm/eFXZvp9M7X3Ui3PKlXCwNn6f7dU0zKmRxf1MU9E7JlTPtTvf/+oMxXxh1p6wLS+ilD7\nkQt0/xcu7tbT/cqv6xfW/+sZEL5UR7as0tNPPat7rZ//2q/+rcf/s0rLUo+rwMrjYhLjrepaAE6c\ndlpbGOUqOHNWObk1L7sz/LSv8GnZEKjePTtbxULrdx9J08L//Fs/NPv14LN6/L00HSmxvhDdTdd8\n7WZd49sVytec4ZWtIWyRfTViqrNeKVlzR/tGxKy66CVDqmkSbsRo7B1f0w8v6+bpZmYV3vdY5+1x\nq/DuOeev6vHX12ndvjNWccA6d+2cx12+Ykbp6/ffqmt6O5McnMnU0tcX6Zf2dfOoHnxqkZ77YI9S\nT1nZdWS8fGb39mOg7vzuNKtQYV17Zhrq/5hppL3XjiWyvS796tf8DoZeK/vv/IzmeSdjsP7OFe8t\ncaaqXqKFWzzXlqKTNe/O/9ZPrvTXEs/HnDGafcFFmKxZU8c56/4E4Vg3lZgZ+s7/TFMf515M//g1\nz/VqzufLO6zrNVbDr/uy7gzaUEsVKnMqtV0Ge2dtcpahXZQU7zlnxaf2a+Fjf9eTVcZyiRnzOT10\n90T7ujH3d+6BLXru2fP32O+ta+itbUd00rrPIq3jWl1YM+hGLND/W2DOt5OemWvt9S3ac8YJfnVM\n0Xfu/5zGVr2cm/z4G8m65vrJzu+0jmHaOvvvfe6D/dp3Ml81zSB/sXG66bohViXCsPZ9nXWdm3ts\n3RHtO+UMblZHJl944AbvRCHW/5W6zjq3zvTy1rG00xVzvwyx8qWf+TmWQdHNSr9N4uuZsfc5n3zp\nuZWZyrWu3ch2A/XVu2+sIa2tyQxNHO37h/tMbuIjeXaVtDxpgMakOOv1NPG62ZrsJDXFWTvsY/v0\nezuVeuiMzno2N4vxt3xZN5k8pSRba5102nus7XRiymf8Tl4z6HNf0Tc9fUWtNH6vnnPKIF/71Wta\nlGZde+ae+8bEwIOUfjTl72q44OQ/db5eul2un/xggca3c8qSdn7+qh580KRjT+qXJj1YtV8Hi6y0\nMLqtEtvZX6uH7upjyqv2bKxWGlu5P29ZeXq2tS/WnveeqO/efrmVwjU3zzn4znTvxBnWOVjn5Av2\ncUzzHA9Fqcf0G/X7evVSaB5dL/0v3T0+3n7gbV9Xzv7YZVtLwsBL9UCj7U8D6y0NuIYatd5i7Nqh\n91bt1MfrfZYd1v/nfKycQxd+tmyH6IwJtGxh1wIs6vQxZVlVsz49emn0rJHqX48QflH2URW2SVaf\nfv00afLgagpeETqddVyt2ndWn4GDNLfq04LIrho/5xINjihVzlmrMFRSrnMxCerSa5CuveU2/eTO\n6ep35qgOlMaqT+8+mjS+f2XLAMX11+y5w6yKaLkKigpVWF6u0vIItWsXp+59R+uGL39eP/68VSC0\nM4uqItVx+BR
dN6e3Olu14cJizxOO0uLWimsfr4ROPTR+/Bh97vN36HufGyPv0Nd1ktRKOlWmxK7W\nPveo2zJh4iyN71+lqtwpWbFnDynzZGt1GT9Dt1w3oM6Fqq6jp+gK07SrvFhFZ63jUlGiVm2sgl+H\nTpowY66+/YMv65qBNVXNuyq+PEvFcdb5tf6+wRMm6roxPS560hPXpVxFWeeUbO9HP8276goNqXG6\nOOuaGz9T145NVvw565zlm1Z651R4LtIqBMcpqbt1jidN1W133K7vVtclsG0vTZo/XVN7tlKrgmLl\nWSWGc9b+VUSZgJf1/w8eotmXz9O3/ucrutrP00rfJ3tTv/1VfXd+fyVkH9HRM2UqKWuttu06asjU\nqbrnrq/q1jGVV1vg2vbVtCuna2y7QpUXltiVqVZlFYpKSFDbzj00beocfesHd+iGYYk+rQqq00Md\nI0/qTFR7zzWTMlG3XtG3luBKw451MNKISpG5Ou5cJ8NGTNa4fhf+Z5HdRmr+hC5qU3Zap0+VqtgU\n4Nq1V/+USfrqPXfqm7P76MzRgypN6Kz+g0Zo0sAq56XwpDILo9WjRw+NHDtOw6t5qlu85ll954md\nyrXSpT4L7tDf7rtGl02boFne5dJZuvn6OZoZe0AfbvU8ad1f2k63XtA6xjqyvUfr6vk+ac85qaLo\nnPU3W+c2sZNGjk7RdTf9l35wz2Ua7PxMpVqORWB8n7wP1w+ta/7aS1opJyNbuSVlKo2IVftuPXXZ\n/Bv03W9fq3HVXM5Ndfx9md956bBWyj92Riet+y62TYxirXT3ymuv1dU+aWFdrsO2g0ZrcpdinT5p\nVYyt69v8Xx26D9Jnb7hBI3yaaNX+f0Wq28hpus4+Fvnn75lWMUpq285KW0bq2i9a+cptU9XHDoD6\nUdfzW+0xi9Og6VN90rcKlfukG/a5/O6Nmu4/Y6uTHm2KdbyojfW7rbSk3yV2C4XeVf/MuI6KyHfy\nbut7KTMv042DauhzWZf9bjtA08YkqlXuKWXnW+fDnPN2Vp509TzdNtyndVUdr6cGpVG+LVvGfVXf\nu8dKq2NO6uCxfBWVlisyPkG9+nnKET+4eZT873lbDZk51U7jC0/n66zvPXftLXrwniuVEnFCh3Jb\nq7O1L6OmDascQ9CrbuUoo4l+V52OfS3lOlvD8h9bXa8XS2THIZq7YJJSOpToXGV+W6rWsVa5J76D\nBo+8RPOuv0Y/uucGTbyoQFeonKOFirPKbIOHjtD0YdW1rOqssXN8f0eFle63UqxVZuxopTfzP/NZ\nPXT3XA2qx1OPOp2bgPOOthoweY6VJ8SovNRTFjyncpVFxlllpSQNHjdat95xl75/zRAnqOJHna6H\nuh2/Bt2vvq3fBs3QD751k6Zbaf7xzGzl+9QbzDn432/MdB6uXKyuf0ON56Mh9ZYGXUONWG8xOkkl\nznn01k1qXEaOtOoFtAADWrJW5yzOOoAQtuj/7taftpi1Ifrvl/9b19hb4X7F1rn/nnXuy6XEsXrg\nsTv8zKDptU6//+ozesc0Reo9W489cvPFgayQsEQP3PyGPjGro6/T0h9fbm8FUIt3/qR5//DMbTn5\nK4/p4SvtVQChaNsL+srDq+yHPT0X3KV/3BHUJskAgHqgCyQAhLQ9OnrM0x1QsbFKrDb4ZcnO0hHv\n2Nedk0I0+AUAAAAATY8AGACEtEHq3tUZVe/YJi2sbgbHvN168U/LtdV+E6uZE6fbawAAAAAAAmAA\nEOJidPmV3oHXC/XJv36lG7/8gL718ON66Nee5cf3/0jX3fmont5lZokyg19frS/NqampGAAAAAC0\nLATAACDEmRkc//DdeZrUI9oeQLk4P0ep287ParRu32nPDEodemjeF+7SX78zO2xmxgIAAACApsAg\n+ECYOLrhfa09WipFd9akBWMJcLRQeVkHlLorVYcumE8/Tr1Gj9DI3kmeqeNDX
qbWv7dVh0ukmO6j\ndOX4bs52ADXK3KR31h9XsaLUc8JlmsCtA4Su4jR9tHi/TlmrHQbP0eyhtMwGgOZGAAwAAAAAAACu\nRhdIAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAA\nAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAA\nAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALga\nATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALhaq3MWZx0+8gpLKl/zCovtdQAAAAAA0LIkxMZY\nS7S9IHy16ACYCW7tPZKt9KM5OpZzVlv2HrODXXlFJcp3AmAAAAAAAABeXZISNLB7srpar12T2mpA\n9ySNHtjN+RShqsUFwLbszdTW9Eyt3J6hY6fO2oGuuDbR6pgYr2Rr6ZiYoOT28YqLiba3x7WJcn4S\nAAAAAAC0NAVFpdbiaSRTUFxir6cdPK7s03k6eTrf3m5MG9FHA3skaf6EwXZwDKGlRQTATCuvVTsO\nauX2g3ZrLxPY6t2lgwb37qyxg3vZgS+zDQAAAAAAIBCbPz2sjKwcOyiWlpFlbxs1oJsWTByk+RMG\n2e/R/FwdADOtvZ5ZvNlu8WUCXEN6d9Z1M1IIeAEAAAAAgEaxdH2qNqcdrgyG3TjzEmsZQauwZubK\nAJhp8fXe+j16dcVOu6XX1JT+mjayP0EvAAAAAADQJExXyVXb9+mlpRvt9yYQdvf1k+11ND1XBcDM\noPb/XLxJi9ftkdmpa2eMJPAFAAAAAACa1dNvrdGqbfsU3yZa99wwma6RzcAVATAT+Hpm8Sa9R+AL\nAAAAAACEoENZp+zukSYQZmaOvGnWCAJhTSjsA2Cmu+MDTy/T2cISO+g1d+JQe0ZHAAAAAACAUOMb\nCDOD5f/+7iudT9CYwjoAZga5N8EvM6j97VdPUa8uHZxPAAAAAAAAQpcJgJmukaY12MO3z2OQ/EYW\ntgGwhct32t0ee3buoHtumkl3RwAAAAAAEFZMa7BHFy5XYXGpHr59rkYP7OZ8gmALuwCYGe/r0dc/\n0ZL1ezRvwlB7vC+CXwAAAAAAIByZ2SJ//fwyOxhmZok0s0Ui+MIqAGaCX798cYXd9fGWueM0LaW/\n8wkAAAAAAED4emnpRntsMBMAM4EwBFfYBMBM8OvO375mD3ZvxvsaM7in8wkAAAAAAED4e+PjbXrz\n4+0Mjt8IWjuvIc3b8ssEv/73jisJfgEAAAAAANe5bkaKvv+FudqanqlXVux0tiIYwiIAZsb8Mt0e\nTcuvjonxzlYAAAAAAAB3GdK7iz3m+WOvf6JVOw46W9FQIR8AM7M9mgHvzWD3tPwCAAAAAABud8u8\ncXYM5IGnl2nvkWxnKxoipANgi9fv0TOLN9mRT7MAAAAAAAC0BKYXXK8uHXTv4+/YQ0OhYUJ2EHwT\n4bz3sXc0uHdn+6THtYl2PgmMmU40LeO4MrJylH0639navJIT4zW0Txf16tyh3vtVV2b/Dx0/Ze3/\nKWXn5quguPlvmriYaPsm7m0t5rUxmf035z01Iytk9t/w7r9p2tpUfK8FM71uKDD3Qu8uSfbxaOzu\nzVwLaAjv9eO9f0Ll+vHmJ1w/CCXe/Cb1oJXehlDZy+Q3Q6xyZUsteyW3t9ILK61oqrLXSWtJs66B\nlpxehmre0ZRlD3MdmH1vqXUx1M7cJy25vl4X5hjd//gb6paUoCfuu8HZivoIyQCYiWze+9jbKi0/\np+99fm69Lro0q5L7zLsblJWda7+PiIhUVEyMvd7cigrO39hJ7eL11WunBD0DMhnNayu2afeBLJWU\nlioi0tr/6NDY//LycpUWFznvZHdvNQP9B
ZNJJF5dvk1rtu9TUUlo7b/hew1MS+lvB3kbi7kWnlq0\nxi6MG7HRkUpKCI3MvrCkXDl5xc47zxMOczyCyVwLLy3doM2fHrYKnqUhvf+mpatp6ozQYa6fN6y0\ndLWVloTa9WMcySlw1mQ3kb9l3njGykSzMWWvF5dssPIbT9krNjrCul9CI+81aa1Jcw1TsTHljsbI\nb55etNquyLXE/Mbsv5m1bNW2dHv/jR5JcfZrKGjK9DLcyqG3zB0X9Er+qm379IKVHpj9N0K1Ljag\nZ2d9cf74Rg8M42Itvb4eKHO8fv3cMs2fMEj33zrT2YpAhWQAzHR7fGbxZnvmg0AvNG+G8+HGNLVJ\nSFR8ck+1johQVJvQqhCUlxartChfeTmZKsk/E9SCyNL1qXr1o21SZLRiEpLUpm0HRUTFWMch0vlG\naCg6m6Nia9/zrWPQrWN7ffuzs4NSEDGJw9/fWG3PGhrXoVtI77+5Dk4fO6CY6Ch9/vLxQS2Meyvu\nq7anKyk+WjOHdrILombdVEpChSmU7806qx2HTmtderZ1z3fW7ddMDdq18NSbq61jUaxZQztrRO/2\nIbf/xvZDuVbFpESvbzisuJgoe/8Z87D5mevnxSXrdTI337p+OoXs9bM3K8+u2K1IPW5fR40RSAZq\n4pvf9OgQa+c3JvBl1kNJ1fzG3CfBegBlKvvmYcu5cxVhk9/06txed1j5TTAq/p6Hbaut9DIvTNLL\nE3ZAsDHSS5N3mEr9ydN5YVEOPXvisKIiWgWtHGrSgycXrdHWPVaZpn0nxSZ2tuphcSG3/966mNl/\n88pDyKZDfb3+TD3/paUb9cJPPquuSQnOVgQi5AJgpvXXrT/7t0YP6hlwocTcTH96ebn2ZeYooWNP\nxSd1cz4JbSYAZIIgg3p11g++OM/ZWj+vfLRN76zebu97204mMQmtzMYfk7DkHEqzE5lf3X19gwIf\nppXPowuX24lpYrcBdoEj1FWUlyn3aLpdELnnpllBC3yYp/DmKez8lK6aOCA55Aqh/piC6YurD6qo\ntMK+FhryNNKbQQzskqBbp/YNqVY71TGVs6c/SrePQzCvBQTOVGD+bOUnPTq00fXje4ZcRd4fc/2Y\nSq2p2BMEQ1N69OWP7O6O4ZTfLN993L5fTJnD5DcN4c1vzL6b9CIc9v/IqUK9uOqA/frgHVc2KAhm\ngl+/fm6pOsRHWfltn7BLL02F1lRsg8HkHX/493K1jolT2859Qq5C748ph57JOqCC3BMNLnuYutjP\n/7VUx0/lqW2XvnYALBx462Km4YVpgIHGQ329Yczx+39PvaPBvTrqZ7dzrdZHxIMWZz0kvPD+Vu06\neEJfv356wJXfN1fu1Ma0Q2rfY4hiEzs6W0NfdGxbRccl6nDGAXufB/So399uMt3nl2xQfHIPtevS\nW61ah/wkn7aIyGg7gyw6m609h45r5ugBzieBMQnCL59dqqi49tY1MMj+f8OBOU/mei0tKtDGXema\ndIlVYGhgM3QTCHxp2UZdPaa7Zg3rrKiI8LgWTJDKVCCW786yC9QTrWNRH2a8ib+9tlLTBnfUbTP6\nhUVlxDDnyez/UatC8sHmfZo9dpCiIsPjb3cTk5aY66dDXIRun9U/LIKnhrl+RvZqr1P5JXpr7aca\nO7iXEhNCvyKK8GaCP8vWp1lpbV87/QqX/KZvp3j7731vyxEVFpVqxIDuzieB8Q4zYPKbmyf1Dpv9\nbxcbpamDO2nHoVyt3nlQ8yYOcz4JjEkvTbfPCJXr7nmDwjK9XLwhPSjppTkWj72yUsWKssqhgxUZ\n3cb5JLSZcmibtklBKYf++/3NVj0uS0l9hqtNQntna+jz1sUOZey3xwrsltzO+QTB1tLr6w1l6gXm\n97+5coemjeijpHah0808XIRULn0sJ0+vrNipuROGBNwKyBRAllmFsNgO3RQTH36JlvmbTQTcPEE0\n+xIok
+ma5tatouOs/6erszV8mJZqid0Gav+RE3Zhuj5M8K+s/Jzadx8QFi3fqjJ/d7l1S5rumw3h\nLYyagp0JfoUbE6wyT5A37zliB/Lq46Ul6xUb1VrzU8LjqVJVZv9NN5qn31rjbEFTMmmQ6cYTLi05\nqvK2wPjzwuXOFqBxmIcNb67YZuc1Js8JNyZYY+5zc8/Xt+z14tL1SoqPCtv85vbZA5R9usAuf9aH\nOXZmgPdwTi/NdRCM9NIci+NW3mFafrXEcqgps5kubaZVTzi0fKvK1MVMIPCJN1fbaRuCr6XX14PF\ntPC3Wy+/uMLZgkCEVABs4YodMv0x69MM+b21u3WudURYBn+8Erv2VWR0TL0CQCbTMQMIJnbpG5aZ\nrmESFdMSzIwjEiiTUX2yY78SwqTbpz/m707o2Evph483KONd5QzWbQp14cpUpMyydN0uZ0vdmQzJ\nBM9MV5xwLIwb5u82lQlzX1MIa1qmQrtsXao9hk04dOOpjqdSm2+3DAYai+lmb0bSCNfgj2GCd2a8\nsjc+DrzsYSaXScs4EbbBH8MEfxZY58+cy/pYtTXdyq8T7eEGwtXd8wYHJb1cvDbVHvMrHIM/hm85\ntD6V+4+tayE+MSlsurT54w0C1vd+QM1aen09mMwkHulHc7Rqx0FnC+oqZAJge49ka/G6PXbrr/o0\nuzVjT0THdwjb4IeXGbR+n3UxB2p/Zo6iY+PDNtP1Mk9eCotLA670ezNq8/PhzAyUamz+9JD9Wh+H\njuXYFfdwLYx7mX3IyPLMChMI8yTaGNilrf0arrytKZrzCVNLZCpBJoDcPSm8uw6aSq1JA7z3A9AY\nMo5l24GPcM9vTAAnzSpHBsrcX2bfwzn4Y5gB6026F2h+Y76ffaZAI6zjF86CkV6aY1FcUmqVw8O7\nO5K3HJpaj2CgqYu0jg7veoipR0bGxNerLobatfT6ejCZsfrM2I2Pvv6JswV1FTIBMDPrY2xMVL1a\nf5kn9rlnC8I++GOYBMFUwAJluo+2jgz9Ad9rEx3naQ4baEHUzLQTFdMmLAa9r4k5/2YfsnPr3+rH\nZC4Du4Z38McwAYj6BEPNE1wTPDMF2nBmCuOmVUJGFoWwpuQt9Id7ANUw90HagWPOOyD4TKU/3NNa\nY0CXBDsAZMqTgUi17q9wbinqZWZrNAINenjz5x4dwn8Mmoaml95j5y3HhitvOfRAZmBlD5MWnMkz\ndbHwvxZMffLTjOPOOwQL9fXgM5NWZOXkafH6Pc4W1EVIBMBM8MY035ua0r9erb+8F2C4Bz8Mk3GU\nlAb+FC7z5ClX7L+d8VqLCWgFwgTMWoXJoPe1MV1BDxyrf9DDPI3t7oICuTcAEWhruILCYvVIcseA\nkGZcGdOiD03HpL3m+gn3Fi2G2Y80CvFoJOZeyT5TaAePwp03gGO6NAbCfN8N+Y1J78x+BFr2NGU1\n86DGDUFQs/8ZAZ5/X+bYtYmLt8ux4c70pjh8PLAW+N7Wc24IbnjrYgxBEVzU14PPjANmZi795+JN\nzhbURUgEwEzwywS+pqXUc/a/Ys8Tu9YR4V9hqW+iYCLqpj+yG7SOjLRnZApEgfX91q3Dv9DhlR/g\nU2ivQJ9eh7L6BiBOni5w1sJfbHSk3SoBqI/YqPDPExH6TDrlFqYsEaiWfJ81pLV6qAnGeawwAxm7\nRMW5+u1Mq9bhfz+0cuoThS4qU4cC6uuNwwyIb1qBmQZFqJuQCICt3H5AQ3p3DnjmRwAAAAAAgJbG\njAVmLF7/qf2K2jV7AMwMfr81/Vi9W38BAAAAAAC0JJ5edP31yoqdzhbUptkDYCb4ZZgWYAAAAAAA\nAKjdmMG9lFdYQjfIOmr2ANiWvZnq3aVDvQa/BwAAAAAAaIm8DYnMuOqoXUh0gTSzFwAAAAAAAKBu\nTEMiM5a6aViE2jVrAMwEv7JO5WlIHwJgAAAAAAAAgTDdILemEwCri2Y
NgKUfzbFfe3XpYL8CAAAA\nAACgbkyDIjMOmFlQs2YNgJkTZJrrmQUAAAAAAAB1542nHMs5a7+ies0aADP9VJMTE5x3AAAAAAAA\nqKtkJwC2Nf2Y/YrqNe8YYEez1bE9rb8AAAAAAAACZQbCN0teYbGzBdVp1gCYzlknKybaeQMAAAAA\nAIBAmG6Qx3LynHeoTvOOAVZUotg2Uc47AAAAAAAABCI2xrQAYxD82jRrACzfOkGmqR4AAAAAAADQ\nWJq3C6SFLpAAAAAAAAD1E9cmihZgddBsATBODgAAAAAAQMN4BsEnxlKbZm8BBgAAAAAAADQmAmAA\nAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1\nAmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAA\nAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAA\nAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUC\nYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAA\ncDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAA\nAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJg\nAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABw\nNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAA\nAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAA\nAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1AmAAAAAAAABwNQJgAAAAAAAAcDUCYAAAAAAAAHA1\nAmAAqlV6NluZWadV6LxvGTK0fOGr+sUfn9cjz67UpjPOZgAAAABA2CIABjiKdvxTx5c+pqxlT+hU\n+nFnawu28T395M+v6ZGnXtYDf1ulA85m9zul9E9zlJ1fqMxDR3Qk29kM1FmqFj76T93/8yf1w9+9\npQ+ynM0AAFTjXNFRFWdsUMGRdJWXOhtbgqIdOrvuBZ346F/K2bxOJc5mwB/qa2goAmDwUaTyM1kq\nK2pJue5554rzda7CWikvsf4VeDa2ZBUVKndWVXHu/DqAWhTrzJky+54pLcpXPskJUK3CUyeUeTxP\nLbPkYSk8rcysbJ1psQfAKNWZ49Z1cKrIed8yFe18Tbm71+rsjvd0am/Leeyo/BMqPnVKFcVnVXo8\nk/ImakR9DQ1FAAyOQ8pb+ZROrlmo7OVPKjfD2QwAANAIDix7WQ/85Q098uRL+snL252tLUjGKv3m\n9y/rkade00O/e09rnc0tzdqXn9VDT1rXwV+e02+WHXG2AgAQfATAHOdO7VLezreUu+1DFRzPcba2\nJOU6d85ZVYV1QJxVAACARlBeYR7je5SXn19vMcrPmRKXh7X/LbXli++5r6ig/Q8AoPEQAHOUHtuo\n/MMHVZy5S2cPtaBmxwAAAAAAAC5HAAwAAAAAAACuRgAMAAAAAAAArkYADAAAAAAAAK5GAAwAAAAA\nAACuRgAMAAAgxBw6fNRZAwAAQDAQAAMAACFj0VuLtWXrDuddy/XI7x4nCAYAABBEBMAAAEBIMMGv\nRW8vcd61bAWFhfrNI48SBAMAAAg
SAmAAAKDZEfy6WFFRMUEwAACAICEABgAAmhXBr+qVlJQSBAMA\nAAgCAmAAAKDZ+At+vf/BCv375Tcabfnlr/+ke+97oEHL/z7060ZdvCoqKgiCAQAABAEBMAAA0Cyq\na/m1Z+9+ffjRykZb9h/IUH5+QYOWzMysRl18EQQDAABoOAJgAACgydXU7dEEfBpzOXfunPObwof5\nuwmCAQAA1B8BMAAA0KQY86t+CIIBAADUHwEwAADQZAh+NYwJghUXl+g3jzym7OwcZysAAABqQwAM\nAFDFLi365izNuOJ2LUp1NtWosb8PwJfpwjlt6kQlJyc5WwAAAFAbAmAAgAulvqe/vrxORza8qL8u\n3eVsrEFjfx+ucs3V83XNVZc771Afl106U5+9+TrnHQDAdQpO6mh2vvOmDhr7+4BLEAADgqk0V2U5\nqSo+kaWyolJnowucK9XZrBPKrHHJ1pnqdrn0qLas3ayV2w6r0NkUHvL1yRvP66E/1rw8snif830j\nVS/97knd9/MLl5+8ttv5PAx07aXe0WYlXiOHDbA31aixv98ClZ7NVuapIuedryLlZJ0Os/voYgTB\n6o/gFwC4XO7LumtQL00f2kN3vZbrbKxBY3/fTcrPquxMriqct74qCrJU7qLqG/wjAAb4UZ7xtrIW\nP1bz8v47VlXU66yKd72k4x89r5xNH+v0loXK/vhfyj3okvFZctP13FNv6JFalle3Od+/QKl2LP5I\nz72/Ua+/tU2bna3hoULFBYU6k1/zknnqrPN9o1gF1oXRfcQ4XX/Z+eWm8QOdz8NA+5v1l/2Z2nzw\npH45N8bZWIPG/n5YO6vlL14cEK26XBggzdB7z72mR/7ygv740TFnm2P/Bj3x1Htatt95H8aqC4K1\natVKrVu3btTF/I5wRPALaBnO5Weo4MCGmpdD+/1W4o3ywx8qd/NbOnMw09kSLjKV99HTOlHLknvA\np9x1do1yll5cTs/Zc9z5Qhg6dkgZJWalWEez6rAfjf39EBV4fU0q+fR1Za95XifXrLRqKL4OKH/j\nQp3ae8B5D7ciAAa/zpVlqexMLYup5V+gSGXHtqjg0w91Nn2Lik/7a73gImWlOj+Rfq5KS9qr7bg7\n1Hnunep86XWKTyhR8aerwr6lRt1VqMJfSWz/Wr29LULD+7d1NoS6DureOcJZr5vY2IuDOO27jdH0\nSeeX0b2inE/CRHR7JcY563XR2N93uaJiuyR6gQjrMjy0YZNWn3E2uJC/IJgZ32rypHG66oq5jbZc\nOme6pkyZUO9lzuzp+u6932zUpSrXB7/KS/y0LK661ND6sfSkdm3crC2HwvXxfYWK/O7zhUtO1QNQ\nmKUd1n4vXrJSH2zcoyNhXuioKCnwu98XLMfzqlRcS3Vm/y6t/HilFn24WWvTTlb5PPxUZG/X2bS1\nNS+7tqnY+f4F8jfoTFqqSk4eVOHJcAuAlauiuLDWpaww2/m+paTIKn/GKqbvJLUd4l2mKaF7Z+cL\nYWjoAn3jttmasuBHuufKwc7GGjT298PZBfU1R+vW0pldyj/o+wAbLUUrq6B50TXRFPIKS3Ttj5/V\n7VdP0bSU/s7W+knLyNKvn1umTv1TFNUm3tkamJLdz+pUhlPL6DhFXcaN9aw3sdKifJ3Yt00P3nGl\nenXp4Gyt3R0/f16JXfsqPqmbsyVQB3T247dVUOC8rYv4EUqaPkueav0e6+ffV0FxhFpHWrW2MiuD\nKo9WzNAb1b5PYIP0nkjfrNkpfXXLvHHOltqZ83/wVKmSeg1xtgSu/PC7yt2bWe3TtIu0GaTEyTNk\n9+Tyw3NNRSl+6i1KCCD2k3t0r9pHlerhO69yttRdQVGJvvW7l3Xr1D6aOCDZ2VpPpUe1csl27atz\nYTpOwy+dofEXnO5jWvL0O1rX2aq4RW/U3za01Wd+dKWmOp/W5t5nN9nXwbwJQ50ttXvwyXfUrW1r\
n+xg0j616+ufrpXlf1e0TnE319PRH+1TSKlrf/8I8Zwsa29NvrdGBw8d031V1v+YulqsNbyzTOwf8\nVk386jByhr51aW/nXYYW/WWJdvUaqMSde3Xyktm6/+qBnrR2/0r96sUjGn7rZ3VNP/vL1Vq8NVMr\nrIrgn7/7X86W0FV1ZshvfuMrGj1qhPOuZfraXd911kIz+HUo65QefOod3X35YA3skuBsDUz6kn/r\nLxsCqYAkas4XbtZV3lvFkb1zpV5a+qn2F1So0/jLdf/lVb5Qi5y8Ej382g7dc9MsjRnc09lau+8/\n9pom9EnU/FH1LHvZ93OqTjhv66LLxCv0vbk9PG/SPtYv30jT6chYxUaUqzC/RKVtuuszX7by2QDn\nR/jt26nq27OrXS6vq5eWbtTG3fv1wA2XOFvqZ/VLT+pV35EEahPRUzffv0CT7DcntPKFd/X6YSm5\nY3vFF+ToyJkKdRx/mb59eR+njFo7k16uP3hav777BmdLYEzesX5Pljr2S3G2BK5w01905kSdS6GW\nnmo3/zrFOu88SlW09Z86U9xHbYr3qDAu8DrN6WMHlBhRGFA5dNW2ffYx6DpkglpHRDpb66hoi06v\nWaOikrrue2tFDbhRSQOdAFf2hzq54bBixn9RbRtY9DWK888o++DOgOtiqFkw6utGQ+trdv3sZBfF\ntklXYcEAtZ9+uWLs596eunBxx6vUcVhf+7vVqW99vbGZe/DYydN64r7rnS3whxZg/uTttZsN17ys\nVHG58/0LlKrs6Ic6teJpnUoP46a3dXGu4oKIekSXGUqec6c6zb5dnebMtRKWEhXv/eSCZqehLKLn\nFUo2f3tdlxqCX1ZxWmX5hVai21FR4dLwqaqo7pp+1XzddlNdl6rBL6tMsnaDVuR217yZA1tcYrPn\nY2d8sGcX65VPwm3sMzRMe42/7ib99H8+X+flfPDLR3R/zRvTVrk7t+nDo842l2JMsOrR7dFXhSqq\nlL2OrXxDv337iNpNHiyXt2OwVVxwAOI0+orr9dB3TDpymx66warMFR3VO8vDaMzJ+iivkO9RiOo9\nWvd+6zb98PZr9d/33KIbB0Uoa8N2rQ2zZmCxI69SYmULpjosI8epjfOzXueOfai8420VN2Swwqaj\nd5vRSpxzl7rMv7uOy13ng19ocYJTX4tSm/5DFVGUrvx9Li9g4SIEwPwpOqHi4wdrWfarpOqYgaUZ\nyt/wL+XsSlNpYaHKSwJpTtXc+ipuYIradO6jmDoubbr7PlkbpLjBl8g0/rJFDFGUeSDsr9mpa2Wq\n2IzJkP6hTq9+TXl5yYq/ZJbcPsJRtUp3671Vx9Vj0mRNaudsaxH6KGXGCF01bbguNRWy2DxtX/6e\nfv7SVvk02AfqZMCcFI2JytGKFVvl4p6QNoJgF3N78GvA+DGaO7yXUgbXcRneX0OrNNCK6z5CX/vW\nZ/XFyW0VYLuT5tdvhK6Y3Nf/vvpd+mriAJ/WZkPGacHIjpVlsahhSepuvfrrUh3KJkwbryl+97ea\nZVwfnW/v30mTpqeoR2UzqDZKTjRhoQqrLO7ZEjaieqtN3/GKq+vSvWeVINcB5e9Nl3pMVHyis6nF\nyFPhNjM+mFUP2/Cu8o6eaEH1D9RL8nQldI20qu/rVejyUXtwIQJgjtZRMQE+KTGD6DqrjpK9q1QY\nmaIOsybW0DIodEV0m6HEMVerfR2XxAH9azhmZz1PaaNjW9BFlqeyU8dUciZbZYWlOld2RqUn9l7w\nlLLlKNWe97dpW/vhumZae2dbS9Fe42dMdsb+mqBrbrpRX53QXoX7UrXaBQOXo4lFDdOCSZ1Vum+n\nln3qbHMxgmDntYiWX0mDteB6fy2Kq1mun6BBVfq0tes/QP0u7AMWRtor5dK5/vfV7zJXcwbU0Knv\nTKk9JlTbhPp3L2oOUb1G60a/+1vNMv8SVdvTrfSgdu3PV0TXz
hrUoh6+SeXpG1RQ3EcJQ2oqn7tQ\n+z6K6zdaCf1SFN+3vyIrTqhg+0Ll7N5HEAw1iFKbgSmKrjis/PQ9zja0BATAHJEDb1DHqTcpeUod\nl6nXKr5Kd6/oYbeq4+gJiqrrgANulr1BhadkFUAGt6AWUIMUbwcHrevjsjuU1DtWpRkf6cz+FjjA\nYtZmvbe1XKPH9FWEM2jt2RJTDPEM8nvRIL4u17NfV3XSWZ046WwAApA8bbSmdyjQ2rUbWkQrQhME\n69XTtGNp2ej2iEClf5KudLXV2EvCaNbhoEjX0oWL9a8XX9XP/vi+1scM1uevn6y6j+bmAkVbdPZA\njqIHzlCbwObxCX8R/RU3eIrTMm662k28Ve26RKjs8HYVtcyn0Kir+EmK79lO5Ue3Kj/f2QbXIwBW\nKUqt23ZRZLs6Lm3btqynK4Eo36ezu1NVET9c7QbXPIige0UpavBgRZsm+KdbYLOfE2d0vLxAG999\nS4889Ya9PLctz/rgmN6x1p9dG8iQv+Ev+/BJnVCSegY2LjPg6K0F0/oo+tAuvXcwkAGSw1dycoAj\neAMtXGn6Sr26OV9dxk/WglomyHCfBHXt1Vn9+3fXsC5tVZ71qV596xPtCfepIANQsn+7iiO7KvJc\nmgrMcBwHMlReZn1QfMJa36XSFhUIilKbDl2kijMqqzpcDVBF9OAxatM6SwV7Dztb4HYEwBBkOSrc\nYmaD7K62Y+YouqU8hSo/q/KiC0ta546dlCl7RMZ39WxoSUbM1cM/+qp+67PcNd7MBtBdn7HW/2d2\nJ8/3XOjAh2/pT298ok17TMu3/dr04Xv62+qTih00SJOt8hhQH1EpE3RprzJt3rBfTNoN4AI5W/Xs\nG6k602ecbg9g5kP36KKR9rADk3XjF2/WT+b3VPnhHXpnVQt62BYRo9bnTqrwwDbl28selZhCaEGG\ntb5XpS0oGGiG4Sg6k20VwJMVFYRZIeFyESOU0LeLVW/bpcLwGj4R9UQADEGUo6Kti3T2VILiR12p\n2PAagqJhTm9S7sf/UPa6Zco7sEH5u99Q9o49qojuo9hezFTTkvQY2FOdsw/ozTcW6ZGnPtQrW/PU\nZcJMffvmkWphw5EgqNprzvTB6lRiFeydLQCgnJ16/oWN2tNumG67cVT1Y2O1ILGju8g0gjtxquWM\nOxA9+L+qzH53mWLNXAAdxlnr1yqu6nSRLlK47QXl7t6g4pwsleWkqmDnazp7tFiRPYZfNEsm4E/E\ngPGKjTPjNzsb4GoEwBAkpSrZ/a7OHI+0Ch43KaFjC3v+mDRViSkjFRWVq+ID21SQU6qoHlPVYfrV\nngII1GngJbp+/jCfmZvcyQzme8vtt+jB+27Xb390u/7v2zfpjksHUylBAGLVuW8vXdK1g/Pe0W+S\nPjPTzBjXQz24oACUHtSil9dqW9Rg3f6laRdNENAinDmoLWknrVLoedlrD2mfVcXp2bVFjQLWYkV3\n6iXlbNOZTQuVvf4j5WefU/SQa5Q0tKUOw4KatI7vpphOnarMGtxXCcPGqU3nPoppRwHL7QiAIThy\nVutsRq7OtSpQ0Y5ndeIjMxWxZzmVftz5kptFKbLLFLUzA+CbJ2/TblLisDFMiOCjXf8Rmj6uH4Eg\noFadNOmK+bo6xXQb9hWlQdPNjHHTNZbmhIDt2Mq39NAfn7eWrTLzeOVsW26//8VbqZ4vuNiB5eu0\nPLtCrfMP6sXHzTHwLm/pgyznS25XmKWPX39dP/3dQv1l4WI9+fTz+u37x9Sq+3BdZg+90FL1UPxo\nq0x6yUjnvXuZWezbT7PK3nPvVpf531CnmZ9VYt+ejNUMvyJ7z1X7oSMuCoK06jhZiWOuVtseLTnd\naBkIgAVTaa7KzmRZS7Fn2t3yAs/7KmNDuVLiAMUPmaS2A8covq+Zhvj8EptUpRUDAABAECT3GahL\nJw+3lpG66rJxumbGCPv9j
P7uL3v0GDJS11v7fNU0s/++y0ANbCnzSHSZqG9883LdPK2nzIAT0V0G\n6Prrr9GPvjy5ZbaIq+RM7tWGJ7EA4IsAWBCVpr+t7DULrWWTzBh65Uc+9Lzfud3zBTeL6Kk29vTD\nFy9tOpD5AgCA4IvqNVTT7QHQqyzD3T/rSLX7PmmoeregoldU294aawbAv2m+brtqsiZZ5z7W+QwA\nAF8EwIIoaujn1WW+aX5bZRk31vkGAAAAAAAAmhoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAA\nuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAA\nAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEw\nAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4\nGgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAA\nAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAA\nAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALga\nATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAA\nALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAA\nAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoB\nMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAA\nuBoBMAAAAAAAALgaATAAAAAAAAC4GgEwAAAAAAAAuBoBMAAAAAAAgDBVUFSihNho5x2q02wBMO/J\nKSgusV8BAAAAAACAxtCsLcDiY6PtSCUAAAAAAAACV1BUqq5JCc47VKdZA2CtnFcAAAAAAAAErpCe\ndXXSrAGwLh3a6lDWKecdAAAAAAAAAmGGlmIMsNo1awBsYI8ku6keAAAAAAAAAmOGlTqZm6+BPZKd\nLahOswbATITy0HFagAEAAAAAAAQq+3S+/dqlA2OA1aZZA2CjB3azo5UMhA8AAAAAABCYDGdYqa5J\nbe1XVK9ZA2CjBnSzX2kFBgAAAAAAEJiTp/PUJSmBWSDroNm7QMZbS+rBLGcLAAAAAAAA6mLLp4c1\nsDvjf9VFswbAjGkj+jATJAAAAAAAQADMcFKmC+T0kX2cLahJswfAxgzsps2fHmYcMAAAAAAAgDpK\nyzhuv3qHl0LNQqIFmGGCYAAAAAAAAKjdqm3pGtA9ifG/6qjZA2BmHDBzsjZ/esjZAgAAAAAAgOqY\nXnSmBRjdH+uu2QNgxo0zR9gtwE6ezne2AAAAAAAAwB8T/DJBMG+vOtQuJAJg8ycMsl9pBQYAAAAA\nAFCz1U73x4E9mAGyrkIiAGa6QZog2Jsfb3e2AAAAAAAAoKq0jCxt+vSwFkwc7GxBXYREAMz40vyx\ndvO9Vdv2OVsAAAAAAADg640V29WlQ0JlbzrUTcgEwMxA+JdbJ++lZRvtQFggenXu4KyFv4rycvs1\ntk20/VpXbaKjnLXw18p5DUTH9vHOWvgz10BsTGDnv6rCEs91FM68+9AxMbAZTeLaRLli/73iAkwL\n0DBx1r2Xk1fsvAtvOfklSk50T9qI0OK9tgpLyuzXcFZYWr88w6QX9f3ZUGPyTbM/gXJLfmv
Sy/rs\nv5f52bISd+Qd5aWB74e3rHKuIvyvh3MVnjQt0LoYakZ9PbjM+OmmBdiXF4y1e9Oh7kImAGbcc/1k\nO/gVaFdIb6JbUnDGfg1nZcX5iomOsir9gVVaenXpoOL88N//0qJ8lRQXaUifLs6WujEFcTecf8Mc\ng/7dkpx3gTH3Qq/OiUrPynO2hK8jpwrtV3NtB6J3lyTtzTrrvAtv5hgEGgBEw5i0x1To3FCpM/vQ\nq3N75x0QXCa/SW4Xp6M5nrQ6nB3JKbBfA81vhvTuUvmz4cykFSbwH/D+2+llmfWzgT24DkUNTS/N\nsagoL7OXcGfKoZf07eq8q5shvTvbr24IApoAYGJCXMB1MdSM+npwLV2XqpH9u9L6qx5CKgBmope3\nXT5GS9enBjwjZJfk9q4IAJl96FmPCHnfrkmuSFC8T53qE/QwhY76PLUKJd59CDQA6GtI766uCACZ\nSkVcTOCZi1sCGCb4ZSokDbkWEDhvId4bgA1ne7Py1Lsbg6Ki8XRsn+CKe8U8NOrVKTHg/CbOKre6\nYf+9+xBofnM+vQz/IKBJL4f27ea8C5z3WJQWhfexqG851AQ3TEDADXURUxcLtB6Cumnp9fVgMbES\n0/rrKwvGOlsQiJAKgBk3zRph92V98+Ntzpa6SenfVaVhnuiaTKe08Ey9Wv+YjMr++aLAAoehpuhs\njjq0jQ886OEUPApyT9iv4arwtOfvb0jGO7RvVzv4E86FcvP37zh8ul7HwXstbD+Ua7+Gq73HPEFM\nCmFNyxTiTeB1R5hfP+b6Ny0zaEGIxmTKHuaBSzi3APLml727Bl72Gmrtv/n5cM9vTHpXnwdOditA\n62fWp+c4W8KTN730tlCpD/OzZjgSU44NZw0ph5r7oSD3uPMuPJngX5mpi3WvX08M1Kyl19eDwfSW\nW7YuVVNH9NHogfUP2rdkIRcAM63ATF9WMxj+oaxTztbaXTtjpMqtCzLnUJqzJfzkHk1XZOtW9r4E\naszgnnYkOpz33wSvzHLDrBRnS92Zgse8CUN19sShsA0Cmkw378RhzRk3JOBCqC9zLZhm/E9/lO5s\nCT/r0rPtCsl1M+t3LYwZ1EOvbzgctpUys++Lt2Vq7viGXQuon2ut62757uN2i4BwZCrkL64+aN8H\nJj0AGovJd+PaxFjX2wFnS/gxaW1OfqnmTRzmbKk70wXS3GfmfjP3XTgy6ZxJ70y6Vx+3zhtvB5DC\nNQgYzPTyeqv8mp+TGbYtXBpaDv3c5ePt/8Mcg3B1+tgBuy5q0jYEX0uvrweDaf1VWFyqL88f42xB\noEIuAGaYvqwDuifr0YXL6zwgvqn03nPTLPvJSzg+fTGBH/N3f97KPOr7BOpbN8+yM57co3udLeHD\nRNPPZh3QqEE9NS2lv7M1MLfMG6duHduHZaJq9v90Zrqd6X6mHgHAqu65ebYd/Fm8NfwKIZ7gzzFN\nGznArlzUx+3XTA3bSpkpjJvgnelaVJ8AIBrOFHyH9O4UlkFkb2WuVavW9n0ANCZTXrnj2ql2EMU8\nuAg35m82wZ/rrIpMfVvbmvvM3G/mvgs33vxmSK9O9a7wm6BRuAYBg51emmPYvVN7nTocnuXQvOOe\n4E99y6EmaGaOgQkihePDaBO4M3WxL8yfUO+6GGpGfb1hTOOgZevTtGDiIA3swRAX9RXxoMVZDykT\nh/bUs0s3a9+R7DoHRLolt7MvjP3791sJebnaJIT+4L8mwzlzPENnrWXyiH66vgEVXnMjmu4u67al\nWTfnKXv/W0dEOp+GLpOYnDqUqujIVrr/C/MUFRnhfBK4lIE9tOSTnfb+R0bHWkuM80noMk8KTx/d\nI5UV6StXTVbvIHR58ybKb69PV3rWWY3s1V5RESEZ765kCqJvbz5qFcaP2MGfO6zCaH2vBfNzpjvL\nojWpdteOPp0S1C429GdKNZXIv3+QrlP5pXalsltyovM
JmtqYwb305sodWpF6XG2ta6dHUpzzSegy\nweOnl+/T0dwiuzIXjLQEqI0pd2Sfztc7Gw6oqLRcQ7u3cz4JXd785v2dxzWyfzfdevkE55PAmfym\nW8dEvfVJmp3fDO2eqNjo+pdjmoo3v8krKtc3bpihxIRY55PAjRjQ3a6ULd+d1eLTy8mX9NU7q3co\nP+eYWkdGK6pN6Lfi9pZDz5UUNrgcaq6FTWmHdeywCQi3Ukx86KcHlXUxp/Xb/Em0/mpMLb2+Xl9m\nfPSfP7NY3ZLa6v5bZyo6KvTzmVDV6pzFWQ85W/Zm6juPv2MHwG6/eoqztXamaeBLSzfamU6btkmK\njmtnrceFTDDI3ERmkEwzUGTh6eNqfa5Cn5mdErTmtiZR+eN/luvU2XzFJ3WzMx9zLCKiQiMY5N1/\nM4NGcd4pFeWdthOTYEXTzf7/7Y3VyjyZq7j2nex991wDoVEIMftvWuqZxTxFKLGOgXny/KUFE4I+\n3pPpSvzS0g06Z11js4Z2VvekWPXoEKekhNB4smUqIaYQaga8X78vx+6GMm1kf7vlU7CuhacWrdah\n47maNayzOsRHa2DXttYxqH9BP5jM/pup183+m0GYtx86rd5dO1iF8Wl0fQwBpgXy09b1s3nPETuI\nPKBLgl2xG2i9hgpz/5jJEsz1sy49pzJ4zNhxaGrespcJ/pj8ZoCT1oZKMMhffmNafk1LGRC0/ObP\nCz9S9ukCO7/pbu27SS9CLb8x40uaoM+6vSftlq7Bym9MemnKG6u272/x6aU5Fk8uWqOtew5XlkMj\nY+JDJhjUFOXQNz7eZs/q71sXC6VgmDkGph5m6iONURdD7Vp6fT0QJk35zfPLVFxSqifuu8FuqYn6\nC+kAmLF4/R796sUVdgAskK5xpiBiEt/Nnx52toQeM1uKecry1WunNkpl9+m31mjHvmM6nReaM9JE\nR0XZg6feOHtUo4xTYxLW5ZvT7UBYKPLu/6wxA+ygTzAK4P7YhdJlG7U57ZAKikudraHFDL5rCl1m\nDJbGuhZWbd2rQ8dPO1tCS2xMpN2KYtqoxr0WUD/m+tmclqG0jNCdZMOM+zdmSC+7YMb1g+YSTvmN\nGbsq2IEPw1ToNlnpRfaZ0Cx7+eY3jVGRMw/eVm3bS3ppCfVyqGGGDmmscqipi71o3Q9mtrpQ1dh1\nMdSM+nrdmGGhPs04rodvn8vA90EQ8gEw45cvrtCS9Xv0/S/MrdeYQKbJoLnBTp4OjQGNzU0UFxPd\nKAWv6piEJVT23xhqnUcT/GmqiprZf1MwLygOjUHRm3r/vcwxSLMS0FC5Fsx9YO4HroWmvxZQP6F2\n/Zj7p1dnK0/h+kGICbWylze/oezVcvPb5kwvW3rZw6QHaQezQupaaOq6GGpGfd0/b0s5E/yaNqKP\nsxUNERYBMOPex97RniPZ+v7n55JYAQAAAAAAV/IGvz4z8xLdc/1kZysaKrRHxfZhop7dkhL06+eX\n2dFhAAAAAAAANzHBr0Ufb9flEwYR/AqysGkBZuQVltgtwdKPZuuWeeMYqBAAAAAAAIQ901XajKVp\nxlM0Lb++PH8sg94HWVgFwLweff0Tvbpip66dMVLXzWj6aUgBAAAAAACCwQS/zCR2ZsD7e26YrPkT\nBjmfIJjCMgBmPLN4k7VstgfFN4PjAwAAAAAAhBMzxJOZ7bGwuFQ/+NxMBrxvRGEbADO27M3Udx5/\nxx4U/46rpzA4PgAAAAAACAtmlljT8suMd/7w7fPU1XpF4wnrAJix90i2fvqPZTqWk2ePCTZ34lB1\nTIx3PgUAAAAAAAgdaRlZevPj7Uo9mGW3+Lr/1pmM99UEwj4A5vXKip167PVP7HUTCDOD5AMAAAAA\nAIQC093RDHJvZnoc0D1JX14wli6PTcg1ATCvX724QovX71Fcm2jdMnecpqX0dz4BAAAAAABoWmaQ\ne9Pia9X2fWplvTe
BLzPQPa2+mpbrAmCG6RZpWoSZQJgZF8y0CCMQBgAAAAAAmsrJ0/lati61MvD1\npfljtWAiga/m4soAmJdvIMwwQTCzmJkjAQAAAAAAgsm09jKD2y9bn2oHwFq3kt3a68aZIxjkvpm5\nOgDmZQJhW9OPaeHyHco6lWdvGzO4p906bOzgXsweCQAAAAAAAmYCXoeOn7IHtN/y6WE76GW2jRrQ\nTdNH9qGrYwhpEQEwX1v2Zmpreqb1esx+9TIzR5pAmFnM+GFxMZ4LtGN7ZpQEAAAAAKClKigqtYNa\nBcUl9vvs3HxlZJ1S9pk8nbTWDRPkMkGvgT2S7IHtB/ZItrcjdLS4AFhVq3Yc1LGcPGs5q71HcuxX\nbysxAAAAAAAAr/jYaHXtkKCuSW3tLo3mddSArvYrLb1CW4sPgNUkr7DEWYqdLQAAAAAAoCVJiI2p\nDG4R5ApfBMAAAAAAAADgaq2dVwAAAAAAAMCVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDV\nCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAA\nAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEA\nAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUI\ngAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAA\nwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAA\nAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiAAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADA1QiA\nAQAAAAAAwNUIgAEAAAAAAMDVCIABAAAAAADAxaT/D0ZEYpy7mQ50AAAAAElFTkSuQmCC\n", + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [], + "image/png": { + "width": 700 + } + }, + "execution_count": 2 + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Io7rJZMkMEDm", + "colab_type": "text" + }, + "source": [ + "\n", + "\n", + "In the first phase, we start training with a small number of layers and train those for a fixed number of epochs. After that, we add another set of layers and freeze the parameters of the previous step's layers. We repeat this process until the desired depth is reached. In phase two, we perform additional optimization sweeps over larger subsets of the layers using the final circuit configuration from phase one. 
The parameters from this circuit give us a good starting point to optimize quarters, halves, or even the full circuit without initializing on a barren plateau.\n", + "\n", + "This kind of learning scheme can be used for various types of learning tasks and input data, so long as the QNN structure allows iteratively building the circuits. In this notebook we look at a simple example of classifying MNIST digits with randomly generated layers.\n", + "\n", + "\n", + "\n", + "[1] Layerwise learning for quantum neural networks, A. Skolik, J. R. McClean, M. Mohseni, P. van der Smagt, and M. Leib, in preparation.\n", + "\n", + "[2] Barren plateaus in quantum neural network training landscapes, J. R. McClean, S. Boixo, V. N. Smelyanskiy, R. Babbush, and H. Neven, Nature Communications 9 (2018)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gqx89K2caCR7", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade cirq==0.7.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "JuXxC5fbaGAS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "fyrqkto1aHQV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "SW1LRRSuY935", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import collections\n", + "import itertools\n", + "import random\n", + "\n", + "import cirq\n", + "import sympy\n", + "import numpy as np\n", + "import tensorflow_quantum as tfq\n", + "import tensorflow as tf\n", + "import matplotlib.pyplot as plt" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "KNVb3cr2QMPZ", + "colab_type": "text" + }, + "source": [ + "First, we 
need to create the layers we want to use in our circuit. We construct layers that apply a randomly chosen X, Y, or Z gate on each qubit, and a ladder of CZ gates that connect them. This is the same structure as used in [2]." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gChRsDMxZARo", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def create_layer(qubits, layer_id):\n", + " symbols = [sympy.Symbol(layer_id + '-' + str(i)) for i in range(len(qubits))]\n", + " gate_set = [cirq.rx, cirq.ry, cirq.rz]\n", + " gates = [random.choice(gate_set)(symbols[i])(q) for i, q in enumerate(qubits)]\n", + "\n", + " for control, target in zip(qubits, qubits[1:]):\n", + " gates.append(cirq.CZ(control, target))\n", + "\n", + " return gates, symbols" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d2B3UfdrQ4Cx", + "colab_type": "text" + }, + "source": [ + "We also need to prepare the training data. For simplicity, we borrow the training data and data input scheme from the MNIST classification example in the TFQ docs. Namely we downsample and flatten the images, such that we have vectors with binary entries. These bitstrings are then fed to the circuit by applying a layer of X gates to qubits that correspond to ones in the image vector." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zhhlWTikZ4qC", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def reduce_image(x):\n", + " x = tf.reshape(x, [1, 28, 28, 1])\n", + " x = tf.image.resize(x, [4, 4])\n", + " x = tf.reshape(x, [4, 4])\n", + " x = x / 255\n", + " return x.numpy()\n", + "\n", + "def remove_contradicting(xs, ys):\n", + " mapping = collections.defaultdict(set)\n", + " for x, y in zip(xs, ys):\n", + " mapping[str(x)].add(y)\n", + "\n", + " return zip(*((x, y) for x, y in zip(xs, ys) if len(mapping[str(x)]) == 1))\n", + "\n", + "def convert_to_circuit(image):\n", + " values = np.ndarray.flatten(image)\n", + " qubits = cirq.GridQubit.rect(1, len(values))\n", + " circuit = cirq.Circuit()\n", + "\n", + " for i, value in enumerate(values):\n", + " if value > 0.5:\n", + " circuit.append(cirq.X(qubits[i]))\n", + "\n", + " return circuit\n", + "\n", + "def convert_label(y):\n", + " if y == 3:\n", + " return 1.0\n", + " else:\n", + " return -1.0\n", + "\n", + "\n", + "(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n", + "\n", + "print(\"Number of original training examples:\", len(x_train))\n", + "print(\"Number of original test examples:\", len(x_train))\n", + "\n", + "x_train, y_train = zip(*((x, y) for x, y in zip(x_train, y_train) if y in [3, 6]))\n", + "x_test, y_test = zip(*((x, y) for x, y in zip(x_test, y_test) if y in [3, 6]))\n", + "\n", + "x_train = [reduce_image(x) for x in x_train]\n", + "x_test = [reduce_image(x) for x in x_test]\n", + "\n", + "x_train, y_train = remove_contradicting(x_train, y_train)\n", + "x_test, y_test = remove_contradicting(x_test, y_test)\n", + "\n", + "print(\"Number of filtered training examples:\", len(x_train))\n", + "print(\"Number of filtered test examples:\", len(x_test))\n", + "\n", + "x_train = [convert_to_circuit(x) for x in x_train]\n", + "x_test = [convert_to_circuit(x) for x in x_test]\n", + "\n", + "y_train = [convert_label(y) for y in y_train]\n", 
+ "y_test = [convert_label(y) for y in y_test]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "GHoO_pDUVKTT", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# increase for more accurate results\n", + "NUM_EXAMPLES = 128\n", + "x_train = x_train[:NUM_EXAMPLES]\n", + "y_train = y_train[:NUM_EXAMPLES]\n", + "\n", + "x_train = tfq.convert_to_tensor(x_train)\n", + "x_test = tfq.convert_to_tensor(x_test)\n", + "y_train = np.array(y_train)\n", + "y_test = np.array(y_test)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "RBnZIePfTDU4", + "colab_type": "text" + }, + "source": [ + "Now we will set up our training loop. We specify the number of qubits in the circuit, how many layer addition steps to perform, and how many layers to add in each step. The latter is a hyperparameter of our model that can be tuned for the learning task at hand. There is a trade-off between keeping the trained partitions as small as possible, but at the same time not too small to make significant progress on the learning task. You can play with the hyperparameters below to notice this difference." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "Q10lo6OmTNgp", + "colab_type": "code", + "colab": {} + }, + "source": [ + "n_qubits = 6\n", + "n_layer_steps = 5\n", + "n_layers_to_add = 2\n", + "data_qubits = cirq.GridQubit.rect(1, n_qubits)\n", + "readout = cirq.GridQubit(0, n_qubits-1)\n", + "\n", + "symbols = []\n", + "layers = []\n", + "weights = []\n", + "\n", + "training_history = []\n", + "\n", + "for layer_id in range(n_layer_steps):\n", + " print(\"\\nLayer:\", layer_id)\n", + " circuit = cirq.Circuit()\n", + " for i in range(n_layers_to_add):\n", + " layer, layer_symbols = create_layer(data_qubits, f'layer_{layer_id}_{i}')\n", + " layers.append(layer)\n", + " symbols.append(layer_symbols)\n", + "\n", + " circuit += layers\n", + "\n", + " # prepare the readout qubit\n", + " circuit.append(cirq.X(readout))\n", + " circuit.append(cirq.H(readout))\n", + " circuit.append(cirq.X(readout))\n", + " readout_op = cirq.Z(readout)\n", + "\n", + " # setup the Keras model\n", + " model = tf.keras.Sequential()\n", + " model.add(tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string))\n", + " model.add(\n", + " tfq.layers.PQC(\n", + " model_circuit=circuit,\n", + " operators=readout_op,\n", + " differentiator=tfq.differentiators.ParameterShift(),\n", + " initializer=tf.keras.initializers.Zeros))\n", + "\n", + " print(model.summary())\n", + "\n", + " model.compile(loss=tf.keras.losses.squared_hinge,\n", + " optimizer=tf.keras.optimizers.Adam(learning_rate=0.01))\n", + "\n", + " # set parameters to 0 for new layers\n", + " model.set_weights([np.pad(weights, (n_qubits*n_layers_to_add, 0))])\n", + "\n", + " model.fit(x_train,\n", + " y_train,\n", + " batch_size=128,\n", + " epochs=20,\n", + " verbose=2,\n", + " validation_data=(x_test, y_test))\n", + "\n", + " qnn_results = model.evaluate(x_test, y_test)\n", + " training_history.append(qnn_results)\n", + "\n", + " weights = model.get_weights()[0]" + ], + "execution_count": 0, + "outputs": [] + }, + { + 
"cell_type": "code", + "metadata": { + "id": "dTDPaUmNXhhQ", + "colab_type": "code", + "outputId": "d4c63990-af23-42d2-ec29-ab71bc1f5761", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 282 + } + }, + "source": [ + "plt.plot(training_history)" + ], + "execution_count": 0, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "[]" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 11 + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYAAAAD4CAYAAADlwTGnAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deXwU9f3H8dcnF+EOkAAhN4dAuCUC\nggjegCCKaLEe0J9HW2vb36/1gHqjFltttfaw9QTUeqFVQBRRoKKcQe5whZCQhCvcRyDn5/fHDnZN\nA1nIZifJfp6Pxz7YnfnO7HuG7H52vnOJqmKMMSb4hLgdwBhjjDusABhjTJCyAmCMMUHKCoAxxgQp\nKwDGGBOkwtwOcDaio6M1OTnZ7RjGGFOnrFy5cp+qxlQcXqcKQHJyMunp6W7HMMaYOkVEciobbl1A\nxhgTpKwAGGNMkLICYIwxQcoKgDHGBCkrAMYYE6R8KgAi8pqI7BWR9acZLyLygohkishaETnfa9x4\nEdnqPMZ7De8rIuucaV4QEan+4hhjjPGVr1sAU4FhZxg/HOjkPO4CXgQQkZbAo0B/oB/wqIi0cKZ5\nEbjTa7ozzd8YY4yf+VQAVPUr4MAZmowGpqvHUiBKRGKBq4B5qnpAVQ8C84BhzrhmqrpUPdejng5c\nW60lOYPPN+zmn8t21NTsjTGmTvLXPoA4INfrdZ4z7EzD8yoZ/l9E5C4RSReR9IKCgnMK9+G3+Twx\nO4Pdh0+e0/TGGFMf1fqdwKr6kqqmqWpaTMx/ncnskwev7kqZKk9/utHP6Ywxpu7yVwHIBxK8Xsc7\nw840PL6S4TUioWUj7hrcno9W72Rlzpl6sowxJnj4qwDMBG5zjgYaABxW1V3AXOBKEWnh7Py9Epjr\njDsiIgOco39uAz72U5ZK3X1JB9o2i+SxmRmUl9ttMI0xxtfDQN8GlgCdRSRPRG4XkZ+IyE+cJnOA\nLCATeBm4G0BVDwBPACucx2RnGE6bV5xptgGf+meRKtcoIoyJw7uwLv8wM1bmVT2BMcbUc1KXbgqf\nlpam1bkaqKoy9u9LyNl/nPn3DqVZZLgf0xljTO0kIitVNa3i8Fq/E9ifRITHRnVj//Fi/vzlVrfj\nGGOMq4KqAAD0iG/OjX0TeP2bbLYVHHM7jjHGuCboCgDAvVd1pmF4KE/OznA7ijHGuCYoC0BM0wb8\n4rJOLNhcwIJNe92OY4wxrgjKAgAwfmAy7aMb88TsDIpLy92OY4wxARe0BSAiLISHR6aSte840xZn\nux3HGGMCLmgLAMAlXVpzSecYXvhyKwVHi9yOY4wxARXUBQDg4ZGpnCwt45m5m9yOYowxARX0BaB9\nTBN+NCiF91fmsTbvkNtxjDEmYIK+AADcc2lHWjWO4PFZGdSlM6ONMaY6rAAAzSLDuf+qLqzMOcjH
\nq3e6HccYYwLCCoBjbN94esQ1Z8qnGzleVOp2HGOMqXFWABwhIcJj16Sy50gRLy7c5nYcY4ypcVYA\nvPRNasm1vdvx0qIsduwvdDuOMcbUKCsAFUwc3pWwEOGpOXadIGNM/WYFoIK2zSP52SUdmbthD99k\n7nM7jjHG1BgrAJW4/aIUElo2ZPKsDErL7DpBxpj6yddbQg4Tkc0ikikiEysZnyQiX4rIWhFZKCLx\nzvBLRGS11+OkiFzrjJsqItu9xvX276Kdu8jwUB4ckcrmPUd5a9kOt+MYY0yNqLIAiEgo8FdgOJAK\n3CQiqRWaPQtMV9WewGRgCoCqLlDV3qraG7gUKAQ+95ruvlPjVXV19RfHf67q1oZBHVvxx3lbOHi8\n2O04xhjjd75sAfQDMlU1S1WLgXeA0RXapALznecLKhkPMBb4VFXrxOE1IsIjI7txrKiUP87b4nYc\nY4zxO18KQByQ6/U6zxnmbQ0wxnl+HdBURFpVaDMOeLvCsKecbqPnRKSBj5kDpnPbptzSP5G3luWw\ncdcRt+MYY4xf+Wsn8L3AEBFZBQwB8oGyUyNFJBboAcz1mmYS0AW4AGgJPFDZjEXkLhFJF5H0goIC\nP8X13f9dcR7NG4bz+KwNdp0gY0y94ksByAcSvF7HO8O+o6o7VXWMqvYBHnSGeV9a80bgX6pa4jXN\nLvUoAl7H09X0X1T1JVVNU9W0mJgYnxbKn6IaRfCrKzuzNOsAn63fHfD3N8aYmuJLAVgBdBKRFBGJ\nwNOVM9O7gYhEi8ipeU0CXqswj5uo0P3jbBUgIgJcC6w/+/iBcdMFCXRp25QnP9nIyZKyqicwxpg6\noMoCoKqlwD14um82Au+p6gYRmSwi1zjNhgKbRWQL0AZ46tT0IpKMZwvi3xVm/ZaIrAPWAdHAk9Va\nkhoUFhrCo6O6kX/oBC99leV2HGOM8QupS/3aaWlpmp6e7tr73/3WSuZv2sv8Xw+lXVRD13IYY8zZ\nEJGVqppWcbidCXwWJg3viio8/andPtIYU/dZATgLCS0b8eOL2zNzzU5WZB9wO44xxlSLFYCz9JOh\nHYhtHsljMzdQVl53us+MMaYiKwBnqVFEGJNGdGXDziO8n55b9QTGGFNLWQE4B6N6xnJBcguembuZ\nwydKqp7AGGNqISsA50BEeHRUNw4UFvPCl1vdjmOMMefECsA56h7XnHEXJDBtcTaZe4+5HccYY86a\nFYBq+PWVnWkYEcoTszPsOkHGmDrHCkA1RDdpwC8v68S/txQwf9Net+MYY8xZsQJQTeMHJtMhpjFP\nzM6gqNSuE2SMqTusAFRTeGgIj4zqRvb+QqZ+k+12HGOM8ZkVAD8Ycl4Ml3VpzZ/nZ7L36Em34xhj\njE+sAPjJQyNTKSot4/efbXY7ijHG+MQKgJ+kRDfmfy5KYcbKPNbkHqp6AmOMcZkVAD+655KORDdp\nwGOzNlBu1wkyxtRyVgD8qGlkOA8M68yqHYf4aHV+1RMYY4yLrAD42fXnx9MrIYqnP93EsaJSt+MY\nY8xpWQHws5AQ4dFRqew9WsTfFmS6HccYY07LpwIgIsNEZLOIZIrIxErGJ4nIlyKyVkQWiki817gy\nEVntPGZ6DU8RkWXOPN91bjhfL5yf2IIxfeJ4ZdF2cvYfdzuOMcZUqsoCICKhwF+B4UAqcJOIpFZo\n9iwwXVV7ApOBKV7jTqhqb+dxjdfw3wHPqWpH4CBwezWWo9Z5YHgXwkKFJz/Z6HYUY4yplC9bAP2A\nTFXNUtVi4B1gdIU2qcB85/mCSsZ/j4gIcCkwwxk0DbjW19B1QZtmkdxzaUfmZexh0dYCt+MYY8x/\n8aUAxAHet77Kc4Z5WwOMcZ5fBzQVkVbO60gRSReRpSJy6ku+FXBIVU/tJa1sngCIyF3O9OkFBXXr\ni/R/BqWQ2LIRk2dlUFJW7nYcY4z5Hn/tBL4XGCIiq4AhQD5w
6spoSaqaBvwQeF5EOpzNjFX1JVVN\nU9W0mJgYP8UNjMjwUB66uitb9x7jzaU5bscxxpjv8aUA5AMJXq/jnWHfUdWdqjpGVfsADzrDDjn/\n5jv/ZgELgT7AfiBKRMJON8/64orUNgzuFM1z87aw/1iR23GMMeY7vhSAFUAn56idCGAcMNO7gYhE\ni8ipeU0CXnOGtxCRBqfaAIOADPXcPWUBMNaZZjzwcXUXpjYSER4Zmcrx4jL+OG+L23GMMeY7VRYA\np5/+HmAusBF4T1U3iMhkETl1VM9QYLOIbAHaAE85w7sC6SKyBs8X/tOqmuGMewD4lYhk4tkn8Kqf\nlqnW6dSmKbcOSOLt5TvI2HnE7TjGGAOA1KVbGaalpWl6errbMc7J4cIShj67gE5tmvLuXQPwHAhl\njDE1T0RWOvtiv8fOBA6Q5o3CufeqzizffoA563a7HccYY6wABNK4CxLpGtuM387ZyIliu32kMcZd\nVgACKDREeGxUKvmHTvCPr7a5HccYE+SsAARY//atuLpnLH//9zbyD51wO44xJohZAXDBb0Z0RRWm\nzLHrBBlj3GMFwAVxUQ35yZAOzF67i2VZ+92OY4wJUlYAXPKTIR1o1zySx2ZlUGa3jzTGuMAKgEsa\nRoTym6u7snHXEd5dkVv1BMYY42dWAFx0dY9Y+qW05NnPN3O4sMTtOMaYIGMFwEUinttHHios5vkv\n7TpBxpjAsgLgsm7tmjOuXyLTl+Swdc9Rt+MYY4KIFYBa4NdXnEfjiFAmz86gLl2byRhTt1kBqAVa\nNWnA/15+Hou27uOLjXvdjmOMCRJWAGqJWy9MolPrJjz5SQZFpXadIGNMzbMCUEuEh4bwyKhUcvYX\n8trX2W7HMcYEASsAtcjgTjFc3rUNf5m/lb1HTrodxxhTz1kBqGUeHtmVkjLl6c82uR3FGFPP+VQA\nRGSYiGwWkUwRmVjJ+CQR+VJE1orIQhGJd4b3FpElIrLBGfcDr2mmish2EVntPHr7b7HqrqRWjbl9\ncAoffpvPqh0H3Y5jjKnHqiwAIhIK/BUYDqQCN4lIaoVmzwLTVbUnMBmY4gwvBG5T1W7AMOB5EYny\nmu4+Ve3tPFZXc1nqjZ9d0pHWTRvw2KwMyu06QcaYGuLLFkA/IFNVs1S1GHgHGF2hTSow33m+4NR4\nVd2iqlud5zuBvUCMP4LXZ00ahPHAsC6syT3Eh6vy3Y5jjKmnfCkAcYD31crynGHe1gBjnOfXAU1F\npJV3AxHpB0QA3rfCesrpGnpORBpU9uYicpeIpItIekFBgQ9x64fr+sTROyGK3322iWNFpW7HMcbU\nQ/7aCXwvMEREVgFDgHzgu4PZRSQWeAP4kaqWO4MnAV2AC4CWwAOVzVhVX1LVNFVNi4kJno2HkBDh\nsWu6UXC0iL/Mz3Q7jjGmHvKlAOQDCV6v451h31HVnao6RlX7AA86ww4BiEgz4BPgQVVd6jXNLvUo\nAl7H09VkvPROiOL68+N57evtZO877nYcY0w940sBWAF0EpEUEYkAxgEzvRuISLSInJrXJOA1Z3gE\n8C88O4hnVJgm1vlXgGuB9dVZkPrqgWGdCQ8Vnvwkw+0oxph6psoCoKqlwD3AXGAj8J6qbhCRySJy\njdNsKLBZRLYAbYCnnOE3AhcDEyo53PMtEVkHrAOigSf9tVD1Setmkfz8sk58sXEv/94SPPtAjDE1\nT+rS1SfT0tI0PT3d7RgBV1RaxlXPfUVoiPDZ/15MeKidv2eM8Z2IrFTVtIrD7ZukDmgQFspDV6ey\nreA405fkuB3HGFNPWAGoIy7r2pqLz4vh+S+2sP9YkdtxjDH1gBWAOkJEeGRkV04Ul/Hs53b7SGNM\n9VkBqEM6tm7KbRcm886KHazPP+x2HGNMHWcFoI755eWdaNkogsdnbbDbRxpjqsUKQB3TvGE4917V\nmRXZB5m1dpfbcYwxdZgV
gDroxrQEurVrxpQ5GzlRbLePNMacGysAdVBoiPDoqG7sOnySF/+9reoJ\njDGmElYA6qh+KS0Z1asd//j3NvIOFrodxxhTB1kBqMMmDe+CCEyZY7ePNMacPSsAdVi7qIb8dEhH\nPlm3iyXb9rsdxxhTx1gBqON+PKQ9cVENeXzWBkrLyquewBhjHFYA6rjI8FAevLorm3Yf5e0VuVVP\nYIwxDisA9cDw7m3pn9KSP36+mUOFxW7HMcbUEVYA6gERz+0jD58o4fkvtrodxxhTR1gBqCe6xjbj\nh/0TeWNpDpt3H3U7jjGmDrACUI/8+orONGkQxuTZdp0gY0zVfCoAIjJMRDaLSKaITKxkfJKIfCki\na0VkoYjEe40bLyJbncd4r+F9RWSdM88XnHsDm2po0TiC/7u8E99k7ufzjD1uxzHG1HJVFgARCQX+\nCgwHUoGbRCS1QrNn8dz4vScwGZjiTNsSeBToD/QDHhWRFs40LwJ3Ap2cx7BqL43hlgFJnNemCU9+\nksHJErtOkDHm9HzZAugHZKpqlqoWA+8Aoyu0SQXmO88XeI2/CpinqgdU9SAwDxgmIrFAM1Vdqp6+\niunAtdVcFgOEhYbw6Khu5B44watfb3c7jjGmFvOlAMQB3geY5znDvK0BxjjPrwOaikirM0wb5zw/\n0zwBEJG7RCRdRNILCgp8iGsGdYzmytQ2/HVBJrsPn3Q7jjGmlvLXTuB7gSEisgoYAuQDful/UNWX\nVDVNVdNiYmL8Mcug8NDVqZSWK7/7zK4TZIypnC8FIB9I8Hod7wz7jqruVNUxqtoHeNAZdugM0+Y7\nz087T1M9ia0acefgFP61Kp+VOQfdjmOMqYV8KQArgE4ikiIiEcA4YKZ3AxGJFpFT85oEvOY8nwtc\nKSItnJ2/VwJzVXUXcEREBjhH/9wGfOyH5TFe7h7akTbNGvD4rA2Ul9thocaY76uyAKhqKXAPni/z\njcB7qrpBRCaLyDVOs6HAZhHZArQBnnKmPQA8gaeIrAAmO8MA7gZeATKBbcCn/loo49G4QRgTh3dh\nbd5hZnybV/UExpigInXphKG0tDRNT093O0adoqpc/+Jidhw4wYJ7h9A0MtztSMaYABORlaqaVnG4\nnQlcz526TtC+Y0X8eX6m23GMMbWIFYAg0DM+ihv6xvP6N9vJKjjmdhxjTC1hBSBI3DesMw3CQnny\nk41uRzHG1BJWAIJE66aR/OKyjszftJcFm/e6HccYUwtYAQgiEwamkBLdmCdmZ1BcarePNCbYWQEI\nIhFhITw8sitZBceZviTb7TjGGJdZAQgyl3Zpw9DOMfzpi60UHC1yO44xxkVWAILQwyNTOVFSxrNz\nN7sdxRjjIisAQahDTBMmDEzmvZW5rMs77HYcY4xLrAAEqV9c3olWjSN4fJbdPtKYYGUFIEg1iwzn\nvqs6k55zkJlrdrodxxjjAisAQeyGvgn0iGvOlDmbKCwudTuOMSbArAAEsZAQ4dFRqew+cpIXF25z\nO44xJsCsAAS5tOSWjO7djn98lUXugUK34xhjAsgKgGHi8C6EivCUXSfImKBiBcAQ27whdw/twGcb\ndrM4c5/bcYwxAWIFwABw58XtiW/RkMdnZVBaZtcJMiYY+FQARGSYiGwWkUwRmVjJ+EQRWSAiq0Rk\nrYiMcIbfLCKrvR7lItLbGbfQmeepca39u2jmbESGh/LQ1V3ZvOco/1y+w+04xpgAqLIAiEgo8Fdg\nOJAK3CQiqRWaPYTnXsF98Nw0/m8AqvqWqvZW1d7ArcB2VV3tNd3Np8arql2j2GVXdWvLwA6t+MPn\nWzh4vNjtOMaYGubLFkA/IFNVs1S1GHgHGF2hjQLNnOfNgcrOLLrJmdbUUiLCI6NSOXqyhMdnbeDI\nyRK3IxljapAvBSAOyPV6necM8/YYcIuI5AFzgJ9XMp8fAG9XGPa60/3zsIiIb5FNTerSth
k/HtKB\nj1bvZMBvv+Shj9axdc9Rt2MZY2qAv3YC3wRMVdV4YATwhoh8N28R6Q8Uqup6r2luVtUewGDncWtl\nMxaRu0QkXUTSCwoK/BTXnMkDw7ow++cXMaJHLO+l53HFc19xyyvLmJexh7Jyu26QMfWFVHUhMBG5\nEHhMVa9yXk8CUNUpXm02AMNUNdd5nQUMONWvLyLPAQWq+tvTvMcEIE1V7zlTlrS0NE1PT/dx0Yw/\n7D9WxDsrcnlzaQ67Dp8koWVDbh2QxA/SEmneKNzteMYYH4jISlVNqzjcly2AFUAnEUkRkQg8O3ln\nVmizA7jMeaOuQCRQ4LwOAW7Eq/9fRMJEJNp5Hg6MBNZjap1WTRrws0s6suj+S/jbzecT27whv52z\nif5TvmDSh2vZtPuI2xGNMecorKoGqloqIvcAc4FQ4DVV3SAik4F0VZ0J/Bp4WUT+D88O4Qn6n02L\ni4FcVc3ymm0DYK7z5R8KfAG87LelMn4XFhrCiB6xjOgRS8bOI0xfks2H3+bz9vJcBrRvyYSByVze\ntQ1hoXZqiTF1RZVdQLWJdQHVLgePF/Nuei5vLMkh/9AJ2jWP5JYLkxh3QSItG0e4Hc8Y4zhdF5AV\nAFNtZeXKFxv3MG1xNou37SciLITRvdoxfmAy3eOaux3PmKBnBcAExJY9R5m22NM9dKKkjLSkFowf\nmMyw7m0Jt+4hY1xhBcAE1OETJbyfnsv0JTnsOFBIm2YNuKV/Ejf1TyS6SQO34xkTVKwAGFeUlysL\nt+zl9W+yWbR1HxGhIYzsGcv4gcn0SohyO54xQeF0BaDKo4CMqY6QEOHSLm24tEsbthUcY/ribGas\nzOPDVfn0TohiwsBkRvSIJSLMuoeMCTTbAjABd/RkCR+szGP6khyy9h0nukkDbu6fyM39E2ndLNLt\neMbUO9YFZGqd8nLlq60FTFuczcItBYSKMKKHp3vo/MQo7PJQxviHdQGZWickRBjauTVDO7cme99x\npi/J4f30XGau2UmPuOaMH5jMyJ6xRIaHuh3VmHrJtgBMrXK8qJQPV+UzbXE2mXuP0apxBOP6JXDL\ngCRimzd0O54xdZJ1AZk6RVVZvG0/r3+TzZeb9hAiwrBubRk/MJkLkltY95AJGmVOV+klnc/9ponW\nBWTqFBFhUMdoBnWMJvdAIW8szeHdFbl8sm4XXWObMWFgEqN7x1n3kKnXMvce4/4Za/h2xyH+dfdA\n+iS28Ov8bQvA1Bknisv4aLWne2jT7qNENQrnBxckcOuAJOJbNHI7njF+U1auvLIoiz/M20LD8FAe\nuyaVa3vHnfOWr3UBmXpDVVm2/QDTFmczd8NuAC7v2oYJg5K5sH0r6x4yddrWPUe5d8Za1uQe4srU\nNjx5XXdaN63e4dHWBWTqDRFhQPtWDGjfivxDJ3hzaQ7vLN/B5xl76NymKbcNTOK6PnE0irA/b1N3\nlJaV84+vsvjTF1tp3CCUF27qw6iesTX6g8a2AEy9cLKkjJlrdjJtcTYbdh6hWWQYN6YlcNuFySS2\nsu4hU7tt3n2U+2asYW3eYYZ3b8vk0d2Jaeq/a2ZZF5AJCqrKypyDTF2czWfrd1OmyqWdWzN+YDKD\nO0Vb95CpVUrKyvn7wm28MH8rTSPDeWJ0d67uGev397EuIBMURIS05JakJbdk9+GT/HNZDv9cvoPb\nXltO+5jGTBiYzJjz42nSwP70jbsydh7hvhlr2LDzCCN7xvL4Nd1oFeAr5doWgKn3ikrL+GTtLqYt\nzmZN3mGaNAhjbN94xg9MJiW6sdvxTJApLi3nbwsz+cv8TKIahfPktd0Z1t3/v/q9VasLSESGAX/C\nc//eV1T16QrjE4FpQJTTZqKqzhGRZGAjsNlpulRVf+JM0xeYCjQE5gC/1CrCWAEw1bVqx0GmLc7m\nk3W7KClThpwXw4SByQw5L4aQEOseMjVrff5h7n1/DZ
t2H2V073Y8NqobLQJw+9RzLgAiEgpsAa4A\n8oAVwE2qmuHV5iVglaq+KCKpwBxVTXYKwGxV7V7JfJcDvwCW4SkAL6jqp2fKYgXA+Mveoyd5e1ku\nby7LoeBoEcmtGnHbhcmMTYunWWS42/FMPVNUWsZf5mfyt4XbaNk4gqeu7c6V3doG7P2rsw+gH5Cp\nqlnOjN4BRgMZXm0UaOY8bw7srCJMLNBMVZc6r6cD1wJnLADG+EvrppH88vJO/HRoBz5d7+kemjw7\ng2c/38z158czfmASHVs3dTumqQfW5h3ivvfXsnnPUcacH8cjI1OJalTzv/p94UsBiANyvV7nAf0r\ntHkM+FxEfg40Bi73GpciIquAI8BDqrrImWdehXnGVfbmInIXcBdAYmKiD3GN8V1EWAije8cxuncc\n6/IOM3VxNu+uyOWNpTlc1DGa8QOTubRLa0Kte8icpZMlZbzw5Vb+8VUW0U0ieHV8Gpd1beN2rO/x\n16EQNwFTVfUPInIh8IaIdAd2AYmqut/p8/9IRLqdzYxV9SXgJfB0AfkprzH/pUd8c/5wYy9+M6IL\n76zI5c2lOdw5PZ2Elg25dUASP0hLpHkj6x4yVVu14yD3zVhL5t5j3NA3nodGptK8Ye372/GlAOQD\nCV6v451h3m4HhgGo6hIRiQSiVXUvUOQMXyki24DznOnjq5inMa5o1aQBP7ukIz++uD2fZ+xh6jfZ\n/HbOJv44bwvX9fF0D3Vp26zqGZmgc7KkjOfmbeHlRVm0aRbJ1B9dwNBqXMWzpvlSAFYAnUQkBc+X\n9DjghxXa7AAuA6aKSFcgEigQkRjggKqWiUh7oBOQpaoHROSIiAzAsxP4NuDP/lkkY/wjLDSEET1i\nGdEjloydR5i2OJsPv83j7eU7GNC+JRMGJnN51zaEhdr9jA2szDnIfTPWkFVwnHEXJPCbq7vW+gMK\nfD0MdATwPJ5DPF9T1adEZDKQrqoznSN/Xgaa4NkhfL+qfi4i1wOTgRKgHHhUVWc580zjP4eBfgr8\n3A4DNbXdwePFvJueyxtLcsg/dIK4qIbcPCCRcRck0jIAh/OZ2udEcRl/+Hwzr36znXbNGzJlTA8u\nPi/G7VjfY5eCMMaPysqVLzZ6uoeWZO2nQVgI1/eN5/aLUugQ08TteCZAVmQf4P4Za9m+7zg3909k\n4vAuNK2Fv/qtABhTQzbvPsrr32znw2/zKSkv57IubbhzcAr9UlratYfqqcLiUp6Zu5mpi7OJi2rI\n76/vycCO0W7HOi0rAMbUsIKjRbyxJJs3luZwsLCEXvHNuWNwe4Z3b2v7CeqRpVn7uX/GWnYcKOS2\nC5N4YFgXGtfya0tZATAmQE4Ul/HBt3m8+vV2tu87TlxUQ340KJlx/RLtInR12PGiUn732SamL8kh\nsWUjfnd9Ty7s0MrtWD6xAmBMgJU7+wleWbSd5dkHaNogjB/2T2TCoGRimzd0O545C4sz93H/B2vJ\nP3SC8Rcmc/+wznXqhkNWAIxx0ercQ7y8KItP1+0iRISRPWO5Y3B7usc1dzuaOYNjRaVMmbORt5bt\nILlVI34/thf9Ulq6HeusWQEwphbIPVDI699k8+6KHRwvLmNgh1bcObi9XY20Fvp66z4e+GAtOw+f\n4PZBKfz6ys40jAh1O9Y5sQJgTC1y+EQJby/fwdRvstl95CQdWzfhzsEpjO4dR2R43fySqS+OnCxh\nypyNvL08l/bRjXnmhp70Tap7v/q9WQEwphYqLi3nk3U7efmr7WTsOkJ0kwhuuzCZWwYk2YllLli4\neS+TPlzHniMnuXNwe/7vivPqRUG2AmBMLaaqLN62n5cXZbFwcwGR4SGM7RvP7Re1t7uWBcDhEyU8\nOTuD91fm0bF1E54Z25M+idMeXXoAAAvcSURBVC3cjuU3dk9gY2oxEWFQx2gGdYxmy56jvLpoO++t\nyOOtZTu4vGsb7h
zcnguSW9iJZTVg/qY9TPpwHQVHi/jp0A788rJO9eJXvy9sC8CYWmrv0ZO8sSSH\nN5bmcKiwhF4JUdw5OIVh3ezEMn84XFjC47M38OG3+ZzXpgnPjO1Fr4Qot2PVCOsCMqaOKiwu5YOV\nnhPLsvcXEt+iIT8alMIPLkiwE8vO0byMPTz4r3XsP17M3UM7cM+lHWkQVn9/9VsBMKaOK/vuxLIs\nVmQfpGmkc2LZQDuxzFcHjxfz+KwNfLR6J13aNuXZG3oFxbkYVgCMqUdW7TjIK4u28+l6z4llo3q1\n447BKXRrV/+/zM7VZ+t389BH6zlUWMzPLunIzy7pSERYcHSlWQEwph7KPVDIa99s590VuRQWlzGo\nYyvuGNyeoefF2A5jx/5jRTw6cwOz1+4iNbYZz9zQM+gKpRUAY+qxw4Ul/HP5DqYu3s6eI0V0at2E\nO+zEMuas28XDH63nyMkSfnFpJ34ytAPhQbgD3QqAMUGguLSc2Wt38vKi7WzcdYToJg0Yf2EStwxI\nokUQnVi271gRj3y8njnrdtMjrjnP3NAzqO/jXK0CICLDgD/huSXkK6r6dIXxicA0IMppM1FV54jI\nFcDTQARQDNynqvOdaRYCscAJZzZXOjeRPy0rAMb4RlX5JtNzYtm/t3hOLLuhbwK3X5RCcj0+sUxV\nmbV2F49+vJ7jRWX88vJO/Pji9kF/2Ow5FwARCQW2AFcAeXhuEn+TqmZ4tXkJWKWqLzr3B56jqski\n0gfYo6o7RaQ7MFdV45xpFgL3qqrP3+hWAIw5e5t3H+WVRVl8vHonJeXlXNG1DXdd3J6+SfXrxLK9\nR0/y8EfrmbthD70SonhmbE/Oa9PU7Vi1QnXOBO4HZKpqljOjd4DRQIZXGwVObV81B3YCqOoqrzYb\ngIYi0kBVi85+EYwx56Jz26Y8c0Mv7hvWmemLc3hzWQ6fZ+yhd0IUdw5uz1Xd2tTpX8iqyserd/LY\nrA0UFpcxcXgX7rgopU4vU6D4sgUwFhimqnc4r28F+qvqPV5tYoHPgRZAY+ByVV1ZyXx+oqqXO68X\nAq2AMuAD4EmtJIyI3AXcBZCYmNg3Jyfn3JbUGAN4Tiyb4ZxYluOcWPY/g1K4sQ6eWLbnyEke/Nc6\nvti4lz6JUTwzthcdWzdxO1atU50uIF8KwK+cef1BRC4EXgW6q2q5M74bMBNPP/82Z1icquaLSFM8\nBeBNVZ1+pizWBWSM/5SVK/MyPCeWpeccpFlkGD/sn8SEgcm0bR7pdrwzUlU++DafybM2UFRazn1X\ndeZHg1IItXsqVKo6XUD5QILX63hnmLfbgWEAqrpERCKBaGCviMQD/wJuO/Xl77TLd/49KiL/xNPV\ndMYCYIzxn9AQYVj3tgzr3pZvdxzklUVZvPTVNl79OotRPdtxx+D2pLarfUfO7D58kkkfrmXB5gLS\nklrw+7E9aR9jv/rPhS8FYAXQSURS8HzxjwN+WKHNDuAyYKqIdAUigQIRiQI+wXNU0DenGotIGBCl\nqvtEJBwYCXxR7aUxxpyT8xNb8Leb+7Jjv+fEsvfSc/lwVT4XdYzmjsEpDKkFJ5apKu+n5/HEJxmU\nlJXzyMhUxg9Mtl/91eDrYaAjgOfxHOL5mqo+JSKTgXRVnekc+fMy0ATPDuH7VfVzEXkImARs9Zrd\nlcBx4Csg3JnnF8CvVLXsTDmsC8iYwDhcWMJby3OY+k02e48W0blNU24fnMLo3u1cuWjazkMnmPjh\nOr7aUkC/lJb8/vqe9fpwVn+zE8GMMWetuLScWWt28vKiLDbtPkpMU8+JZTf3D8yJZarKOytyeeqT\njZSVKxOHd+HWAUl2/+SzZAXAGHPOVJWvM/fx8qLtfLWlgIbhodyQFs/tF6WQ1KpmfonnHSxk4gfr\n+DpzHxe2b8Xvru9JYqtGNfJe9Z0VAGOMX2zafYRXFm3n49X5lJYrV6aeOrHMPzdO
Ly9X3lq+g6fn\nbARg0oiu/LBfov3qrwYrAMYYv9p75CTTlmTz5tIdHD5RQp/EUyeWtT3nHbO5Bwq5f8ZalmTt56KO\n0UwZ04OElvarv7qsABhjakRhcSnvp3tOLNtxoJCElg25fVAKN6Ql0NjHE8vKy5U3lubwu882ESLC\ng1d3ZdwFCa4feVRfWAEwxtQoz4llu3l50XZWOieW3TzAc2JZm2anP7EsZ/9x7puxluXbD3DxeTFM\nGdODuCi7w5k/WQEwxgTMyhzPiWVzN+wmNES4plccdwxOoWvsf04sKy9Xpi7O5vdzNxEeGsLDV6dy\nQ1q8/eqvAdU5E9gYY85K36QW9E3qS87+47z29XbeS8/jg2/zGNwpmjsGtyehRUPun7GW9JyDXNI5\nht+O6WH3NXaBbQEYY2rcocJi3lq2g2mLPSeWATSLDOPRUd0Yc36c/eqvYbYFYIxxTVSjCH52SUfu\nGJzCzNU72br3GLdflHLGfQOm5lkBMMYETIOwUG5IS6i6oQkIu2OCMcYEKSsAxhgTpKwAGGNMkLIC\nYIwxQcoKgDHGBCkrAMYYE6SsABhjTJCyAmCMMUGqTl0KQkQKgJxznDwa2OfHOP5iuc6O5To7luvs\n1NdcSaoaU3FgnSoA1SEi6ZVdC8NtluvsWK6zY7nOTrDlsi4gY4wJUlYAjDEmSAVTAXjJ7QCnYbnO\njuU6O5br7ARVrqDZB2CMMeb7gmkLwBhjjBcrAMYYE6TqXQEQkWEisllEMkVkYiXjG4jIu874ZSKS\nXEtyTRCRAhFZ7TzuCECm10Rkr4isP814EZEXnMxrReT8ms7kY66hInLYa109EqBcCSKyQEQyRGSD\niPyykjYBX2c+5gr4OhORSBFZLiJrnFyPV9Im4J9HH3MF/PPo9d6hIrJKRGZXMs6/60tV680DCAW2\nAe2BCGANkFqhzd3A353n44B3a0muCcBfAry+LgbOB9afZvwI4FNAgAHAslqSaygw24W/r1jgfOd5\nU2BLJf+PAV9nPuYK+Dpz1kET53k4sAwYUKGNG59HX3IF/PPo9d6/Av5Z2f+Xv9dXfdsC6AdkqmqW\nqhYD7wCjK7QZDUxzns8ALpOavyO1L7kCTlW/Ag6cocloYLp6LAWiRCS2FuRyharuUtVvnedHgY1A\nXIVmAV9nPuYKOGcdHHNehjuPikedBPzz6GMuV4hIPHA18Mppmvh1fdW3AhAH5Hq9zuO/PwjftVHV\nUuAw0KoW5AK43uk2mCEiteHGqb7mdsOFzib8pyLSLdBv7mx698Hz69Gbq+vsDLnAhXXmdGesBvYC\n81T1tOsrgJ9HX3KBO5/H54H7gfLTjPfr+qpvBaAumwUkq2pPYB7/qfLmv32L59omvYA/Ax8F8s1F\npAnwAfC/qnokkO99JlXkcmWdqWqZqvYG4oF+ItI9EO9bFR9yBfzzKCIjgb2qurKm3+uU+lYA8gHv\nSh3vDKu0jYiEAc2B/W7nUtX9qlrkvHwF6FvDmXzhy/oMOFU9cmoTXlXnAOEiEh2I9xaRcDxfsm+p\n6oeVNHFlnVWVy8115rznIWABMKzCKDc+j1XmcunzOAi4RkSy8XQTXyoib1Zo49f1Vd8KwAqgk4ik\niEgEnp0kMyu0mQmMd56PBears0fFzVwV+omvwdOP67aZwG3OkS0DgMOqusvtUCLS9lS/p4j0w/N3\nXONfGs57vgpsVNU/nqZZwNeZL7ncWGciEiMiUc7zhsAVwKYKzQL+efQllxufR1WdpKrxqpqM5zti\nvqreUqGZX9dX2LlOWBupaqmI3APMxXPkzWuqukFEJgPpqjoTzwflDRHJxLOjcVwtyfULEbkGKHVy\nTajpXCLyNp6jQ6JFJA94FM8OMVT178AcPEe1ZAKFwI9qOpOPucYCPxWRUuAEMC4ARRw8v9BuBdY5\n/ccAvwESvbK5sc58yeXGOosFpolIKJ6C856q
znb78+hjroB/Hk+nJteXXQrCGGOCVH3rAjLGGOMj\nKwDGGBOkrAAYY0yQsgJgjDFBygqAMcYEKSsAxhgTpKwAGGNMkPp/qxECZONBCmQAAAAASUVORK5C\nYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eX2Ln6dfUb47", + "colab_type": "text" + }, + "source": [ + "As already pointed out in the MNIST example notebook, a classical neural network is hard to beat on a simple learning task like this, especially with a basic data encoding scheme as used above. In general, layerwise learning can be used in arbitrary configurations that allow successively stacking and training layers, and it is independent of the data encoding scheme used - so feel free to play with more elaborate data sets as well!" + ] + } + ] +} \ No newline at end of file diff --git a/metalearning_qaoa/metalearning_qaoa.ipynb b/metalearning_qaoa/metalearning_qaoa.ipynb new file mode 100644 index 000000000..d5f8897db --- /dev/null +++ b/metalearning_qaoa/metalearning_qaoa.ipynb @@ -0,0 +1,620 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TFQ_Example_MetaLearning_QAOA.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "d2azAL4KJk-T", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4UFpHoRvJmwd", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2BswWgdtnmSP", + "colab_type": "text" + }, + "source": [ + "# Meta-Learning for QAOA\n", + "\n", + "In this notebook you will explore the application of meta-learning techniques from [here](https://arxiv.org/abs/1907.05415) to improve initialization of QAOA.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BTb-AuXh1OY2", + "colab_type": "text" + }, + "source": [ + "Authors : Michael Broughton, Antonio J. 
Martinez\n", + "\n", + "Contributors: Guillaume Verdon\n", + "\n", + "Created : 2020-Feb-06\n", + "\n", + "Last updated : 2020-Apr-09" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MH1spy1rJ2ko", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/metalearning_qaoa/metalearning_qaoa.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IrWw_xv4fs44", + "colab_type": "text" + }, + "source": [ + "## Import dependencies" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gqx89K2caCR7", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade cirq==0.7.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "JuXxC5fbaGAS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "fyrqkto1aHQV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "kW2sb1rAfhwt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import cirq\n", + "import networkx as nx\n", + "import numpy as np\n", + "import random\n", + "import sympy\n", + "import tensorflow as tf\n", + "import tensorflow_quantum as tfq\n", + "np.random.seed(123)\n", + "random.seed(123)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "r0rC9eTXqPaR", + "colab_type": "text" + }, + "source": [ + "## QAOA\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JywerwWEqsqY", + "colab_type": "text" + }, + "source": [ + "The QAOA ansatz consists of repeated applications of a mixer Hamiltonian 
$\\hat{H}_M$ and the cost Hamiltonian $\\hat{H}_C$. The total applied unitary is\n", + "$$\\hat{U}(\\eta,\\gamma) = \\prod_{j=1}^{p}e^{-i\\eta_{j}\\hat{H}_M}e^{-i\\gamma_{j} \\hat{H}_C},$$\n", + "where $p$ is the number of times the mixer and cost are applied; the parameters $\\eta_j, \\gamma_j$ are to be optimized to produce a bitstring of minimal energy with respect to $\\hat{H}_C$.\n", + "\n", + "One traditional family of Hamiltonians used in QAOA are the Ising models. These are defined as\n", + "$$\\hat{H}_\\mathrm{P}=\\sum_i h_i \\hat{Z}^{(i)}+\\sum_{i,j} J_{ij} \\hat{Z}^{(i)}\\hat{Z}^{(j)}.$$\n", + "There is a one-to-one mapping between weighted graphs and Ising models: $h_i$ can be thought of as the weight of a graph node $i$ and $J_{ij}$ can be thought of as the weight of a graph edge between nodes $i$ and $j$. In applications such as [MaxCut](https://en.wikipedia.org/wiki/Maximum_cut), we have $h_i = 0$ and $J_{ij} = 1$ for all indices $i$ and $j$. The importance of this graph correspondence motivates us to define the following function, which takes a graph and returns the corresponding Ising model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OsYSfSCrqVMp", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def maxcut_qaoa_from_graph(graph, p):\n", + " qubits = cirq.GridQubit.rect(1, len(graph.nodes))\n", + " qaoa_circuit = cirq.Circuit()\n", + " # Initial equal superposition\n", + " for qubit in qubits:\n", + " qaoa_circuit += cirq.H(qubit)\n", + " qaoa_symbols = []\n", + " # Stack the parameterized costs and mixers\n", + " for l_num in range(p):\n", + " qaoa_symbols.append(sympy.Symbol(\"gamma_{}\".format(l_num)))\n", + " for e in graph.edges():\n", + " qaoa_circuit += cirq.ZZ(qubits[e[0]], qubits[e[1]])**qaoa_symbols[-1]\n", + " qaoa_symbols.append(sympy.Symbol(\"eta_{}\".format(l_num)))\n", + " for n in graph.nodes():\n", + " qaoa_circuit += cirq.X(qubits[n])**qaoa_symbols[-1]\n", + " # Define the cost as a Cirq PauliSum\n", 
+ " cost_op = None\n", + " for e in graph.edges():\n", + " if cost_op is None:\n", + " cost_op = cirq.Z(qubits[e[0]])*cirq.Z(qubits[e[1]])\n", + " else:\n", + " cost_op += cirq.Z(qubits[e[0]])*cirq.Z(qubits[e[1]])\n", + " return qaoa_circuit, qaoa_symbols, cost_op" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1qsBNoLYr3M4", + "colab_type": "text" + }, + "source": [ + "## Meta-Learning for MaxCut" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "EbUFDaCtgc78", + "colab_type": "text" + }, + "source": [ + "The idea of meta-learning for optimization is to train an outer-loop optimizer on many instances of a problem class, to enhance the efficiency of solving unseen instances. In other words, the learner is attempting to extract the common structure among instances of a particular class of problems.\n", + "\n", + "Here, you will use a recurrent neural network to find good initial parameter settings for MaxCut QAOA instances. As shown in the [original paper](https://arxiv.org/abs/1907.05415), this is an effective method for overcoming the challenge of [\"barren plateaus\"](https://www.nature.com/articles/s41467-018-07090-4) in quantum machine learning.\n", + "\n", + "To this end we define a function that generates a set of random MaxCut QAOA instances, based on graphs sampled from an [Erdős–Rényi](https://en.wikipedia.org/wiki/Erd%C5%91s%E2%80%93R%C3%A9nyi_model) distribution:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "F2iWPCuIf1DZ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def generate_data(n_nodes, n_points):\n", + " datapoints = []\n", + " costs = []\n", + " for _ in range(n_points):\n", + " random_graph = nx.gnp_random_graph(n_nodes, p=3. 
/ n_nodes)\n", + " circuit, symbols, cost_op = maxcut_qaoa_from_graph(random_graph, 1)\n", + " datapoints.append(circuit)\n", + " costs.append([cost_op])\n", + " return datapoints, symbols, costs" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kqp69jRYgqUy", + "colab_type": "text" + }, + "source": [ + "Since our recurrent neural network will have both classical and quantum components, we will need to define a custom RNN layer:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1ufim5zxtK5n", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class QRNN(tf.keras.layers.Layer):\n", + " def __init__(self, symbol_names):\n", + " super(QRNN, self).__init__()\n", + " self.shared = tf.keras.layers.Dense(25, name=\"shared\")\n", + " self.state = tf.keras.layers.Dense(25, name=\"state\")\n", + " self.params = tf.keras.layers.Dense(2, name=\"params\")\n", + " self.expectation = tfq.layers.Expectation()\n", + " self.symbol_names = symbol_names\n", + "\n", + " def call(self, inputs):\n", + " circuits = inputs[0]\n", + " ops = inputs[1]\n", + " state = inputs[2]\n", + " params = inputs[3]\n", + " prev_output = inputs[4]\n", + " joined = tf.keras.layers.concatenate([state, params, prev_output])\n", + " shared = self.shared(joined)\n", + " s_inp = self.state(shared)\n", + " p_inp = self.params(shared)\n", + " exp_out = self.expectation(circuits,\n", + " symbol_names=self.symbol_names,\n", + " symbol_values=p_inp,\n", + " operators=ops)\n", + " return [circuits, ops, s_inp, p_inp, exp_out]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "q4XJuhHfw8ZA", + "colab_type": "text" + }, + "source": [ + "This layer is stacked to produce the meta-learner RNN. 
We choose 5 shots of optimization:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "XPL-CRJUgj_X", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Generate random MaxCut instances as training data.\n", + "N_QUBITS = 10\n", + "\n", + "# For a more accurate optimizer on testing data, increase N_POINTS\n", + "N_POINTS = 500\n", + "circuits, symbols, ops = generate_data(N_QUBITS, N_POINTS)\n", + "circuit_tensor = tfq.convert_to_tensor(circuits)\n", + "ops_tensor = tfq.convert_to_tensor(ops)\n", + "\n", + "# Unroll the RNN through time.\n", + "state_inp = tf.keras.Input(shape=(25,))\n", + "params_inp = tf.keras.Input(shape=(2,))\n", + "exp_inp = tf.keras.Input(shape=(25,))\n", + "\n", + "op_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string)\n", + "circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string)\n", + "\n", + "rnn_0 = QRNN(symbols)\n", + "rnn_1 = QRNN(symbols)\n", + "rnn_2 = QRNN(symbols)\n", + "rnn_3 = QRNN(symbols)\n", + "rnn_4 = QRNN(symbols)\n", + "output_0 = rnn_0([circuit_inp, op_inp, state_inp, params_inp, exp_inp])\n", + "output_1 = rnn_1(output_0)\n", + "output_2 = rnn_2(output_1)\n", + "output_3 = rnn_3(output_2)\n", + "output_4 = rnn_4(output_3)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pCmvJEUrGsXs", + "colab_type": "text" + }, + "source": [ + "Now we can set up a loss function over the 5 timesteps of our RNN QAOA optimizer:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "l7Fp2eOYzsAT", + "colab_type": "code", + "colab": {} + }, + "source": [ + "@tf.function\n", + "def value_loss(unused, outputs):\n", + " return tf.reduce_mean(outputs)\n", + "\n", + "# It's important to have a good guess on the last shot of the optimization\n", + "loss = tf.keras.layers.average([\n", + " 0.1 * output_0[4], 0.2 * output_1[4], 0.3 * output_2[4],\n", + " 0.4 * output_3[4], 0.5 * output_4[4]\n", + "])\n", + "\n", + "# Penalize jumping around randomly in 
the landscape.\n", + "penalizer = 10 * tf.reduce_sum(\n", + " (output_0[3] - output_1[3])**2 + (output_1[3] - output_2[3])**2 +\n", + " (output_2[3] - output_3[3])**2 + (output_3[3] - output_4[3])**2,\n", + " axis=1)\n", + "full_loss = loss + penalizer" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "UM-UGe11UPXp", + "colab_type": "text" + }, + "source": [ + "Finally we set and train our full Keras model:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "sykuyNseURcC", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Can change these to random along with longer time horizon and greater training\n", + "# data for more robust test set performance\n", + "initial_state = np.zeros((N_POINTS, 25)).astype(np.float32)\n", + "initial_params = np.zeros((N_POINTS, 2)).astype(np.float32)\n", + "initial_exp = np.zeros((N_POINTS, 25)).astype(np.float32)\n", + "\n", + "# Our model will output its parameter guesses along with the loss value that is\n", + "# computed over them. 
This way we can use the model to guess parameters later on\n", + "model = tf.keras.Model(inputs=[circuit_inp, op_inp, state_inp, params_inp, exp_inp],\n", + " outputs=[\n", + " output_0[3], output_1[3], output_2[3], output_3[3], output_4[3],\n", + " full_loss\n", + " ])\n", + "model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),\n", + " loss=value_loss, loss_weights=[0, 0, 0, 0, 0, 1])\n", + "\n", + "model.fit(x=[circuit_tensor, ops_tensor, initial_state, initial_params, initial_exp],\n", + " y=[\n", + " np.zeros((N_POINTS, 1)),\n", + " np.zeros((N_POINTS, 1)),\n", + " np.zeros((N_POINTS, 1)),\n", + " np.zeros((N_POINTS, 1)),\n", + " np.zeros((N_POINTS, 1)),\n", + " np.zeros((N_POINTS, 1))\n", + " ],\n", + " epochs=20,\n", + " batch_size=64,\n", + " verbose=1)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gYxwwKM3g4vL", + "colab_type": "text" + }, + "source": [ + "(Doing validation data.)" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ptNh_hPig1zk", + "colab_type": "code", + "colab": {} + }, + "source": [ + "circuits, parameters, ops = generate_data(10, N_POINTS // 2)\n", + "\n", + "circuit_tensor = tfq.convert_to_tensor(circuits)\n", + "ops_tensor = tfq.convert_to_tensor(ops)\n", + "\n", + "initial_state = np.zeros((N_POINTS // 2, 25)).astype(np.float32)\n", + "initial_guesses = np.zeros((N_POINTS // 2, 2)).astype(np.float32)\n", + "initial_exp = np.zeros((N_POINTS // 2, 25)).astype(np.float32)\n", + "\n", + "out1, out2, out3, out4, out5, _ = model(\n", + " [circuit_tensor, ops_tensor, initial_state, initial_guesses, initial_exp])\n", + "\n", + "one_vals = tf.reduce_mean(tfq.layers.Expectation()(\n", + " circuit_tensor,\n", + " symbol_names=parameters,\n", + " symbol_values=out1,\n", + " operators=ops_tensor)).numpy()\n", + "two_vals = tf.reduce_mean(tfq.layers.Expectation()(\n", + " circuit_tensor,\n", + " symbol_names=parameters,\n", + " symbol_values=out2,\n", + " 
operators=ops_tensor)).numpy()\n", + "three_vals = tf.reduce_mean(tfq.layers.Expectation()(\n", + " circuit_tensor,\n", + " symbol_names=parameters,\n", + " symbol_values=out3,\n", + " operators=ops_tensor)).numpy()\n", + "four_vals = tf.reduce_mean(tfq.layers.Expectation()(\n", + " circuit_tensor,\n", + " symbol_names=parameters,\n", + " symbol_values=out4,\n", + " operators=ops_tensor)).numpy()\n", + "five_vals = tf.reduce_mean(tfq.layers.Expectation()(\n", + " circuit_tensor,\n", + " symbol_names=parameters,\n", + " symbol_values=out5,\n", + " operators=ops_tensor)).numpy()\n", + "\n", + "average_cost_function_values = [\n", + " one_vals, two_vals, three_vals, four_vals, five_vals\n", + "]\n", + "std_of_param_guess = [\n", + " tf.math.reduce_std(out1).numpy(),\n", + " tf.math.reduce_std(out2).numpy(),\n", + " tf.math.reduce_std(out3).numpy(),\n", + " tf.math.reduce_std(out4).numpy(),\n", + " tf.math.reduce_std(out5).numpy()\n", + "]\n", + "\n", + "print('-' * 80)\n", + "print('Average cost function values for each guess number across unseen instances:',\n", + " average_cost_function_values)\n", + "print('Variance of parameter values guessed:', std_of_param_guess)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cvs5E__KhEHu", + "colab_type": "text" + }, + "source": [ + "Explore a single instance. Now this instance is on 12 qubits and not just 10."
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "rWMazq46hBB6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "test_graph_circuit, parameters, test_graph_op = generate_data(12, 1)\n", + "\n", + "test_graph_circuit = test_graph_circuit[0]\n", + "test_graph_op = test_graph_op[0][0]\n", + "\n", + "resolution = 100\n", + "input_vals = []\n", + "for i, a in enumerate(np.linspace(-0.5, .5, resolution)):\n", + " for j, b in enumerate(np.linspace(-0.5, .5, resolution)):\n", + " input_vals.append([a, b])\n", + "\n", + "cost_vals = tfq.layers.Expectation()(test_graph_circuit,\n", + " symbol_names=parameters,\n", + " symbol_values=np.array(input_vals),\n", + " operators=test_graph_op).numpy()\n", + "\n", + "output_vals = np.empty((resolution, resolution))\n", + "for i, a in enumerate(np.linspace(-0.5, 0.5, resolution)):\n", + " for j, b in enumerate(np.linspace(-0.5, 0.5, resolution)):\n", + " output_vals[i][j] = cost_vals[i * resolution + j]\n", + "\n", + "import matplotlib.pyplot as plt\n", + "\n", + "plt.imshow(output_vals)\n", + "\n", + "guess_0, guess_1, guess_2, guess_3, guess_4, _ = model([\n", + " tfq.convert_to_tensor([test_graph_circuit]),\n", + " tfq.convert_to_tensor([[test_graph_op]]),\n", + " np.zeros((1, 25)).astype(np.float32),\n", + " np.zeros((1, 2)).astype(np.float32),\n", + " np.zeros((1, 25)).astype(np.float32),\n", + "])\n", + "all_guesses = [guess_0, guess_1, guess_2, guess_3, guess_4]\n", + "all_guesses = [list(a.numpy()[0]) for a in all_guesses]\n", + "\n", + "\n", + "# This should be cleaned up...\n", + "def f(x):\n", + " sim = cirq.Simulator()\n", + " final_state = sim.simulate(test_graph_circuit, {\n", + " parameters[0]: x[0],\n", + " parameters[1]: x[1]\n", + " }).final_state\n", + " q = sorted(list(test_graph_circuit.all_qubits()))\n", + " res = test_graph_op.expectation_from_wavefunction(\n", + " final_state, qubit_map={h: i for i, h in enumerate(q)}).real\n", + " return res\n", + "\n", + "\n", + "all_costs = [f(a) for a 
in all_guesses]\n", + "\n", + "plt.plot((np.array(all_guesses)[:, 0] + 0.5) * resolution,\n", + " (np.array(all_guesses)[:, 1] + 0.5) * resolution,\n", + " c='r',\n", + " linestyle='--',\n", + " markevery=[4],\n", + " marker='*',\n", + " markersize=20,\n", + " linewidth=3.5)\n", + "plt.show()\n", + "\n", + "print('All guesses for test graph:', all_guesses)\n", + "print('Cost function values for test graph:', all_costs)\n", + "print('-' * 80)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "__ae8ksZdTkb", + "colab_type": "text" + }, + "source": [ + "As we can see from the above visual the RNN immediately begins guessing near the basin of attraction and continues to explore around the region looking to improve the estimate. Note the values in `all_costs` decreasing." + ] + } + ] +} \ No newline at end of file diff --git a/qaoa/qaoa.ipynb b/qaoa/qaoa.ipynb new file mode 100644 index 000000000..cda6cf537 --- /dev/null +++ b/qaoa/qaoa.ipynb @@ -0,0 +1,472 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TFQ_Example_BasicQAOA.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "kKeM2IR1dGN0", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ZQGwm207awkt", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Z91w8WrIeYQX", + "colab_type": "text" + }, + "source": [ + "# MaxCut QAOA\n", + "\n", + "Authors : Michael Streif, David Von Dollen\n", + "\n", + "Contributors : Michael Broughton\n", + "\n", + "Created : 2019\n", + "\n", + "Last updated : 2020-Mar-05" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NO5t8JWLeaqO", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/qaoa/qaoa.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7mlhXBELak9p", + "colab_type": "text" + }, + "source": [ + "This notebook shows an example of how to optimize variational parameters for QAOA using TFQ" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "MDDFanHIbDfS", + "colab_type": "text" + }, + "source": [ + "First we must install the required libraries" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "rFqxhKypZoSJ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install -q --upgrade tensorflow==2.1.0" + ], + 
"execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xcDb1zbSdXKi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install -q tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VWkUxydDbI_C", + "colab_type": "text" + }, + "source": [ + "Next, we import necessary modules" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3kQL6cljZyJq", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import tensorflow as tf\n", + "import tensorflow_quantum as tfq\n", + "import cirq\n", + "import sympy\n", + "import numpy as np\n", + "\n", + "# visualization tools\n", + "%matplotlib inline\n", + "import matplotlib.pyplot as plt\n", + "from cirq.contrib.svg import SVGCircuit\n", + "#supress warning for matplotlib\n", + "import warnings\n", + "warnings.filterwarnings(\"ignore\")" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Xc4ZYqG1aXJu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import networkx as nx" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "IxaTUKjkbnaS", + "colab_type": "text" + }, + "source": [ + "We can use QAOA to solve Max-cut. 
First we need to generate a 3-regular graph with 10 nodes upon which to find the Max-cut using QAOA" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1J6wZ6zfb0Bi", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# generate a 3-regular graph with 10 nodes\n", + "maxcut_graph = nx.random_regular_graph(n=10,d=3)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "fM3quBeNcDF5", + "colab_type": "text" + }, + "source": [ + "Let's visualize the graph" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "3GLOTJj6cFqC", + "colab_type": "code", + "colab": {} + }, + "source": [ + "fig, ax = plt.subplots(1, 1, figsize=(8, 6));\n", + "nx.draw_networkx(maxcut_graph, ax=ax)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "XaS5DlNqcJg6", + "colab_type": "code", + "colab": {} + }, + "source": [ + "cirq_qubits = cirq.GridQubit.rect(1, 10)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "g1ihNnOncvFI", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# create layer of hadamards to initialize the superposition state of all \n", + "# computational states\n", + "hadamard_circuit = cirq.Circuit()\n", + "for node in maxcut_graph.nodes():\n", + " qubit = cirq_qubits[node]\n", + " hadamard_circuit.append(cirq.H.on(qubit))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "j5tOK2UBc0rP", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# define the two parameters for one block of QAOA\n", + "qaoa_parameters = sympy.symbols('a b')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "JGgm8N3_c9lu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# define the the mixing and the cost Hamiltonian, H_M and H_P\n", + "mixing_ham = 0\n", + "for node in 
maxcut_graph.nodes():\n", + " qubit = cirq_qubits[node]\n", + " mixing_ham += cirq.PauliString(cirq.X(qubit))\n", + "\n", + "cost_ham = maxcut_graph.number_of_edges()/2\n", + "for edge in maxcut_graph.edges():\n", + " qubit1 = cirq_qubits[edge[0]]\n", + " qubit2 = cirq_qubits[edge[1]]\n", + " cost_ham += cirq.PauliString(1/2*(cirq.Z(qubit1)*cirq.Z(qubit2)))\n", + "\n", + "# generate the qaoa circuit\n", + "qaoa_circuit = tfq.util.exponential(operators = [cost_ham, mixing_ham], \n", + " coefficients = qaoa_parameters)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lG52WM2wdPCW", + "colab_type": "text" + }, + "source": [ + "Subsequently, we use these ingredients to build our model. We note here in this case that QAOA has no input data and labels, as we have mapped our graph to the QAOA circuit. To use the TFQ framework we specify the Hadamard circuit as input and convert it to a TFQ tensor. We may then construct a tf.keras model using our QAOA circuit and cost in a TFQ PQC layer, and use a single instance sample for training the variational parameters of the QAOA with the Hadamard gates as an input layer and a target value of $0$ for our loss function. 
This translates into the following code:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MEJ0q4qUdIQ-", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# define the model and training data\n", + "model_circuit, model_readout = qaoa_circuit, cost_ham\n", + "input_ = [hadamard_circuit]\n", + "input_ = tfq.convert_to_tensor(input_)\n", + "optimum = [0]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "8dfq7vf4dYRd", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Build the Keras model.\n", + "optimum=np.array(optimum)\n", + "model = tf.keras.Sequential()\n", + "model.add(tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string))\n", + "model.add(tfq.layers.PQC(model_circuit, model_readout))\n", + "tfq.layers.PQC" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QtTxpOCRl1lE", + "colab_type": "text" + }, + "source": [ + "To optimize the parameters of the ansatz state, we use a classical optimization routine. In general, it would be possible to use pre-calculated parameters or to implement for QAOA tailored optimization routines. For this tutorial, we choose the Adam optimizer implemented in tensorflow. We also choose the mean absolute error as our loss function." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "GJlUgMFpdhhr", + "colab_type": "code", + "colab": {} + }, + "source": [ + "model.compile(loss=tf.keras.losses.mean_absolute_error,\n", + " optimizer=tf.keras.optimizers.Adam())\n", + " \n", + "history = model.fit(input_,optimum,epochs=1000,verbose=1)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "0CfbWtA6eQuH", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(history.history['loss'])\n", + "plt.title(\"QAOA with TFQ\")\n", + "plt.xlabel(\"Iteration\")\n", + "plt.ylabel(\"Loss\")\n", + "plt.show()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Aa9e74pvxXH8", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Read out the optimal paramters and sample from the final state 1000 times\n", + "params = model.trainable_variables\n", + "add = tfq.layers.AddCircuit()\n", + "output_circuit = add(input_, append =qaoa_circuit )\n", + "\n", + "sample_layer = tfq.layers.Sample()\n", + "output = sample_layer(output_circuit, symbol_names=qaoa_parameters, symbol_values = params,repetitions=1000)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "lVo2oeNC0fzs", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Translate output in cut sets\n", + "cuts_qaoa = []\n", + "for bit_string in output.values:\n", + " temp = []\n", + " for pos, bit in enumerate(bit_string):\n", + " if bit==1:\n", + " temp.append(pos)\n", + " cuts_qaoa.append(temp)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "93R1MoMe6guh", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Generate a list with all possible subsets\n", + "from itertools import combinations\n", + "sub_lists = []\n", + "for i in range(0, len(maxcut_graph.nodes())+1):\n", + " temp = [list(x) for x in 
combinations(maxcut_graph.nodes(), i)]\n", + " sub_lists.extend(temp)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "9gOoGimx6z2v", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Calculate the cut_size for all possible cuts\n", + "cut_size = []\n", + "for sub_list in sub_lists:\n", + " cut_size.append(nx.algorithms.cuts.cut_size(maxcut_graph,sub_list))\n", + "\n", + "# Calculate the cut_size for the cuts found with QAOA\n", + "cut_size_qaoa = []\n", + "for cut in cuts_qaoa:\n", + " cut_size_qaoa.append(nx.algorithms.cuts.cut_size(maxcut_graph,cut))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "nW1uZWrK63PH", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Calculate the approximation ratio\n", + "np.mean(cut_size_qaoa)/np.max(cut_size)" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/qgrnn_ising/qgrnn_ising.ipynb b/qgrnn_ising/qgrnn_ising.ipynb new file mode 100644 index 000000000..1489e6c90 --- /dev/null +++ b/qgrnn_ising/qgrnn_ising.ipynb @@ -0,0 +1,992 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "QGRNN_Ising_copy.ipynb", + "provenance": [], + "collapsed_sections": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "u3-uOLgO1R74", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "t8XVZWCF08e4", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ls6iWjEfITx7", + "colab_type": "text" + }, + "source": [ + "# Quantum Graph Recurrent Neural Networks for Ising model\n", + "\n", + "Author : Jae H. Yoo, Google Research.\n", + "\n", + "Contributors : Guillaume Verdon (X company) Trevor McCourt, Antonio J. Martinez, Michael Broughton (Google Research)\n", + "\n", + "Created : 2020-Feb-01\n", + "\n", + "Last updated : 2020-Feb-02 " + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6w4cSncf1Fcl", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/qgrnn_ising/qgrnn_ising.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gJj9eXch1bv1", + "colab_type": "text" + }, + "source": [ + "## Introduction\n", + "\n", + "In this colab, we will learn how to train QGRNN, a variant of Quantum Graph Neural Networks ([Verdon et al.](https://arxiv.org/abs/1909.12264)) to learn the dynamics of the target Hamiltonian of given Ising model. Before going further, we will install related libraries." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WgaptKeDJgg1", + "colab_type": "text" + }, + "source": [ + "### Import & pip install libraries" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "JuXxC5fbaGAS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "Xx3k3M39y0SW", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "E-5MiNQHNpS3", + "colab_type": "code", + "colab": {} + }, + "source": [ + "import networkx as nx\n", + "import numpy as np\n", + "import sympy\n", + "import cirq\n", + "# TensorFlow\n", + "import tensorflow as tf\n", + "from tensorflow.keras import Input, Model\n", + "from tensorflow.keras.optimizers import Adam\n", + "import tensorflow.keras.backend as K\n", + "# TensorFlow Quantum\n", + "import tensorflow_quantum as tfq\n", + "from tensorflow_quantum.python.layers import Expectation\n", + "\n", + "import matplotlib.pyplot as plt\n", + "%matplotlib inline" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "1ZtU7j3LJsGq", + "colab_type": "text" + }, + "source": [ + "### Overview\n", + "\n", + "Here are steps we will follow.\n", + "\n", + "- Preparation of Quantum Data with VQE on Ising model.\n", + "- Construct a QGRNN model\n", + "- Construct Fidelity with Swap Test\n", + "- Calculate the average infidelity loss function\n", + "- Train the QGRNN & get the final result\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kJctwuR0Od3W", + "colab_type": "text" + }, + "source": [ + "## Preparation of Quantum Data with VQE on Ising model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wAcIwSsNSS5t", + "colab_type": "text" + }, + "source": [ + 
"We will construct a target Hamiltonian $H_{target}$ by using a random ring graph $G$ with $N=6$ qubits. On top of it, we will find a low energy state near to ground state of $H_{target}$ by using Variational Quantum Eigensolver (VQE)." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "di3PZEO9U9bN", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Define the number of qubits of our target quantum system.\n", + "N = 6" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "qUVwr99DTkVB", + "colab_type": "text" + }, + "source": [ + "### Transverse field Ising model Hamiltonian\n", + "\n", + "Here are basic introduction of the target Hamiltonian of Ising model. Given\n", + "* $J_{jk}$ for interacting spin pairs and\n", + "* $B_{v}$ for site bias term of each spin,\n", + "\n", + "$H_{target} = \\sum_{j,k} J_{jk} Z_j Z_k + \\sum_{v} B_v Z_v + \\sum_{v} X_v$\n", + "\n", + "It is very easy to construct this Hamiltonian using networkx library in Python." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zd4zMjthU470", + "colab_type": "code", + "colab": {} + }, + "source": [ + "G = nx.cycle_graph(N)\n", + "weights = [4*(np.random.random()-0.5) for _ in G.edges]\n", + "biases = [4*(np.random.random()-0.5) for _ in G.nodes]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YEdTzOq1rK4h", + "colab_type": "text" + }, + "source": [ + "Also, networkx provides graphic APIs to draw graph data. You can see that nodes have Brown to Blue Green colormap, and edges have Red to Blue one." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "8XJlkeCTrSYf", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def draw(graph, pos, weights, biases, title):\n", + " label = {i:'{}'.format(i) for i in graph.nodes}\n", + " edge_options = {\n", + " \"edge_color\": weights,\n", + " \"width\": 4,\n", + " \"edge_cmap\": plt.cm.RdBu,\n", + " \"edge_vmin\" : -2,\n", + " \"edge_vmax\" : 2,\n", + " }\n", + " node_options = {\n", + " \"node_color\": biases,\n", + " \"cmap\": plt.cm.BrBG,\n", + " \"vmin\" : -2,\n", + " \"vmax\" : 2,\n", + " }\n", + " nx.draw_networkx_labels(graph, pos, label, font_color=\"w\")\n", + " nodes = nx.draw_networkx_nodes(graph, pos, **node_options)\n", + " edges = nx.draw_networkx_edges(graph, pos, **edge_options)\n", + " edges.set_cmap(plt.cm.RdBu)\n", + " edges.set_clim(-2, 2)\n", + "\n", + " plt.title(title)\n", + " plt.colorbar(nodes)\n", + " plt.colorbar(edges)\n", + " plt.show()\n", + "\n", + "\n", + "pos = nx.circular_layout(G)\n", + "draw(G, pos, weights, biases, 'Target Ising model')\n", + "print(*zip(G.edges, weights))\n", + "print(*zip(G.nodes, biases))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ifqGlfp0AQeB", + "colab_type": "text" + }, + "source": [ + "Now that we have the graph structure, weights of edges & nodes, we can construct `cirq` based Hamiltonian which can be directly calculated in `cirq` and `tfq`. To create Hamiltonian by using `cirq.PauliSum`'s or `cirq.PauliString`'s we need to assign appropriate qubits on them. We can bring qubits by using `cirq.GridQubit`." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2AamI46wAgkN", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def Hamiltonian(graph, weights, biases, qubits):\n", + " H_cost = [w * cirq.Z(qubits[i]) * cirq.Z(qubits[j]) \\\n", + " for (i, j), w in zip(graph.edges, weights)]\n", + " H_cost += [b * cirq.Z(qubits[v]) for v, b in enumerate(biases)]\n", + " H_mixer = [cirq.X(q) for q in qubits]\n", + " return H_cost, H_mixer\n", + "\n", + "qubits = cirq.GridQubit.rect(1, N)\n", + "true_H_cost, true_H_mixer = Hamiltonian(G, weights, biases, qubits) \n", + "for cost in true_H_cost:\n", + " print(cost)\n", + "for mixer in true_H_mixer:\n", + " print(mixer)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "iGGHlPZPT0T1", + "colab_type": "text" + }, + "source": [ + "### Variational Quantum Eigensolver (VQE)\n", + "\n", + "Here, we will construct a Variational Quantum Eigensolver (VQE) to find out a low energy state $|\\psi_0\\rangle$ near to the ground state." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-S_H9EcwCqlR", + "colab_type": "text" + }, + "source": [ + "#### Variational method\n", + "\n", + "[Variational method](https://en.wikipedia.org/wiki/Variational_method_(quantum_mechanics)) provides the way to find approximated ground energy states. Let $|\\psi(\\vec\\theta)\\rangle$ be a variational ansatz. We can control parameters $\\vec\\theta$ to change the quantum state. 
Then, the following inequality is guaranteed for any given Hamiltonian $H$.\n", + "\n", + "- $\\langle\\psi(\\vec\\theta)|H|\\psi(\\vec\\theta)\\rangle \\ge E_0$, where $E_0$ is the energy of ground state.\n", + "- We can find out $\\vec\\theta$ giving us approximated ground state by minimizing the above expectation value as a loss function" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AlKAegEBE7zO", + "colab_type": "text" + }, + "source": [ + "#### Variational Ansatz\n", + "\n", + "We can make any quantum state by using $X$ rotation & $Z$ rotation in Bloch sphere, which could be a simple and useful variational ansatz for our problem. Because our ansatz will be used for Ising model & QGRNN later, we should not assign qubits to the rotational gates because qubits are immutable in `cirq.Circuit`. That's why we use `VQE.gates` and `VQE.circuit` separately.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "vNCnp0jXFPA8", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class VQE:\n", + " \"\"\"Variational Quantum Eigensolver\"\"\"\n", + " def __init__(self, qubits):\n", + " self.qubits = qubits\n", + " # Parameters\n", + " t_x = ['t_x{}'.format(i) for i, _ in enumerate(qubits)]\n", + " t_z = ['t_z{}'.format(i) for i, _ in enumerate(qubits)]\n", + " self.symbols = t_x + t_z\n", + " # Parameterized quantum gates without Qubits\n", + " gates = []\n", + " gates.append([cirq.XPowGate(exponent=sympy.Symbol(x)) for x in t_x])\n", + " gates.append([cirq.ZPowGate(exponent=sympy.Symbol(z)) for z in t_z])\n", + " self._gates = gates\n", + " self.circuit = self.get_state(qubits)\n", + "\n", + " def get_state(self, qubits, params=None):\n", + " \"\"\"Outputs quantum data with given qubits.\"\"\"\n", + " circuit = cirq.Circuit(\n", + " [g(i) for gates in self._gates for i, g in zip(qubits, gates)])\n", + " if params is None:\n", + " return circuit\n", + " resolver = {k: v for k, v in zip(self.symbols, params)}\n", + " return 
cirq.resolve_parameters(circuit, resolver)\n", + "\n", + "vqe = VQE(qubits)\n", + "print(vqe.circuit)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Xzz4vipJKUfP", + "colab_type": "text" + }, + "source": [ + "#### Construct $\\langle \\psi (\\vec\\theta) | H | \\psi (\\vec\\theta)\\rangle$\n", + "\n", + "TensorFlow Quantum provides `tfq.layers.Expectation` Keras layer to provide easy interface to calculate the expectation value of given ansatz & Hamiltonian." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "BmUn4JHHKpLb", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def vqe_keras_model(vqe, H_target):\n", + " # Construct measurement layers.\n", + " circuit_input = Input(shape=(), dtype=tf.string)\n", + " output = Expectation()(\n", + " circuit_input,\n", + " symbol_names=vqe.symbols,\n", + " operators=tfq.convert_to_tensor([H_target]),\n", + " initializer=tf.keras.initializers.RandomNormal())\n", + " # Each term in H_target is calculated respectively.\n", + " # Here, we sum them up to get the final .\n", + " output = tf.math.reduce_sum(output, axis=-1, keepdims=True)\n", + "\n", + " # Model compile\n", + " model = Model(inputs=circuit_input, outputs=output)\n", + " adam = Adam(learning_rate=0.05)\n", + " model.compile(optimizer=adam, loss='mse')\n", + " return model\n", + "\n", + "H_target = true_H_cost + true_H_mixer\n", + "model = vqe_keras_model(vqe, H_target)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "SsfJPODEJzKx", + "colab_type": "text" + }, + "source": [ + "#### Minimizing $\\langle \\psi (\\vec\\theta) | H | \\psi (\\vec\\theta)\\rangle$\n", + "\n", + "Keras model is used for training VQE. To feed quantum data in the form of `cirq.Circuit`, `tfq.convert_to_tensor()` will be frequently used to make them `tf.Tensor`.\n", + "\n", + "We have quantum input data. What's the output true value? 
Isn't it just a minimization problem?\n", + "\n", + "Here is some tip for training. Setting the output true value to theoretical lower bound, we can minimize our expectation value in the Keras model fit framework. The how can we calculate the lower bound? By the fact that the expectation value of any PauliString is bounded in [-1, 1], we can easily find the lower bound.\n", + "\n", + "- $\\langle \\psi (\\vec\\theta) | H | \\psi (\\vec\\theta)\\rangle = \\sum_{jk}J_{jk}\\langle Z_jZ_k\\rangle + \\sum_{v}B_{v}\\langle Z_v\\rangle + \\sum_{v}\\langle X_v\\rangle \\ge \\sum_{jk}(-)|J_{jk}| -\\sum_{v}|B_{v}| - N $" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "zIbP9w8AJ8_I", + "colab_type": "code", + "colab": {} + }, + "source": [ + "lower_bound = -np.sum(np.abs(weights + biases)) - N\n", + "vqe_input = tfq.convert_to_tensor([vqe.circuit])\n", + "vqe_output = tf.convert_to_tensor([[lower_bound]])\n", + "print('Before training : ={}'.format(model.predict(x=vqe_input)))\n", + "history = model.fit(x=vqe_input, y=vqe_output, batch_size=1, epochs=100,\n", + " verbose=0)\n", + "plt.plot(history.history['loss'])\n", + "print('After training : ={} >= {}'.format(model.predict(x=vqe_input),\n", + " lower_bound))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "u6UG2-XSlzxE", + "colab_type": "text" + }, + "source": [ + "In the next sections, we will use this low energy state as initial states of Ising model & QGRNN. Since they have different qubits, we need to create both." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NbdHQpzcmAJW", + "colab_type": "code", + "colab": {} + }, + "source": [ + "vqe_params = model.get_weights()[0]\n", + "low_energy_ising = vqe.get_state(qubits, vqe_params)\n", + "# For QGRNN, get new qubit indices (0, N)~(0,2*N-1)\n", + "qubits_qgrnn = cirq.GridQubit.rect(1, N, 0, N)\n", + "low_energy_qgrnn = vqe.get_state(qubits_qgrnn, vqe_params)\n", + "print(low_energy_ising)\n", + "print(low_energy_qgrnn)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "QiQzwRN0oy8a", + "colab_type": "text" + }, + "source": [ + "### Construct Ising model\n", + "\n", + "In VQE, we can just use Hamiltonian $H$ directly. However, we need to exponentiate Hamiltonians to construct Ising model. Moreover, it is proved that exponentiation of two non-commutable observables is not equal to exponentiation of the sum of them. That is, $e^{A+B} \\neq e^Ae^B$ if $[A, B]\\neq0$.\n", + "\n", + "There are two options to deal with this problem.\n", + "1. [Baker-Campbell-Hausdorff (BCH) formula](https://en.wikipedia.org/wiki/Baker%E2%80%93Campbell%E2%80%93Hausdorff_formula)\n", + "\n", + " - $e^Ae^B=e^Ae^Be^{\\frac{1}{2}[A, B]}e^{\\frac{1}{12}[A, [A, B]]}e^{-\\frac{1}{12}[B, [A, B]]} ...$\n", + " - Analytic solution, but it is hard to be calculated.\n", + "\n", + "2. [Suzuki-Trotter expansion](https://en.wikipedia.org/wiki/Time-evolving_block_decimation#The_Suzuki-Trotter_expansion)\n", + "\n", + " - $e^Ae^B \\simeq \\prod e^{\\delta t A}e^{\\delta t B}$\n", + " - Tractable, but it is an approximated solution\n", + "\n", + "We will use the second option. 
Let $P=\\frac{T_j}{\\delta t}$.\n", + "\n", + " - $|\\psi_{T_j}\\rangle = U^{j}_{Ising}|\\psi_0\\rangle = e^{-i T_j H_{target}}|\\psi_0\\rangle\\sim [\\prod^{P}e^{-i {\\delta t}H_{mixer}} e^{-i {\\delta t}H_{cost}}]|\\psi_0\\rangle$" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "JoqmPfBMo56q", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class IsingModel:\n", + "\n", + " def __init__(self, qubits, graph, weights, biases, eta=0.01):\n", + " self.qubits = qubits\n", + " self.graph = graph\n", + " self.weights = weights\n", + " self.biases = biases\n", + " self.eta = eta\n", + "\n", + " # Construct Hamiltonian\n", + " _weights = [eta * w for w in weights]\n", + " _biases = [eta * b for b in biases]\n", + " H_cost, H_mixer = Hamiltonian(graph, _weights, _biases, qubits)\n", + " # Exponentiate each Hamiltonian\n", + " # The reason why we split cost & mixer is they are not commutable\n", + " self._cost_step = tfq.util.exponential(operators=H_cost)\n", + " self._mix_step = tfq.util.exponential(operators=H_mixer)\n", + "\n", + " def __call__(self, input_state, depth):\n", + " \"\"\"Trotterizaiton\"\"\"\n", + " add = tfq.layers.AddCircuit()\n", + " output_state = add(cirq.Circuit(), append=input_state)\n", + " for _ in range(depth):\n", + " output_state = add(output_state, append=self._cost_step)\n", + " output_state = add(output_state, append=self._mix_step)\n", + " return output_state\n", + "\n", + "ising = IsingModel(qubits, G, weights, biases)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JQW032AZT7Jv", + "colab_type": "text" + }, + "source": [ + "### Time evolution of Ising model\n", + "Let's construct an Ising model and evolve $|\\psi_0\\rangle$ with randomly sampled timesteps $T_j\\in [0, T_{max}]$" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VTza-tRUe6Rg", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def random_timestep_sample(batch_size, 
T_max=0.1):\n", + " return [T_max * np.random.uniform() for _ in range(batch_size)]\n", + "\n", + "batch_size = 15\n", + "T = random_timestep_sample(batch_size)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Hxk4j9Zgm_dE", + "colab_type": "text" + }, + "source": [ + "We can evolve the above `low_energy_ising` and `low_energy_qgrnn` by using exponentiation of Hamiltonian & Trotterization. If we say $P$ is a Trotterization depth with time unit ${\\delta t}$, we can get the depth $P=\\frac{T}{\\delta t}$" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ua-G3kgGnXdk", + "colab_type": "code", + "colab": {} + }, + "source": [ + "dt = 0.01\n", + "depth = [int(t/dt)+1 for t in T] # Circuit depth for Ising & QGRNN model\n", + "print(depth)\n", + "\n", + "true_final_states = []\n", + "for P in depth:\n", + " true_final_states.append(ising(low_energy_ising, depth=P))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "oyCwkeK8T-zZ", + "colab_type": "text" + }, + "source": [ + "Now that we have a set $\\{(|\\psi_0\\rangle, |\\psi_{T_j}\\rangle) | j = 1..M\\}$ where $M$ is the number of data, or batch size, we finished to generate quantum data for QGRNN." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VwKKQMcUOhCK", + "colab_type": "text" + }, + "source": [ + "## Construct QGRNN model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZDsRvEVLpuWq", + "colab_type": "text" + }, + "source": [ + "In this section, we will implement QGRNN model with \n", + "\n", + "- Trainable parameters : $\\theta_{jk}$, $\\phi_{v}$, $\\alpha_{v}$ \n", + " - $\\alpha_{v}$ can be set to a constant 1.\n", + "- $H_{QGRNN} = \\sum_{j,k} \\theta_{jk} Z_j Z_k + \\sum_{v} \\phi_v Z_v + \\sum_{v} \\alpha_v X_v$\n", + "\n", + "Because the target Hamiltonian is unknown to QGRNN, we need to initialize a new random graph inside our QGRNN. 
In this example we will use 4-regular graph (each node has 4 edges. For 6 nodes, graph has 12 edges.) But, for simplicity, we add missing edges in the ring structure." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "sJu51fYwtVYL", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class QuantumGraphRNN:\n", + "\n", + " def __init__(self, qubits, G, eta=0.01, train_mixer=False):\n", + " self.qubits = qubits\n", + " self.eta = eta\n", + " self.graph = G\n", + " # Set parameters for cost\n", + " self._theta = ['theta{}'.format(e) for e in G.edges]\n", + " self._phi = ['phi{}'.format(v) for v in G.nodes]\n", + " self.symbols = self._theta + self._phi \n", + " # Set parameters for mixer\n", + " self.train_mixer = train_mixer\n", + " if train_mixer:\n", + " self._alpha = ['alpha{}'.format(v) for v in G.nodes]\n", + " self.symbols += self._alpha\n", + " else:\n", + " self._alpha = [1.0 for _ in G.nodes]\n", + "\n", + " # Construct Hamiltonian\n", + " weights = [eta] * len(G.edges)\n", + " biases = [eta] * len(G.nodes)\n", + " H_cost, H_mixer = Hamiltonian(G, weights, biases, qubits)\n", + " # Exponentiate each Hamiltonian with parameters\n", + " self._cost_step = tfq.util.exponential(\n", + " operators=H_cost, coefficients=self._theta + self._phi)\n", + " self._mix_step = tfq.util.exponential(\n", + " operators=H_mixer, coefficients=self._alpha)\n", + "\n", + " def __call__(self, input_state, depth):\n", + " add = tfq.layers.AddCircuit()\n", + " output_state = add(cirq.Circuit(), append=input_state)\n", + " for _ in range(depth):\n", + " output_state = add(output_state, append=self._cost_step)\n", + " output_state = add(output_state, append=self._mix_step)\n", + " return output_state\n", + "\n", + "# The true graph of Ising model is unknown to QGRNN.\n", + "# Think a new 4-regular random graph with at least one cycle\n", + "# 0->1->2->3->4->5->0\n", + "G_qgrnn = nx.random_regular_graph(n=N, d=4)\n", + "node_color = [2.0 for _ in G_qgrnn.nodes]\n", + 
"missing = []\n", + "add_cycle = True\n", + "if add_cycle:\n", + " for i in range(N):\n", + " j = (i+1) % N\n", + " if (i, j) in G_qgrnn.edges or (j, i) in G_qgrnn.edges:\n", + " continue\n", + " print('add : ', i, j)\n", + " G_qgrnn.add_edge(i, j)\n", + " missing.extend([(i, j), (j, i)])\n", + "edge_color = [-2.0 if e in missing else 2.0 for e in G_qgrnn.edges]\n", + "draw(G_qgrnn, pos, edge_color, node_color, '4-regular random graph with cycle')\n", + "qgrnn = QuantumGraphRNN(qubits_qgrnn, G_qgrnn)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "XyeCeJOpqmfj", + "colab_type": "text" + }, + "source": [ + "Let's evolve quantum input data $|\\psi_0\\rangle$ according to QGRNN ansatz, and get $U^{j}_{QGRNN}(\\theta, \\phi)|\\psi_0\\rangle$" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NCtsOE79vCkR", + "colab_type": "code", + "colab": {} + }, + "source": [ + "pred_final_states = []\n", + "for P in depth:\n", + " pred_final_states.append(qgrnn(low_energy_qgrnn, depth=P))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4fZ-p83KSUlH", + "colab_type": "text" + }, + "source": [ + "## Construct Fidelity with Swap Test" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "VYeYcPa0TEe3", + "colab_type": "text" + }, + "source": [ + "Now we have both (1) quantum data from true Hamiltonian of Ising model and (2) predicted quantum data from QGRNN. Because they are all quantum data, we can't compare them each other as we do in the calculation of loss function at the classical deep neural networks. Measurement on a qubit will destory other qubit informations. 
That's why we introduce Fidelity with [Swap Test](https://en.wikipedia.org/wiki/Swap_test) here.\n", + "\n", + "- The expectation value of swap test of two quantum states $|\\psi\\rangle$ and $|\\phi\\rangle$ is the square of the inner product of them.\n", + " - $\\operatorname{Prob}(0)=\\operatorname{Prob}(Z=+1)=\\frac{1}{2}+\\frac{1}{2}|\\langle\\phi|\\psi\\rangle|^2$\n", + " - $\\langle Z_{test} \\rangle = 1 \\times \\operatorname{Prob}(Z=+1) + (-1) \\times \\operatorname{Prob}(Z=-1)=2\\operatorname{Prob}(Z=+1) - 1$\n", + " - $\\therefore \\langle Z_{test} \\rangle = |\\langle\\phi|\\psi\\rangle|^2$\n", + "- We have ground truth Ising model state $|\\psi_{T_j}\\rangle$ and predicted state $U^{j}_{QGRNN}(\\theta, \\phi)|\\psi_0\\rangle$\n", + " - That is, $\\langle Z_{test} \\rangle_j = |\\langle \\psi_{T_j} | U^{j}_{QGRNN}(\\theta, \\phi)|\\psi_0\\rangle|^2 \\ge 0$\n", + "\n", + "As you can see, the expectation has lower bound 0.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "4OuggTHHxnVZ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "class SwapTestFidelity:\n", + "\n", + " def __init__(self, qubits1, qubits2, batch_size):\n", + " circuit = cirq.Circuit()\n", + " test_bit = set(cirq.GridQubit.rect(1, len(qubits1) + len(qubits2) + 1))\n", + " test_bit -= set(qubits1 + qubits2)\n", + " test_bit = list(test_bit)[0]\n", + " circuit.append(cirq.H(test_bit))\n", + " for a, b in zip(qubits1, qubits2):\n", + " circuit.append(cirq.decompose(cirq.CSwapGate().on(test_bit, a, b)))\n", + " circuit.append(cirq.H(test_bit))\n", + " self.circuit = tfq.convert_to_tensor([circuit] * batch_size)\n", + " self.op = tfq.convert_to_tensor([[cirq.Z(test_bit)]] * batch_size)\n", + "\n", + " def __call__(self, input_state1, input_state2):\n", + " add = tfq.layers.AddCircuit()\n", + " return add(add(input_state1, append=input_state2), append=self.circuit)\n", + " \n", + "fidelity = SwapTestFidelity(qubits, qubits_qgrnn, batch_size)\n", + "\n", + "# 
Construct measurement layers.\n", + "state_true = Input(shape=(), dtype=tf.string)\n", + "state_pred = Input(shape=(), dtype=tf.string)\n", + "fid_output = fidelity(state_true, state_pred)\n", + "fid_output = Expectation()(fid_output,\n", + " symbol_names=qgrnn.symbols,\n", + " operators=fidelity.op)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JC5gg6zP2iEB", + "colab_type": "text" + }, + "source": [ + "## Calculate the average infidelity" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3tmY4N9dTKUm", + "colab_type": "text" + }, + "source": [ + "In fact, QGRNN is also a recurrent neural network that learns various time step evolution from the same input. We need to aggregate the results among the different timestep evolutions to train the QGRNN model. Here we introduce the average fidelity.\n", + "\n", + " - $L(\\theta, \\phi) = 1 - \\frac{1}{B} \\sum^{B}_{j=1} |\\langle \\psi_{T_j} | U^{j}_{QGRNN}(\\theta, \\phi)|\\psi_0\\rangle|^2 = 1 - \\frac{1}{B} \\sum^{B}_{j=1} \\langle Z_{test} \\rangle_j $\n", + "\n", + "We can implement this custom keras loss function like this." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "p9rvt2NGyUde", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def average_fidelity(y_true, y_pred):\n", + " return 1 - K.mean(y_pred)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ckA4FbKoS7Lu", + "colab_type": "text" + }, + "source": [ + "## Train the QGRNN & get the final result" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2LU0bfW8mBzR", + "colab_type": "text" + }, + "source": [ + "Again, we can use Keras model fit. To feed a batch of quantum data, we can use `tf.concat` because the quantum circuits are already in `tf.Tensor`. 
In this case, we know that the lower bound of fidelity is 0, but the true `model_output` is not used in our custom loss function `average_fidelity`. For the purpose of comparison, `initial_params` contains the initial weights & biases of QGRNN. We set learning rate of Adam optimizer to $0.05$ as our paper described." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "TW5R4fCIysbs", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Model compile\n", + "model = Model(inputs=[state_true, state_pred], outputs=fid_output)\n", + "adam = Adam(learning_rate=0.05)\n", + "model.compile(optimizer=adam, loss=average_fidelity)\n", + "initial_params = model.get_weights()\n", + "\n", + "# Data preparation\n", + "y_true = tf.concat(true_final_states, axis=0)\n", + "y_pred = tf.concat(pred_final_states, axis=0)\n", + "model_input = [y_true, y_pred]\n", + "# Lower bound of average fidelity = 0, but not used in average_fidelity\n", + "model_output = tf.convert_to_tensor([[0]] * batch_size)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "e7_rGM8QG53c", + "colab_type": "code", + "colab": {} + }, + "source": [ + "%%time\n", + "history = model.fit(x=model_input,\n", + " y=model_output,\n", + " batch_size=batch_size,\n", + " epochs=500)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "zQTZALG2Ybfq", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Learning curve\n", + "plt.plot(history.history['loss'])\n", + "plt.title('Learning curve (min loss={:.3e})'.format(\n", + " np.min(history.history['loss'])))\n", + "plt.xlabel('iteration')\n", + "plt.ylabel('log fidelity')\n", + "plt.yscale('log')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "r94jsV_VkU_y", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Target Ising model graph\n", + "draw(G, pos, weights, biases, 'Target Ising 
model')\n", + "# Initial QGRNN graph.\n", + "init_edge_color = initial_params[0][:len(G_qgrnn.edges)]\n", + "init_node_color = initial_params[0][len(G_qgrnn.edges):]\n", + "draw(G_qgrnn, pos, init_edge_color, init_node_color, 'Initial QGRNN model')\n", + "# Trained QGRNN graph.\n", + "final_params = model.get_weights()\n", + "final_edge_color = final_params[0][:len(G_qgrnn.edges)]\n", + "final_node_color = final_params[0][len(G_qgrnn.edges):]\n", + "draw(G_qgrnn, pos, final_edge_color, final_node_color, 'Trained QGRNN model')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vgS2kGNmmy_d", + "colab_type": "text" + }, + "source": [ + "# Conclusion\n", + "\n", + "In this colab, we implemented a variant of Quantum Graph Neural Networks called Quantum Graph `Recurrent` Neural Network. As the name suggests, this model learns time evolution dynamics of given target Hamiltonian from its quantum data. You can modify and train your own configurations of the model. Here are some suggestions for you.\n", + "\n", + "- Set `train_mixer=True` in class `QuantumGraphRNN` and retrain the model. Then the mixer Hamiltonian in QGRNN becomes also trainable. What can you observe after training?\n", + "\n", + "- We added missing edges in the cycle `0->1->...->5->0`. What if we remove the cycle? It means the QGRNN doesn't know any clue of the true Hamiltonian at all except the number of qubits. Set `add_cycle=False`. Is the loss still going to the order of `1e-4`?\n", + "\n" + ] + } + ] +} \ No newline at end of file diff --git a/release/BUILD b/release/BUILD deleted file mode 100644 index 2405d8c97..000000000 --- a/release/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -licenses(["notice"]) - -sh_binary( - name = "build_pip_package", - srcs = ["build_pip_package.sh"], - data = [ - # Include the __init__.py files - # Module level - "__init__.py", - "//tensorflow_quantum:__init__.py", - - # Core module. 
- "//tensorflow_quantum/core:__init__.py", - "//tensorflow_quantum/core/ops:__init__.py", - "//tensorflow_quantum/core/proto:__init__.py", - "//tensorflow_quantum/core/serialize:__init__.py", - - # Python module. - "//tensorflow_quantum/python:__init__.py", - "//tensorflow_quantum/python/differentiators:__init__.py", - "//tensorflow_quantum/python/layers:__init__.py", - "//tensorflow_quantum/python/layers/circuit_construction:__init__.py", - "//tensorflow_quantum/python/layers/circuit_executors:__init__.py", - "//tensorflow_quantum/python/layers/high_level:__init__.py", - - # Datasets module. - "//tensorflow_quantum/datasets:__init__.py", - - # Dependencies to run PIP package creation. - "MANIFEST.in", - "setup.py", - - # Dependencies that the PIP package will import. - "//tensorflow_quantum/core/ops:tfq_utility_ops_py", - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "//tensorflow_quantum/datasets:cluster_state", - "//tensorflow_quantum/python/differentiators:parameter_shift", - "//tensorflow_quantum/python/differentiators:stochastic_differentiator", - "//tensorflow_quantum/python/layers/circuit_construction:elementary", - "//tensorflow_quantum/python/layers/circuit_executors:expectation", - "//tensorflow_quantum/python/layers/circuit_executors:sample", - "//tensorflow_quantum/python/layers/circuit_executors:state", - "//tensorflow_quantum/python/layers/circuit_executors:sampled_expectation", - "//tensorflow_quantum/python/layers/high_level:controlled_pqc", - "//tensorflow_quantum/python/layers/high_level:pqc", - "//tensorflow_quantum/python:util", - ], -) diff --git a/release/MANIFEST.in b/release/MANIFEST.in deleted file mode 100644 index 30066d462..000000000 --- a/release/MANIFEST.in +++ /dev/null @@ -1 +0,0 @@ -recursive-include tensorflow_quantum/ *.so diff --git a/release/README.md b/release/README.md deleted file mode 100644 index c645395ff..000000000 --- a/release/README.md +++ /dev/null @@ -1,31 
+0,0 @@ -Instructions to create and intsall TensorFlow Quantum as a PIP package: - -First create a Docker container with Ubuntu 16.04 and the devtoolset7 toolchain. -``` -./release/open_ubuntu_docker.sh -``` -Then build pip packages for python 3.6 and 3.7 inside of the docker with: -``` -./release/build_all_wheels.sh -exit -``` - -The resulting `.whl` files will be placed in a new folder outside of the docker called -`wheels`. From here they need to be repaired with `auditwheel` to ensure maximum -compatability across platforms. To repair the wheels run: -``` -./release/repair_wheels.sh -``` - -Now the `wheels` folder should contain the built wheel files and the manylinux2010 -version of the wheels for python 3.6 and 3.7. - -A wheel can be installed with: -``` -python3 -m pip install --user wheels/name_of_wheel.whl -``` - -Note, that if you are planning on running TFQ as a PIP package instead of -with Bazel, you cannot run from the tensorflow_quantum/ directory, or python will attempt -to use the local files instead of the site-package/ files and fail to find -dependencies. diff --git a/release/__init__.py b/release/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/release/build_all_wheels.sh b/release/build_all_wheels.sh deleted file mode 100755 index 004e156d3..000000000 --- a/release/build_all_wheels.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -# Build python wheels for 3.6 and 3.7 and store them in wheels/ -cd .. -git clone https://github.com/tensorflow/custom-op.git -cd custom-op -git checkout 994dc6bdd5b7c0c0c0ffb55bb0ac013d9d9268cd -cd .. - -# Copy the toolchain config over from custom-op. - -# Upgrade existing 3.6 pip. -python3 -m pip install --upgrade pip -python3 -m pip install tensorflow==2.1.0 - -cp -r custom-op/third_party/toolchains quantum/third_party/ -cd /quantum -echo "Y\n" | ./configure.sh - -bazel build -c opt --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain \ - --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" release:build_pip_package -bazel-bin/release/build_pip_package /tmp/tensorflow_quantum/ - -mkdir wheels - -cp /tmp/tensorflow_quantum/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl wheels/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl -bazel clean - -# Now build the 3.7 wheel. 
-cd /tmp -wget https://www.python.org/ftp/python/3.7.5/Python-3.7.5rc1.tar.xz -tar -xf Python-3.7.5rc1.tar.xz -cd Python-3.7.5rc1 -./configure -make -j2 build_all -sudo make altinstall - -cd /quantum - -python3.7 -m pip install --upgrade pip setuptools -python3.7 -m pip install tensorflow==2.1.0 -sed -i 's/python3/python3.7/g' configure.sh -sed -i 's/python3/python3.7/g' release/build_pip_package.sh - -echo "Y\n" | ./configure.sh - -bazel build -c opt --crosstool_top=//third_party/toolchains/preconfig/ubuntu16.04/gcc7_manylinux2010-nvcc-cuda10.0:toolchain \ - --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" release:build_pip_package -bazel-bin/release/build_pip_package /tmp/tensorflow_quantum/ - -cp /tmp/tensorflow_quantum/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl wheels/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl -bazel clean - -sed -i 's/python3.7/python3/g' configure.sh -sed -i 's/python3.7/python3/g' release/build_pip_package.sh - -rm -r /quantum/third_party/toolchains -exit diff --git a/release/build_pip_package.sh b/release/build_pip_package.sh deleted file mode 100755 index 67176bc94..000000000 --- a/release/build_pip_package.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -set -e -set -x - -EXPORT_DIR="bazel-bin/release/build_pip_package.runfiles/__main__" - -function main() { - DEST=${1} - - if [[ -z ${DEST} ]]; then - echo "No destination directory provided." - exit 1 - fi - - mkdir -p ${DEST} - echo "=== destination directory: ${DEST}" - - TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX) - - echo $(date) : "=== Using tmpdir: ${TMPDIR}" - - echo "=== Copy TFQ files" - - # Copy over files necessary to run setup.py - cp ${EXPORT_DIR}/release/setup.py "${TMPDIR}" - cp ${EXPORT_DIR}/release/MANIFEST.in "${TMPDIR}" - - # Copy over all files in the tensorflow_quantum/ directory that are included in the BUILD - # rule. - mkdir "${TMPDIR}"/tensorflow_quantum - cp -r -v ${EXPORT_DIR}/tensorflow_quantum/* "${TMPDIR}"/tensorflow_quantum/ - - pushd ${TMPDIR} - echo $(date) : "=== Building wheel" - - python3 setup.py bdist_wheel > /dev/null - - cp dist/*.whl "${DEST}" - popd - rm -rf ${TMPDIR} - echo $(date) : "=== Output wheel file is in: ${DEST}" -} - -main "$@" diff --git a/release/open_ubuntu_docker.sh b/release/open_ubuntu_docker.sh deleted file mode 100755 index 1c4c352bc..000000000 --- a/release/open_ubuntu_docker.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -# Load up an unbuntu 16.04 docker for building compatable pip packages. -sudo docker pull tensorflow/tensorflow:custom-op-ubuntu16 -sudo docker run -it -v ${PWD}:/quantum -w /quantum tensorflow/tensorflow:custom-op-ubuntu16 - -# Writing the permissions for the wheels directory inside of the docker doesn't work. -sudo chmod -R 777 wheels diff --git a/release/repair_wheels.sh b/release/repair_wheels.sh deleted file mode 100755 index 672b66986..000000000 --- a/release/repair_wheels.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# We use an aliase of auditwheel to allow for whitelisting of tensorflow.so.2 -# files by auditwheel. -for f in wheels/*.whl; do - sudo docker run -i --rm -v $PWD:/v -w /v \ - --net=host quay.io/pypa/manylinux2010_x86_64 bash \ - -x -e /v/third_party/tf/auditwheel repair --plat \ - manylinux2010_x86_64 $f -done - -cp -r wheelhouse/. wheels/ -sudo rm -r wheelhouse - diff --git a/release/setup.py b/release/setup.py deleted file mode 100644 index f2699e849..000000000 --- a/release/setup.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Setup for pip package.""" -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from setuptools import Extension -from setuptools import find_packages -from setuptools import setup -from setuptools.dist import Distribution -from setuptools.command.install import install - - -class InstallPlatlib(install): - """Workaround so .so files in generated wheels - can be seen by auditwheel.""" - - def finalize_options(self): - install.finalize_options(self) - if self.distribution.has_ext_modules(): - self.install_lib = self.install_platlib - - -REQUIRED_PACKAGES = [ - #'tensorflow = 2.0.0b1', - 'cirq == 0.7.0', - 'tensorflow == 2.1.0' -] - - -class BinaryDistribution(Distribution): - """This class is needed in order to create OS specific wheels.""" - - def has_ext_modules(self): - return True - - -setup( - name='tensorflow-quantum', - version='0.2.0', - description= - 'TensorFlow Quantum is a library for hybrid quantum-classical machine learning.', - author='Google Inc.', - author_email='no-reply@google.com', - url='https://github.com/tensorflow/quantum/', - packages=find_packages(), - install_requires=REQUIRED_PACKAGES, - # Add in any packaged data. 
- include_package_data=True, - #ext_modules=[Extension('_foo', ['stub.cc'])], - zip_safe=False, - distclass=BinaryDistribution, - # PyPI package information. - classifiers=[ - 'Development Status :: 3 - Alpha', - 'Intended Audience :: Developers', - 'Intended Audience :: Education', - 'Intended Audience :: Science/Research' - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Topic :: Scientific/Engineering', - 'Topic :: Scientific/Engineering :: Artificial Intelligence', - 'Topic :: Scientific/Engineering :: Mathematics', - 'Topic :: Scientific/Engineering :: Physics', - ], - license='Apache 2.0', - keywords='tensorflow machine learning quantum qml', - cmdclass={'install': InstallPlatlib}) diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index f5e259acb..000000000 --- a/requirements.txt +++ /dev/null @@ -1,6 +0,0 @@ -cirq==0.7.0 -nbconvert==5.6.1 -nbformat==4.4.0 -pylint==2.4.4 -yapf==0.28.0 -tensorflow==2.1.0 diff --git a/scripts/README.md b/scripts/README.md deleted file mode 100644 index e11fdb72e..000000000 --- a/scripts/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# For installation/testing scripts - -TODO (mbbrough): once we have dependancies on non github version of cirq add a pip requirements file. - -We have basic scripts to make doing local testin, code linting and formatting easier for you. - -1. `./scripts/format_all.sh` will apply clang-format and yapf to all source files. -2. `./scripts/test_all.sh` will run all bazel py_test and cc_test rules. -3. `./scripts/lint_all.sh` will run pylint and (eventually) clang-tidy - -Make sure you have run all of these checks before submitting a PR and are happy with the outputs. diff --git a/scripts/benchmark_all.sh b/scripts/benchmark_all.sh deleted file mode 100644 index 9fbf73387..000000000 --- a/scripts/benchmark_all.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Testing benchmarks."; -test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/...)) -exit_code=$? - -if [ "$exit_code" == "0" ]; then - echo "Testing Complete! Moving on to"; -else - echo "Testing failed, please correct errors before proceeding." - echo "{$test_outputs}" - exit 64; -fi - -echo "Running preconfigured benchmarks."; -bazel_run=${bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4"} -bazel_run benchmarks/scripts:benchmark_clifford_circuit -- --op_density 1 --n_moments 10 --n_qubits 4 \ No newline at end of file diff --git a/scripts/build_docs.py b/scripts/build_docs.py deleted file mode 100644 index 70c2e251d..000000000 --- a/scripts/build_docs.py +++ /dev/null @@ -1,77 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tool to generate external api_docs for tfq.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import os - -from absl import app -from absl import flags -from tensorflow_docs.api_generator import doc_controls -from tensorflow_docs.api_generator import generate_lib -from tensorflow_docs.api_generator import public_api - -import tensorflow_quantum as tfq - -flags.DEFINE_string("output_dir", "/tmp/tfq_api", "Where to output the docs") - -flags.DEFINE_string("code_url_prefix", - ("https://github.com/tensorflow/quantum/tree/master/" - "tensorflow_quantum"), "The url prefix for links to code.") - -flags.DEFINE_bool("search_hints", True, - "Include metadata search hints in the generated files") - -flags.DEFINE_string("site_path", "quantum/api_docs/python", - "Path prefix in the _toc.yaml") - -FLAGS = flags.FLAGS - - - - -def main(unused_argv): - - doc_generator = generate_lib.DocGenerator( - root_title="TensorFlow Quantum", - py_modules=[("tfq", tfq)], - base_dir=os.path.dirname(tfq.__file__), - code_url_prefix=FLAGS.code_url_prefix, - search_hints=FLAGS.search_hints, - site_path=FLAGS.site_path, - callbacks=[public_api.local_definitions_filter], - private_map={ - "tfq": ["python", "core"], - "tfq.layers": [ - "circuit_construction", - "circuit_executors", - "high_level", - ], - "tfq.differentiators": [ - "linear_combination", "differentiator", "parameter_shift", - "stochastic_differentiator", 
"parameter_shift_util", - "stochastic_differentiator_util" - ], - "tfq.datasets": ["cluster_state"] - }) - - doc_generator.build(output_dir=FLAGS.output_dir) - - -if __name__ == "__main__": - app.run(main) diff --git a/scripts/build_pip_package_test.sh b/scripts/build_pip_package_test.sh deleted file mode 100755 index 9701c16da..000000000 --- a/scripts/build_pip_package_test.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -pip install tensorflow==2.1.0 cirq==0.7.0 - -# cd tensorflow_quantum -echo "Y\n" | ./configure.sh - -bazel build -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" release:build_pip_package -rm /tmp/tensorflow_quantum/* || echo ok -bazel-bin/release/build_pip_package /tmp/tensorflow_quantum/ -pip install -U /tmp/tensorflow_quantum/*.whl \ No newline at end of file diff --git a/scripts/ci_install.sh b/scripts/ci_install.sh deleted file mode 100755 index 17c4b3795..000000000 --- a/scripts/ci_install.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -wget https://github.com/bazelbuild/bazel/releases/download/0.26.0/bazel_0.26.0-linux-x86_64.deb -sudo dpkg -i bazel_0.26.0-linux-x86_64.deb -pip install --upgrade pip setuptools wheel -pip install -r requirements.txt \ No newline at end of file diff --git a/scripts/ci_validate_tutorials.sh b/scripts/ci_validate_tutorials.sh deleted file mode 100755 index 5e9905a7a..000000000 --- a/scripts/ci_validate_tutorials.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -# Run the tutorials using the installed pip package -pip install jupyter nbformat==4.4.0 nbconvert==5.6.1 -# Workaround for ipykernel - see https://github.com/ipython/ipykernel/issues/422 -pip install ipykernel==5.1.1 -# Leave the quantum directory, otherwise errors may occur -cd .. 
-examples_output=$(python3 quantum/scripts/test_tutorials.py) -exit_code=$? -if [ "$exit_code" == "0" ]; then - exit 0; -else - echo "Tutorials failed to run to completion:" - echo "{$examples_output}" - exit 64; -fi \ No newline at end of file diff --git a/scripts/format_all.sh b/scripts/format_all.sh deleted file mode 100755 index 0e374a3cc..000000000 --- a/scripts/format_all.sh +++ /dev/null @@ -1,24 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Doing python language formatting..." -python3 -m yapf --style=google --in-place --recursive ./benchmarks -python3 -m yapf --style=google --in-place --recursive ./tensorflow_quantum -echo -e "Done! \nDoing notebook formatting..." -python3 ./scripts/format_ipynb.py -echo -e "Done! \nDoing C++ formatting..." -find tensorflow_quantum/ -iname *.h -o -iname *.cc | xargs clang-format -i -style=google -echo "Done!" -exit 0; diff --git a/scripts/format_check.sh b/scripts/format_check.sh deleted file mode 100755 index 1d91427e0..000000000 --- a/scripts/format_check.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Checking python formatting..."; - -################################################################################ -# Python incremental format checker adapted from format-incremental in Cirq. -# -# The base git revision to compare against is chosen from the following defaults, -# in order, until one exists: -# -# 1. upstream/master -# 2. origin/master -# 3. master -# -# If none exists, the script fails. -################################################################################ - -# Get the working directory to the repo root. -cd "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd "$(git rev-parse --show-toplevel)" - -# Parse arguments. -rev="" -for arg in $@; do - echo -e "\033[31mNo arguments expected.\033[0m" >&2 - exit 1 -done - -# Figure out which branch to compare against. -if [ -z "${rev}" ]; then - if [ "$(git cat-file -t upstream/master 2> /dev/null)" == "commit" ]; then - rev=upstream/master - elif [ "$(git cat-file -t origin/master 2> /dev/null)" == "commit" ]; then - rev=origin/master - elif [ "$(git cat-file -t master 2> /dev/null)" == "commit" ]; then - rev=master - else - echo -e "\033[31mNo default revision found to compare against.\033[0m" >&2 - exit 1 - fi -fi -base="$(git merge-base ${rev} HEAD)" -if [ "$(git rev-parse ${rev})" == "${base}" ]; then - echo -e "Comparing against revision '${rev}'." >&2 -else - echo -e "Comparing against revision '${rev}' (merge base ${base})." 
>&2 - rev="${base}" -fi - -# Get the _test version of changed python files. -needed_changes=0 -changed_files=$(git diff --name-only ${rev} -- | grep "\.py$" | grep -v "_pb2\.py$") -esc=$(printf '\033') -for changed_file in ${changed_files}; do - # Extract changed line ranges from diff output. - changed_line_ranges=$( \ - git diff --unified=0 "${rev}" -- "${changed_file}" \ - | perl -ne 'chomp(); if (/@@ -\d+(,\d+)? \+(\d+)(,)?(\d+)? @@/) {$end=$2+($4 or 1)-1; print "--lines=$2-$end "}' \ - ) - if [[ "${changed_line_ranges}" != "--lines=0-0 " ]]; then - # Do the formatting. - results=$(yapf --style=google --diff "${changed_file}" ${changed_line_ranges}) - - # Print colorized error messages. - if [ ! -z "${results}" ]; then - needed_changes=1 - echo -e "\n\033[31mChanges in ${changed_file} require formatting:\033[0m\n${results}" \ - | sed "s/^\(+ .*\)$/${esc}[32m\\1${esc}[0m/" \ - | sed "s/^\(- .*\)$/${esc}[31m\\1${esc}[0m/" - fi - fi -done - -if (( needed_changes == 0 )); then - echo -e "\033[32mNo formatting needed on changed lines\033[0m." -else - echo -e "\033[31mSome formatting needed on changed lines\033[0m." - exit 1 -fi - -echo "Checking C++ formatting..."; -formatting_outputs=$(find tensorflow_quantum/ -iname *.h -o -iname *.cc | xargs clang-format -style=google -output-replacements-xml); -CFORMATCHECK=0 -while read -r formatting_outputs; do - if [ "$formatting_outputs" != "" ] && [ "$formatting_outputs" != "" ] && [ "$formatting_outputs" != "" ] && [ "$formatting_outputs" != " " ]; then - CFORMATCHECK=64 - fi -done <<< "$formatting_outputs" -if [ "$CFORMATCHECK" == "0" ]; then - echo "C++ format checking complete!"; - exit 0; -else - echo "C++ format checking failed, please run the formatting script before proceeding." - exit 64; -fi diff --git a/scripts/format_ipynb.py b/scripts/format_ipynb.py deleted file mode 100644 index fac7a8bf1..000000000 --- a/scripts/format_ipynb.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Format notebook code cells using yapf google style.""" -import glob -import nbformat -import yapf - -# Must be run from the top level of the `TFQuantum` repo. -NOTEBOOKS = glob.glob("docs/tutorials/*.ipynb") -for fname in NOTEBOOKS: - nb = nbformat.read(fname, as_version=nbformat.NO_CONVERT) - all_cells = nb.get('cells') - for i, cell in enumerate(all_cells): - if cell.get('cell_type') != 'code': - continue - lines = cell.get('source') - # This will safely skip over cells containing !% magic - try: - fmt_lines = yapf.yapf_api.FormatCode(''.join(lines), - style_config="google")[0] - except SyntaxError: - continue - # google style always adds an EOF newline; undo this. - all_cells[i]['source'] = fmt_lines[:-1] - - nb['cells'] = all_cells - nbformat.write(nb, fname, version=nbformat.NO_CONVERT) diff --git a/scripts/import_test.py b/scripts/import_test.py deleted file mode 100644 index 2353f26c5..000000000 --- a/scripts/import_test.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests to check if importing `tfq` APIs is successful or not.""" -import tensorflow_quantum as tfq - - -def test_imports(): - """Test that pip package was built with proper structure.""" - - # Top level modules. - _ = tfq.layers - _ = tfq.differentiators - - # Ops and Op getters. - _ = tfq.get_expectation_op - _ = tfq.get_sampled_expectation_op - _ = tfq.get_sampling_op - _ = tfq.get_state_op - _ = tfq.append_circuit - _ = tfq.padded_to_ragged - - # Util functions. - _ = tfq.convert_to_tensor - _ = tfq.from_tensor - _ = tfq.util.get_supported_gates - _ = tfq.util.exponential - - # Keras layers. - _ = tfq.layers.AddCircuit - _ = tfq.layers.Expectation - _ = tfq.layers.Sample - _ = tfq.layers.State - _ = tfq.layers.SampledExpectation - _ = tfq.layers.ControlledPQC - _ = tfq.layers.PQC - - # Differentiators. - _ = tfq.differentiators.ForwardDifference - _ = tfq.differentiators.CentralDifference - _ = tfq.differentiators.LinearCombination - _ = tfq.differentiators.ParameterShift - _ = tfq.differentiators.SGDifferentiator - _ = tfq.differentiators.Differentiator - - # Datasets. - _ = tfq.datasets.excited_cluster_states - - -if __name__ == "__main__": - test_imports() diff --git a/scripts/lint_all.sh b/scripts/lint_all.sh deleted file mode 100755 index fb7e1c9a5..000000000 --- a/scripts/lint_all.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Checking for lint in python code..."; -linting_outputs=$(pylint --rcfile .pylintrc ./tensorflow_quantum ./examples); -exit_code=$? -if [ "$exit_code" == "0" ]; then - echo "Python linting complete!"; - exit 0; -else - echo "Linting failed, please correct errors before proceeding." - echo "{$linting_outputs}" - exit 64; -fi - -# TODO (mbbrough/pmassey): Is there an autolinter for C++ stuff we should put in here ? -# Yes we need to put clang-tidy in here! diff --git a/scripts/run_example.sh b/scripts/run_example.sh deleted file mode 100755 index bb86edc22..000000000 --- a/scripts/run_example.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -cd .. -cp quantum/scripts/import_test.py import_test.py -python import_test.py \ No newline at end of file diff --git a/scripts/test_all.sh b/scripts/test_all.sh deleted file mode 100755 index ce442aead..000000000 --- a/scripts/test_all.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Testing All Bazel py_test and cc_tests."; -test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --notest_keep_going --test_output=errors $(bazel query //tensorflow_quantum/...)) -exit_code=$? -if [ "$exit_code" == "0" ]; then - echo "Testing Complete!"; - exit 0; -else - echo "Testing failed, please correct errors before proceeding." - echo "{$test_outputs}" - exit 64; -fi \ No newline at end of file diff --git a/scripts/test_benchmarks.sh b/scripts/test_benchmarks.sh deleted file mode 100644 index d969a0d29..000000000 --- a/scripts/test_benchmarks.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -echo "Testing all Benchmarks."; -bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all) -# test_outputs=$(bazel test -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors $(bazel query //benchmarks/scripts:all)) -bench_outputs=$() -# bench_outputs=$(bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" --cxxopt="-msse3" --cxxopt="-msse4" --test_output=errors //benchmarks/scripts:benchmark_clifford_circuit) -exit_code=$? -if [ "$exit_code" == "0" ]; then - echo "Testing Complete!"; - exit 0; -else - echo "Testing failed, please correct errors before proceeding." - echo "{$test_outputs}" - exit 64; -fi \ No newline at end of file diff --git a/scripts/test_tutorials.py b/scripts/test_tutorials.py deleted file mode 100644 index bb9513977..000000000 --- a/scripts/test_tutorials.py +++ /dev/null @@ -1,49 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module to ensure all notebooks execute without error by pytesting them.""" -import glob -import re - -from absl.testing import parameterized -import nbformat -import nbconvert -import tensorflow as tf - -# Must be run from the directory containing `quantum` repo. -NOTEBOOKS = glob.glob("quantum/docs/tutorials/*.ipynb") - - -class ExamplesTest(tf.test.TestCase, parameterized.TestCase): - - @parameterized.parameters(NOTEBOOKS) - def test_notebook(self, path): - """Test that notebooks open/run correctly.""" - - nb = nbformat.read(path, as_version=4) - # Scrub any magic from the notebook before running. - for cell in nb.get("cells"): - if cell['cell_type'] == 'code': - src = cell['source'] - # Comment out lines containing '!' but not '!=' - src = re.sub(r'\!(?!=)', r'#!', src) - cell['source'] = src - - _ = nbconvert.preprocessors.execute.executenb(nb, - timeout=900, - kernel_name="python3") - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/BUILD b/tensorflow_quantum/BUILD deleted file mode 100644 index fd4e75c2e..000000000 --- a/tensorflow_quantum/BUILD +++ /dev/null @@ -1,6 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) diff --git a/tensorflow_quantum/__init__.py b/tensorflow_quantum/__init__.py deleted file mode 100644 index 60f2aacb9..000000000 --- a/tensorflow_quantum/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module functions for tensorflow_quantum.*""" - -# Import basic ops and op getters. -from tensorflow_quantum.core import (append_circuit, get_expectation_op, - get_sampled_expectation_op, - get_sampling_op, get_state_op, - padded_to_ragged) - -# Re-label python module as layers module. -import tensorflow_quantum.python.layers as layers - -# Import utility functions for tensor operations & conversions. -from tensorflow_quantum.python.util import ( - # Utility functions - convert_to_tensor, - from_tensor, -) - -# Re-label python module as util module. -import tensorflow_quantum.python.util as util - -# Import datasets. -import tensorflow_quantum.datasets as datasets - -# Import differentiators. 
-import tensorflow_quantum.python.differentiators as differentiators - -__version__ = "0.2.0" diff --git a/tensorflow_quantum/core/BUILD b/tensorflow_quantum/core/BUILD deleted file mode 100644 index fd4e75c2e..000000000 --- a/tensorflow_quantum/core/BUILD +++ /dev/null @@ -1,6 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) diff --git a/tensorflow_quantum/core/__init__.py b/tensorflow_quantum/core/__init__.py deleted file mode 100644 index 74e06924c..000000000 --- a/tensorflow_quantum/core/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Imports to tensorflow_quantum.core.* level.""" -# Import getters for constructing ops. -from tensorflow_quantum.core.ops import (get_expectation_op, - get_sampled_expectation_op, - get_sampling_op, get_state_op) -# Special case for append op which we didn't name well. 
-from tensorflow_quantum.core.ops import padded_to_ragged -from tensorflow_quantum.core.ops import \ - tfq_append_circuit as append_circuit diff --git a/tensorflow_quantum/core/ops/BUILD b/tensorflow_quantum/core/ops/BUILD deleted file mode 100644 index afc8037c9..000000000 --- a/tensorflow_quantum/core/ops/BUILD +++ /dev/null @@ -1,234 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -cc_binary( - name = "_tfq_ps_utils.so", - srcs = [ - "tfq_ps_decompose_op.cc", - "tfq_ps_symbol_replace_op.cc", - "tfq_ps_weights_from_symbols_op.cc", - ], - copts = [ - "-pthread", - "-std=c++11", - "-O3", - "-D_GLIBCXX_USE_CXX11_ABI=0", - ], - linkshared = 1, - deps = [ - ":parse_context", - ":tfq_simulate_utils", - "//tensorflow_quantum/core/proto:program_cc_proto", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_binary( - name = "_tfq_simulate_ops.so", - srcs = [ - "tfq_simulate_expectation_op.cc", - "tfq_simulate_state_op.cc", - ], - copts = [ - "-pthread", - "-std=c++11", - "-O3", - "-D_GLIBCXX_USE_CXX11_ABI=0", - ], - linkshared = 1, - deps = [ - ":parse_context", - ":tfq_simulate_utils", - "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", - "//tensorflow_quantum/core/proto:program_cc_proto", - "//tensorflow_quantum/core/qsim", - "//tensorflow_quantum/core/src:program_resolution", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:inlined_vector", - "@com_google_absl//absl/types:optional", - "@com_google_absl//absl/types:span", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_binary( - name = "_tfq_utility_ops.so", - srcs = [ - "tfq_circuit_append_op.cc", - ], - copts = [ - "-pthread", - "-std=c++11", - "-O3", - "-D_GLIBCXX_USE_CXX11_ABI=0", - ], - linkshared = 1, - deps = [ - ":parse_context", - ":tfq_simulate_utils", - 
"//tensorflow_quantum/core/proto:program_cc_proto", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:inlined_vector", - "@com_google_absl//absl/types:optional", - "@com_google_absl//absl/types:span", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "parse_context", - srcs = ["parse_context.cc"], - hdrs = ["parse_context.h"], - deps = [ - ":tfq_simulate_utils", - "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", - "//tensorflow_quantum/core/proto:program_cc_proto", - "//tensorflow_quantum/core/src:program_resolution", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:inlined_vector", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "tfq_simulate_utils", - srcs = ["tfq_simulate_utils.cc"], - hdrs = ["tfq_simulate_utils.h"], - deps = [ - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -py_library( - name = "tfq_simulate_ops_py", - srcs = ["tfq_simulate_ops.py"], - data = [":_tfq_simulate_ops.so"], - deps = [ - ":load_module", - ], -) - -py_test( - name = "tfq_simulate_ops_test", - srcs = ["tfq_simulate_ops_test.py"], - python_version = "PY3", - deps = [ - ":tfq_simulate_ops_py", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "circuit_execution_ops", - srcs = ["circuit_execution_ops.py"], - deps = [ - "tfq_utility_ops_py", - ":cirq_ops", - ":tfq_simulate_ops_py", - ], -) - -py_test( - name = "circuit_execution_ops_test", - timeout = "eternal", - srcs = ["circuit_execution_ops_test.py"], - python_version = "PY3", - deps = [ - ":circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "cirq_ops", - srcs = ["cirq_ops.py"], - deps = [ - ":batch_util", - "//tensorflow_quantum/core/serialize:serializer", - ], -) - -py_test( - name = "cirq_ops_test", - srcs 
= ["cirq_ops_test.py"], - python_version = "PY3", - deps = [ - ":batch_util", - ":cirq_ops", - "//tensorflow_quantum/core/serialize:serializer", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "batch_util", - srcs = ["batch_util.py"], - deps = [ - "//tensorflow_quantum/core/serialize:serializer", - ], -) - -py_test( - name = "batch_util_test", - timeout = "eternal", - srcs = ["batch_util_test.py"], - python_version = "PY3", - deps = [ - ":batch_util", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "tfq_ps_util_ops_py", - srcs = ["tfq_ps_util_ops.py"], - data = [":_tfq_ps_utils.so"], - deps = [ - ":load_module", - ], -) - -py_test( - name = "tfq_ps_util_ops_test", - srcs = ["tfq_ps_util_ops_test.py"], - python_version = "PY3", - deps = [ - ":tfq_ps_util_ops_py", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "tfq_utility_ops_py", - srcs = ["tfq_utility_ops.py"], - data = [":_tfq_utility_ops.so"], - deps = [ - ":load_module", - ], -) - -py_test( - name = "tfq_utility_ops_test", - srcs = ["tfq_utility_ops_test.py"], - python_version = "PY3", - deps = [ - ":tfq_utility_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "load_module", - srcs = ["load_module.py"], - deps = [], -) diff --git a/tensorflow_quantum/core/ops/__init__.py b/tensorflow_quantum/core/ops/__init__.py deleted file mode 100644 index af92b9ff2..000000000 --- a/tensorflow_quantum/core/ops/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module for tfq.core.ops.*""" - -# Import getters for constructing ops. -from tensorflow_quantum.core.ops.circuit_execution_ops import ( - get_expectation_op, get_sampled_expectation_op, get_sampling_op, - get_state_op) -# Special case for append op which we didn't name well. -from tensorflow_quantum.core.ops.tfq_utility_ops import padded_to_ragged -from tensorflow_quantum.core.ops.tfq_utility_ops import tfq_append_circuit diff --git a/tensorflow_quantum/core/ops/batch_util.py b/tensorflow_quantum/core/ops/batch_util.py deleted file mode 100644 index 365a51ea5..000000000 --- a/tensorflow_quantum/core/ops/batch_util.py +++ /dev/null @@ -1,642 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A module to for running Cirq Simulators in parallel.""" -import asyncio -import collections -import itertools -import multiprocessing -import os - -import numpy as np -import cirq - -from tensorflow_quantum.core.serialize import serializer - - -# TODO (mbbrough): Remove this workaround class once cirq.PauliSumCollector can -# be used end to end with engine. This current issue is that -# cirq.PauliSumCollector does not produce serializable gates for basis -# conversion. -class TFQPauliSumCollector(cirq.work.collector.Collector): - """Copy of cirq.PauliSumCollector with some fixes to work with engine.""" - - def __init__(self, - circuit, - observable, - *, - samples_per_term, - max_samples_per_job=1000000): - - observable = cirq.PauliSum.wrap(observable) - self._circuit = circuit - self._samples_per_job = max_samples_per_job - self._pauli_coef_terms = [ - (p / p.coefficient, p.coefficient) for p in observable if p - ] - self._identity_offset = 0 - for p in observable: - if not p: - self._identity_offset += p.coefficient - self._zeros = collections.defaultdict(lambda: 0) - self._ones = collections.defaultdict(lambda: 0) - self._samples_per_term = samples_per_term - self._total_samples_requested = 0 - - def next_job(self): - """Get the next job.""" - i = self._total_samples_requested // self._samples_per_term - if i >= len(self._pauli_coef_terms): - return None - pauli, _ = self._pauli_coef_terms[i] - remaining = self._samples_per_term * (i + - 1) - self._total_samples_requested - amount_to_request = min(remaining, self._samples_per_job) - self._total_samples_requested += amount_to_request - return cirq.work.collector.CircuitSampleJob( - circuit=_fixed_circuit_plus_pauli_string_measurements( - self._circuit, pauli), - repetitions=amount_to_request, - tag=pauli) - - def on_job_result(self, job, result): - """Post process the `job` and `result` you have.""" - job_id = job.tag - parities 
= result.histogram(key='out', - fold_func=lambda bits: np.sum(bits) % 2) - self._zeros[job_id] += parities[0] - self._ones[job_id] += parities[1] - - def estimated_energy(self): - """Sums up the sampled expectations, weighted by their coefficients.""" - energy = 0j - for pauli_string, coef in self._pauli_coef_terms: - a = self._zeros[pauli_string] - b = self._ones[pauli_string] - if a + b: - energy += coef * (a - b) / (a + b) - energy = complex(energy) - if energy.imag == 0: - energy = energy.real - energy += self._identity_offset - return energy - - -def _fixed_circuit_plus_pauli_string_measurements(circuit, pauli_string): - """A circuit measuring the given observable at the end of the given circuit. - """ - assert pauli_string - circuit = circuit.copy() - # Uses cirq.SingleQubitCliffordGates which aren't serializable by engine in - # cirq 0.6. This is a workaround until fixed. - # circuit.append(cirq.Moment(pauli_string.to_z_basis_ops())) - circuit.append(cirq.Moment(cirq.decompose(pauli_string.to_z_basis_ops()))) - circuit.append( - cirq.Moment([cirq.measure(*sorted(pauli_string.keys()), key='out')])) - return circuit - - -def _make_complex_view(shape, init_val): - """Build a RawArray that will map to the real and imaginary parts of a - complex number.""" - shape = list(shape) - shape[-1] *= 2 - data = np.ones(shape, dtype=np.float32) * init_val - - flattened_size = 1 - for dim_size in shape: - flattened_size *= dim_size - shared_mem_array = multiprocessing.RawArray('f', flattened_size) - np_view = np.frombuffer(shared_mem_array, dtype=np.float32).reshape(shape) - np.copyto(np_view, data) - return shared_mem_array - - -def _convert_complex_view_to_np(view, shape): - """Get a numpy view ontop of the rawarray view. Small overhead.""" - shape = list(shape) - shape[-1] *= 2 - return np.frombuffer(view, dtype=np.float32).reshape(shape) - - -def _update_complex_np(np_view, i, to_add): - """Update the shared memory undernath the numpy view. 
- to_add is passed by reference since we don't do much with it.""" - np_view[i, ...] = np.pad(to_add, - (0, (np_view.shape[-1] // 2 - to_add.shape[-1])), - 'constant', - constant_values=-2).view(np.float32) - - -def _convert_complex_view_to_result(view, shape): - """Convert a rawarray view to a numpy array and reindex so that - the underlying pair of double arrays are squished together to make a - complex array of half the underlying size.""" - shape = list(shape) - shape[-1] *= 2 - np_view = np.frombuffer(view, dtype=np.float32).reshape(shape) - - # The below view will cause a re-interpretation of underlying - # memory so use sparingly. - return np_view.view(np.complex64) - - -def _make_simple_view(shape, init_val, dtype, c_code): - """Make a shared memory view for floating type.""" - data = np.ones(shape, dtype=dtype) * init_val - flattened_size = 1 - for dim_size in shape: - flattened_size *= dim_size - shared_mem_array = multiprocessing.RawArray(c_code, flattened_size) - np_view = np.frombuffer(shared_mem_array, dtype=dtype).reshape(shape) - np.copyto(np_view, data) - return shared_mem_array - - -def _convert_simple_view_to_np(view, dtype, shape): - """Create a numpy view to a float array, low overhead.""" - return np.frombuffer(view, dtype=dtype).reshape(shape) - - -def _batch_update_simple_np(np_view, i, to_add): - """Update the shared memory underneath the numpy view. - to_add is again passed by reference.""" - np_view[i, ...] = to_add - - -def _pointwise_update_simple_np(np_view, i, j, to_add): - """Do a batch and sub-batch index update to numpy view.""" - np_view[i, j, ...] 
= to_add - - -def _convert_simple_view_to_result(view, dtype, shape): - """Convert a RawArray view to final numpy array.""" - return np.frombuffer(view, dtype=dtype).reshape(shape) - - -def _prep_pool_input_args(indices, *args, slice_args=True): - """Break down a set of indices, and optinal args into a generator - of length cpu_count.""" - block_size = int(np.ceil(len(indices) / os.cpu_count())) - for i in range(0, len(indices), block_size): - if slice_args: - yield tuple([indices[i:i + block_size]] + - [x[i:i + block_size] for x in args]) - else: - yield tuple([indices[i:i + block_size]] + [x for x in args]) - - -# process are separate from all the other processes, -# so INFO_DICTs will not step on each other. -INFO_DICT = {} - - -def _setup_dict(array_view, view_shape, simulator, post_process): - INFO_DICT['arr'] = array_view - INFO_DICT['shape'] = view_shape - INFO_DICT['sim'] = simulator - INFO_DICT['post_process'] = post_process - - -def _state_worker_func(indices, programs, params): - """Compute the wavefunction for each program in indices.""" - x_np = _convert_complex_view_to_np(INFO_DICT['arr'], INFO_DICT['shape']) - simulator = INFO_DICT['sim'] - - for i, index in enumerate(indices): - result = simulator.simulate(programs[i], params[i]) - final_array = INFO_DICT['post_process'](result).astype(np.complex64) - _update_complex_np(x_np, index, final_array) - - -def _analytical_expectation_worker_func(indices, programs, params, ops): - """Compute the expectation of the op[batch_index], w.r.t - circuit[batch_index] where batch_index is calculated from indices.""" - x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.float32, - INFO_DICT['shape']) - simulator = INFO_DICT['sim'] - - # TODO: remove this when picklable. 
- for i in range(len(ops)): - for j in range(len(ops[i])): - ops[i][j] = serializer.deserialize_paulisum(ops[i][j]) - - old_batch_index = -2 - state = -1 - for i, index_tuple in enumerate(indices): - batch_index = index_tuple[0] - op_index = index_tuple[1] - # (#679) Just ignore empty programs. - if len(programs[batch_index].all_qubits()) == 0: - continue - - if old_batch_index != batch_index: - # must compute a new wavefunction. - qubit_oder = dict( - zip(sorted(programs[batch_index].all_qubits()), - list(range(len(programs[batch_index].all_qubits()))))) - state = simulator.simulate(programs[batch_index], - params[batch_index]) - - result = INFO_DICT['post_process'](ops[batch_index][op_index], state, - qubit_oder) - _pointwise_update_simple_np(x_np, batch_index, op_index, result) - old_batch_index = batch_index - - -def _sample_expectation_worker_func(indices, programs, params, ops, n_samples): - x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.float32, - INFO_DICT['shape']) - simulator = INFO_DICT['sim'] - - # TODO: remove this when picklable. - for i in range(len(ops)): - for j in range(len(ops[i])): - ops[i][j] = serializer.deserialize_paulisum(ops[i][j]) - - for i, index_tuple in enumerate(indices): - batch_index = index_tuple[0] - op_index = index_tuple[1] - # (#679) Just ignore empty programs. 
- if len(programs[batch_index].all_qubits()) == 0: - continue - circuit = cirq.resolve_parameters(programs[batch_index], - params[batch_index]) - - sampler = TFQPauliSumCollector( - circuit, - ops[batch_index][op_index], - samples_per_term=n_samples[batch_index][op_index]) - - asyncio.set_event_loop(asyncio.new_event_loop()) - sampler.collect(simulator, concurrency=1) - result = sampler.estimated_energy().real - - _pointwise_update_simple_np(x_np, batch_index, op_index, result) - - -def _sample_worker_func(indices, programs, params, n_samples): - """Sample n_samples from progams[i] with params[i] placed in it.""" - x_np = _convert_simple_view_to_np(INFO_DICT['arr'], np.int32, - INFO_DICT['shape']) - simulator = INFO_DICT['sim'] - - for i, index in enumerate(indices): - qubits = sorted(programs[i].all_qubits()) - # (#679) Just ignore empty programs. - if len(qubits) == 0: - continue - state = simulator.simulate(programs[i], params[i]) - samples = INFO_DICT['post_process'](state, len(qubits), - n_samples[i]).astype(np.int32) - _batch_update_simple_np( - x_np, index, - np.pad(samples, ((0, 0), (x_np.shape[2] - len(qubits), 0)), - 'constant', - constant_values=-2)) - - -def _validate_inputs(circuits, param_resolvers, simulator, sim_type): - """Type check and sanity check inputs.""" - if not isinstance(circuits, (list, tuple, np.ndarray)): - raise TypeError('circuits must be a list or array.' - ' Given: {}'.format(type(circuits))) - - if any(not isinstance(x, cirq.Circuit) for x in circuits): - raise TypeError('circuits must contain cirq.Circuit objects') - - if not isinstance(param_resolvers, (list, tuple, np.ndarray)): - raise TypeError('param_resolvers must be a list or array.' 
- ' Given: {}'.format(type(param_resolvers))) - - if any(not isinstance(x, cirq.ParamResolver) for x in param_resolvers): - raise TypeError('param_resolvers must contain cirq.ParamResolvers.') - - if not (len(circuits) == len(param_resolvers)): - raise ValueError('Circuit batch size does not match resolve batch ' - 'size.') - - if sim_type == 'analytic': - if not isinstance(simulator, cirq.SimulatesFinalState): - raise TypeError('For analytic operations only' - ' cirq.SimulatesFinalState' - ' is required. Given: {}'.format(type(simulator))) - elif sim_type == 'sample': - if not isinstance(simulator, cirq.Sampler): - raise TypeError('For sample based operations a cirq.Sampler is ' - 'required. Given: {}'.format(type(simulator))) - else: - raise ValueError('Invalid simulator type specified.') - - -def batch_calculate_state(circuits, param_resolvers, simulator): - """Compute states using a given simulator using parallel processing. - - Returns a NumPy array containing the final circuit state for each - `cirq.Circuit` in `circuits`, given that the corresponding - `cirq.ParamResolver` in `param_resolvers` was used to resolve any symbols - in it. If simulator is a `cirq.DensityMatrixSimulator` this final state will - be a density matrix, if simulator is a `cirq.Simulator` this final state - will be a wavefunction. More specifically for a given `i` - `batch_calculate_state` will use `param_resolvers[i]` to resolve the symbols - in `circuits[i]` and then place the final state in the return list at index - `i`. - - Args: - circuits: Python `list` of `cirq.Circuit`s. - param_resolvers: Python `list` of `cirq.ParamResolver`s, where - `param_resolvers[i]` is the resolver to be used with `circuits[i]`. - simulator: Simulator object. Currently - supported are `cirq.DensityMatrixSimulator` and `cirq.Simulator`. - - Returns: - `np.ndarray` containing the resulting state information. The array will - have dimensions: [len(circuits), ] in the - case of `cirq.Simulator`. 
In the case of `cirq.DensityMatrixSimulator` - the shape is - [len(circuits), , ] - """ - _validate_inputs(circuits, param_resolvers, simulator, 'analytic') - - biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits) - if isinstance(simulator, - cirq.sim.density_matrix_simulator.DensityMatrixSimulator): - return_mem_shape = (len(circuits), 1 << biggest_circuit, - 1 << biggest_circuit) - post_process = lambda x: x.final_density_matrix - elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator): - return_mem_shape = (len(circuits), 1 << biggest_circuit) - post_process = lambda x: x.final_state - else: - raise TypeError('Simulator {} is not supported by ' - 'batch_calculate_state.'.format(type(simulator))) - - shared_array = _make_complex_view(return_mem_shape, -2) - input_args = _prep_pool_input_args(range(len(circuits)), circuits, - param_resolvers) - with multiprocessing.Pool(processes=None, - initializer=_setup_dict, - initargs=(shared_array, return_mem_shape, - simulator, post_process)) as pool: - - pool.starmap(_state_worker_func, list(input_args)) - - return _convert_complex_view_to_result(shared_array, return_mem_shape) - - -def batch_calculate_expectation(circuits, param_resolvers, ops, simulator): - """Compute expectations from circuits using parallel processing. - - Returns a `np.ndarray` containing the expectation values of `ops` - applied to a specific circuit in `circuits`, given that the - corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve - any symbols in the circuit. Specifically the returned array at index `i,j` - will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with - `param_resolvers[i]` used to resolve any symbols in `circuits[i]`. - Expectation calculations will be carried out using the simulator object - (`cirq.DensityMatrixSimulator` and `cirq.Simulator` are currently supported) - - Args: - circuits: Python `list` of `cirq.Circuit`s. 
- param_resolvers: Python `list` of `cirq.ParamResolver`s, where - `param_resolvers[i]` is the resolver to be used with `circuits[i]`. - ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will - be used to calculate the expectation on `circuits[i]` for all `j`, - after `param_resolver[i]` is used to resolve any parameters - in the circuit. - simulator: Simulator object. Currently supported are - `cirq.DensityMatrixSimulator` and `cirq.Simulator`. - - Returns: - `np.ndarray` containing the expectation values. Shape is: - [len(circuits), len(ops[0])] - """ - _validate_inputs(circuits, param_resolvers, simulator, 'analytic') - if not isinstance(ops, (list, tuple, np.ndarray)): - raise TypeError('ops must be a list or array.' - ' Given: {}'.format(type(ops))) - - if len(ops) != len(circuits): - raise ValueError('Shape of ops and circuits do not match.') - - for sub_list in ops: - if not isinstance(sub_list, (list, tuple, np.ndarray)): - raise TypeError('elements of ops must be type list.') - for x in sub_list: - if not isinstance(x, cirq.PauliSum): - raise TypeError('ops must contain only cirq.PauliSum objects.' - ' Given: {}'.format(type(x))) - - return_mem_shape = (len(circuits), len(ops[0])) - if isinstance(simulator, - cirq.sim.density_matrix_simulator.DensityMatrixSimulator): - post_process = lambda op, state, order: sum( - x._expectation_from_density_matrix_no_validation( - state.final_density_matrix, order) for x in op).real - elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator): - post_process = \ - lambda op, state, order: op.expectation_from_wavefunction( - state.final_state, order).real - else: - raise TypeError('Simulator {} is not supported by ' - 'batch_calculate_expectation.'.format(type(simulator))) - - shared_array = _make_simple_view(return_mem_shape, -2, np.float32, 'f') - - # avoid mutating ops array - ops = np.copy(ops) - # TODO (mbbrough): make cirq PauliSUms pickable at some point ? 
- for i in range(len(ops)): - for j in range(len(ops[i])): - ops[i][j] = serializer.serialize_paulisum(ops[i][j]) - - input_args = list( - _prep_pool_input_args(list( - itertools.product(range(len(circuits)), range(len(ops[0])))), - circuits, - param_resolvers, - ops, - slice_args=False)) - - with multiprocessing.Pool(processes=None, - initializer=_setup_dict, - initargs=(shared_array, return_mem_shape, - simulator, post_process)) as pool: - - pool.starmap(_analytical_expectation_worker_func, input_args) - - return _convert_simple_view_to_result(shared_array, np.float32, - return_mem_shape) - - -def batch_calculate_sampled_expectation(circuits, param_resolvers, ops, - n_samples, simulator): - """Compute expectations from sampling circuits using parallel processing. - - Returns a `np.ndarray` containing the expectation values of `ops` - applied to a specific circuit in `circuits`, given that the - corresponding `cirq.ParamResolver` in `param_resolvers` was used to resolve - any symbols in the circuit. Specifically the returned array at index `i,j` - will be equal to the expectation value of `ops[i][j]` on `circuits[i]` with - `param_resolvers[i]` used to resolve any symbols in `circuits[i]`. - Expectation estimations will be carried out using the simulator object - (`cirq.DensityMatrixSimulator` and `cirq.Simulator` are currently supported) - . Expectations for ops[i][j] are estimated by drawing n_samples[i][j] - samples. - - Args: - circuits: Python `list` of `cirq.Circuit`s. - param_resolvers: Python `list` of `cirq.ParamResolver`s, where - `param_resolvers[i]` is the resolver to be used with `circuits[i]`. - ops: 2d Python `list` of `cirq.PauliSum` objects where `ops[i][j]` will - be used to calculate the expectation on `circuits[i]` for all `j`, - after `param_resolver[i]` is used to resolve any parameters - in the circuit. 
- n_samples: 2d Python `list` of `int`s where `n_samples[i][j]` is - equal to the number of samples to draw in each term of `ops[i][j]` - when estimating the expectation. - simulator: Simulator object. Currently supported are - `cirq.DensityMatrixSimulator` and `cirq.Simulator`. - - Returns: - `np.ndarray` containing the expectation values. Shape is: - [len(circuits), len(ops[0])] - """ - _validate_inputs(circuits, param_resolvers, simulator, 'sample') - if not isinstance(ops, (list, tuple, np.ndarray)): - raise TypeError('ops must be a list or array.' - ' Given: {}'.format(type(ops))) - - if len(ops) != len(circuits): - raise ValueError('Shape of ops and circuits do not match.') - - if len(n_samples) != len(circuits): - raise ValueError('Shape of n_samples does not match circuits.') - - for sub_list in n_samples: - if not isinstance(sub_list, (list, tuple, np.ndarray)): - raise TypeError('Elements of n_elements must be lists of ints.') - for x in sub_list: - if not isinstance(x, int): - raise TypeError('Non-integer value found in n_samples.') - if x <= 0: - raise ValueError('n_samples contains sample value <= 0.') - - for sub_list in ops: - if not isinstance(sub_list, (list, tuple, np.ndarray)): - raise TypeError('elements of ops must be type list.') - for x in sub_list: - if not isinstance(x, cirq.PauliSum): - raise TypeError('ops must contain only cirq.PauliSum objects.' - ' Given: {}'.format(type(x))) - - return_mem_shape = (len(circuits), len(ops[0])) - shared_array = _make_simple_view(return_mem_shape, -2, np.float32, 'f') - - # avoid mutating ops array - ops = np.copy(ops) - # TODO (mbbrough): make cirq PauliSums pickable at some point ? 
- for i in range(len(ops)): - for j in range(len(ops[i])): - ops[i][j] = serializer.serialize_paulisum(ops[i][j]) - - input_args = list( - _prep_pool_input_args(list( - itertools.product(range(len(circuits)), range(len(ops[0])))), - circuits, - param_resolvers, - ops, - n_samples, - slice_args=False)) - - with multiprocessing.Pool(processes=None, - initializer=_setup_dict, - initargs=(shared_array, return_mem_shape, - simulator, None)) as pool: - - pool.starmap(_sample_expectation_worker_func, input_args) - - return _convert_simple_view_to_result(shared_array, np.float32, - return_mem_shape) - - -def batch_sample(circuits, param_resolvers, n_samples, simulator): - """Sample from circuits using parallel processing. - - Returns a `np.ndarray` containing n_samples samples from all the circuits in - circuits given that the corresponding `cirq.ParamResolver` in - `param_resolvers` was used to resolve any symbols. Specifically the - returned array at index `i,j` will correspond to a `np.ndarray` of - booleans representing bitstring `j` that was sampled from `circuits[i]`. - Samples are drawn using the provided simulator object (Currently supported - are `cirq.DensityMatrixSimulator` and `cirq.Simulator`). - - Note: In order to keep numpy shape consistent, smaller circuits will - have sample bitstrings padded with -2 on "qubits that don't exist - in the circuit". - - Args: - circuits: Python `list` of `cirq.Circuit`s. - param_resolvers: Python `list` of `cirq.ParamResolver`s, where - `param_resolvers[i]` is the resolver to be used with `circuits[i]`. - n_samples: `int` describing number of samples to draw from each - circuit. - simulator: Simulator object. Currently - supported are `cirq.DensityMatrixSimulator` and `cirq.Simulator`. - - Returns: - `np.ndarray` containing the samples with invalid qubits blanked out. - It's shape is - [len(circuits), n_samples, <# qubits in largest circuit>]. 
- circuits that are smaller than #qubits in largest circuit have null - qubits in bitstrings mapped to -2. - """ - _validate_inputs(circuits, param_resolvers, simulator, 'sample') - if not isinstance(n_samples, int): - raise TypeError('n_samples must be an int.' - 'Given: {}'.format(type(n_samples))) - - if n_samples <= 0: - raise ValueError('n_samples must be > 0.') - - biggest_circuit = max(len(circuit.all_qubits()) for circuit in circuits) - return_mem_shape = (len(circuits), n_samples, biggest_circuit) - shared_array = _make_simple_view(return_mem_shape, -2, np.int32, 'i') - - if isinstance(simulator, - cirq.sim.density_matrix_simulator.DensityMatrixSimulator): - post_process = lambda state, size, n_samples: \ - cirq.sample_density_matrix( - state.final_density_matrix, [i for i in range(size)], - repetitions=n_samples) - elif isinstance(simulator, cirq.sim.sparse_simulator.Simulator): - post_process = lambda state, size, n_samples: cirq.sample_state_vector( - state.final_state, list(range(size)), repetitions=n_samples) - else: - raise TypeError('Simulator {} is not supported by batch_sample.'.format( - type(simulator))) - - input_args = list( - _prep_pool_input_args(range(len(circuits)), circuits, param_resolvers, - [n_samples] * len(circuits))) - - with multiprocessing.Pool(processes=None, - initializer=_setup_dict, - initargs=(shared_array, return_mem_shape, - simulator, post_process)) as pool: - - pool.starmap(_sample_worker_func, input_args) - - return _convert_simple_view_to_result(shared_array, np.int32, - return_mem_shape) diff --git a/tensorflow_quantum/core/ops/batch_util_test.py b/tensorflow_quantum/core/ops/batch_util_test.py deleted file mode 100644 index fa5c4488e..000000000 --- a/tensorflow_quantum/core/ops/batch_util_test.py +++ /dev/null @@ -1,250 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test parallel Cirq simulations.""" -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -from scipy import stats -import cirq - -from tensorflow_quantum.core.ops import batch_util -from tensorflow_quantum.python import util - -BATCH_SIZE = 12 -N_QUBITS = 5 -PAULI_LENGTH = 3 -SYMBOLS = ['alpha', 'beta', 'gamma'] - - -def _get_mixed_batch(qubits, symbols, size): - circuit1, resolver1 = util.random_circuit_resolver_batch(qubits, size // 2) - circuit2, resolver2 = util.random_symbol_circuit_resolver_batch( - qubits, symbols, size // 2) - return circuit1 + circuit2, resolver1 + resolver2 - - -def _pad_state(sim, state, n): - if isinstance(sim, cirq.sim.sparse_simulator.Simulator): - state = state.final_state - if isinstance(sim, cirq.DensityMatrixSimulator): - state = state.final_density_matrix - return np.pad(state, (0, (1 << n) - state.shape[-1]), - 'constant', - constant_values=-2) - - -def _expectation_helper(sim, circuit, params, op): - if isinstance(sim, cirq.sim.sparse_simulator.Simulator): - state = sim.simulate(circuit, params).final_state.astype(np.complex128) - return [ - op.expectation_from_wavefunction( - state, - dict( - zip(sorted(circuit.all_qubits()), - (j for j in range(len(circuit.all_qubits())))))).real - ] - if isinstance(sim, cirq.DensityMatrixSimulator): - state = sim.simulate(circuit, 
params).final_density_matrix - return [ - sum( - x._expectation_from_density_matrix_no_validation( - state, - dict( - zip(sorted(circuit.all_qubits()), ( - j - for j in range(len(circuit.all_qubits())))))) - for x in op) - ] - - return NotImplemented - - -def _sample_helper(sim, state, n_qubits, n_samples): - if isinstance(sim, cirq.sim.sparse_simulator.Simulator): - return cirq.sample_state_vector(state.final_state, - list(range(n_qubits)), - repetitions=n_samples) - if isinstance(sim, cirq.DensityMatrixSimulator): - return cirq.sample_density_matrix(state.final_density_matrix, - list(range(n_qubits)), - repetitions=n_samples) - - return NotImplemented - - -class BatchUtilTest(tf.test.TestCase, parameterized.TestCase): - """Test cases for BatchUtils main functions.""" - - @parameterized.parameters([{ - 'sim': cirq.DensityMatrixSimulator() - }, { - 'sim': cirq.sim.sparse_simulator.Simulator() - }]) - def test_batch_simulate_state(self, sim): - """Test variable sized wavefunction output.""" - circuit_batch, resolver_batch = _get_mixed_batch( - cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE) - results = batch_util.batch_calculate_state(circuit_batch, - resolver_batch, sim) - - for circuit, resolver, result in zip(circuit_batch, resolver_batch, - results): - r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS) - self.assertAllClose(r, result, rtol=1e-5, atol=1e-5) - - self.assertDTypeEqual(results, np.complex64) - - @parameterized.parameters([{ - 'sim': cirq.DensityMatrixSimulator() - }, { - 'sim': cirq.sim.sparse_simulator.Simulator() - }]) - def test_batch_expectation(self, sim): - """Test expectation.""" - qubits = cirq.GridQubit.rect(1, N_QUBITS) - circuit_batch, resolver_batch = _get_mixed_batch( - qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE) - ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE) - - results = batch_util.batch_calculate_expectation( - circuit_batch, resolver_batch, [[x] for x in ops], sim) - - for circuit, 
resolver, result, op in zip(circuit_batch, resolver_batch, - results, ops): - r = _expectation_helper(sim, circuit, resolver, op) - self.assertAllClose(r, result, rtol=1e-5, atol=1e-5) - - self.assertDTypeEqual(results, np.float32) - - @parameterized.parameters([{ - 'sim': cirq.DensityMatrixSimulator() - }, { - 'sim': cirq.sim.sparse_simulator.Simulator() - }]) - def test_batch_sampled_expectation(self, sim): - """Test expectation.""" - qubits = cirq.GridQubit.rect(1, N_QUBITS) - circuit_batch, resolver_batch = _get_mixed_batch( - qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE) - - ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE) - n_samples = [[1000] for _ in range(len(ops))] - - results = batch_util.batch_calculate_sampled_expectation( - circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim) - - for circuit, resolver, result, op in zip(circuit_batch, resolver_batch, - results, ops): - r = _expectation_helper(sim, circuit, resolver, op) - self.assertAllClose(r, result, rtol=1.0, atol=1e-1) - - self.assertDTypeEqual(results, np.float32) - - @parameterized.parameters([{ - 'sim': cirq.DensityMatrixSimulator() - }, { - 'sim': cirq.sim.sparse_simulator.Simulator() - }]) - def test_batch_sample(self, sim): - """Test sampling.""" - n_samples = 2000 * (2**N_QUBITS) - - circuit_batch, resolver_batch = _get_mixed_batch( - cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE) - - results = batch_util.batch_sample(circuit_batch, resolver_batch, - n_samples, sim) - - tfq_histograms = [] - for r in results: - tfq_histograms.append( - np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)), - range=(0, 2**N_QUBITS), - bins=2**N_QUBITS)[0]) - - cirq_histograms = [] - for circuit, resolver in zip(circuit_batch, resolver_batch): - state = sim.simulate(circuit, resolver) - r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples) - cirq_histograms.append( - np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)), - range=(0, 
2**N_QUBITS), - bins=2**N_QUBITS)[0]) - - for a, b in zip(tfq_histograms, cirq_histograms): - self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005) - - self.assertDTypeEqual(results, np.int32) - - @parameterized.parameters([{ - 'sim': cirq.DensityMatrixSimulator() - }, { - 'sim': cirq.sim.sparse_simulator.Simulator() - }]) - def test_empty_circuits(self, sim): - """Test functions with empty circuits.""" - # Common preparation - resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)] - circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)] - qubits = cirq.GridQubit.rect(1, N_QUBITS) - ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE) - n_samples = [[1000] for _ in range(len(ops))] - # If there is no op on a qubit, the expectation answer is -2.0 - true_expectation = (-2.0,) - - # (1) Test expectation - results = batch_util.batch_calculate_expectation( - circuit_batch, resolver_batch, [[x] for x in ops], sim) - - for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops): - self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5) - - self.assertDTypeEqual(results, np.float32) - - # (2) Test sampled_expectation - results = batch_util.batch_calculate_sampled_expectation( - circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim) - - for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops): - self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1) - - self.assertDTypeEqual(results, np.float32) - - # (3) Test state - results = batch_util.batch_calculate_state(circuit_batch, - resolver_batch, sim) - - for circuit, resolver, result in zip(circuit_batch, resolver_batch, - results): - r = _pad_state(sim, sim.simulate(circuit, resolver), 0) - self.assertAllClose(r, result, rtol=1e-5, atol=1e-5) - - self.assertDTypeEqual(results, np.complex64) - - # (4) Test sampling - n_samples = 2000 * (2**N_QUBITS) - results = batch_util.batch_sample(circuit_batch, resolver_batch, - n_samples, sim) 
- - for circuit, resolver, a in zip(circuit_batch, resolver_batch, results): - state = sim.simulate(circuit, resolver) - r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples) - self.assertAllClose(r, a, atol=1e-5) - - self.assertDTypeEqual(results, np.int32) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops.py b/tensorflow_quantum/core/ops/circuit_execution_ops.py deleted file mode 100644 index 3e226f531..000000000 --- a/tensorflow_quantum/core/ops/circuit_execution_ops.py +++ /dev/null @@ -1,338 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A module for user-facing generators of tfq ops.""" -import enum - -import cirq -from tensorflow_quantum.core.ops import (cirq_ops, tfq_simulate_ops, - tfq_utility_ops) - - -class TFQWavefunctionSimulator(enum.Enum): - """Enum to make specifying TFQ simulators user-friendly.""" - expectation = tfq_simulate_ops.tfq_simulate_expectation - samples = tfq_simulate_ops.tfq_simulate_samples - state = tfq_simulate_ops.tfq_simulate_state - - -def get_expectation_op(backend=None): - """Get a TensorFlow op that will calculate batches of expectation values. 
- - This function produces a non-differentiable TF op that will calculate - batches of expectation values given tensor batches of `cirq.Circuit`s, - parameter values, and `cirq.PauliSum` operators to measure. - - - >>> # Simulate circuits with C++. - >>> my_op = tfq.get_expectation_op() - >>> # Prepare some inputs. - >>> qubit = cirq.GridQubit(0, 0) - >>> my_symbol = sympy.Symbol('alpha') - >>> my_circuit_tensor = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.H(qubit) ** my_symbol) - ... ]) - >>> my_values = np.array([[0.123]]) - >>> my_paulis = tfq.convert_to_tensor([[ - ... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) - ... ]]) - >>> # This op can now be run with: - >>> output = my_op( - ... my_circuit_tensor, ['alpha'], my_values, my_paulis) - >>> output - tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - - - In order to make the op differentiable, a `tfq.differentiator` object is - needed. see `tfq.differentiators` for more details. Below is a simple - example of how to make my_op from the above code block differentiable: - - >>> diff = tfq.differentiators.ForwardDifference() - >>> my_differentiable_op = diff.generate_differentiable_op( - ... analytic_op=my_op - ... ) - - - Args: - backend: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any - `cirq.SimulatesFinalState`. If not provided the default C++ analytical - expectation calculation op is returned. - - Returns: - A `callable` with the following signature: - - ```op(programs, symbol_names, symbol_values, pauli_sums)``` - - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. 
- symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - - Returns: - `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). - """ - - # TODO (mbbrough): investigate how the above docstring renders. - if backend is None: - return TFQWavefunctionSimulator.expectation - - if isinstance(backend, cirq.SimulatesFinalState): - return cirq_ops._get_cirq_analytical_expectation(backend) - - if isinstance(backend, (cirq.SimulatesSamples, cirq.Sampler)): - raise NotImplementedError("Sample-based expectation is not supported." - " Use " - "tf.get_sampled_expectation_op() instead.") - - raise TypeError("Backend {} is invalid. Expected a Cirq.SimulatesFinalState" - " or None.".format(backend)) - - -def get_sampling_op(backend=None): - """Get a Tensorflow op that produces samples from given quantum circuits. - - This function produces a non-differentiable op that will calculate - batches of circuit samples given tensor batches of `cirq.Circuit`s, - parameter values, and a scalar telling the op how many samples to take. - - - >>> # Simulate circuits with cirq. - >>> my_op = tfq.get_sampling_op(backend=cirq.sim.Simulator()) - >>> # Simulate circuits with C++. - >>> my_second_op = tfq.get_sampling_op() - >>> # Prepare some inputs. - >>> qubit = cirq.GridQubit(0, 0) - >>> my_symbol = sympy.Symbol('alpha') - >>> my_circuit_tensor = tfq.convert_to_tensor( - ... 
[cirq.Circuit(cirq.X(qubit)**my_symbol)]) - >>> my_values = np.array([[2.0]]) - >>> n_samples = np.array([10]) - >>> # This op can now be run to take samples. - >>> output = my_second_op( - ... my_circuit_tensor, ['alpha'], my_values, n_samples) - >>> output - - - - Args: - backend: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any `cirq.Sampler`. If - not provided the default C++ sampling op is returned. - - Returns: - A `callable` with the following signature: - - ```op(programs, symbol_names, symbol_values, num_samples)``` - - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` with one element indicating the number of - samples to draw. - - Returns: - `tf.Tensor` with shape - [batch_size, num_samples, n_qubits] that - holds samples (as boolean values) for each circuit. - """ - - # TODO (mbbrough): investigate how the above docstring renders. - if backend is None: - return lambda programs, symbol_names, symbol_values, num_samples: \ - tfq_utility_ops.padded_to_ragged(TFQWavefunctionSimulator.samples( - programs, symbol_names, symbol_values, num_samples)) - - if isinstance(backend, (cirq.SimulatesSamples, cirq.Sampler)): - return lambda programs, symbol_names, symbol_values, num_samples: \ - tfq_utility_ops.padded_to_ragged(cirq_ops._get_cirq_samples(backend)( - programs, symbol_names, symbol_values, num_samples)) - - raise TypeError("Backend {} is invalid. 
Expected a Cirq.Sampler " - "or None.".format(backend)) - - -def get_state_op(backend=None): - """Get a TensorFlow op that produces states from given quantum circuits. - - This function produces a non-differentiable op that will calculate - batches of state tensors given tensor batches of `cirq.Circuit`s and - parameter values. - - - >>> # Simulate circuits with cirq. - >>> my_op = tfq.get_state_op(backend=cirq.DensityMatrixSimulator()) - >>> # Simulate circuits with C++. - >>> my_second_op = tfq.get_state_op() - >>> # Prepare some inputs. - >>> qubit = cirq.GridQubit(0, 0) - >>> my_symbol = sympy.Symbol('alpha') - >>> my_circuit_tensor = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.Y(qubit) ** my_symbol) - ... ]) - >>> my_values = np.array([[0.5]]) - >>> # This op can now be run to calculate the state. - >>> output = my_second_op(my_circuit_tensor, ['alpha'], my_values) - >>> output - - - - Args: - backend: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any - `cirq.SimulatesFinalState`. If not provided, the default C++ - wavefunction simulator will be used. - - Returns: - A `callable` with the following signature: - - ```op(programs, symbol_names, symbol_values)``` - - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - - Returns: - `tf.Tensor` with shape [batch_size, size of state] that - contains the state information of the circuit. - """ - - # TODO (mbbrough): investigate how the above docstring renders. 
- if backend is None: - return lambda programs, symbol_names, symbol_values: \ - tfq_utility_ops.padded_to_ragged(TFQWavefunctionSimulator.state( - programs, symbol_names, symbol_values)) - - if isinstance(backend, (cirq.SimulatesFinalState)): - return lambda programs, symbol_names, symbol_values: \ - tfq_utility_ops.padded_to_ragged( - cirq_ops._get_cirq_simulate_state(backend)( - programs, symbol_names, symbol_values)) - - raise TypeError("Backend {} is invalid. Expected a Cirq.SimulatesFinalState" - " or None.".format(backend)) - - -def get_sampled_expectation_op(backend=None): - """Get a TensorFlow op that will calculate sampled expectation values. - - This function produces a non-differentiable TF op that will calculate - batches of expectation values given tensor batches of `cirq.Circuit`s, - parameter values, and `cirq.PauliSum` operators to measure. - Expectation is estimated by taking num_samples shots per term in the - corresponding PauliSum. - - - >>> # Simulate circuits with C++. - >>> my_op = tfq.get_sampled_expectation_op() - >>> # Prepare some inputs. - >>> qubit = cirq.GridQubit(0, 0) - >>> my_symbol = sympy.Symbol('alpha') - >>> my_circuit_tensor = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.H(qubit) ** my_symbol) - ... ]) - >>> my_values = np.array([[0.123]]) - >>> my_paulis = tfq.convert_to_tensor([[ - ... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) - ... ]]) - >>> my_num_samples = np.array([[100]]) - >>> # This op can now be run with: - >>> output = my_op( - ... my_circuit_tensor, ['alpha'], my_values, my_paulis, my_num_samples) - >>> output - tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - - - In order to make the op differentiable, a `tfq.differentiator` object is - needed. see `tfq.differentiators` for more details. Below is a simple - example of how to make my_op from the above code block differentiable: - - - >>> diff = tfq.differentiators.ForwardDifference() - >>> my_differentiable_op = diff.generate_differentiable_op( - ... 
analytic_op=my_op - ... ) - - Args: - backend: Python `object` that specifies what backend this op should use - when evaluating circuits. It only accepts `cirq.Sampler`. - - Returns: - A `callable` with the following signature: - - ```op(programs, symbol_names, symbol_values, pauli_sums, num_samples)``` - - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - num_samples: `tf.Tensor` with `n_samples[i][j]` is equal to the - number of samples to draw in each term of `pauli_sums[i][j]` - when estimating the expectation. It can also be tiled up to the - shape of pauli_sums by broadcasting if tf.shape(num_samples)[0] - or tf.shape(num_samples)[1] is 1 and the other dimension is the - same with that of pauli_sums. - - Returns: - `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). - """ - # TODO (mbbrough): investigate how the above docstring renders. 
- if backend is None: - # TODO(zaqqwerty, jaeyoo): Remove comment once sampled_expectation - # is implemented, and update docstring - # return TFQWavefunctionSimulator.sampled_expectation - return cirq_ops._get_cirq_sampled_expectation(cirq.sim.Simulator()) - - if isinstance(backend, cirq.Sampler): - return cirq_ops._get_cirq_sampled_expectation(backend) - - raise TypeError( - "Backend {} is invalid. Expected a Cirq.Sampler or None.".format( - backend)) diff --git a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py b/tensorflow_quantum/core/ops/circuit_execution_ops_test.py deleted file mode 100644 index 5deb2bac3..000000000 --- a/tensorflow_quantum/core/ops/circuit_execution_ops_test.py +++ /dev/null @@ -1,517 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module to test consistency between Cirq and TFQ circuit execution ops.""" -from unittest import mock -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -from scipy import stats -import cirq - -from tensorflow_quantum.core.ops import batch_util, circuit_execution_ops -from tensorflow_quantum.python import util - -# Number of random circuits to use in a test batch. 
-BATCH_SIZE = 15 - -# These get used everywhere -WF_SIM = cirq.sim.sparse_simulator.Simulator() -DM_SIM = cirq.sim.density_matrix_simulator.DensityMatrixSimulator() - -EXPECTATION_OPS = [ - circuit_execution_ops.get_expectation_op(backend=None), - circuit_execution_ops.get_expectation_op(backend=WF_SIM), - circuit_execution_ops.get_expectation_op(backend=DM_SIM) -] - -SAMPLING_OPS = [ - circuit_execution_ops.get_sampling_op(backend=None), - circuit_execution_ops.get_sampling_op(backend=WF_SIM), - circuit_execution_ops.get_sampling_op(backend=DM_SIM) -] - -STATE_OPS = [ - circuit_execution_ops.get_state_op(backend=None), - circuit_execution_ops.get_state_op(backend=WF_SIM), - circuit_execution_ops.get_state_op(backend=DM_SIM) -] - -SAMPLED_EXPECTATION_OPS = [ - circuit_execution_ops.get_sampled_expectation_op(backend=None), - circuit_execution_ops.get_sampled_expectation_op(backend=WF_SIM), - circuit_execution_ops.get_sampled_expectation_op(backend=DM_SIM) -] - -SIMS = [WF_SIM, WF_SIM, DM_SIM] - - -class OpGetterInputChecks(tf.test.TestCase): - """Check that the op getters handle inputs correctly.""" - - def test_get_expectation_inputs(self): - """Test that get expectation only accepts inputs it should.""" - circuit_execution_ops.get_expectation_op() - circuit_execution_ops.get_expectation_op(backend=cirq.Simulator()) - circuit_execution_ops.get_expectation_op( - backend=cirq.DensityMatrixSimulator()) - circuit_execution_ops.get_expectation_op() - with self.assertRaisesRegex(NotImplementedError, - expected_regex='Sample-based'): - mock_engine = mock.Mock() - circuit_execution_ops.get_expectation_op( - cirq.google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq.google.XMON)) - with self.assertRaisesRegex( - TypeError, expected_regex="a Cirq.SimulatesFinalState"): - circuit_execution_ops.get_expectation_op(backend="junk") - - def test_get_sampled_expectation_inputs(self): - """Test that get expectation only accepts inputs it should.""" 
- circuit_execution_ops.get_sampled_expectation_op() - circuit_execution_ops.get_sampled_expectation_op( - backend=cirq.Simulator()) - circuit_execution_ops.get_sampled_expectation_op( - backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() - circuit_execution_ops.get_sampled_expectation_op( - cirq.google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq.google.XMON)) - with self.assertRaisesRegex(TypeError, expected_regex="a Cirq.Sampler"): - circuit_execution_ops.get_sampled_expectation_op(backend="junk") - - def test_get_samples_inputs(self): - """Test that get_samples only accepts inputs it should.""" - circuit_execution_ops.get_sampling_op() - circuit_execution_ops.get_sampling_op(backend=cirq.Simulator()) - circuit_execution_ops.get_sampling_op( - backend=cirq.DensityMatrixSimulator()) - mock_engine = mock.Mock() - circuit_execution_ops.get_sampling_op( - backend=cirq.google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq.google.XMON)) - with self.assertRaisesRegex(TypeError, - expected_regex="Expected a Cirq.Sampler"): - circuit_execution_ops.get_sampling_op(backend="junk") - - def test_get_state_inputs(self): - """Test that get_states only accepts inputs it should.""" - circuit_execution_ops.get_state_op() - circuit_execution_ops.get_state_op(backend=cirq.Simulator()) - circuit_execution_ops.get_state_op( - backend=cirq.DensityMatrixSimulator()) - with self.assertRaisesRegex(TypeError, - expected_regex="Cirq.SimulatesFinalState"): - circuit_execution_ops.get_state_op(backend="junk") - with self.assertRaisesRegex(TypeError, - expected_regex="Cirq.SimulatesFinalState"): - mock_engine = mock.Mock() - circuit_execution_ops.get_state_op( - backend=cirq.google.QuantumEngineSampler( - engine=mock_engine, - processor_id='test', - gate_set=cirq.google.XMON)) - - -class ExecutionOpsConsistentyTest(tf.test.TestCase, parameterized.TestCase): - """Test all ops produce equivalent output to one 
another.""" - - @parameterized.parameters([{ - 'op_and_sim': (op, sim) - } for (op, sim) in zip(STATE_OPS, SIMS)]) - def test_supported_gates_consistent(self, op_and_sim): - """Ensure that supported gates are consistent across backends.""" - op = op_and_sim[0] - sim = op_and_sim[1] - qubits = cirq.GridQubit.rect(1, 5) - circuit_batch = [] - - gate_ref = util.get_supported_gates() - for gate in gate_ref: - # Create a circuit with non zero entries on real - # and imaginary values. - c = cirq.Circuit() - for qubit in qubits: - c += cirq.Circuit(cirq.Y(qubit)**0.125) - - if gate_ref[gate] == 2: - op_qubits = np.random.choice(qubits, size=2, replace=False) - c += cirq.Circuit(gate(*op_qubits)) - elif gate_ref[gate] == 1: - op_qubits = np.random.choice(qubits, size=1, replace=False) - c += cirq.Circuit(gate(*op_qubits)) - else: - raise ValueError( - "Unable to test supported gates across all ops." - "please update circuit_execution_ops_test.py") - - circuit_batch.append(c) - - op_states = op(util.convert_to_tensor(circuit_batch), [], - [[]] * len(circuit_batch)).to_list() - cirq_states = batch_util.batch_calculate_state( - circuit_batch, [cirq.ParamResolver({}) for _ in circuit_batch], sim) - - self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(STATE_OPS, SIMS)], - 'n_qubits': [3, 7] - }))) - def test_simulate_state_no_symbols(self, op_and_sim, n_qubits): - """Compute states using cirq and tfq without symbols.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - circuit_batch, resolver_batch = util.random_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), BATCH_SIZE) - - op_states = op(util.convert_to_tensor(circuit_batch), [], - [[]] * BATCH_SIZE).to_list() - cirq_states = batch_util.batch_calculate_state(circuit_batch, - resolver_batch, sim) - - self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5) - - 
@parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(STATE_OPS, SIMS)], - 'n_qubits': [3, 7], - 'symbol_names': [['a'], ['a', 'b'], - ['a', 'b', 'c', 'd', 'e']] - }))) - def test_simulate_state_with_symbols(self, op_and_sim, n_qubits, - symbol_names): - """Compute states using cirq and tfq with symbols.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), symbol_names, BATCH_SIZE) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - op_states = op(util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array).to_list() - - cirq_states = batch_util.batch_calculate_state(circuit_batch, - resolver_batch, sim) - - self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'op_and_sim': [(op, sim) for (op, sim) in zip(STATE_OPS, SIMS)], - }))) - def test_simulate_state_empty(self, op_and_sim): - """Test empty circuits for states using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)] - resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)] - - op_states = op(util.convert_to_tensor(circuit_batch), [], - [[]] * BATCH_SIZE).to_list() - cirq_states = batch_util.batch_calculate_state(circuit_batch, - resolver_batch, sim) - - self.assertAllClose(cirq_states, op_states, atol=1e-5, rtol=1e-5) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(EXPECTATION_OPS, SIMS)], - 'n_qubits': [3, 7], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']], - 'max_paulisum_length': [6] - }))) - def test_analytical_expectation(self, op_and_sim, n_qubits, symbol_names, - max_paulisum_length): - 
"""Compute expectations using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, BATCH_SIZE) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length, - BATCH_SIZE) - - op_expectations = op( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[psum] for psum in pauli_sums])) - - cirq_expectations = batch_util.batch_calculate_expectation( - circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim) - - self.assertAllClose(op_expectations.numpy().flatten(), - cirq_expectations.flatten(), - rtol=1e-5, - atol=1e-5) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(EXPECTATION_OPS, SIMS)], - 'n_qubits': [3], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']], - 'max_paulisum_length': [6] - }))) - def test_analytical_expectation_empty(self, op_and_sim, n_qubits, - symbol_names, max_paulisum_length): - """Test empty circuits for analytical expectation using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)] - resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)] - - symbol_values_array = np.array( - [[0.0 for _ in symbol_names] for _ in resolver_batch]) - - pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length, - BATCH_SIZE) - - op_expectations = op( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[psum] for psum in pauli_sums])) - - cirq_expectations = batch_util.batch_calculate_expectation( - circuit_batch, resolver_batch, [[x] for x in pauli_sums], sim) - - 
self.assertAllClose(op_expectations.numpy().flatten(), - cirq_expectations.flatten(), - rtol=1e-5, - atol=1e-5) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) for ( - op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)], - 'n_qubits': [3, 7], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']], - 'max_paulisum_length': [6] - }))) - def test_sampled_expectation(self, op_and_sim, n_qubits, symbol_names, - max_paulisum_length): - """Compute sampled expectations using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, BATCH_SIZE) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length, - BATCH_SIZE) - num_samples = [[100]] * BATCH_SIZE - - op_expectations = op( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[psum] for psum in pauli_sums]), - num_samples) - - cirq_expectations = batch_util.batch_calculate_sampled_expectation( - circuit_batch, resolver_batch, [[x] for x in pauli_sums], - num_samples, sim) - - self.assertAllClose(op_expectations.numpy().flatten(), - cirq_expectations.flatten(), - rtol=1e-1, - atol=1e-1) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) for ( - op, sim) in zip(SAMPLED_EXPECTATION_OPS, SIMS)], - 'n_qubits': [3], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']], - 'max_paulisum_length': [6] - }))) - def test_sampled_expectation_empty(self, op_and_sim, n_qubits, symbol_names, - max_paulisum_length): - """Test empty circuits for sampled expectation using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch = [cirq.Circuit() for _ in 
range(BATCH_SIZE)] - resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)] - - symbol_values_array = np.array( - [[0.0 for _ in symbol_names] for _ in resolver_batch]) - - pauli_sums = util.random_pauli_sums(qubits, max_paulisum_length, - BATCH_SIZE) - num_samples = [[100]] * BATCH_SIZE - - op_expectations = op( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[psum] for psum in pauli_sums]), - num_samples) - - cirq_expectations = batch_util.batch_calculate_sampled_expectation( - circuit_batch, resolver_batch, [[x] for x in pauli_sums], - num_samples, sim) - - self.assertAllClose(op_expectations.numpy().flatten(), - cirq_expectations.flatten(), - rtol=1e-1, - atol=1e-1) - - # keep the qubit count low here, all computations scale exponentially - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(SAMPLING_OPS, SIMS)], - 'n_qubits': [6], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']] - }))) - def test_sampling(self, op_and_sim, n_qubits, symbol_names): - """Compare sampling with tfq ops and Cirq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - qubits = cirq.GridQubit.rect(1, n_qubits) - n_samples = int((2**n_qubits) * 1000) - - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, BATCH_SIZE, 30) - for i in range(BATCH_SIZE): - circuit_batch[i] += cirq.Circuit( - *[cirq.H(qubit) for qubit in qubits]) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - op_samples = np.array( - op(util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, [n_samples]).to_list()) - - op_histograms = [ - np.histogram( - sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)), - range=(0, 2**len(qubits)), - bins=2**len(qubits))[0] for sample in op_samples - ] - - cirq_samples = 
batch_util.batch_sample(circuit_batch, resolver_batch, - n_samples, sim) - - cirq_histograms = [ - np.histogram( - sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)), - range=(0, 2**len(qubits)), - bins=2**len(qubits))[0] for sample in cirq_samples - ] - - for a, b in zip(op_histograms, cirq_histograms): - self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005) - - # keep the qubit count low here, all computations scale exponentially - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'op_and_sim': [(op, sim) - for (op, sim) in zip(SAMPLING_OPS, SIMS)], - 'n_qubits': [3], - 'symbol_names': [['a', 'b', 'c', 'd', 'e']] - }))) - def test_sampling_empty(self, op_and_sim, n_qubits, symbol_names): - """Test empty circuits for sampling using cirq and tfq.""" - op = op_and_sim[0] - sim = op_and_sim[1] - qubits = cirq.GridQubit.rect(1, n_qubits) - n_samples = int((2**n_qubits) * 1000) - - circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)] - resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)] - - symbol_values_array = np.array( - [[0.0 for _ in symbol_names] for _ in resolver_batch]) - - op_samples = np.array( - op(util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, [n_samples]).to_list()) - - op_histograms = [ - np.histogram( - sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)), - range=(0, 2**len(qubits)), - bins=2**len(qubits))[0] for sample in op_samples - ] - - cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch, - n_samples, sim) - - cirq_histograms = [ - np.histogram( - sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)), - range=(0, 2**len(qubits)), - bins=2**len(qubits))[0] for sample in cirq_samples - ] - - for a, b in zip(op_histograms, cirq_histograms): - self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/core/ops/cirq_ops.py b/tensorflow_quantum/core/ops/cirq_ops.py 
deleted file mode 100644 index 70e2155bc..000000000 --- a/tensorflow_quantum/core/ops/cirq_ops.py +++ /dev/null @@ -1,649 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Generators for ops that call out to cirq simulators from the tf graph.""" -import functools -import numbers - -import numpy as np -import tensorflow as tf -import cirq - -from tensorflow_quantum.core.ops import batch_util -from tensorflow_quantum.core.proto import pauli_sum_pb2 -from tensorflow_quantum.core.serialize import serializer - - -def _upgrade_inputs(op_wrapper): - """It is helpful to call this on the py_function wrappers you generate, - as if they are the first element in an eager graph, the inputs - may or may not already be tensors.""" - - @functools.wraps(op_wrapper) - def wrapper(*args): - tensorized_args = [] - for arg in args: - if not tf.is_tensor(arg): - arg = tf.convert_to_tensor(arg) - tensorized_args.append(arg) - return op_wrapper(*tensorized_args) - - return wrapper - - -def _input_check_helper(programs, symbol_names, symbol_values): - """Helper function that type checks common inputs. - - Type and size check the `programs`, `symbol_names`, and `symbol_values` - inputs, which are used by all ops in this module. 
- - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - """ - if not programs.dtype == tf.dtypes.string: - raise TypeError('programs tensor must be of type string') - # if symbol_names is empty it won't be of type string - if tf.size(symbol_names) > 0 and not symbol_names.dtype == tf.dtypes.string: - raise TypeError('symbol_names tensor must be of type string') - if not isinstance(symbol_values.dtype.as_numpy_dtype(), numbers.Real): - raise TypeError('symbol_values tensor must be a real-valued' - ' numeric tensor.') - if not (int(symbol_values.shape[0]) == int(tf.size(programs))): - raise ValueError('first dimension of symbol_values tensor' - ' must match size of programs tensor.') - if len(symbol_values.shape) < 2 or not (int(tf.size(symbol_names)) == int( - symbol_values.shape[1])): - raise ValueError('size of symbol_names tensor must match second' - ' dimension of symbol_values tensor.') - - -def _batch_deserialize_helper(programs, symbol_names, symbol_values): - """Helper function that converts tensors to cirq constructs. - - Converts the string representation of the circuits in `programs` - to `cirq.Circuit` objects and produces a corresponding - `cirq.ParamResolver` constructed using `symbol_names` and `symbol_values`. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
- symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - - Returns: - `tuple` containing a `list` of `cirq.Circuit`s constructed from programs - and a `list` of `cirq.ParamResolver`s. - """ - de_ser_symbol_names = [x.decode('UTF-8') for x in symbol_names.numpy()] - de_ser_programs = [] - resolvers = [] - # TODO(zaqqwerty): investigate parallelization of this loop - for program, values in zip(programs, symbol_values): - program = program.numpy() - values = values.numpy().astype(float) - - circuit_proto = cirq.google.api.v2.program_pb2.Program() - circuit_proto.ParseFromString(program) - - circuit = serializer.deserialize_circuit(circuit_proto) - - resolver = cirq.study.resolver.ParamResolver( - dict(zip(de_ser_symbol_names, values))) - de_ser_programs.append(circuit) - resolvers.append(resolver) - return de_ser_programs, resolvers - - -def _get_cirq_analytical_expectation( - simulator=cirq.sim.sparse_simulator.Simulator()): - """Get a `callable` that is a TensorFlow op that outputs expectation values. - - Generate a TensorFlow `tf.py_function` op that when called on `tf.Tensor`s - containing circuits and parameters produces a `tf.Tensor` of expectation - values. - - Args: - simulator: `cirq.Simulator` object to use for circuit execution. - - Returns: - `callable` that is a TensorFlow op for computing expectation. - """ - - def cirq_analytical_expectation(programs, symbol_names, symbol_values, - pauli_sums): - """Calculate the expectation value of circuits wrt some operator(s). - - Calculate the expectation value for all the `cirq.PauliSum`s in - `pauli_sums` on each `cirq.Circuit` in `programs`. 
Each circuit will - have the values in `symbol_values` resolved into the symbols in the - circuit (with the ordering defined by `symbol_names`). - - ```python - - symbol_names = ['a', 'b', 'c'] - programs = tfq.convert_to_tensor( - [cirq.Circuit(H(q0) ** sympy.Symbol('a'), - X(q1) ** sympy.Symbol('b'), - Y(q2) ** sympy.Symbol('c'))] - ) - - symbol_values = [[3,2,1]] - pauli_sums = tfq.convert_to_tensor( - [1.5 * cirq.Z(q0) * cirq.Z(q1)] - ) - - cirq_analytical_expectation( - programs, symbol_names, sybmol_values, pauli_sums) - ``` - - Would place the values of 3 into the Symbol labeled 'a', 2 into the - symbol labeled 'b' and 1 into the symbol labeled 'c'. Then it would - calculate the ZZ expectation on this circuit. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - - Returns: - `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). 
- """ - _input_check_helper(programs, symbol_names, symbol_values) - if not (pauli_sums.dtype == tf.dtypes.string): - raise TypeError('pauli_sums tensor must be of type string.') - if not (pauli_sums.shape[0] == programs.shape[0]): - raise TypeError('pauli_sums tensor must have the same batch shape ' - 'as programs tensor.') - - programs, resolvers = _batch_deserialize_helper(programs, symbol_names, - symbol_values) - - sum_inputs = [] - for sub_list in pauli_sums.numpy(): - to_append = [] - for x in sub_list: - obj = pauli_sum_pb2.PauliSum() - obj.ParseFromString(x) - to_append.append(serializer.deserialize_paulisum(obj)) - sum_inputs.append(to_append) - - expectations = batch_util.batch_calculate_expectation( - programs, resolvers, sum_inputs, simulator) - - return expectations - - if not isinstance(simulator, cirq.sim.SimulatesFinalState): - raise TypeError("simulator must inherit cirq.sim.SimulatesFinalState.") - - @_upgrade_inputs - def expectation_generator(programs_tf, symbol_names_tf, symbol_values_tf, - pauli_sums_tf): - out = tf.py_function( - func=cirq_analytical_expectation, - inp=[ - tf.stop_gradient(programs_tf), - tf.stop_gradient(symbol_names_tf), symbol_values_tf, - tf.stop_gradient(pauli_sums_tf) - ], - Tout=tf.float32, - ) - out.set_shape([programs_tf.shape[0], pauli_sums_tf.shape[1]]) - return out - - return expectation_generator - - -def _get_cirq_sampled_expectation( - simulator=cirq.sim.sparse_simulator.Simulator()): - """Get a `callable` that is a TensorFlow op that outputs sampled expectation - values. - - Generate a TensorFlow `tf.py_function` op that when called on `tf.Tensor`s - containing circuits and parameters produces a `tf.Tensor` of sampled - expectation values. - - Args: - simulator: `cirq.Simulator` object to use for circuit execution. - - Returns: - `callable` that is a TensorFlow op for computing expectation. 
- """ - - def cirq_sampled_expectation(programs, symbol_names, symbol_values, - pauli_sums, num_samples): - """Calculate the sampled expectation value of circuits wrt some - operator(s). - - Estimates the expectation value for all the `cirq.PauliSum`s in - `pauli_sums` on each `cirq.Circuit` in `programs`. Each circuit will - have the values in `symbol_values` resolved into the symbols in the - circuit (with the ordering defined by `symbol_names`). - - ```python - - symbol_names = ['a', 'b', 'c'] - programs = tfq.convert_to_tensor( - [cirq.Circuit(H(q0) ** sympy.Symbol('a'), - X(q1) ** sympy.Symbol('b'), - Y(q2) ** sympy.Symbol('c'))] - ) - - symbol_values = [[3,2,1]] - pauli_sums = tfq.convert_to_tensor( - [1.5 * cirq.Z(q0) * cirq.Z(q1)] - ) - n_samples = [[100]] - - cirq_sampled_expectation( - programs, symbol_names, sybmol_values, pauli_sums, n_samples) - ``` - - Would place the values of 3 into the Symbol labeled 'a', 2 into the - symbol labeled 'b' and 1 into the symbol labeled 'c'. Then it would - estimate the ZZ expectation on this circuit by draw samples from the - circuit 100 times. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. 
- num_samples: `tf.Tensor` with `n_samples[i][j]` is equal to the - number of samples to draw in each term of `pauli_sums[i][j]` - when estimating the expectation. - - Returns: - `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). - """ - _input_check_helper(programs, symbol_names, symbol_values) - if not (pauli_sums.dtype == tf.dtypes.string): - raise TypeError('pauli_sums tensor must be of type string.') - if not (pauli_sums.shape[0] == programs.shape[0]): - raise TypeError('pauli_sums tensor must have the same batch shape ' - 'as programs tensor.') - - if not (num_samples.dtype == tf.dtypes.int32 or - num_samples.dtype == tf.dtypes.int64): - raise TypeError('num_samples tensor must be of type int32 of ' - 'int64.') - if not (num_samples.shape == pauli_sums.shape): - raise TypeError('num_samples tensor must have the same shape ' - 'as pauli_sums tensor. got: {} expected: {}'.format( - num_samples.shape, pauli_sums.shape)) - if tf.less_equal(num_samples, 0).numpy().any(): - raise TypeError('num_samples contains sample value <= 0.') - - programs, resolvers = _batch_deserialize_helper(programs, symbol_names, - symbol_values) - - num_samples = num_samples.numpy().tolist() - - sum_inputs = [] - for sub_list in pauli_sums.numpy(): - to_append = [] - for x in sub_list: - obj = pauli_sum_pb2.PauliSum() - obj.ParseFromString(x) - to_append.append(serializer.deserialize_paulisum(obj)) - sum_inputs.append(to_append) - - expectations = batch_util.batch_calculate_sampled_expectation( - programs, resolvers, sum_inputs, num_samples, simulator) - - return expectations - - if not isinstance(simulator, cirq.Sampler): - raise TypeError("cirq.Sampler is required for sampled expectation.") - - @_upgrade_inputs - def sampled_expectation_generator(programs_tf, symbol_names_tf, - symbol_values_tf, pauli_sums_tf, - num_samples_tf): - out = tf.py_function( - 
func=cirq_sampled_expectation, - inp=[ - tf.stop_gradient(programs_tf), - tf.stop_gradient(symbol_names_tf), - symbol_values_tf, - tf.stop_gradient(pauli_sums_tf), - tf.stop_gradient(num_samples_tf), - ], - Tout=tf.float32, - ) - out.set_shape([programs_tf.shape[0], pauli_sums_tf.shape[1]]) - return out - - return sampled_expectation_generator - - -# TODO(trevormccrt): should this be removed when differentiators come in ? -def _group_tuples(inputs): - """Helper that groups a `list` of `tuple`s based on the elements at index 0. - - Given a `list` of `tuple`s, return a `dict` mapping from every unique first - element in the list to the lists containing the rest of the elements. - Example: - [(a,2,3),(b,1,1),(b,1,2),(a,1,1)] -> {a:[(2,3),(1,1)], b:[(1,1),(1,2)]} - - Args: - input: Python `list` of tuples to group. - - Returns: - Python `dict` containing groups. - """ - groups = {} - for item in inputs: - current_groups = groups.get(item[0], []) - current_groups.append(item[1:]) - groups[item[0]] = current_groups - return groups - - -def _get_cirq_samples(sampler=cirq.sim.sparse_simulator.Simulator()): - """Get a `callable` that is a TensorFlow op that outputs circuit samples. - - Generate a TensorFlow `tf.py_function` op that when called on `tf.Tensor`s - of circuits and parameters produces a tensor of bitstring samples from all - the circuits. - - Args: - simulator: `cirq.Simulator` object to use for circuit execution. - - Returns: - `callable` that is a Tensorflow op for taking samples. - """ - - @tf.custom_gradient - def cirq_sample(programs, symbol_names, symbol_values, num_samples): - """Draw samples from circuits. - - Draw samples from `circuits` where each circuit will have the values in - `symbol_values` resolved into the symbols in the circuit (with the - ordering defined by `symbol_names`). 
- - ```python - - symbol_names = ['a', 'b', 'c'] - programs = tfq.convert_to_tensor( - [cirq.Circuit(H(q0) ** sympy.Symbol('a'), - X(q1) ** sympy.Symbol('b'), - Y(q2) ** sympy.Symbol('c'))] - ) - - symbol_values = [[3,2,1]] - n_samples = [100] - - cirq_sample(programs, symbol_names, sybmol_values, n_samples) - ``` - - Would place the values of 3 into the Symbol labeled 'a', 2 into the - symbol labeled 'b' and 1 into the symbol labeled 'c'. Then it would - draw 100 samples from the circuit. - - Note: In the case of circuits with varying size, all nonexistant - samples for a particular circuit are padded with -2. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` with one element indicating the number of - samples to draw. - - Returns: - `tf.Tensor` with shape - [batch_size, num_samples, <# qubits in largest circuit>] that - holds samples (as boolean values) for each circuit. - """ - - def _no_grad(grad): - raise RuntimeError( - 'Differentiation through a sampling operation is not supported.' 
- ) - - _input_check_helper(programs, symbol_names, symbol_values) - - if not (int(tf.size(num_samples)) == 1): - raise ValueError("num_samples tensor must have size 1") - if not isinstance(num_samples.dtype.as_numpy_dtype(), numbers.Integral): - raise TypeError("num_samples tensor must be of integer type") - - serialized_programs = programs - programs, resolvers = _batch_deserialize_helper(programs, symbol_names, - symbol_values) - - num_samples = int(num_samples.numpy()) - - if not isinstance(sampler, cirq.google.QuantumEngineSampler): - results = batch_util.batch_sample(programs, resolvers, num_samples, - sampler) - - else: - - max_n_qubits = 0 - for p in programs: - if p.has_measurements(): - # should never hit this error because the seriazlizer - # does not support cirq.measurement yet - raise RuntimeError('TFQ does not support programs with ' - 'pre-existing measurements.') - p.append(cirq.measure(*p.all_qubits(), key='tfq')) - max_n_qubits = max([max_n_qubits, len(p.all_qubits())]) - - # group samples from identical circuits to reduce communication - # overhead. 
Have to keep track of the order in which things came - # in to make sure the output is ordered correctly - to_be_grouped = [ - (ser_prog.numpy(), resolver, index) - for index, ( - ser_prog, - resolver) in enumerate(zip(serialized_programs, resolvers)) - ] - - grouped = _group_tuples(to_be_grouped) - - # start all the necessary jobs - result_mapping = {} - for key, value in grouped.items(): - program = programs[value[0][1]] - resolvers = [x[0] for x in value] - orders = [x[1] for x in value] - - # sampler.run_sweep blocks until results are in, so go around it - result = sampler._engine.run_sweep( - program=program, - params=resolvers, - repetitions=num_samples, - processor_ids=sampler._processor_ids, - gate_set=sampler._gate_set) - - result_mapping[result] = orders - - # get all results - cirq_results = [None] * len(programs) - for key, value in result_mapping.items(): - this_results = key.results() - for result, index in zip(this_results, value): - cirq_results[index] = result - - results = [] - for r in cirq_results: - results.append( - tf.keras.preprocessing.sequence.pad_sequences( - r.measurements['tfq'], - maxlen=max_n_qubits, - dtype=np.int8, - value=-2, - padding='pre')) - - return np.array(results, dtype=np.int8), _no_grad - - if not isinstance(sampler, cirq.Sampler): - raise TypeError("simulator must inherit cirq.Sampler.") - - @_upgrade_inputs - def sample_generator(circuit_spec, param_names, param_values, num_samples): - out = tf.py_function( - func=cirq_sample, - inp=[ - tf.stop_gradient(circuit_spec), - tf.stop_gradient(param_names), param_values, - tf.stop_gradient(num_samples) - ], - Tout=tf.int8, - ) - out.set_shape([circuit_spec.shape[0], None, None]) - return out - - return sample_generator - - -def _get_cirq_simulate_state(simulator=cirq.sim.sparse_simulator.Simulator()): - """Get a `callable` that is a TensorFlow op that outputs circuit states. 
- - Generate a TensorFlow `tf.py_function` op that when called on `tf.Tensor`s - of circuits and parameters produces a `tf.Tensor` containing the final state - of all the input circuits. - - Args: - simulator: `cirq.Simulator` object to use for circuit execution. - - Returns: - `callable` that is a Tensorflow op for calculating states. - """ - - @tf.custom_gradient - def cirq_simulate_state(programs, symbol_names, symbol_values): - """Simulate the final state of circuits. - - Calculate the final state of for each `cirq.Circuit` in `programs` - with the values in `symbol_values` resolved into the symbols in the - circuit (with the ordering defined by `symbol_names`). - - ```python - symbol_names = ['a', 'b', 'c'] - programs = tfq.convert_to_tensor( - [cirq.Circuit(H(q0) ** sympy.Symbol('a'), - X(q1) ** sympy.Symbol('b'), - Y(q2) ** sympy.Symbol('c'))] - ) - - symbol_values = [[3,2,1]] - - cirq_simulate_state(programs, symbol_names, sybmol_values) - ``` - - Would place the values of 3 into the Symbol labeled 'a', 2 into the - symbol labeled 'b' and 1 into the symbol labeled 'c'. Then it would - simulate the final state of the circuit. - - Note: In the case of circuits with varying size, all nonexistent - amplitudes are padded with -2. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - - Returns: - `tf.Tensor` with shape [batch_size, ] - that contains the state information of the circuit. 
- """ - - def _no_grad(grad): - raise RuntimeError( - 'Differentiation through states is not supported.') - - _input_check_helper(programs, symbol_names, symbol_values) - - states = batch_util.batch_calculate_state( - *_batch_deserialize_helper(programs, symbol_names, symbol_values), - simulator) - - return states, _no_grad - - if not isinstance(simulator, cirq.sim.SimulatesFinalState): - raise TypeError("simulator must inherit cirq.sim.SimulatesFinalState.") - - @_upgrade_inputs - def state_generator(circuit_spec, param_names, param_values): - out = tf.py_function( - func=cirq_simulate_state, - inp=[ - tf.stop_gradient(circuit_spec), - tf.stop_gradient(param_names), - param_values, - ], - Tout=tf.complex64, - ) - if isinstance(simulator, cirq.sim.Simulator): - out.set_shape([circuit_spec.shape[0], None]) - else: - out.set_shape([circuit_spec.shape[0], None, None]) - - return out - - return state_generator diff --git a/tensorflow_quantum/core/ops/cirq_ops_test.py b/tensorflow_quantum/core/ops/cirq_ops_test.py deleted file mode 100644 index 5b8753ea1..000000000 --- a/tensorflow_quantum/core/ops/cirq_ops_test.py +++ /dev/null @@ -1,428 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for the cirq simulation ops.""" -from unittest import mock -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import cirq - -from tensorflow_quantum.core.ops import cirq_ops -from tensorflow_quantum.core.serialize import serializer -from tensorflow_quantum.python import util - -MOMENT_DEPTH = 25 - -WF_SIM = cirq.sim.sparse_simulator.Simulator() -DM_SIM = cirq.sim.density_matrix_simulator.DensityMatrixSimulator() - - -class CirqAnalyticalExpectationTest(tf.test.TestCase): - """Tests get_cirq_analytical_expectation.""" - - def test_get_cirq_analytical_expectation_op(self): - """Input check the wrapper for the cirq analytical expectation op.""" - with self.assertRaisesRegex( - TypeError, - "simulator must inherit cirq.sim.SimulatesFinalState."): - cirq_ops._get_cirq_analytical_expectation("junk") - # TODO(peterse): Tighten these tests a bit.. - cirq_ops._get_cirq_analytical_expectation() - cirq_ops._get_cirq_analytical_expectation( - cirq.sim.sparse_simulator.Simulator()) - cirq_ops._get_cirq_analytical_expectation( - cirq.sim.density_matrix_simulator.DensityMatrixSimulator()) - - def test_cirq_analytical_expectation_op_inputs(self): - """Test input checking in the state sim op.""" - test_op = cirq_ops._get_cirq_analytical_expectation( - cirq.sim.sparse_simulator.Simulator()) - bits = cirq.GridQubit.rect(1, 5) - test_circuit = serializer.serialize_circuit( - cirq.testing.random_circuit(bits, MOMENT_DEPTH, - 0.9)).SerializeToString() - test_pauli_sum = serializer.serialize_paulisum( - cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0]) - ])).SerializeToString() - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'symbol_names tensor must be of type string'): - _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs tensor must be of type string'): - _ = 
test_op([0], ['rx'], [[0]], [test_pauli_sum]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'real-valued numeric tensor.'): - _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'first dimension of symbol_values tensor'): - _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], - [test_pauli_sum]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'pauli_sums tensor must be of type string.'): - _ = test_op([test_circuit], ['rx'], [[1]], 0) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'pauli_sums tensor must have the same batch shape'): - _ = test_op([test_circuit], ['rx'], [[1]], - [[test_pauli_sum], [test_pauli_sum]]) - - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]]) - _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]]) - - def test_analytic_expectation_empty_circuit(self): - """Test empty circuits""" - test_op = cirq_ops._get_cirq_analytical_expectation( - cirq.sim.sparse_simulator.Simulator()) - bits = cirq.GridQubit.rect(1, 5) - test_pauli_sum = serializer.serialize_paulisum( - cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0]) - ])).SerializeToString() - test_empty_circuit = serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() - _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]]) - - -class CirqSampledExpectationTest(tf.test.TestCase): - """Tests get_cirq_sampled_expectation.""" - - def test_get_cirq_sampled_expectation_op(self): - """Input check the wrapper for the cirq analytical expectation op.""" - with self.assertRaisesRegex( - 
TypeError, "cirq.Sampler is required for sampled expectation."): - cirq_ops._get_cirq_sampled_expectation("junk") - # TODO(peterse): Tighten these tests a bit.. - cirq_ops._get_cirq_sampled_expectation() - cirq_ops._get_cirq_sampled_expectation( - cirq.sim.sparse_simulator.Simulator()) - cirq_ops._get_cirq_sampled_expectation( - cirq.sim.density_matrix_simulator.DensityMatrixSimulator()) - - def test_cirq_sampled_expectation_op_inputs(self): - """test input checking in the state sim op.""" - test_op = cirq_ops._get_cirq_sampled_expectation( - cirq.sim.sparse_simulator.Simulator()) - bits = cirq.GridQubit.rect(1, 5) - test_circuit = serializer.serialize_circuit( - cirq.testing.random_circuit(bits, MOMENT_DEPTH, - 0.9)).SerializeToString() - test_pauli_sum = serializer.serialize_paulisum( - cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0]) - ])).SerializeToString() - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'symbol_names tensor must be of type string'): - _ = test_op([test_circuit], [0], [[0]], [[test_pauli_sum]], [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs tensor must be of type string'): - _ = test_op([0], ['rx'], [[0]], [test_pauli_sum], [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'real-valued numeric tensor.'): - _ = test_op([test_circuit], ['rx'], 'junk', [[test_pauli_sum]], - [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx'], [[1, 1]], [[test_pauli_sum]], - [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [[test_pauli_sum]], - [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'first dimension of symbol_values tensor'): - _ = test_op([test_circuit, test_circuit], ['rx'], [[1]], - [test_pauli_sum], [[1]]) - with self.assertRaisesRegex( - 
tf.errors.InvalidArgumentError, - 'pauli_sums tensor must be of type string.'): - _ = test_op([test_circuit], ['rx'], [[1]], 0, [[1]]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'pauli_sums tensor must have the same batch shape'): - _ = test_op([test_circuit], ['rx'], [[1]], - [[test_pauli_sum], [test_pauli_sum]], [[1]]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'num_samples tensor must have the same shape'): - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]], - [[1], [1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'num_samples tensor must be of type int32'): - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]], - [[1.0]]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'num_samples tensor must have the same shape'): - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]], - [[1], [1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'num_samples contains sample value <= 0'): - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]], - [[0]]) - - _ = test_op([test_circuit], ['rx'], [[1]], [[test_pauli_sum]], [[1]]) - _ = test_op([test_circuit], [], [[]], [[test_pauli_sum]], [[1]]) - - def test_sampled_expectation_empty_circuit(self): - """Test empty circuits""" - test_op = cirq_ops._get_cirq_sampled_expectation( - cirq.sim.sparse_simulator.Simulator()) - bits = cirq.GridQubit.rect(1, 5) - test_pauli_sum = serializer.serialize_paulisum( - cirq.PauliSum.from_pauli_strings([cirq.Z(bits[0]) - ])).SerializeToString() - test_empty_circuit = serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() - _ = test_op([test_empty_circuit], [], [[]], [[test_pauli_sum]], [[1]]) - - -class CirqSimulateStateTest(tf.test.TestCase, parameterized.TestCase): - """Tests get_cirq_simulate_state.""" - - def test_get_cirq_state_op(self): - """Input check the wrapper for the cirq state op.""" - with self.assertRaisesRegex( - TypeError, - 
"simulator must inherit cirq.sim.SimulatesFinalState."): - cirq_ops._get_cirq_simulate_state("junk") - cirq_ops._get_cirq_simulate_state() - cirq_ops._get_cirq_simulate_state(cirq.sim.sparse_simulator.Simulator()) - cirq_ops._get_cirq_simulate_state( - cirq.sim.density_matrix_simulator.DensityMatrixSimulator()) - - # TODO(trevormccrt): input checking might be parameterizeable over all ops - # if we decide to properly input check our c++ ops - def test_cirq_state_op_inputs(self): - """test input checking in the state sim op.""" - test_op = cirq_ops._get_cirq_simulate_state( - cirq.sim.sparse_simulator.Simulator()) - bits = cirq.GridQubit.rect(1, 5) - test_circuit = serializer.serialize_circuit( - cirq.testing.random_circuit(bits, MOMENT_DEPTH, - 0.9)).SerializeToString() - # exceptions raised in the tf graph don't get passed - # through in an identifiable way - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'symbol_names tensor must be of type string'): - _ = test_op([test_circuit], [0], [[0]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs tensor must be of type string'): - _ = test_op([0], ['rx'], [[0]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'real-valued numeric tensor.'): - _ = test_op([test_circuit], ['rx'], 'junk') - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx'], [[1, 1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx', 'ry'], [[1]]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'first dimension of symbol_values tensor'): - _ = test_op([test_circuit, test_circuit], ['rx'], [[1]]) - _ = test_op([test_circuit], ['rx'], [[1]]) - _ = test_op([test_circuit], [], [[]]) - - @parameterized.parameters([ - { - 'op_and_sim': (cirq_ops._get_cirq_simulate_state(WF_SIM), WF_SIM), - 'all_n_qubits': 
[2, 3] - }, - { - 'op_and_sim': (cirq_ops._get_cirq_simulate_state(DM_SIM), DM_SIM), - 'all_n_qubits': [2, 3] - }, - { - 'op_and_sim': (cirq_ops._get_cirq_simulate_state(WF_SIM), WF_SIM), - 'all_n_qubits': [2, 5, 8, 10] - }, - { - 'op_and_sim': (cirq_ops._get_cirq_simulate_state(DM_SIM), DM_SIM), - 'all_n_qubits': [2, 5, 8, 10] - }, - ]) - def test_simulate_state_output_padding(self, op_and_sim, all_n_qubits): - """If a circuit executing op is asked to simulate states given circuits - acting on different numbers of qubits, the op should return a tensor - padded with zeros up to the size of the largest circuit. The padding - should be physically correct, such that samples taken from the padded - states still match samples taken from the original circuit.""" - op = op_and_sim[0] - sim = op_and_sim[1] - - circuit_batch = [] - for n_qubits in all_n_qubits: - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0] - - tfq_results = op(util.convert_to_tensor(circuit_batch), [], - [[]] * len(circuit_batch)) - - # dont use batch_util here to enforce consistant padding everywhere - # without extra tests - manual_padded_results = [] - for circuit in circuit_batch: - result = sim.simulate(circuit) - - # density matricies should be zero everywhere except for the - # top left corner - if isinstance( - result, - cirq.sim.density_matrix_simulator.DensityMatrixTrialResult): - dm = result.final_density_matrix - blank_state = np.ones( - (2**max(all_n_qubits), 2**(max(all_n_qubits))), - dtype=np.complex64) * -2 - blank_state[:dm.shape[0], :dm.shape[1]] = dm - manual_padded_results.append(blank_state) - - # wavefunctions should be zero everywhere to the right of the states - # present in this system - elif isinstance( - result, - cirq.sim.wave_function_simulator.WaveFunctionTrialResult): - wf = result.final_state - blank_state = np.ones( - (2**max(all_n_qubits)), dtype=np.complex64) * -2 - blank_state[:wf.shape[0]] = wf - 
manual_padded_results.append(blank_state) - - else: - # TODO - raise RuntimeError('Simulator returned unknown type of result.') - - self.assertAllClose(tfq_results, manual_padded_results) - - def test_state_empty_circuit(self): - """Test empty circuits""" - test_op = cirq_ops._get_cirq_simulate_state( - cirq.sim.sparse_simulator.Simulator()) - test_empty_circuit = serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() - _ = test_op([test_empty_circuit], [], [[]]) - - -class CirqSamplesTest(tf.test.TestCase, parameterized.TestCase): - """Tests get_cirq_samples.""" - - def test_get_cirq_sampling_op(self): - """Input check the wrapper for the cirq sampling op.""" - with self.assertRaisesRegex(TypeError, - "simulator must inherit cirq.Sampler."): - cirq_ops._get_cirq_samples("junk") - cirq_ops._get_cirq_samples() - cirq_ops._get_cirq_samples(cirq.sim.sparse_simulator.Simulator()) - cirq_ops._get_cirq_samples( - cirq.sim.density_matrix_simulator.DensityMatrixSimulator()) - mock_engine = mock.Mock() - cirq_ops._get_cirq_samples( - cirq.google.QuantumEngineSampler(engine=mock_engine, - processor_id='test', - gate_set=cirq.google.XMON)) - - def test_cirq_sampling_op_inputs(self): - """test input checking in the cirq sampling op.""" - test_op = cirq_ops._get_cirq_samples( - cirq.sim.sparse_simulator.Simulator()) - - bits = cirq.GridQubit.rect(1, 5) - test_circuit = serializer.serialize_circuit( - cirq.testing.random_circuit(bits, MOMENT_DEPTH, - 0.9)).SerializeToString() - - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'symbol_names tensor must be of type string'): - _ = test_op([test_circuit], [0], [[0]], [10]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs tensor must be of type string'): - _ = test_op([0], ['rx'], [[0]], [10]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'real-valued numeric tensor.'): - _ = test_op([test_circuit], ['rx'], 'junk', [10]) - with 
self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx'], [[1, 1]], [10]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'size of symbol_names tensor must match'): - _ = test_op([test_circuit], ['rx', 'ry'], [[1]], [10]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'num_samples tensor must be of integer type'): - _ = test_op([test_circuit], ['rx'], [[1]], "junk") - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'num_samples tensor must have size 1'): - _ = test_op([test_circuit], ['rx'], [[1]], [10, 10]) - - _ = test_op([test_circuit], ['rx'], [[1]], [10]) - _ = test_op([test_circuit], [], [[]], [10]) - - @parameterized.parameters([ - { - 'op': cirq_ops._get_cirq_samples(WF_SIM), - 'all_n_qubits': [2, 3], - 'n_samples': 10 - }, - { - 'op': cirq_ops._get_cirq_samples(DM_SIM), - 'all_n_qubits': [2, 3], - 'n_samples': 10 - }, - { - 'op': cirq_ops._get_cirq_samples(WF_SIM), - 'all_n_qubits': [2, 5, 8, 10], - 'n_samples': 10 - }, - { - 'op': cirq_ops._get_cirq_samples(DM_SIM), - 'all_n_qubits': [2, 5, 8, 10], - 'n_samples': 10 - }, - ]) - def test_sampling_output_padding(self, op, all_n_qubits, n_samples): - """Check that the sampling ops pad outputs correctly""" - circuits = [] - expected_outputs = [] - for n_qubits in all_n_qubits: - this_expected_output = np.zeros((n_samples, max(all_n_qubits))) - this_expected_output[:, max(all_n_qubits) - n_qubits:] = 1 - this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 - expected_outputs.append(this_expected_output) - circuits.append( - cirq.Circuit( - *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) - results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), - [n_samples]).numpy() - self.assertAllClose(expected_outputs, results) - - def test_sample_empty_circuit(self): - """Test empty circuits""" - test_op = cirq_ops._get_cirq_samples( - 
cirq.sim.sparse_simulator.Simulator()) - test_empty_circuit = serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() - _ = test_op([test_empty_circuit], [], [[]], [10]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/core/ops/load_module.py b/tensorflow_quantum/core/ops/load_module.py deleted file mode 100644 index b5002ad84..000000000 --- a/tensorflow_quantum/core/ops/load_module.py +++ /dev/null @@ -1,46 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module to load python op libraries.""" - -import os -from distutils.sysconfig import get_python_lib - -from tensorflow.python.framework import load_library -from tensorflow.python.platform import resource_loader - - -def load_module(name): - """Loads the module with the given name. - - First attempts to load the module as though it was embedded into the binary - using Bazel. If that fails, then it attempts to load the module as though - it was installed in site-packages via PIP. - - Args: - name: The name of the module, e.g. "_tfq_simulate_ops.so" - - Returns: - A python module containing the Python wrappers for the Ops. - - Raises: - RuntimeError: If the library cannot be found. 
- """ - try: - path = resource_loader.get_path_to_datafile(name) - return load_library.load_op_library(path) - except: - path = os.path.join(get_python_lib(), "tensorflow_quantum/core/ops", - name) - return load_library.load_op_library(path) diff --git a/tensorflow_quantum/core/ops/parse_context.cc b/tensorflow_quantum/core/ops/parse_context.cc deleted file mode 100644 index 60ca8e2b3..000000000 --- a/tensorflow_quantum/core/ops/parse_context.cc +++ /dev/null @@ -1,258 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/ops/parse_context.h" - -#include - -#include -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/lib/core/error_codes.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/src/program_resolution.h" - -namespace tfq { -namespace { - -using ::cirq::google::api::v2::Program; -using ::tensorflow::OpKernelContext; -using ::tensorflow::Status; -using ::tensorflow::Tensor; -using ::tfq::proto::PauliSum; - -template -Status ParseProto(const std::string& text, T* proto) { - // First attempt to parse from the binary representation. - if (proto->ParseFromString(text)) { - return Status::OK(); - } - - // If that fails, then try to parse from the human readable representation. - if (google::protobuf::TextFormat::ParseFromString(text, proto)) { - return Status::OK(); - } - - return Status(tensorflow::error::INVALID_ARGUMENT, - "Unparseable proto: " + text); -} - -} // namespace - -Status ParsePrograms(OpKernelContext* context, const std::string& input_name, - std::vector* programs) { - const tensorflow::Tensor* input; - Status status = context->input(input_name, &input); - if (!status.ok()) { - return status; - } - - if (input->dims() != 1) { - // Never parse anything other than a 1d list of circuits. - return Status( - tensorflow::error::INVALID_ARGUMENT, - absl::StrCat("programs must be rank 1. 
Got rank ", input->dims(), ".")); - } - - const auto program_strings = input->vec(); - const int num_programs = program_strings.dimension(0); - programs->assign(num_programs, Program()); - - auto DoWork = [&](int start, int end) { - for (int i = start; i < end; i++) { - OP_REQUIRES_OK(context, ParseProto(program_strings(i), &programs->at(i))); - } - }; - - const int block_size = GetBlockSize(context, num_programs); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, num_programs, DoWork); - - return Status::OK(); -} - -Status GetProgramsAndProgramsToAppend( - OpKernelContext* context, std::vector* programs, - std::vector* programs_to_append) { - Status status = ParsePrograms(context, "programs", programs); - if (!status.ok()) { - return status; - } - - status = ParsePrograms(context, "programs_to_append", programs_to_append); - if (!status.ok()) { - return status; - } - - if (programs->size() != programs_to_append->size()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "programs and programs_to_append must have matching sizes."); - } - - return Status::OK(); -} - -// TODO(pmassey): Add a getter for the case where there is only 1 input program. 
- -Status GetProgramsAndNumQubits( - OpKernelContext* context, std::vector* programs, - std::vector* num_qubits, - std::vector>* p_sums /*=nullptr*/) { - Status status = ParsePrograms(context, "programs", programs); - if (!status.ok()) { - return status; - } - - if (p_sums) { - status = GetPauliSums(context, p_sums); - if (!status.ok()) { - return status; - } - } - - num_qubits->reserve(programs->size()); - for (size_t i = 0; i < programs->size(); i++) { - Program& program = (*programs)[i]; - Status status = Status::OK(); - unsigned int this_num_qubits; - if (p_sums) { - status = ResolveQubitIds(&program, &this_num_qubits, &(p_sums->at(i))); - } else { - status = ResolveQubitIds(&program, &this_num_qubits); - } - if (!status.ok()) { - return status; - } - num_qubits->push_back(this_num_qubits); - } - - return Status::OK(); -} - -Status GetPauliSums(OpKernelContext* context, - std::vector>* p_sums) { - const Tensor* input; - Status status = context->input("pauli_sums", &input); - if (!status.ok()) { - return status; - } - - if (input->dims() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - absl::StrCat("pauli_sums must be rank 2. Got rank ", - input->dims(), ".")); - } - - const auto sum_specs = input->matrix(); - p_sums->reserve(sum_specs.dimension(0)); - for (int i = 0; i < sum_specs.dimension(0); i++) { - std::vector sub_ops; - sub_ops.reserve(sum_specs.dimension(1)); - for (int j = 0; j < sum_specs.dimension(1); j++) { - const std::string& text = sum_specs(i, j); - PauliSum p; - // TODO(pmassey): Consider parsing from the serialized instead of the - // human readable proto to pass smaller messages. 
- status = ParseProto(text, &p); - if (!status.ok()) { - return status; - } - sub_ops.push_back(p); - } - p_sums->push_back(sub_ops); - } - - return Status::OK(); -} - -Status GetSymbolMaps(OpKernelContext* context, std::vector* maps) { - const Tensor* input_names; - Status status = context->input("symbol_names", &input_names); - if (!status.ok()) { - return status; - } - - if (input_names->dims() != 1) { - return Status(tensorflow::error::INVALID_ARGUMENT, - absl::StrCat("symbol_names must be rank 1. Got rank ", - input_names->dims(), ".")); - } - - const Tensor* input_values; - status = context->input("symbol_values", &input_values); - if (!status.ok()) { - return status; - } - - if (input_values->dims() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - absl::StrCat("symbol_values must be rank 2. Got rank ", - input_values->dims(), ".")); - } - - const auto symbol_names = input_names->vec(); - const auto symbol_values = input_values->matrix(); - - if (symbol_names.dimension(0) != symbol_values.dimension(1)) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Input symbol names and value sizes do not match."); - } - - maps->reserve(symbol_values.dimension(0)); - for (int i = 0; i < symbol_values.dimension(0); i++) { - SymbolMap map; - for (int j = 0; j < symbol_values.dimension(1); j++) { - const std::string& name = symbol_names(j); - const float value = symbol_values(i, j); - map[name] = {j, value}; - } - - maps->push_back(map); - } - - return Status::OK(); -} - -// TODO (mbbrough/pmassey/jaeyoo): Should grads return an EigenMatrixXd instead -// of a vector of vectors ? 
-Status GetGradients(OpKernelContext* context, - std::vector>* grads) { - const Tensor* input; - const Status status = context->input("grad", &input); - if (!status.ok()) { - return status; - } - - const auto input_grads = input->matrix(); - grads->reserve(input_grads.dimension(0)); - for (int i = 0; i < input_grads.dimension(0); i++) { - std::vector sub_grads; - sub_grads.reserve(input_grads.dimension(1)); - for (int j = 0; j < input_grads.dimension(1); j++) { - sub_grads.push_back(input_grads(i, j)); - } - grads->push_back(sub_grads); - } - - return Status::OK(); -} - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/parse_context.h b/tensorflow_quantum/core/ops/parse_context.h deleted file mode 100644 index 4327e8075..000000000 --- a/tensorflow_quantum/core/ops/parse_context.h +++ /dev/null @@ -1,80 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_OPS_PARSE_CONTEXT -#define TFQ_CORE_OPS_PARSE_CONTEXT - -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" - -namespace tfq { - -// Simplest Program proto parsing -tensorflow::Status ParsePrograms( - tensorflow::OpKernelContext* context, const std::string& input_name, - std::vector* programs); - -// Parses a vector of programs along with another vector of programs to append -tensorflow::Status GetProgramsAndProgramsToAppend( - tensorflow::OpKernelContext* context, - std::vector* programs, - std::vector* programs_to_append); - -// A parameter map is a mapping from the name of the parameter to the index in -// the input parameter value tensor (for gradient computations) and the value -// of the parameter (for forward computation). -typedef absl::flat_hash_map> SymbolMap; - -// Parses Cirq Program protos out of the 'circuit_specs' input Tensor. Also -// resolves the QubitIds inside of the Program. Optionally will resolve the -// QubitIds found in programs into PauliSums such that they are consistent -// and correct with the original programs. -tensorflow::Status GetProgramsAndNumQubits( - tensorflow::OpKernelContext* context, - std::vector* programs, - std::vector* num_qubits, - std::vector>* p_sums = nullptr); - -// Parses PauliSum protos out of the 'pauli_sums' input tensor. Note this -// function does NOT resolve QubitID's as any paulisum needs a reference -// program to "discover" all of the active qubits and define the ordering. -tensorflow::Status GetPauliSums( - tensorflow::OpKernelContext* context, - std::vector>* p_sums); - -// Parses the input context to construct the SymbolMaps for the entire batch. 
-// The two input Tensors are expected to be of size: -// -// symbol_names : [max_num_symbols] -// symbol_values: [batch_size, max_num_symbols] -// -// and the returns 'maps' is of size [batch_size], where each map contains all -// of the input symbols and their associated value. -tensorflow::Status GetSymbolMaps(tensorflow::OpKernelContext* context, - std::vector* maps); - -// Parses gradients out of the 'grads' input Tensor. -tensorflow::Status GetGradients(tensorflow::OpKernelContext* context, - std::vector>* grads); - -} // namespace tfq - -#endif // TFQ_CORE_OPS_PARSE_CONTEXT diff --git a/tensorflow_quantum/core/ops/tfq_circuit_append_op.cc b/tensorflow_quantum/core/ops/tfq_circuit_append_op.cc deleted file mode 100644 index 48c04caeb..000000000 --- a/tensorflow_quantum/core/ops/tfq_circuit_append_op.cc +++ /dev/null @@ -1,94 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" - -namespace tfq { - -using ::cirq::google::api::v2::Moment; -using ::cirq::google::api::v2::Program; - -class TfqCircuitAppendOp : public tensorflow::OpKernel { - public: - explicit TfqCircuitAppendOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - std::vector programs; - std::vector programs_to_append; - - const int num_inputs = context->num_inputs(); - OP_REQUIRES(context, num_inputs == 2, - tensorflow::errors::InvalidArgument(absl::StrCat( - "Expected 2 inputs, got ", num_inputs, " inputs."))); - - OP_REQUIRES_OK(context, GetProgramsAndProgramsToAppend( - context, &programs, &programs_to_append)); - - tensorflow::Tensor *output = nullptr; - OP_REQUIRES_OK(context, context->allocate_output( - 0, context->input(0).shape(), &output)); - auto output_tensor = output->flat(); - - auto DoWork = [&](int start, int end) { - for (int i = start; i < end; i++) { - for (int j = 0; j < programs_to_append.at(i).circuit().moments().size(); - j++) { - Moment *new_moment = programs.at(i).mutable_circuit()->add_moments(); - *new_moment = programs_to_append.at(i).circuit().moments(j); - } - programs.at(i).SerializeToString(&output_tensor(i)); - } - }; - - const int output_dim_size = programs.size(); - const int block_size = GetBlockSize(context, output_dim_size); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, output_dim_size, - 
DoWork); - } -}; - -REGISTER_KERNEL_BUILDER(Name("TfqAppendCircuit").Device(tensorflow::DEVICE_CPU), - TfqCircuitAppendOp); - -REGISTER_OP("TfqAppendCircuit") - .Input("programs: string") - .Input("programs_to_append: string") - .Output("programs_extended: string") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - tensorflow::shape_inference::ShapeHandle programs_to_append_shape; - TF_RETURN_IF_ERROR( - c->WithRank(c->input(1), 1, &programs_to_append_shape)); - - c->set_output(0, c->input(0)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_ps_decompose_op.cc b/tensorflow_quantum/core/ops/tfq_ps_decompose_op.cc deleted file mode 100644 index 6655f56fa..000000000 --- a/tensorflow_quantum/core/ops/tfq_ps_decompose_op.cc +++ /dev/null @@ -1,310 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" - -namespace tfq { - -using ::cirq::google::api::v2::Arg; -using ::cirq::google::api::v2::Circuit; -using ::cirq::google::api::v2::Moment; -using ::cirq::google::api::v2::Operation; -using ::cirq::google::api::v2::Program; -using ::tensorflow::Status; -using ::tensorflow::Tensor; - -class TfqPsDecomposeOp : public tensorflow::OpKernel { - public: - explicit TfqPsDecomposeOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - std::vector programs; - - const int num_inputs = context->num_inputs(); - OP_REQUIRES(context, num_inputs == 1, - tensorflow::errors::InvalidArgument(absl::StrCat( - "Expected 1 inputs, got ", num_inputs, " inputs."))); - - OP_REQUIRES_OK(context, ParsePrograms(context, "programs", &programs)); - - tensorflow::Tensor *output = nullptr; - OP_REQUIRES_OK(context, context->allocate_output( - 0, context->input(0).shape(), &output)); - auto output_tensor = output->flat(); - - const int max_buffer_moments = 3; - - auto DoWork = [&](int start, int end) { - for (int i = start; i < end; i++) { - Program cur_program = programs.at(i); - Program new_program; - new_program.mutable_language()->set_gate_set("tfq_gate_set"); - new_program.mutable_circuit()->set_scheduling_strategy( - Circuit::MOMENT_BY_MOMENT); - for (int j = 0; j < cur_program.circuit().moments().size(); j++) { - Moment cur_moment(cur_program.circuit().moments().at(j)); - std::vector 
temp_moment_list(max_buffer_moments, Moment()); - int num_extra_moments = 0; - for (int k = 0; k < cur_moment.operations().size(); k++) { - Operation cur_op = cur_moment.operations().at(k); - auto &cur_op_map = *cur_op.mutable_args(); - if (cur_op.gate().id() == "PISP") { - auto exponent = cur_op_map.at("exponent"); - auto phase_exponent = cur_op_map.at("phase_exponent"); - if (exponent.arg_case() == Arg::ArgCase::kSymbol || - phase_exponent.arg_case() == Arg::ArgCase::kSymbol) { - // Decompose cirq.PhasedISwapPowGate only if it is - // parameterized. - num_extra_moments = 3; - Operation new_op; - - new_op = getOpForPISP(cur_op, 0, 0); - cur_moment.mutable_operations()->at(k) = new_op; - new_op = getOpForPISP(cur_op, 1, 1); - *cur_moment.add_operations() = new_op; - new_op = getOpForISP(cur_op, "XXP", exponent.symbol()); - *temp_moment_list[0].add_operations() = new_op; - new_op = getOpForISP(cur_op, "YYP", exponent.symbol()); - *temp_moment_list[1].add_operations() = new_op; - new_op = getOpForPISP(cur_op, 1, 0); - *temp_moment_list[2].add_operations() = new_op; - new_op = getOpForPISP(cur_op, 0, 1); - *temp_moment_list[2].add_operations() = new_op; - } - } else if (cur_op.gate().id() == "ISP") { - auto exponent = cur_op_map.at("exponent"); - if (exponent.arg_case() == Arg::ArgCase::kSymbol) { - // Decompose cirq.ISwapPowGate only if it is parameterized. - if (num_extra_moments == 0) num_extra_moments = 1; - Operation new_op; - new_op = getOpForISP(cur_op, "XXP", exponent.symbol()); - cur_moment.mutable_operations()->at(k) = new_op; - new_op = getOpForISP(cur_op, "YYP", exponent.symbol()); - *temp_moment_list[0].add_operations() = new_op; - } - } else if (cur_op.gate().id() == "PXP") { - auto exponent = cur_op_map.at("exponent"); - auto phase_exponent = cur_op_map.at("phase_exponent"); - if (exponent.arg_case() == Arg::ArgCase::kSymbol || - phase_exponent.arg_case() == Arg::ArgCase::kSymbol) { - // Decompose cirq.PhasedXPowGate only if it is parameterized. 
- num_extra_moments = 2; - Operation new_op; - new_op = getOpForPXP(cur_op, "ZP", "phase_exponent", true); - cur_moment.mutable_operations()->at(k) = new_op; - new_op = getOpForPXP(cur_op, "XP", "exponent", false); - *temp_moment_list[0].add_operations() = new_op; - new_op = getOpForPXP(cur_op, "ZP", "phase_exponent", false); - *temp_moment_list[1].add_operations() = new_op; - } - } else if (cur_op.gate().id() == "FSIM") { - auto theta = cur_op_map.at("theta"); - auto phi = cur_op_map.at("phi"); - if (theta.arg_case() == Arg::ArgCase::kSymbol || - phi.arg_case() == Arg::ArgCase::kSymbol) { - // Decompose cirq.FSimGate only if it is parameterized. - num_extra_moments = 2; - Operation new_op; - new_op = getOpForFSIM(cur_op, "XXP", "theta", true); - cur_moment.mutable_operations()->at(k) = new_op; - new_op = getOpForFSIM(cur_op, "YYP", "theta", true); - *temp_moment_list[0].add_operations() = new_op; - new_op = getOpForFSIM(cur_op, "CZP", "phi", false); - *temp_moment_list[1].add_operations() = new_op; - } - } - } - *new_program.mutable_circuit()->add_moments() = cur_moment; - if (num_extra_moments > 0) { - for (int l = 0; l < num_extra_moments; l++) { - *new_program.mutable_circuit()->add_moments() = - temp_moment_list[l]; - } - } - } - new_program.SerializeToString(&output_tensor(i)); - } - }; - - const int block_size = GetBlockSize(context, programs.size()); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, programs.size(), - DoWork); - } - - private: - // Helper functions for decompositions of ISwapPowGate, PhasedX, FSIM, - // PhasedISwapPow. - Operation getOpForISP(Operation &cur_op, std::string id, std::string symbol) { - // Step 1. parse the current op. - auto &cur_op_map = *cur_op.mutable_args(); - float cur_exponent_scalar = - cur_op_map["exponent_scalar"].arg_value().float_value(); - auto &cur_op_qubits = cur_op.qubits(); - // Step 2. create a new op. 
- Operation new_op; - new_op.mutable_gate()->set_id(id); - // Step 3. add global_shift, exponent_scalar, exponent. - auto &new_op_map = *new_op.mutable_args(); - new_op_map["global_shift"].mutable_arg_value()->set_float_value(-0.5); - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value( - cur_exponent_scalar * -0.5); - new_op_map["exponent"].set_symbol(symbol); - // Step 4. add qubits. - *new_op.mutable_qubits() = {cur_op_qubits.begin(), cur_op_qubits.end()}; - return new_op; - } - - Operation getOpForPXP(Operation &cur_op, std::string id, std::string key, - bool sign_flip = false) { - // Step 1. parse the current op. - auto &cur_op_map = *cur_op.mutable_args(); - auto &cur_op_qubits = cur_op.qubits(); - auto target_exponent = cur_op_map[key]; - float target_exponent_scalar = - cur_op_map[absl::StrCat(key, "_scalar")].arg_value().float_value(); - float sign = (sign_flip) ? -1.0 : 1.0; - // Step 2. create a new op. - Operation new_op; - new_op.mutable_gate()->set_id(id); - // Step 3. add global_shift, exponent_scalar, exponent. - auto &new_op_map = *new_op.mutable_args(); - new_op_map["global_shift"].mutable_arg_value()->set_float_value(0.0); - switch (target_exponent.arg_case()) { - case Arg::ArgCase::kSymbol: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value( - sign * target_exponent_scalar); - new_op_map["exponent"].set_symbol(target_exponent.symbol()); - break; - case Arg::ArgCase::kArgValue: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value(1.0); - new_op_map["exponent"].mutable_arg_value()->set_float_value( - sign * target_exponent.arg_value().float_value()); - break; - case Arg::ArgCase::kFunc: - // TODO(jaeyoo) : support this if prepared. - break; - default: - break; - } - // Step 4. add qubits. - *new_op.mutable_qubits() = {cur_op_qubits.begin(), cur_op_qubits.end()}; - return new_op; - } - - Operation getOpForPISP(Operation &cur_op, bool sign_flip, bool use_target) { - // Step 1. parse the current op. 
- auto &cur_op_map = *cur_op.mutable_args(); - auto &cur_op_qubits = cur_op.qubits(); - auto target_exponent = cur_op_map["phase_exponent"]; - float target_exponent_scalar = - cur_op_map["phase_exponent_scalar"].arg_value().float_value(); - float sign = (sign_flip) ? -1.0 : 1.0; - // Step 2. create a new op. - Operation new_op; - new_op.mutable_gate()->set_id("ZP"); - // Step 3. add global_shift, exponent_scalar, exponent. - auto &new_op_map = *new_op.mutable_args(); - new_op_map["global_shift"].mutable_arg_value()->set_float_value(0.0); - switch (target_exponent.arg_case()) { - case Arg::ArgCase::kSymbol: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value( - sign * target_exponent_scalar); - new_op_map["exponent"].set_symbol(target_exponent.symbol()); - break; - case Arg::ArgCase::kArgValue: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value(1.0); - new_op_map["exponent"].mutable_arg_value()->set_float_value( - sign * target_exponent.arg_value().float_value()); - break; - case Arg::ArgCase::kFunc: - // TODO(jaeyoo) : support this if prepared. - break; - default: - break; - } - *new_op.mutable_qubits() = {cur_op_qubits.begin() + use_target, - cur_op_qubits.end() - !use_target}; - return new_op; - } - - Operation getOpForFSIM(Operation &cur_op, std::string id, std::string key, - bool use_global_shift = false) { - // Step 1. parse the current op. - auto &cur_op_map = *cur_op.mutable_args(); - auto &cur_op_qubits = cur_op.qubits(); - auto target_exponent = cur_op_map[key]; - float target_exponent_scalar = - cur_op_map[absl::StrCat(key, "_scalar")].arg_value().float_value(); - float global_shift = (use_global_shift) ? -0.5 : 0.0; - float sign = (key == "theta") ? 1.0 : -1.0; - // Step 2. create a new op. - Operation new_op; - new_op.mutable_gate()->set_id(id); - // Step 3. add global_shift, exponent_scalar, exponent. 
- auto &new_op_map = *new_op.mutable_args(); - new_op_map["global_shift"].mutable_arg_value()->set_float_value( - global_shift); - switch (target_exponent.arg_case()) { - case Arg::ArgCase::kSymbol: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value( - sign * target_exponent_scalar / M_PI); - new_op_map["exponent"].set_symbol(target_exponent.symbol()); - break; - case Arg::ArgCase::kArgValue: - new_op_map["exponent_scalar"].mutable_arg_value()->set_float_value(1.0); - new_op_map["exponent"].mutable_arg_value()->set_float_value( - sign * target_exponent.arg_value().float_value() / M_PI); - break; - case Arg::ArgCase::kFunc: - // TODO(jaeyoo) : support this if prepared. - break; - default: - break; - } - // Step 4. add qubits. - *new_op.mutable_qubits() = {cur_op_qubits.begin(), cur_op_qubits.end()}; - return new_op; - } -}; - -REGISTER_KERNEL_BUILDER(Name("TfqPsDecompose").Device(tensorflow::DEVICE_CPU), - TfqPsDecomposeOp); - -REGISTER_OP("TfqPsDecompose") - .Input("programs: string") - .Output("ps_programs: string") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - c->set_output(0, c->input(0)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc b/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc deleted file mode 100644 index a86e777aa..000000000 --- a/tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" - -namespace tfq { - -using ::cirq::google::api::v2::Arg; -using ::cirq::google::api::v2::Moment; -using ::cirq::google::api::v2::Operation; -using ::cirq::google::api::v2::Program; - -using ::tensorflow::Status; -using ::tensorflow::Tensor; - -class TfqPsSymbolReplaceOp : public tensorflow::OpKernel { - public: - explicit TfqPsSymbolReplaceOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - std::vector programs; - - const int num_inputs = context->num_inputs(); - OP_REQUIRES(context, num_inputs == 3, - tensorflow::errors::InvalidArgument(absl::StrCat( - "Expected 3 inputs, got ", num_inputs, " inputs."))); - - OP_REQUIRES_OK(context, ParsePrograms(context, "programs", &programs)); - - // Parse the input string here. - const Tensor *symbols_tensor; - context->input("symbols", &symbols_tensor); - OP_REQUIRES( - context, symbols_tensor->dims() == 1, - tensorflow::errors::InvalidArgument(absl::StrCat( - "symbols must be rank 1. 
Got rank ", symbols_tensor->dims(), "."))); - - const auto symbols = symbols_tensor->vec(); - const size_t n_symbols = symbols.size(); - - // Parse the replacement string here. - const Tensor *replacement_symbols_tensor; - context->input("replacement_symbols", &replacement_symbols_tensor); - OP_REQUIRES(context, replacement_symbols_tensor->dims() == 1, - tensorflow::errors::InvalidArgument(absl::StrCat( - "replacement_symbols must be rank 1. Got rank ", - replacement_symbols_tensor->dims(), "."))); - - const auto replacement_symbols = - replacement_symbols_tensor->vec(); - - OP_REQUIRES(context, symbols.size() == replacement_symbols.size(), - tensorflow::errors::InvalidArgument(absl::StrCat( - "symbols.shape is not equal to replacement_symbols.shape: ", - symbols.size(), " != ", replacement_symbols.size()))); - - // (i,j,k) = the kth replaced program for symbols(j) in programs(i). - std::vector>> output_programs( - programs.size(), std::vector>( - n_symbols, std::vector())); - - auto DoWork = [&](int start, int end) { - for (int i = start; i < end; i++) { - int sidx = i % n_symbols; - int pidx = i / n_symbols; - std::string symbol_to_replace = symbols(sidx); - Program cur_program = programs.at(pidx); - for (int j = 0; j < cur_program.circuit().moments().size(); j++) { - Moment cur_moment = cur_program.circuit().moments().at(j); - for (int k = 0; k < cur_moment.operations().size(); k++) { - Operation cur_op = cur_moment.operations().at(k); - for (auto l = cur_op.args().begin(); l != cur_op.args().end(); - l++) { - const std::string key = (*l).first; - const Arg &arg = (*l).second; - if (arg.symbol() == symbol_to_replace) { - // Copy the proto, modify the symbol and append to output. 
- Program temp(cur_program); - temp.mutable_circuit() - ->mutable_moments() - ->at(j) - .mutable_operations() - ->at(k) - .mutable_args() - ->at(key) - .set_symbol(replacement_symbols(sidx)); - - std::string res; - temp.SerializeToString(&res); - output_programs.at(pidx).at(sidx).push_back(res); - temp.Clear(); - } - } - } - } - } - }; - - const int block_size = GetBlockSize(context, programs.size() * n_symbols); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently( - block_size, programs.size() * n_symbols, DoWork); - - size_t biggest_pad = 0; - Program empty = Program(); - empty.mutable_language()->set_gate_set("tfq_gate_set"); - empty.mutable_circuit(); // create empty circuits entry. - - std::string empty_program; - empty.SerializeToString(&empty_program); - - for (size_t i = 0; i < output_programs.size(); i++) { - for (size_t j = 0; j < n_symbols; j++) { - biggest_pad = std::max(biggest_pad, output_programs.at(i).at(j).size()); - } - } - - tensorflow::Tensor *output = nullptr; - tensorflow::TensorShape output_shape; - // batch size. - output_shape.AddDim(programs.size()); - // entry size. - output_shape.AddDim(n_symbols); - output_shape.AddDim(biggest_pad); - OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); - - auto output_tensor = output->tensor(); - - // TODO: investigate whether or not it is worth this parallelization at the - // end. - // spinning up and down parallelization for string copying might not be - // worth it. 
- auto DoWork2 = [&](int start, int end) { - for (int i = start; i < end; i++) { - int sidx = i % n_symbols; - int pidx = i / n_symbols; - for (size_t j = 0; j < output_programs.at(pidx).at(sidx).size(); j++) { - output_tensor(pidx, sidx, j) = - output_programs.at(pidx).at(sidx).at(j); - } - for (size_t j = output_programs.at(pidx).at(sidx).size(); - j < biggest_pad; j++) { - output_tensor(pidx, sidx, j) = empty_program; - } - } - }; - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently( - block_size, programs.size() * n_symbols, DoWork2); - } -}; - -REGISTER_KERNEL_BUILDER( - Name("TfqPsSymbolReplace").Device(tensorflow::DEVICE_CPU), - TfqPsSymbolReplaceOp); - -REGISTER_OP("TfqPsSymbolReplace") - .Input("programs: string") - .Input("symbols: string") - .Input("replacement_symbols: string") - .Output("ps_programs: string") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - tensorflow::shape_inference::ShapeHandle symbols_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &symbols_shape)); - - tensorflow::shape_inference::ShapeHandle replacement_symbols_shape; - TF_RETURN_IF_ERROR( - c->WithRank(c->input(2), 1, &replacement_symbols_shape)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq \ No newline at end of file diff --git a/tensorflow_quantum/core/ops/tfq_ps_util_ops.py b/tensorflow_quantum/core/ops/tfq_ps_util_ops.py deleted file mode 100644 index ad746d045..000000000 --- a/tensorflow_quantum/core/ops/tfq_ps_util_ops.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Expose bindings for ParameterShift C++ ops.""" -from tensorflow_quantum.core.ops.load_module import load_module - -PS_UTIL_MODULE = load_module("_tfq_ps_utils.so") - -# pylint: disable=invalid-name -tfq_ps_decompose = PS_UTIL_MODULE.tfq_ps_decompose -tfq_ps_symbol_replace = PS_UTIL_MODULE.tfq_ps_symbol_replace -tfq_ps_weights_from_symbols = PS_UTIL_MODULE.tfq_ps_weights_from_symbols diff --git a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py b/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py deleted file mode 100644 index 0413dbb62..000000000 --- a/tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py +++ /dev/null @@ -1,831 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Test for ParameterShift specific C++ ops.""" -import numpy as np -import tensorflow as tf -import sympy -import cirq - -from tensorflow_quantum.core.ops import tfq_ps_util_ops -from tensorflow_quantum.python import util - - -def _complex_test_circuit(): - t = sympy.Symbol('t') - r = sympy.Symbol('r') - qubits = cirq.GridQubit.rect(1, 6) - circuit_batch = [ - cirq.Circuit( - cirq.Moment([cirq.H(q) for q in qubits]), - cirq.Moment([ - cirq.X(qubits[4]), - cirq.PhasedXPowGate(phase_exponent=np.random.random() * t).on( - qubits[5]), - cirq.ISwapPowGate(exponent=np.random.random() * t).on( - qubits[0], qubits[1]), - cirq.FSimGate(theta=np.random.random() * t, - phi=np.random.random() * r).on( - qubits[2], qubits[3]) - ]), cirq.Moment([cirq.H(q) for q in qubits])), - cirq.Circuit( - cirq.FSimGate(theta=np.random.random() * t, - phi=np.random.random() * r).on(*qubits[:2]), - cirq.FSimGate(theta=np.random.random() * r, - phi=np.random.random() * t).on(qubits[1], qubits[0])), - cirq.Circuit( - cirq.Moment([ - cirq.ISwapPowGate(exponent=np.random.random() * - t).on(*qubits[:2]), - cirq.PhasedXPowGate(phase_exponent=np.random.random() * r).on( - qubits[2]), - cirq.ISwapPowGate(exponent=np.random.random() * - r).on(*qubits[3:5]) - ])) - ] - return circuit_batch - - -class PSDecomposeTest(tf.test.TestCase): - """Tests on tfq_ps_decompose""" - - def test_iswap_gate_test(self): - """Test 1 ISwapPowGate decomposition.""" - t = sympy.Symbol('t') - qubits = cirq.GridQubit.rect(1, 2) - circuit = cirq.Circuit( - cirq.ISwapPowGate(exponent=np.random.random() * t).on(*qubits)) - inputs = util.convert_to_tensor([circuit]) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - rand_resolver = {'t': np.random.random()} - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit, rand_resolver)), - cirq.unitary( - 
cirq.resolve_parameters(decomposed_programs[0], - rand_resolver)), - atol=1e-5) - - def test_phased_x_pow_gate_test(self): - """Test 1 PhasedXPowGate decomposition.""" - t = sympy.Symbol('t') - r = sympy.Symbol('r') - q = cirq.GridQubit(0, 0) - circuit = cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=np.random.random() * r, - exponent=np.random.random() * t).on(q)) - inputs = util.convert_to_tensor([circuit]) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - rand_resolver = {'t': np.random.random(), 'r': np.random.random()} - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit, rand_resolver)), - cirq.unitary( - cirq.resolve_parameters(decomposed_programs[0], - rand_resolver)), - atol=1e-5) - - def test_fsim_gate_test(self): - """Test 1 FSimPowGate decomposition.""" - t = sympy.Symbol('t') - r = sympy.Symbol('r') - qubits = cirq.GridQubit.rect(1, 2) - circuit = cirq.Circuit( - cirq.FSimGate(theta=np.random.random() * r, - phi=np.random.random() * t).on(*qubits)) - inputs = util.convert_to_tensor([circuit]) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - rand_resolver = {'t': np.random.random(), 'r': np.random.random()} - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit, rand_resolver)), - cirq.unitary( - cirq.resolve_parameters(decomposed_programs[0], - rand_resolver)), - atol=1e-5) - - def test_decompose_with_complex_circuit(self): - """Test decompose with complex circuit.""" - names = ['CLAE', 'HRYV', 'IRKB', 'LKRV', 'PJOU', 'CJKX', 'NASW'] - # Test circuit has a Moment with 1) FSimGate & PhasedXPowGate, - # 2) PhasedXPowGate & ISwapPowGate and 3) FSimGate & ISwapPowGate. - # Be careful, they are not decomposed if not parameterized. - circuit_batch = [ - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.FSimGate(theta=0.10338130973488413 * - sympy.Symbol('CLAE'), - phi=0.10338130973488413 * - sympy.Symbol('IRKB')). 
- on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)), - cirq.PhasedXPowGate(phase_exponent=1.0, - exponent=0.86426029696045281 * - sympy.Symbol('HRYV')).on( - cirq.GridQubit(0, 1)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 3)), - cirq.Z.on(cirq.GridQubit(0, 0)), - cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1), - cirq.GridQubit(0, 2)), - ]), - cirq.Moment(operations=[ - (cirq.CNOT**(0.92874230274398684 * sympy.Symbol('IRKB')) - ).on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 2)), - ]), - cirq.Moment(operations=[ - cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('PJOU'), - exponent=0.2081415255258906 * - sympy.Symbol('LKRV')).on( - cirq.GridQubit(0, 2)), - (cirq.ISWAP**(0.32860954996781722 * sympy.Symbol('PJOU')) - ).on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 3)), - ]), - cirq.Moment(operations=[ - cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('CJKX')).on( - cirq.GridQubit(0, 1)), - cirq.ZZ.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)), - (cirq.X**(0.6826594585474709 * - sympy.Symbol('HRYV'))).on(cirq.GridQubit(0, 2)), - ]), - cirq.Moment(operations=[ - (cirq.ZZ**(0.18781276022427218 * sympy.Symbol('PJOU')) - ).on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 3)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.FSimGate(theta=0.13793763138552417 * - sympy.Symbol('CJKX'), - phi=0.13793763138552417 * - sympy.Symbol('PJOU')). - on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)), - (cirq.ISWAP**(0.028165738453673095 * sympy.Symbol('NASW')) - ).on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)), - ]), - cirq.Moment(operations=[ - cirq.FSimGate(theta=0.74356520426349459 * - sympy.Symbol('CJKX'), - phi=0.74356520426349459 * - sympy.Symbol('NASW')). 
- on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 2)), - cirq.SWAP.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)), - ]), - cirq.Moment(operations=[ - cirq.H.on(cirq.GridQubit(0, 3)), - cirq.H.on(cirq.GridQubit(0, 2)), - cirq.CNOT.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.CNOT.on(cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)), - cirq.YY.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)), - ]), - cirq.Moment(operations=[ - cirq.CZ.on(cirq.GridQubit(0, 1), cirq.GridQubit(0, 0)), - cirq.CNOT.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 3)), - ]), - cirq.Moment(operations=[ - cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0), - cirq.GridQubit(0, 2)), - cirq.CNOT.on(cirq.GridQubit(0, 3), cirq.GridQubit(0, 1)), - ]), - cirq.Moment(operations=[ - cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 0), - cirq.GridQubit(0, 3)), - cirq.SWAP.on(cirq.GridQubit(0, 2), cirq.GridQubit(0, 1)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - cirq.PhasedXPowGate( - phase_exponent=1.0).on(cirq.GridQubit(0, 2)), - cirq.FSimGate(theta=1, phi=1).on(cirq.GridQubit(0, 1), - cirq.GridQubit(0, 3)), - ]), - ]) - ] - - # Decompose programs. - inputs = util.convert_to_tensor(circuit_batch) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - self.assertEqual(len(decomposed_programs), len(circuit_batch)) - - # Original programs has parameterized ISP, PXP, FSIM, but this result - # has no such gates at all. All parameterized gates have at most two - # eigenvalues. There are still ISwap and PhasedX(1.0) because they are - # not parameterized, which doesn't affect ParameterShift differentiation - # at all. - for program in decomposed_programs: - for moment in program: - for gate_op in moment: - # Consider parameterized gates only - if cirq.is_parameterized(gate_op.gate): - # Check I. 
The gate should have _eigen_components. - self.assertTrue( - hasattr(gate_op.gate, '_eigen_components')) - # Check II. The gate should have two eigen values. - self.assertEqual(len(gate_op.gate._eigen_components()), - 2, gate_op.gate) - # Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has - # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. - # Check if two programs are identical. - rand_resolver = {name: np.random.random() for name in names} - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit_batch[0], rand_resolver)), - cirq.unitary( - cirq.resolve_parameters(decomposed_programs[0], - rand_resolver)), - atol=1e-5) - - def test_moment_preservation(self): - """Test Moment-structure preservation.""" - t = sympy.Symbol('t') - r = sympy.Symbol('r') - qubits = cirq.GridQubit.rect(1, 6) - circuit_batch = [ - cirq.Circuit( - cirq.Moment([cirq.H(q) for q in qubits]), - cirq.Moment([ - cirq.X(qubits[4]), - cirq.PhasedXPowGate(phase_exponent=np.random.random() * - t).on(qubits[5]), - cirq.ISwapPowGate(exponent=np.random.random() * t).on( - qubits[0], qubits[1]), - cirq.FSimGate(theta=np.random.random() * t, - phi=np.random.random() * r).on( - qubits[2], qubits[3]) - ]), cirq.Moment([cirq.H(q) for q in qubits])) - ] - inputs = util.convert_to_tensor(circuit_batch) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - # Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has - # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. - # Check if two programs are identical. - rand_resolver = {'t': np.random.random(), 'r': np.random.random()} - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit_batch[0], rand_resolver)), - cirq.unitary( - cirq.resolve_parameters(decomposed_programs[0], - rand_resolver)), - atol=1e-5) - # Check if the Moments are conserved. 
- max_decomposed_length = 3 - n_non_decomposed_moments = 2 - self.assertEqual(len(decomposed_programs[0]), - n_non_decomposed_moments + max_decomposed_length) - # Total length of Moments = 5 - # The non-decomposed moments should be the same. - self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0]) - self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1]) - # Check paralellized decompose gates in Moment[1]~[3]. - # The target ops are replaced by the first decomposition gates. It means - # the first Moment has exactly the same number of gate ops. - self.assertEqual(len(decomposed_programs[0][1]), - len(circuit_batch[0][1])) - # From the second Moments, the Moments only have decomposition gates. - # In this example, two ISwapPowGate & one PhasedXPowGate are located. - # Since PhasedXPowGate, ISwapPowGate, FSimGate has 3, 2, 3 result gates - # Moment[2] have 3 gate ops and Moment[3] have 2 gate ops. - self.assertEqual(len(decomposed_programs[0][2]), 3) - self.assertEqual(len(decomposed_programs[0][3]), 2) - - def test_more_complex_moment_preservation(self): - """Test Moment-structure preservation.""" - circuit_batch = _complex_test_circuit() - inputs = util.convert_to_tensor(circuit_batch) - outputs = tfq_ps_util_ops.tfq_ps_decompose(inputs) - decomposed_programs = util.from_tensor(outputs) - # Now all programs don't have ISWAP & PhasedXPowGate because ISWAP has - # 3 eigenvalues and PhasedXPowGate doesn't have _eigen_components. - # Check if two programs are identical. - rand_resolver = {'t': np.random.random(), 'r': np.random.random()} - for i in range(3): - self.assertAllClose(cirq.unitary( - cirq.resolve_parameters(circuit_batch[i], rand_resolver)), - cirq.unitary( - cirq.resolve_parameters( - decomposed_programs[i], rand_resolver)), - atol=1e-5) - # Check if the Moments are conserved. - # Circuit 1. 
- max_decomposed_length = 3 - n_non_decomposed_moments = 2 - self.assertEqual(len(decomposed_programs[0]), - n_non_decomposed_moments + max_decomposed_length) - # Total length of Moments = 5 - # The non-decomposed moments should be the same. - self.assertEqual(decomposed_programs[0][0], circuit_batch[0][0]) - self.assertEqual(decomposed_programs[0][-1], circuit_batch[0][-1]) - # Check paralellized decompose gates in Moment[1]~[3]. - # The target ops are replaced by the first decomposition gates. It means - # the first Moment has exactly the same number of gate ops. - self.assertEqual(len(decomposed_programs[0][1]), - len(circuit_batch[0][1])) - # From the second Moments, the Moments only have decomposition gates. - # In this example, two ISwapPowGate & one PhasedXPowGate are located. - # Since PhasedXPowGate, ISwapPowGate, FSimGate has 3, 2, 3 result gates - # Moment[2] have 3 gate ops and Moment[3] have 2 gate ops. - self.assertEqual(len(decomposed_programs[0][2]), 3) - self.assertEqual(len(decomposed_programs[0][3]), 2) - - # Circuit 2. two FSimGates. - self.assertEqual(len(decomposed_programs[1]), 2 * max_decomposed_length) - - # Circuit 3. one PXP between two ISwapPowGates. 
- self.assertEqual(len(decomposed_programs[2]), max_decomposed_length) - - -class PSSymbolReplaceTest(tf.test.TestCase): - """Tests tfq_ps_symbol_replace.""" - - def test_simple_case(self): - """Test trivial case.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha']) - new = tf.convert_to_tensor(['new']) - res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new) - output = util.from_tensor(res) - correct_00 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('new'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - correct_01 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('new'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - correct_02 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('new'), - ) - self.assertEqual(correct_00, output[0][0][0]) - self.assertEqual(correct_01, output[0][0][1]) - self.assertEqual(correct_02, output[0][0][2]) - - def test_error(self): - """Ensure that errors happen with bad inputs.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2)) - inputs = util.convert_to_tensor([[circuit]]) - symbols = tf.convert_to_tensor(['test']) - replacements = tf.convert_to_tensor(['nothing']) - with self.assertRaisesRegex(Exception, - expected_regex='rank 1. 
Got rank 2.'): - tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements) - - inputs = tf.convert_to_tensor(['junk']) - with self.assertRaisesRegex(Exception, - expected_regex='Unparseable proto:'): - tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements) - - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor([['test']]) - replacements = tf.convert_to_tensor(['nothing']) - with self.assertRaisesRegex(Exception, - expected_regex='rank 1. Got rank 2.'): - tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements) - - symbols = tf.convert_to_tensor(['test']) - replacements = tf.convert_to_tensor([['nothing']]) - with self.assertRaisesRegex(Exception, - expected_regex='rank 1. Got rank 2.'): - tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements) - - symbols = tf.convert_to_tensor(['test']) - replacements = tf.convert_to_tensor(['nothing', 'too long']) - with self.assertRaisesRegex( - Exception, - expected_regex= - 'symbols.shape is not equal to replacement_symbols.shape'): - tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, replacements) - - def test_weight_coefficient(self): - """Test that scalar multiples of trivial case work.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0), - cirq.Z(bit)**(sympy.Symbol('alpha') * 4.0), - ) - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha']) - new = tf.convert_to_tensor(['new']) - res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new) - output = util.from_tensor(res) - correct_00 = cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('new') * 2.0), - cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0), - cirq.Z(bit)**(sympy.Symbol('alpha') * 4.0), - ) - correct_01 = cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('new') * 3.0), - cirq.Z(bit)**(sympy.Symbol('alpha') * 4.0), - ) - 
correct_02 = cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0), - cirq.Z(bit)**(sympy.Symbol('new') * 4.0), - ) - self.assertEqual(correct_00, output[0][0][0]) - self.assertEqual(correct_01, output[0][0][1]) - self.assertEqual(correct_02, output[0][0][2]) - - def test_simple_pad(self): - """Test simple padding.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - circuit2 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('beta'), - ) - circuit3 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - inputs = util.convert_to_tensor([circuit, circuit2, circuit3]) - symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma']) - new = tf.convert_to_tensor(['new', 'old', 'nothing']) - res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new) - output = util.from_tensor(res) - - correct_00 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('new'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - correct_01 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('new'), - cirq.Z(bit)**sympy.Symbol('alpha'), - ) - correct_02 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('new'), - ) - self.assertEqual(correct_00, output[0][0][0]) - self.assertEqual(correct_01, output[0][0][1]) - self.assertEqual(correct_02, output[0][0][2]) - - self.assertEqual(correct_00, output[2][0][0]) - self.assertEqual(correct_01, output[2][0][1]) - self.assertEqual(correct_02, output[2][0][2]) - - correct_10 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('old'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('beta'), - ) - correct_11 
= cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('old'), - cirq.Z(bit)**sympy.Symbol('beta'), - ) - correct_12 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('old'), - ) - self.assertEqual(correct_10, output[1][1][0]) - self.assertEqual(correct_11, output[1][1][1]) - self.assertEqual(correct_12, output[1][1][2]) - - correct_20 = cirq.Circuit() - correct_21 = cirq.Circuit() - correct_22 = cirq.Circuit() - self.assertEqual(correct_20, output[2][2][0]) - self.assertEqual(correct_21, output[2][2][1]) - self.assertEqual(correct_22, output[2][2][2]) - - correct = cirq.Circuit() - for i in range(3): - for j in range(3): - for k in range(3): - if i != j and (not (i == 2 and j == 0)): - self.assertEqual(correct, output[i][j][k]) - - def test_complex_pad(self): - """Test trickier padding.""" - bit = cirq.GridQubit(0, 0) - bit2 = cirq.GridQubit(0, 1) - circuit = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - circuit2 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('beta'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - circuit3 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - inputs = util.convert_to_tensor([circuit, circuit2, circuit3]) - symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma']) - new = tf.convert_to_tensor(['new', 'old', 'nothing']) - res = tfq_ps_util_ops.tfq_ps_symbol_replace(inputs, symbols, new) - output = util.from_tensor(res) - - correct_000 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('new'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_001 = 
cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('new'), - cirq.Z(bit)**sympy.Symbol('alpha'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_002 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('new'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_003 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('alpha'), - cirq.Y(bit)**sympy.Symbol('alpha'), - cirq.Z(bit)**sympy.Symbol('alpha'), - cirq.XX(bit, bit2)**sympy.Symbol('new')) - - self.assertEqual(correct_000, output[0][0][0]) - self.assertEqual(correct_001, output[0][0][1]) - self.assertEqual(correct_002, output[0][0][2]) - self.assertEqual(correct_003, output[0][0][3]) - - self.assertEqual(correct_000, output[2][0][0]) - self.assertEqual(correct_001, output[2][0][1]) - self.assertEqual(correct_002, output[2][0][2]) - self.assertEqual(correct_003, output[2][0][3]) - - correct_110 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('old'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('beta'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_111 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('old'), - cirq.Z(bit)**sympy.Symbol('beta'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_112 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('old'), - cirq.XX(bit, bit2)**sympy.Symbol('alpha')) - correct_113 = cirq.Circuit() - - self.assertEqual(correct_110, output[1][1][0]) - self.assertEqual(correct_111, output[1][1][1]) - self.assertEqual(correct_112, output[1][1][2]) - self.assertEqual(correct_113, output[1][1][3]) - - correct_100 = cirq.Circuit( - cirq.X(bit)**sympy.Symbol('beta'), - cirq.Y(bit)**sympy.Symbol('beta'), - cirq.Z(bit)**sympy.Symbol('beta'), - cirq.XX(bit, bit2)**sympy.Symbol('new')) - correct_101 = cirq.Circuit() - correct_102 = cirq.Circuit() - correct_103 = 
cirq.Circuit() - - self.assertEqual(correct_100, output[1][0][0]) - self.assertEqual(correct_101, output[1][0][1]) - self.assertEqual(correct_102, output[1][0][2]) - self.assertEqual(correct_103, output[1][0][3]) - - correct_220 = cirq.Circuit() - correct_221 = cirq.Circuit() - correct_222 = cirq.Circuit() - correct_223 = cirq.Circuit() - - self.assertEqual(correct_220, output[2][2][0]) - self.assertEqual(correct_221, output[2][2][1]) - self.assertEqual(correct_222, output[2][2][2]) - self.assertEqual(correct_223, output[2][2][3]) - - correct = cirq.Circuit() - for i in range(3): - for j in range(3): - for k in range(3): - if i != j and (not (i == 2 and j == 0)) \ - and (not (i == 1 and j == 0)): - self.assertEqual(correct, output[i][j][k]) - - -class PSWeightsFromSymbolTest(tf.test.TestCase): - """Tests tfq_ps_weights_from_symbols.""" - - def test_simple(self): - """Ensure that weight extraction works.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2)) - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose(res, np.array([[[2.0]]])) - - def test_empty(self): - """Test empty circuit. and symbol free circuit. 
does nothing.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.X(bit)) - circuit2 = cirq.Circuit() - inputs = util.convert_to_tensor([circuit, circuit2]) - symbols = tf.convert_to_tensor(['alpha']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose(res, np.array([[[]], [[]]])) - - def test_rotation_gates(self): - """Test that rotation gates work.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.Rx(sympy.Symbol('alpha') * 5.0)(bit)) - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose(res, np.array([[[5.0 / np.pi]]])) - - def test_error(self): - """Ensure if a symbol can't be found the op errors.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 2)) - inputs = util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha', 'delta']) - tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - symbols = tf.convert_to_tensor(['alpha']) - with self.assertRaisesRegex(Exception, expected_regex='sympy.Symbol'): - tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - - symbols = tf.convert_to_tensor([['delta']]) - with self.assertRaisesRegex(Exception, - expected_regex='rank 1. Got rank 2.'): - tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - - inputs = tf.convert_to_tensor(['junk']) - symbols = tf.convert_to_tensor(['delta']) - with self.assertRaisesRegex(Exception, - expected_regex='Unparseable proto:'): - tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - - inputs = util.convert_to_tensor([[circuit]]) - with self.assertRaisesRegex(Exception, - expected_regex='rank 1. 
Got rank 2.'): - tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - - def test_many_values(self): - """Ensure that padding with few symbols and many values works.""" - bit = cirq.GridQubit(0, 0) - circuits = [ - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0), - cirq.Z(bit)**(sympy.Symbol('alpha')), - cirq.X(bit)**(sympy.Symbol('alpha') * 4.0)), - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 9.0)), - cirq.Circuit(cirq.X(bit)**sympy.Symbol('beta')) - ] - inputs = util.convert_to_tensor(circuits) - symbols = tf.convert_to_tensor(['alpha', 'beta']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose( - res, - np.array([[[2.0, 3.0, 1.0, 4.0], [0.0, 0.0, 0.0, 0.0]], - [[9.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], - [[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0]]])) - - def test_many_symbols(self): - """Ensure that padding with few values and many symbols works.""" - bit = cirq.GridQubit(0, 0) - circuits = [ - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 2.0)), - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('beta') * 6)), - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('alpha') * 5.0)), - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('gamma') * 8)), - cirq.Circuit(cirq.X(bit)**(sympy.Symbol('delta') * 9)) - ] - inputs = util.convert_to_tensor(circuits) - symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma', 'delta']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose( - res, - np.array([[[2.0], [0.0], [0.0], [0.0]], [[0.0], [6.0], [0.0], - [0.0]], - [[5.0], [0.0], [0.0], [0.0]], [[0.0], [0.0], [8.0], - [0.0]], - [[0.0], [0.0], [0.0], [9.0]]])) - - def test_out_of_order(self): - """Test that discovery order of symbols in circuits doesn't matter.""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2), - cirq.Y(bit)**(sympy.Symbol('beta') * 3)) - inputs = 
util.convert_to_tensor([circuit]) - symbols = tf.convert_to_tensor(['alpha', 'beta']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose(res, np.array([[[2.0], [3.0]]])) - symbols = tf.convert_to_tensor(['beta', 'alpha']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose(res, np.array([[[3.0], [2.0]]])) - - def test_padding(self): - """Ensure that the padding is correct in a complex example.""" - bit = cirq.GridQubit(0, 0) - circuits = [ - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('alpha') * 3.0), - cirq.Z(bit)**(sympy.Symbol('beta') * 4.0), - ), - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('beta') * 3.0), - cirq.Z(bit)**(sympy.Symbol('beta') * 4.0), - ), - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('beta') * 3.0), - cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0), - ) - ] - inputs = util.convert_to_tensor(circuits) - symbols = tf.convert_to_tensor(['alpha', 'beta', 'gamma']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose( - res, - np.array([[[2.0, 3.0], [4.0, 0.0], [0.0, 0.0]], - [[2.0, 0.0], [3.0, 4.0], [0.0, 0.0]], - [[2.0, 0.0], [3.0, 0.0], [4.0, 0.0]]])) - - def test_padding_with_non_parameterized_gates(self): - """Ensure that the padding is correct in a complex example.""" - bit = cirq.GridQubit(0, 0) - circuits = [ - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**3.0, - cirq.Z(bit)**(sympy.Symbol('beta') * 4.0), - ), - cirq.Circuit( - cirq.X(bit)**(sympy.Symbol('alpha') * 2.0), - cirq.Y(bit)**(sympy.Symbol('beta') * 3.0), - cirq.Z(bit)**4.0, - ), - cirq.Circuit( - cirq.X(bit)**2.0, - cirq.Y(bit)**(sympy.Symbol('beta') * 3.0), - cirq.Z(bit)**(sympy.Symbol('gamma') * 4.0), - ) - ] - inputs = util.convert_to_tensor(circuits) - symbols = tf.convert_to_tensor(['alpha', 'beta', 
'gamma']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - self.assertAllClose( - res, - np.array([[[2.0], [4.0], [0.0]], [[2.0], [3.0], [0.0]], - [[0.0], [3.0], [4.0]]])) - - def test_ignorance(self): - """Test ignorance of ISP, PXP, FSIM gates.""" - circuit_batch = _complex_test_circuit() - inputs = util.convert_to_tensor(circuit_batch) - symbols = tf.convert_to_tensor(['r', 't']) - res = tfq_ps_util_ops.tfq_ps_weights_from_symbols(inputs, symbols) - # Because there are no weights to be gathered, the last dimension = 0 - self.assertAllClose(tf.shape(res), [len(circuit_batch), 2, 0]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc b/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc deleted file mode 100644 index f8d1e2b20..000000000 --- a/tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc +++ /dev/null @@ -1,182 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" -#include "absl/strings/numbers.h" -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" - -namespace tfq { - -using ::cirq::google::api::v2::Arg; -using ::cirq::google::api::v2::Moment; -using ::cirq::google::api::v2::Operation; -using ::cirq::google::api::v2::Program; -using ::tensorflow::Tensor; - -class TfqPsWeightsFromSymbolOp : public tensorflow::OpKernel { - public: - explicit TfqPsWeightsFromSymbolOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - std::vector programs; - - const int num_inputs = context->num_inputs(); - OP_REQUIRES(context, num_inputs == 2, - tensorflow::errors::InvalidArgument(absl::StrCat( - "Expected 2 inputs, got ", num_inputs, " inputs."))); - - OP_REQUIRES_OK(context, ParsePrograms(context, "programs", &programs)); - - // Parse the input string here. - const Tensor *symbols_tensor; - context->input("symbols", &symbols_tensor); - OP_REQUIRES( - context, symbols_tensor->dims() == 1, - tensorflow::errors::InvalidArgument(absl::StrCat( - "symbols must be rank 1. Got rank ", symbols_tensor->dims(), "."))); - - const auto symbols = symbols_tensor->vec(); - const int n_symbols = symbols.size(); - - // (i,j,k) = the kth scalar value found for symbols(j) in programs(i). 
- std::vector>> output_results( - programs.size(), - std::vector>(n_symbols, std::vector())); - - // map from symbols -> index in second dimension of output_results. - absl::flat_hash_map symbols_map; - for (int i = 0; i < n_symbols; i++) { - symbols_map[symbols(i)] = i; - } - std::vector ignore_list = {"ISP", "PXP", "FSIM", "PISP"}; - absl::flat_hash_set ignored_symbol_set(ignore_list.begin(), - ignore_list.end()); - - std::vector n_single_symbol(programs.size(), 0); - - auto DoWork = [&](int start, int end) { - for (int i = start; i < end; i++) { - Program cur_program = programs.at(i); - for (int j = 0; j < cur_program.circuit().moments().size(); j++) { - Moment cur_moment = cur_program.circuit().moments().at(j); - for (int k = 0; k < cur_moment.operations().size(); k++) { - Operation cur_op = cur_moment.operations().at(k); - if (ignored_symbol_set.contains(cur_op.gate().id())) continue; - - const auto &cur_op_map = *cur_op.mutable_args(); - const auto exponent = cur_op_map.at("exponent"); - if (exponent.arg_case() == Arg::ArgCase::kSymbol) { - // this gate has parameterized exponent. - const absl::string_view symbol_name = exponent.symbol(); - if (!symbols_map.contains(symbol_name)) { - // Should never happen. raise error. - OP_REQUIRES(context, false, - tensorflow::errors::InvalidArgument( - "A circuit contains a sympy.Symbol not found " - "in symbols!")); - } - output_results.at(i) - .at(symbols_map.at(symbol_name)) - .push_back(cur_op.args() - .at("exponent_scalar") - .arg_value() - .float_value()); - } - } - } - // loop over all index entries of symbols_map and find largest - // value from output_results. 
- for (int j = 0; j < n_symbols; j++) { - n_single_symbol.at(i) = - std::max(n_single_symbol.at(i), - static_cast(output_results.at(i).at(j).size())); - } - } - }; - - const int block_size = GetBlockSize(context, programs.size()); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, programs.size(), - DoWork); - - int largest_single_symbol = 0; - for (size_t i = 0; i < n_single_symbol.size(); i++) { - largest_single_symbol = - std::max(n_single_symbol.at(i), largest_single_symbol); - } - - tensorflow::Tensor *output = nullptr; - tensorflow::TensorShape output_shape; - // batch size. - output_shape.AddDim(programs.size()); - // entry size. - output_shape.AddDim(n_symbols); - output_shape.AddDim(largest_single_symbol); - - OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); - - auto output_tensor = output->tensor(); - - auto DoWork2 = [&](int start, int end) { - for (int i = start; i < end; i++) { - for (int j = 0; j < n_symbols; j++) { - for (size_t k = 0; k < output_results.at(i).at(j).size(); k++) { - output_tensor(i, j, k) = output_results.at(i).at(j).at(k); - } - for (int k = output_results.at(i).at(j).size(); - k < largest_single_symbol; k++) { - output_tensor(i, j, k) = 0.0f; - } - } - } - }; - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, programs.size(), - DoWork2); - } -}; - -REGISTER_KERNEL_BUILDER( - Name("TfqPsWeightsFromSymbols").Device(tensorflow::DEVICE_CPU), - TfqPsWeightsFromSymbolOp); - -REGISTER_OP("TfqPsWeightsFromSymbols") - .Input("programs: string") - .Input("symbols: string") - .Output("weights: float") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - tensorflow::shape_inference::ShapeHandle symbols_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, 
&symbols_shape)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc deleted file mode 100644 index 131b188fd..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc +++ /dev/null @@ -1,180 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/error_codes.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/qsim/mux.h" -#include "tensorflow_quantum/core/qsim/state_space.h" -#include "tensorflow_quantum/core/src/circuit.h" -#include "tensorflow_quantum/core/src/circuit_parser.h" -#include "tensorflow_quantum/core/src/program_resolution.h" - -namespace tfq { - -using ::cirq::google::api::v2::Program; -using ::tensorflow::Status; -using ::tfq::proto::PauliSum; 
-using ::tfq::qsim::GetStateSpace; -using ::tfq::qsim::StateSpace; - -class TfqSimulateExpectationOp : public tensorflow::OpKernel { - public: - explicit TfqSimulateExpectationOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - // TODO (mbbrough): add more dimension checks for other inputs here. - const int num_inputs = context->num_inputs(); - OP_REQUIRES(context, num_inputs == 4, - tensorflow::errors::InvalidArgument(absl::StrCat( - "Expected 4 inputs, got ", num_inputs, " inputs."))); - - // Create the output Tensor. - const int output_dim_batch_size = context->input(0).dim_size(0); - const int output_dim_op_size = context->input(3).dim_size(1); - tensorflow::TensorShape output_shape; - output_shape.AddDim(output_dim_batch_size); - output_shape.AddDim(output_dim_op_size); - - tensorflow::Tensor *output = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); - auto output_tensor = output->matrix(); - - std::vector programs; - std::vector num_qubits; - std::vector> pauli_sums; - OP_REQUIRES_OK(context, GetProgramsAndNumQubits(context, &programs, - &num_qubits, &pauli_sums)); - - std::vector maps; - OP_REQUIRES_OK(context, GetSymbolMaps(context, &maps)); - - OP_REQUIRES(context, pauli_sums.size() == programs.size(), - tensorflow::errors::InvalidArgument(absl::StrCat( - "Number of circuits and PauliSums do not match. 
Got ", - programs.size(), " circuits and ", pauli_sums.size(), - " paulisums."))); - - auto DoWork = [&](int start, int end) { - int old_batch_index = -2; - int cur_batch_index = -1; - int cur_op_index; - std::unique_ptr test_state = - std::unique_ptr(GetStateSpace(1, 1)); - std::unique_ptr scratch_state = - std::unique_ptr(GetStateSpace(1, 1)); - for (int i = start; i < end; i++) { - cur_batch_index = i / output_dim_op_size; - cur_op_index = i % output_dim_op_size; - - // (#679) Just ignore empty program - if (programs[cur_batch_index].circuit().moments().empty()) { - output_tensor(cur_batch_index, cur_op_index) = -2.0; - continue; - } - - if (cur_batch_index != old_batch_index) { - // We've run into a new wavefunction we must compute. - // Only compute a new wavefunction when we have to. - Program program = programs[cur_batch_index]; - const int num = num_qubits[cur_batch_index]; - OP_REQUIRES_OK(context, - ResolveSymbols(maps[cur_batch_index], &program)); - - Circuit circuit; - OP_REQUIRES_OK(context, CircuitFromProgram(program, num, &circuit)); - - // TODO(mbbrough): Update this allocation hack so that a StateSpace - // object can grow it's memory dynamically to larger and larger size - // without ever having to call free (until very end). This is tricky - // to implement because right now certain statespaces can't simulate - // all states and we use StateSpaceSlow for smaller circuits. - if (num != num_qubits[old_batch_index]) { - test_state.reset(GetStateSpace(num, 1)); - test_state->CreateState(); - - // Also re-allocate scratch state for expectation calculations. - scratch_state.reset(GetStateSpace(num, 1)); - scratch_state->CreateState(); - } - // no need to update scratch_state since ComputeExpectation - // will take care of things for us. 
- test_state->SetStateZero(); - OP_REQUIRES_OK(context, test_state->Update(circuit)); - } - - float expectation = 0.0; - OP_REQUIRES_OK(context, test_state->ComputeExpectation( - pauli_sums[cur_batch_index][cur_op_index], - scratch_state.get(), &expectation)); - - output_tensor(cur_batch_index, cur_op_index) = expectation; - old_batch_index = cur_batch_index; - } - }; - - const int block_size = - GetBlockSize(context, output_dim_batch_size * output_dim_op_size); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently( - block_size, output_dim_batch_size * output_dim_op_size, DoWork); - } -}; - -REGISTER_KERNEL_BUILDER( - Name("TfqSimulateExpectation").Device(tensorflow::DEVICE_CPU), - TfqSimulateExpectationOp); - -REGISTER_OP("TfqSimulateExpectation") - .Input("programs: string") - .Input("symbol_names: string") - .Input("symbol_values: float") - .Input("pauli_sums: string") - .Output("expectations: float") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - tensorflow::shape_inference::ShapeHandle symbol_names_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &symbol_names_shape)); - - tensorflow::shape_inference::ShapeHandle symbol_values_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &symbol_values_shape)); - - tensorflow::shape_inference::ShapeHandle pauli_sums_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(3), 2, &pauli_sums_shape)); - - tensorflow::shape_inference::DimensionHandle output_rows = - c->Dim(programs_shape, 0); - tensorflow::shape_inference::DimensionHandle output_cols = - c->Dim(pauli_sums_shape, 1); - c->set_output(0, c->Matrix(output_rows, output_cols)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops.py b/tensorflow_quantum/core/ops/tfq_simulate_ops.py deleted file 
mode 100644 index a70e2f4c5..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_ops.py +++ /dev/null @@ -1,152 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module to register python op gradient.""" -import tensorflow as tf -from tensorflow_quantum.core.ops.load_module import load_module - -SIM_OP_MODULE = load_module("_tfq_simulate_ops.so") - - -def tfq_simulate_expectation(programs, symbol_names, symbol_values, pauli_sums): - """Calculate the expectation value of circuits wrt some operator(s) - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specificed by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. 
- Returns: - `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). - """ - return SIM_OP_MODULE.tfq_simulate_expectation( - programs, symbol_names, tf.cast(symbol_values, tf.float32), pauli_sums) - - -def tfq_simulate_state(programs, symbol_names, symbol_values): - """Returns the state of the programs using the C++ wavefunction simulator. - - Simulate the final state of `programs` given `symbol_values` are placed - inside of the symbols with the name in `symbol_names` in each circuit. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specificed by programs, following the ordering - dictated by `symbol_names`. - Returns: - A `tf.Tensor` containing the final state of each circuit in `programs`. - """ - return SIM_OP_MODULE.tfq_simulate_state(programs, symbol_names, - tf.cast(symbol_values, tf.float32)) - - -@tf.function -def tfq_simulate_samples(programs, symbol_names, symbol_values, num_samples): - """Generate samples using the C++ wavefunction simulator. - - Simulate the final state of `programs` given `symbol_values` are placed - inside of the symbols with the name in `symbol_names` in each circuit. - From there we will then sample from the final state using native tensorflow - operations. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
- symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` with one element indicating the number of - samples to draw. - Returns: - A `tf.Tensor` containing the samples taken from each circuit in - `programs`. - """ - # get the state from the simulator - state = tfq_simulate_state(programs, symbol_names, - tf.cast(symbol_values, tf.float32)) - - # sample from the state - real_state = tf.math.real(state) - state_mask = tf.cast(tf.math.greater(real_state, -1.5), dtype=state.dtype) - state_zeroed = tf.multiply(state, state_mask) - log_probs = tf.math.log( - tf.cast(tf.square(tf.abs(state_zeroed)), tf.float64) - - tf.constant(10**-9, dtype=tf.float64)) - samples = tf.random.categorical(log_probs, - tf.gather( - tf.cast(num_samples, dtype=tf.int32), - 0), - dtype=tf.int64) - - # determine how many qubits make up each state - individual_sizes = tf.cast( - tf.reduce_sum(tf.cast(state_mask, tf.int32), axis=1), tf.float64) - n_qubits = tf.cast( - tf.math.round( - tf.math.log((individual_sizes)) / - tf.math.log(tf.constant(2.0, dtype=tf.float64))), tf.int32) - max_n_qubits = tf.reduce_max(n_qubits) - - # convert samples to binary - def gen_binary_mask(x): - return tf.bitwise.left_shift(tf.constant(1, dtype=x.dtype), x) - - binary_conversion_mask = tf.reverse( - tf.vectorized_map(gen_binary_mask, tf.range(0, max_n_qubits)), [0]) - - def num_to_bin(x): - return tf.cast(tf.cast( - tf.bitwise.bitwise_and(x, tf.cast(binary_conversion_mask, x.dtype)), - tf.bool), - dtype=tf.int8) - - def row_to_num(y): - return tf.vectorized_map(num_to_bin, y) - - binary_samples = tf.vectorized_map(row_to_num, samples) - - 
#create the padded output tensor - vertical_dim = tf.gather(tf.shape(binary_samples), tf.constant(1)) - - def create_pad_mask(x): - right = tf.zeros([vertical_dim, x], dtype=tf.int8) - left = tf.ones([vertical_dim, max_n_qubits - x], dtype=tf.int8)* \ - tf.constant(2, dtype=tf.int8) - return tf.concat([left, right], axis=1) - - padding_mask = tf.map_fn(create_pad_mask, n_qubits, dtype=tf.int8) - return binary_samples - padding_mask diff --git a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py b/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py deleted file mode 100644 index 47fabec8d..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_ops_test.py +++ /dev/null @@ -1,479 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests that specifically target tfq_simulate_ops.""" -import numpy as np -from absl.testing import parameterized -import tensorflow as tf -import cirq - -from tensorflow_quantum.python import util -from tensorflow_quantum.core.ops import tfq_simulate_ops -from tensorflow_quantum.python import util - - -class SimulateExpectationTest(tf.test.TestCase): - """Tests tfq_simulate_expectation.""" - - def test_simulate_expectation_inputs(self): - """Make sure the the expectation op fails gracefully on bad inputs.""" - n_qubits = 5 - batch_size = 5 - symbol_names = ['alpha'] - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, batch_size) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - pauli_sums = util.random_pauli_sums(qubits, 3, batch_size) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs must be rank 1'): - # Circuit tensor has too many dimensions. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor([circuit_batch]), symbol_names, - symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_names must be rank 1.'): - # symbol_names tensor has too many dimensions. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), np.array([symbol_names]), - symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_values must be rank 2.'): - # symbol_values_array tensor has too many dimensions. 
- tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - np.array([symbol_values_array]), - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_values must be rank 2.'): - # symbol_values_array tensor has too few dimensions. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array[0], - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'pauli_sums must be rank 2.'): - # pauli_sums tensor has too few dimensions. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([x for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'pauli_sums must be rank 2.'): - # pauli_sums tensor has too many dimensions. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[[x]] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Unparseable proto'): - # circuit tensor has the right type but invalid values. - tfq_simulate_ops.tfq_simulate_expectation( - ['junk'] * batch_size, symbol_names, symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Could not find symbol in parameter map'): - # symbol_names tensor has the right type but invalid values. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), ['junk'], - symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'qubits not found in circuit'): - # pauli_sums tensor has the right type but invalid values. 
- new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)] - new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size) - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[x] for x in new_pauli_sums])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Unparseable proto'): - # pauli_sums tensor has the right type but invalid values 2. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, [['junk']] * batch_size) - - with self.assertRaisesRegex(TypeError, 'Cannot convert'): - # circuits tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_expectation( - [1.0] * batch_size, symbol_names, symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(TypeError, 'Cannot convert'): - # symbol_names tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), [0.1234], - symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(tf.errors.UnimplementedError, ''): - # symbol_values tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - [['junk']] * batch_size, - util.convert_to_tensor([[x] for x in pauli_sums])) - - with self.assertRaisesRegex(TypeError, 'Cannot convert'): - # pauli_sums tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, [[1.0]] * batch_size) - - with self.assertRaisesRegex(TypeError, 'missing'): - # we are missing an argument. 
- # pylint: disable=no-value-for-parameter - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array) - # pylint: enable=no-value-for-parameter - - with self.assertRaisesRegex(TypeError, 'positional arguments'): - # pylint: disable=too-many-function-args - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, - util.convert_to_tensor([[x] for x in pauli_sums]), []) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex='do not match'): - # wrong op size. - tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor([cirq.Circuit()]), symbol_names, - symbol_values_array.astype(np.float64), - util.convert_to_tensor([[x] for x in pauli_sums])) - - res = tfq_simulate_ops.tfq_simulate_expectation( - util.convert_to_tensor([cirq.Circuit() for _ in pauli_sums]), - symbol_names, symbol_values_array.astype(np.float64), - util.convert_to_tensor([[x] for x in pauli_sums])) - self.assertDTypeEqual(res, np.float32) - - -class SimulateStateTest(tf.test.TestCase, parameterized.TestCase): - """Tests tfq_simulate_state.""" - - def test_simulate_state_inputs(self): - """Make sure the state op fails gracefully on bad inputs.""" - n_qubits = 5 - batch_size = 5 - symbol_names = ['alpha'] - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, batch_size) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs must be rank 1'): - # programs tensor has the wrong shape. 
- tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor([circuit_batch]), symbol_names, - symbol_values_array) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_names must be rank 1'): - # symbol_names tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), np.array([symbol_names]), - symbol_values_array) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_values must be rank 2'): - # symbol_values tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), symbol_names, - np.array([symbol_values_array])) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'symbol_values must be rank 2'): - # symbol_values tensor has the wrong shape 2. - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array[0]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Unparseable proto'): - # programs tensor has the right type, but invalid value. - tfq_simulate_ops.tfq_simulate_state(['junk'] * batch_size, - symbol_names, - symbol_values_array) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Could not find symbol in parameter map'): - # symbol_names tensor has the right type, but invalid value. - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), ['junk'], - symbol_values_array) - - with self.assertRaisesRegex(TypeError, 'Cannot convert'): - # programs tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_state([1] * batch_size, symbol_names, - symbol_values_array) - - with self.assertRaisesRegex(TypeError, 'Cannot convert'): - # symbol_names tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), [1], symbol_values_array) - - with self.assertRaisesRegex(tf.errors.UnimplementedError, ''): - # symbol_values tensor has the wrong type. 
- tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), symbol_names, - [['junk']] * batch_size) - - with self.assertRaisesRegex(TypeError, 'missing'): - # too few tensors. - # pylint: disable=no-value-for-parameter - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), symbol_names) - # pylint: enable=no-value-for-parameter - - # TODO (mbbrough): determine if we should allow extra arguments ? - with self.assertRaisesRegex(TypeError, 'positional arguments'): - # pylint: disable=too-many-function-args - tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, []) - - @parameterized.parameters([ - { - 'all_n_qubits': [2, 3] - }, - { - 'all_n_qubits': [1, 5, 8] - }, - ]) - def test_simulate_state_output_padding(self, all_n_qubits): - """If a tfq_simulate op is asked to simulate states given circuits - acting on different numbers of qubits, the op should return a tensor - padded with zeros up to the size of the largest circuit. The padding - should be physically correct, such that samples taken from the padded - states still match samples taken from the original circuit. """ - circuit_batch = [] - for n_qubits in all_n_qubits: - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch += util.random_circuit_resolver_batch(qubits, 1)[0] - - tfq_results = tfq_simulate_ops.tfq_simulate_state( - util.convert_to_tensor(circuit_batch), [], - [[]] * len(circuit_batch)) - - # Don't use batch_util here to enforce consistent padding everywhere - # without extra tests. 
- sim = cirq.Simulator() - manual_padded_results = [] - for circuit in circuit_batch: - result = sim.simulate(circuit) - wf = result.final_state - blank_state = np.ones( - (2**max(all_n_qubits)), dtype=np.complex64) * -2 - blank_state[:wf.shape[0]] = wf - manual_padded_results.append(blank_state) - - self.assertAllClose(tfq_results, manual_padded_results) - - -class SimulateSamplesTest(tf.test.TestCase, parameterized.TestCase): - """Tests tfq_simulate_samples.""" - - def test_simulate_samples_inputs(self): - """Make sure the sample op fails gracefully on bad inputs.""" - n_qubits = 5 - batch_size = 5 - num_samples = 10 - symbol_names = ['alpha'] - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - qubits, symbol_names, batch_size) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch]) - - with self.assertRaisesRegex(ValueError, 'rank 1 but is rank 2'): - # programs tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor([circuit_batch]), symbol_names, - symbol_values_array, [num_samples]) - - with self.assertRaisesRegex(ValueError, 'rank 1 but is rank 2'): - # symbol_names tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), np.array([symbol_names]), - symbol_values_array, [num_samples]) - - with self.assertRaisesRegex(ValueError, 'rank 2 but is rank 3'): - # symbol_values tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - np.array([symbol_values_array]), [num_samples]) - - with self.assertRaisesRegex(ValueError, 'rank 2 but is rank 1'): - # symbol_values tensor has the wrong shape 2. 
- tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array[0], [num_samples]) - - with self.assertRaisesRegex(ValueError, 'rank 0 but is rank 1'): - # num_samples tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, [[num_samples]]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Unparseable proto'): - # programs tensor has the right type, but invalid value. - tfq_simulate_ops.tfq_simulate_samples(['junk'] * batch_size, - symbol_names, - symbol_values_array, - [num_samples]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Could not find symbol in parameter map'): - # symbol_names tensor has the right type, but invalid value. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), ['junk'], - symbol_values_array, [num_samples]) - - with self.assertRaisesRegex(TypeError, 'Expected string'): - # programs tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_samples([1] * batch_size, - symbol_names, - symbol_values_array, - [num_samples]) - - with self.assertRaisesRegex(TypeError, 'Expected string'): - # programs tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), [1], symbol_values_array, - [num_samples]) - - with self.assertRaisesRegex(tf.errors.UnimplementedError, - 'Cast string to float is not supported'): - # programs tensor has the wrong type. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - [['junk']] * batch_size, [num_samples]) - - with self.assertRaisesRegex(Exception, 'not supported'): - # num_samples tensor has the wrong shape. - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array, ['junk']) - - with self.assertRaisesRegex(TypeError, 'missing'): - # too few tensors. 
- # pylint: disable=no-value-for-parameter - tfq_simulate_ops.tfq_simulate_samples( - util.convert_to_tensor(circuit_batch), symbol_names, - symbol_values_array) - # pylint: enable=no-value-for-parameter - - @parameterized.parameters([ - { - 'all_n_qubits': [2, 3], - 'n_samples': 10 - }, - { - 'all_n_qubits': [1, 5, 8], - 'n_samples': 10 - }, - ]) - def test_sampling_output_padding(self, all_n_qubits, n_samples): - """Check that the sampling ops pad outputs correctly""" - op = tfq_simulate_ops.tfq_simulate_samples - circuits = [] - expected_outputs = [] - for n_qubits in all_n_qubits: - this_expected_output = np.zeros((n_samples, max(all_n_qubits))) - this_expected_output[:, max(all_n_qubits) - n_qubits:] = 1 - this_expected_output[:, :max(all_n_qubits) - n_qubits] = -2 - expected_outputs.append(this_expected_output) - circuits.append( - cirq.Circuit( - *cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits)))) - results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits), - [n_samples]).numpy() - self.assertAllClose(expected_outputs, results) - - -class InputTypesTest(tf.test.TestCase, parameterized.TestCase): - """Tests that different inputs types work for all of the ops. """ - - @parameterized.parameters([ - { - 'symbol_type': tf.float32 - }, - { - 'symbol_type': tf.float64 - }, - { - 'symbol_type': tf.int32 - }, - { - 'symbol_type': tf.int64 - }, - { - 'symbol_type': tf.complex64 - }, - ]) - def test_symbol_values_type(self, symbol_type): - """Tests all three ops for the different types. 
""" - qubit = cirq.GridQubit(0, 0) - circuits = util.convert_to_tensor([cirq.Circuit(cirq.H(qubit))]) - symbol_names = ['symbol'] - symbol_values = tf.convert_to_tensor([[1]], dtype=symbol_type) - pauli_sums = util.random_pauli_sums([qubit], 3, 1) - pauli_sums = util.convert_to_tensor([[x] for x in pauli_sums]) - - result = tfq_simulate_ops.tfq_simulate_state(circuits, symbol_names, - symbol_values) - self.assertDTypeEqual(result, np.complex64) - - result = tfq_simulate_ops.tfq_simulate_expectation( - circuits, symbol_names, symbol_values, pauli_sums) - self.assertDTypeEqual(result, np.float32) - - result = tfq_simulate_ops.tfq_simulate_samples(circuits, symbol_names, - symbol_values, [100]) - self.assertDTypeEqual(result, np.int8) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc b/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc deleted file mode 100644 index fc8654576..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_state_op.cc +++ /dev/null @@ -1,153 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/framework/shape_inference.h" -#include "tensorflow/core/framework/tensor_shape.h" -#include "tensorflow/core/lib/core/error_codes.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow/core/lib/core/threadpool.h" -#include "tensorflow_quantum/core/ops/parse_context.h" -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" -#include "tensorflow_quantum/core/qsim/mux.h" -#include "tensorflow_quantum/core/qsim/state_space.h" -#include "tensorflow_quantum/core/src/circuit_parser.h" -#include "tensorflow_quantum/core/src/program_resolution.h" - -namespace tfq { - -using ::cirq::google::api::v2::Program; -using ::tensorflow::Status; -using ::tfq::Circuit; -using ::tfq::CircuitFromProgram; -using ::tfq::qsim::GetStateSpace; -using ::tfq::qsim::StateSpace; - -class TfqSimulateStateOp : public tensorflow::OpKernel { - public: - explicit TfqSimulateStateOp(tensorflow::OpKernelConstruction *context) - : OpKernel(context) {} - - void Compute(tensorflow::OpKernelContext *context) override { - // TODO (mbbrough): add more dimension checks for other inputs here. - DCHECK_EQ(3, context->num_inputs()); - - std::vector programs; - std::vector num_qubits; - OP_REQUIRES_OK(context, - GetProgramsAndNumQubits(context, &programs, &num_qubits)); - std::vector maps; - OP_REQUIRES_OK(context, GetSymbolMaps(context, &maps)); - - OP_REQUIRES( - context, maps.size() == programs.size(), - tensorflow::errors::InvalidArgument(absl::StrCat( - "Number of circuits and values do not match. 
Got ", programs.size(), - " circuits and ", maps.size(), " values."))); - - int max_num_qubits = 0; - for (const int num : num_qubits) { - max_num_qubits = std::max(max_num_qubits, num); - } - - // TODO(pmassey): Investigate creating a matrix that isn't just the maximum - // required size. - const int output_dim_size = maps.size(); - tensorflow::TensorShape output_shape; - output_shape.AddDim(output_dim_size); - output_shape.AddDim(1 << max_num_qubits); - - tensorflow::Tensor *output = nullptr; - OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); - auto output_tensor = output->matrix>(); - - auto DoWork = [&](int start, int end) { - std::unique_ptr state = - std::unique_ptr(GetStateSpace(1, 1)); - int old_num_qubits = -1; - for (int i = start; i < end; i++) { - Program program = programs[i]; - const int num = num_qubits[i]; - OP_REQUIRES_OK(context, ResolveSymbols(maps[i], &program)); - - // QSim work below - Circuit circuit; - OP_REQUIRES_OK(context, CircuitFromProgram(program, num, &circuit)); - - // TODO(mbbrough): Update this allocation hack so that a StateSpace - // object can grow it's memory dynamically to larger and larger size - // without ever having to call free (until the very end). This is - // tricky to implement because right now certain statespaces can't - // simulate all states and we use StateSpaceSlow for smaller circuits. 
- if (num != old_num_qubits) { - state.reset(GetStateSpace(num, 1)); - state->CreateState(); - } - state->SetStateZero(); - OP_REQUIRES_OK(context, state->Update(circuit)); - uint64_t state_size = state->GetDimension(); - for (uint64_t j = 0; j < state_size; j++) { - output_tensor(i, j) = state->GetAmpl(j); - } - for (uint64_t j = state_size; j < (uint64_t(1) << max_num_qubits); - j++) { - output_tensor(i, j) = std::complex(-2, 0); - } - old_num_qubits = num; - } - }; - - const int block_size = GetBlockSize(context, output_dim_size); - context->device() - ->tensorflow_cpu_worker_threads() - ->workers->TransformRangeConcurrently(block_size, output_dim_size, - DoWork); - } -}; - -REGISTER_KERNEL_BUILDER(Name("TfqSimulateState").Device(tensorflow::DEVICE_CPU), - TfqSimulateStateOp); - -REGISTER_OP("TfqSimulateState") - .Input("programs: string") - .Input("symbol_names: string") - .Input("symbol_values: float") - .Output("wavefunction: complex64") - .SetShapeFn([](tensorflow::shape_inference::InferenceContext *c) { - tensorflow::shape_inference::ShapeHandle programs_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(0), 1, &programs_shape)); - - tensorflow::shape_inference::ShapeHandle symbol_names_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 1, &symbol_names_shape)); - - tensorflow::shape_inference::ShapeHandle symbol_values_shape; - TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 2, &symbol_values_shape)); - - // TODO(pmassey): Which output dimension size matters? Does this allocate - // any memory or gives hints to the graph building? I apparently just set - // this as rows in the previous run and that seemed to work. 
- tensorflow::shape_inference::DimensionHandle output_rows = - c->Dim(symbol_values_shape, 0); - tensorflow::shape_inference::DimensionHandle output_cols = - c->Dim(symbol_values_shape, 1); - c->set_output(0, c->Matrix(output_rows, output_cols)); - - return tensorflow::Status::OK(); - }); - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_simulate_utils.cc b/tensorflow_quantum/core/ops/tfq_simulate_utils.cc deleted file mode 100644 index d2ce64079..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_utils.cc +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow_quantum/core/ops/tfq_simulate_utils.h" - -#include "tensorflow/core/framework/op_kernel.h" -#include "tensorflow/core/lib/core/threadpool.h" - -namespace tfq { - -int GetBlockSize(tensorflow::OpKernelContext* context, const int output_size) { - int size = output_size / - context->device()->tensorflow_cpu_worker_threads()->num_threads; - return std::max(size, 1); -} - -} // namespace tfq diff --git a/tensorflow_quantum/core/ops/tfq_simulate_utils.h b/tensorflow_quantum/core/ops/tfq_simulate_utils.h deleted file mode 100644 index cac38fd06..000000000 --- a/tensorflow_quantum/core/ops/tfq_simulate_utils.h +++ /dev/null @@ -1,29 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_OPS_TFQ_SIMULATE_UTILS_H_ -#define TFQ_CORE_OPS_TFQ_SIMULATE_UTILS_H_ - -#include "tensorflow/core/framework/op_kernel.h" - -namespace tfq { - -// Returns a block size that distributes the work evenly across the different -// circuits. -int GetBlockSize(tensorflow::OpKernelContext* context, const int output_size); - -} // namespace tfq - -#endif // TFQ_CORE_OPS_TFQ_SIMULATE_UTILS_H_ diff --git a/tensorflow_quantum/core/ops/tfq_utility_ops.py b/tensorflow_quantum/core/ops/tfq_utility_ops.py deleted file mode 100644 index 2049b9826..000000000 --- a/tensorflow_quantum/core/ops/tfq_utility_ops.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Expose bindings for tfq utility ops.""" -import tensorflow as tf -from tensorflow_quantum.core.ops.load_module import load_module - -UTILITY_OP_MODULE = load_module("_tfq_utility_ops.so") - -# pylint: disable=invalid-name -tfq_append_circuit = UTILITY_OP_MODULE.tfq_append_circuit - - -@tf.function -def padded_to_ragged(masked_state): - """Utility `tf.function` that converts a padded tensor to ragged. - - Convert a state `tf.Tensor` padded with the value -2 to a `tf.RaggedTensor` - using efficient boolean masking. - - Args: - masked_state: `tf.State` tensor with -2 padding. - Returns: - state_ragged: State tensor without padding as a `tf.RaggedTensor`. - """ - abs_state = tf.abs(tf.cast(masked_state, tf.float32)) - mask = tf.math.less(abs_state, tf.constant(1.1, dtype=abs_state.dtype)) - state_ragged = tf.ragged.boolean_mask(masked_state, mask) - return state_ragged diff --git a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py b/tensorflow_quantum/core/ops/tfq_utility_ops_test.py deleted file mode 100644 index a7133cec6..000000000 --- a/tensorflow_quantum/core/ops/tfq_utility_ops_test.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for tfq utility ops.""" -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import cirq - -from tensorflow_quantum.core.ops import tfq_utility_ops -from tensorflow_quantum.core.serialize import serializer -from tensorflow_quantum.python import util - - -class CircuitAppendOpTest(tf.test.TestCase, parameterized.TestCase): - """Test the in-graph circuit append op.""" - - def test_append_input_checking(self): - """Check that the append op has correct input checking.""" - test_circuit = serializer.serialize_circuit( - cirq.Circuit(cirq.X.on(cirq.GridQubit(0, 0)))).SerializeToString() - with self.assertRaisesRegex(TypeError, 'Cannot convert \\[1\\]'): - tfq_utility_ops.tfq_append_circuit([test_circuit], [1]) - with self.assertRaisesRegex(TypeError, 'Cannot convert \\[1\\]'): - tfq_utility_ops.tfq_append_circuit([1], [test_circuit]) - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'Unparseable proto'): - tfq_utility_ops.tfq_append_circuit(['wrong'], ['wrong']) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'programs and programs_to_append must have matching sizes.'): - tfq_utility_ops.tfq_append_circuit([test_circuit], - [test_circuit, test_circuit]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'programs and programs_to_append must have matching sizes.'): - tfq_utility_ops.tfq_append_circuit([test_circuit, test_circuit], - [test_circuit]) - with self.assertRaisesRegex( - tf.errors.InvalidArgumentError, - 'programs and programs_to_append must have matching sizes'): - tfq_utility_ops.tfq_append_circuit([], [test_circuit]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - 'programs must be rank 1. 
Got rank 2'): - tfq_utility_ops.tfq_append_circuit([[test_circuit, test_circuit]], - [[test_circuit, test_circuit]]) - - with self.assertRaisesRegex(TypeError, - 'missing 1 required positional argument'): - # pylint: disable=no-value-for-parameter - tfq_utility_ops.tfq_append_circuit([test_circuit]) - # pylint: enable=no-value-for-parameter - - # TODO (mbbrough): should this line work or no. what is the TF - # standard here ? - tfq_utility_ops.tfq_append_circuit([test_circuit], [test_circuit], - [test_circuit]) - - # These tests really just makes sure we can cast output - res = tfq_utility_ops.tfq_append_circuit([], []) - - self.assertDTypeEqual(res.numpy().astype(np.str), np.dtype(' -1, True, False) - expected = tf.ragged.boolean_mask(padded_array, mask) - actual = tfq_utility_ops.padded_to_ragged( - np.array(padded_array, dtype=float)) - self.assertAllEqual(expected, actual) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/core/proto/BUILD b/tensorflow_quantum/core/proto/BUILD deleted file mode 100644 index a934cd8d1..000000000 --- a/tensorflow_quantum/core/proto/BUILD +++ /dev/null @@ -1,34 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") - -# load("//tools/build_defs/proto/cpp:cc_proto_library.bzl", "cc_proto_library") - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -cc_proto_library( - name = "program_cc_proto", - deps = [ - "@cirq//cirq/google/api/v2:program_proto", - ], -) - -py_proto_library( - name = "pauli_sum_py_proto", - srcs = ["pauli_sum.proto"], -) - -proto_library( - name = "pauli_sum_proto", - srcs = ["pauli_sum.proto"], -) - -cc_proto_library( - name = "pauli_sum_cc_proto", - deps = [ - ":pauli_sum_proto", - ], -) diff --git a/tensorflow_quantum/core/proto/__init__.py b/tensorflow_quantum/core/proto/__init__.py deleted file mode 100644 index bdbbd7a51..000000000 --- a/tensorflow_quantum/core/proto/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/tensorflow_quantum/core/proto/pauli_sum.proto b/tensorflow_quantum/core/proto/pauli_sum.proto deleted file mode 100644 index b0aa200b8..000000000 --- a/tensorflow_quantum/core/proto/pauli_sum.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; - -package tfq.proto; - -// Store the sum of simpler terms. -message PauliSum { - repeated PauliTerm terms = 1; -} - -// Store a term which is a coefficient of some number of tensored up paulis. -message PauliTerm { - float coefficient_real = 1; - float coefficient_imag = 2; - repeated PauliQubitPair paulis = 3; -} - -// Store a pauli acting on a particular qubit. 
-message PauliQubitPair { - string qubit_id = 1; - string pauli_type = 2; -} diff --git a/tensorflow_quantum/core/qsim/BUILD b/tensorflow_quantum/core/qsim/BUILD deleted file mode 100644 index 2a9ee804b..000000000 --- a/tensorflow_quantum/core/qsim/BUILD +++ /dev/null @@ -1,125 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -cc_library( - name = "qsim", - deps = [ - ":fuser_basic", - ":mux", - ":state_space", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/memory", - "@com_google_absl//absl/strings", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "fuser_basic", - srcs = ["fuser_basic.cc"], - hdrs = ["fuser_basic.h"], - deps = [ - "//tensorflow_quantum/core/src:circuit", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_test( - name = "fuser_basic_test", - srcs = ["fuser_basic_test.cc"], - deps = [ - ":fuser_basic", - "//tensorflow_quantum/core/src:circuit", - "//tensorflow_quantum/core/src:gates_def", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_googletest//:gtest_main", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "mux", - srcs = ["mux.cc"], - hdrs = ["mux.h"], - deps = [ - ":state_space", - ":state_space_slow", - ":state_space_avx", - ":state_space_sse", - "@com_google_absl//absl/memory", - ], -) - -cc_test( - name = "mux_test", - srcs = ["mux_test.cc"], - deps = [ - ":mux", - "@com_google_googletest//:gtest_main", - ], -) - -cc_library( - name = "state_space", - srcs = ["state_space.cc"], - hdrs = ["state_space.h"], - deps = [ - ":fuser_basic", - "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", - "//tensorflow_quantum/core/src:circuit", - "//tensorflow_quantum/core/src:circuit_parser", - 
"//tensorflow_quantum/core/src:matrix", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "state_space_slow", - srcs = ["state_space_slow.cc"], - hdrs = ["state_space_slow.h"], - deps = [ - ":state_space", - "//tensorflow_quantum/core/src:matrix", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "state_space_avx", - srcs = ["state_space_avx.cc"], - hdrs = ["state_space_avx.h"], - deps = [ - ":state_space", - ":util", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "state_space_sse", - srcs = ["state_space_sse.cc"], - hdrs = ["state_space_sse.h"], - deps = [ - ":state_space", - ":util", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "util", - srcs = ["util.cc"], - hdrs = ["util.h"], - deps = [], -) diff --git a/tensorflow_quantum/core/qsim/fuser_basic.cc b/tensorflow_quantum/core/qsim/fuser_basic.cc deleted file mode 100644 index f3ee49185..000000000 --- a/tensorflow_quantum/core/qsim/fuser_basic.cc +++ /dev/null @@ -1,129 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/fuser_basic.h" - -#include -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/circuit.h" - -namespace tfq { -namespace qsim { -namespace { - -using ::tensorflow::Status; - -// Appends to `fused_gates` all single-qubit gates on the current `qubit_wire`, -// until reaching the end of the wire or a multi-qubit gate. -// Starts the search at `current_timeslice`. -void Advance(const std::vector& qubit_wire, - std::vector* fused_gates, - unsigned int* current_timeslice) { - while (*current_timeslice < qubit_wire.size() && - qubit_wire[*current_timeslice]->num_qubits == 1) { - fused_gates->push_back(qubit_wire[(*current_timeslice)++]); - } -} - -} // namespace - -Status FuseGates(const Circuit& circuit, std::vector* fused) { - // Holds only the two-qubit gates in the circuit, in correct time order; - // these are later used as anchors for single qubit gate fusing. - std::vector gates_seq; - gates_seq.resize(0); - gates_seq.reserve(circuit.gates.size()); - - // Lattice of gates. The first index refers to a qubit location; - // the second index refers to a time slice of simultaneously occurring gates. - std::vector> gates_lat(circuit.num_qubits); - - // Reserve 128 time slices for each qubit in the lattice. - for (unsigned int k = 0; k < circuit.num_qubits; ++k) { - gates_lat[k].resize(0); - gates_lat[k].reserve(128); - } - - // Examine every gate in the circuit. - // Place a reference in gates_lat at all lattice points (locations and times) - // at which the gate acts; record each two-qubit gate in gates_seq. 
- for (const auto& gate : circuit.gates) { - if (gate.num_qubits == 1) { - gates_lat[gate.qubits[0]].push_back(&gate); - } else if (gate.num_qubits == 2) { - gates_lat[gate.qubits[0]].push_back(&gate); - gates_lat[gate.qubits[1]].push_back(&gate); - gates_seq.push_back(&gate); - } - } - - // For each qubit, holds the latest timeslice processed on that qubit. - std::vector last(circuit.num_qubits, 0); - - // Fuse gates. - // Fusing is performed by having each two-qubit gate (anchor) in the sequence - // greedily absorb all single-qubit gates around them. - for (const Gate* pgate : gates_seq) { - unsigned int q0 = pgate->qubits[0]; - unsigned int q1 = pgate->qubits[1]; - - // No more unprocessed gates available on q0. - if (last[q0] >= gates_lat[q0].size()) continue; - - // This two-qubit gate has already been absorbed into a different anchor. - if (gates_lat[q0][last[q0]]->time > pgate->time) continue; - - GateFused gate_f = {pgate->time, 2, {q0, q1}, pgate}; - do { - // Collect all available single-qubit gates before the anchor. - Advance(gates_lat[q0], &gate_f.gates, &last[q0]); - Advance(gates_lat[q1], &gate_f.gates, &last[q1]); - - // Initial fuse should end at the anchor which initiated the fuse. - if (gates_lat[q0][last[q0]] != gates_lat[q1][last[q1]]) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Error fusing gates."); - } - - // Collect the anchor. - gate_f.gates.push_back(gates_lat[q0][last[q0]]); - - // Collect all available single-qubit gates after the anchor. 
- last[q0]++; - last[q1]++; - Advance(gates_lat[q0], &gate_f.gates, &last[q0]); - Advance(gates_lat[q1], &gate_f.gates, &last[q1]); - - } while ( - // There are still gates available on both wires - last[q0] < gates_lat[q0].size() && - last[q1] < gates_lat[q1].size() - // The next gate is a two-qubit gate sharing both qubits with the anchor - && gates_lat[q0][last[q0]] == gates_lat[q1][last[q1]]); - - fused->push_back(std::move(gate_f)); - } - - // TODO: deal with single-qubit orphan gates if present. - // TODO: Add a check for single-qubits gates, and return error if present. - - return Status::OK(); -} - -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/fuser_basic.h b/tensorflow_quantum/core/qsim/fuser_basic.h deleted file mode 100644 index 490f5aeb2..000000000 --- a/tensorflow_quantum/core/qsim/fuser_basic.h +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_FUSER_BASIC_H_ -#define TFQ_CORE_QSIM_FUSER_BASIC_H_ - -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/circuit.h" - -namespace tfq { -namespace qsim { - -class GateFused { - public: - unsigned int time; - unsigned int num_qubits; - std::array qubits; - const Gate* pmaster; - std::vector gates; - - // provided to ease fuser testing - bool operator==(const GateFused& r) const { - if (this->time != r.time) { - return false; - } - if (this->num_qubits != r.num_qubits) { - return false; - } - for (unsigned int i = 0; i < this->num_qubits; i++) { - if (this->qubits[i] != r.qubits[i]) { - return false; - } - } - if (*this->pmaster != *r.pmaster) { - return false; - } - if (this->gates.size() != r.gates.size()) { - return false; - } - for (size_t i = 0; i < this->gates.size(); i++) { - if (*this->gates.at(i) != *r.gates.at(i)) { - return false; - } - } - return true; - } - - bool operator!=(const GateFused& r) const { return !(*this == r); } -}; - -tensorflow::Status FuseGates(const Circuit& circuit, - std::vector* fused); - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_FUSER_BASIC_H_ diff --git a/tensorflow_quantum/core/qsim/fuser_basic_test.cc b/tensorflow_quantum/core/qsim/fuser_basic_test.cc deleted file mode 100644 index b4770aee1..000000000 --- a/tensorflow_quantum/core/qsim/fuser_basic_test.cc +++ /dev/null @@ -1,332 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/fuser_basic.h" - -#include "absl/container/flat_hash_map.h" -#include "gtest/gtest.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/circuit.h" -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { -namespace qsim { -namespace { - -using ::tensorflow::Status; - -TEST(FuserBasicTest, GateFused) { - Status status; - GateFused test_fused, real_fused; - test_fused.time = real_fused.time = 42; - test_fused.num_qubits = real_fused.num_qubits = 2; - test_fused.qubits[0] = real_fused.qubits[0] = 0; - test_fused.qubits[1] = real_fused.qubits[1] = 1; - - std::vector locations; - XPowGateBuilder x_pow_builder; - Gate gate_x, gate_cnot; - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - locations.push_back(0); - status = x_pow_builder.Build(0, locations, arg_map, &gate_x); - - ASSERT_EQ(status, Status::OK()); - test_fused.gates.push_back(&gate_x); - real_fused.gates.push_back(&gate_x); - locations.clear(); - - CNotPowGateBuilder cnot_pow_builder; - absl::flat_hash_map arg_map_cnot; - arg_map_cnot["global_shift"] = 0.0; - arg_map_cnot["exponent"] = 1.0; - arg_map_cnot["exponent_scalar"] = 1.0; - locations.push_back(0); - locations.push_back(1); - status = cnot_pow_builder.Build(1, locations, arg_map_cnot, &gate_cnot); - - ASSERT_EQ(status, Status::OK()); - test_fused.gates.push_back(&gate_cnot); - 
real_fused.gates.push_back(&gate_cnot); - locations.clear(); - - test_fused.pmaster = &gate_cnot; - real_fused.pmaster = &gate_cnot; - - // confirm objects are actually equal - ASSERT_EQ(test_fused.time, real_fused.time); - ASSERT_EQ(test_fused.num_qubits, real_fused.num_qubits); - ASSERT_EQ(test_fused.qubits[0], real_fused.qubits[0]); - ASSERT_EQ(test_fused.qubits[1], real_fused.qubits[1]); - ASSERT_EQ(test_fused.pmaster, real_fused.pmaster); - ASSERT_EQ(test_fused.gates[0], real_fused.gates[0]); - ASSERT_EQ(test_fused.gates[1], real_fused.gates[1]); - - // check equality operator overload - test_fused.time = real_fused.time + 1; - ASSERT_NE(test_fused, real_fused); - test_fused.time = real_fused.time; - - test_fused.num_qubits = real_fused.num_qubits + 1; - ASSERT_NE(test_fused, real_fused); - test_fused.num_qubits = real_fused.num_qubits; - - test_fused.qubits[0] = real_fused.qubits[0] + 1; - ASSERT_NE(test_fused, real_fused); - test_fused.qubits[0] = real_fused.qubits[0]; - - test_fused.qubits[1] = real_fused.qubits[1] + 1; - ASSERT_NE(test_fused, real_fused); - test_fused.qubits[1] = real_fused.qubits[1]; - - test_fused.pmaster = &gate_x; - ASSERT_NE(test_fused, real_fused); - test_fused.pmaster = &gate_cnot; - - test_fused.gates[0] = &gate_cnot; - ASSERT_NE(test_fused, real_fused); - test_fused.gates[0] = &gate_x; - - test_fused.gates[1] = &gate_x; - ASSERT_NE(test_fused, real_fused); - test_fused.gates[1] = &gate_cnot; - - ASSERT_EQ(test_fused, real_fused); -} - -TEST(FuserBasicTest, FuseGatesMulti) { - // Tests that many gates are fused correctly. - // - // Construct the following test circuit: - // q0 -- X -- -- |CNOT| -- -- |I| - // q1 -- Y -- Z -- |CNOT| -- H -- |I| - // This should all be gathered into one GateFused. 
- Status status; - GateFused real_fused; - std::vector test_fused_vec; - Circuit test_circuit; - real_fused.num_qubits = 2; - real_fused.qubits[0] = 0; - real_fused.qubits[1] = 1; - test_circuit.num_qubits = 2; - test_circuit.gates.reserve(7); - - std::vector locations; - XPowGateBuilder x_pow_builder; - YPowGateBuilder y_pow_builder; - ZPowGateBuilder z_pow_builder; - CNotPowGateBuilder cnot_pow_builder; - HPowGateBuilder h_pow_builder; - I2GateBuilder i2_builder; - Gate gate_x, gate_y, gate_z, gate_cnot, gate_h, gate_ident; - absl::flat_hash_map arg_map_1q, arg_map_2q, empty_map; - arg_map_1q["global_shift"] = 0.0; - arg_map_1q["exponent"] = 1.0; - arg_map_1q["exponent_scalar"] = 1.0; - - arg_map_2q["global_shift"] = 0.0; - arg_map_2q["exponent"] = 1.0; - arg_map_2q["exponent_scalar"] = 1.0; - - locations.push_back(0); - status = x_pow_builder.Build(0, locations, arg_map_1q, &gate_x); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_x); - real_fused.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - locations.push_back(1); - status = y_pow_builder.Build(0, locations, arg_map_1q, &gate_y); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_y); - real_fused.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - locations.push_back(1); - status = z_pow_builder.Build(1, locations, arg_map_1q, &gate_z); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_z); - real_fused.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - unsigned int pmaster_time = 2; - locations.push_back(0); - locations.push_back(1); - status = - cnot_pow_builder.Build(pmaster_time, locations, arg_map_2q, &gate_cnot); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_cnot); - real_fused.pmaster = &test_circuit.gates.back(); - real_fused.gates.push_back(&test_circuit.gates.back()); - real_fused.time = pmaster_time; - locations.clear(); - - locations.push_back(0); - 
status = h_pow_builder.Build(3, locations, arg_map_1q, &gate_h); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_h); - real_fused.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - locations.push_back(0); - locations.push_back(1); - status = i2_builder.Build(4, locations, empty_map, &gate_ident); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_ident); - real_fused.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - ASSERT_EQ(FuseGates(test_circuit, &test_fused_vec), Status::OK()); - ASSERT_EQ(1, test_fused_vec.size()); - ASSERT_EQ(real_fused, test_fused_vec.at(0)); -} - -TEST(FuserBasicTest, FuseGatesDisjoint) { - // Tests that two-qubit gates not sharing both qubits - // are put into different GateFused. - // - // Construct the following test circuit: - // q0 -- X -- -- |CNOT| -- -- |I| -- - // q1 -- |CNOT| -- Y -- |CNOT| -- X -- |I| -- |I| - // q2 -- |CNOT| -- Z -- H -- -- -- |I| - // This should be fused into three different GateFused objects. - // The t = 0 CNOT should fuse the Y, Z, and H gates; - // the t = 2 CNOT should fuse the X gates and I(q0, q1); - // and the final I(q1, q2) should be alone. 
- Status status; - GateFused real_fused_1, real_fused_2, real_fused_3; - std::vector test_fused_vec; - Circuit test_circuit; - real_fused_1.num_qubits = real_fused_2.num_qubits = real_fused_3.num_qubits = - 2; - real_fused_2.qubits[0] = 0; - real_fused_1.qubits[0] = real_fused_2.qubits[1] = real_fused_3.qubits[0] = 1; - real_fused_1.qubits[1] = real_fused_3.qubits[1] = 2; - test_circuit.num_qubits = 3; - test_circuit.gates.reserve(10); - - std::vector locations; - XPowGateBuilder x_pow_builder; - YPowGateBuilder y_pow_builder; - ZPowGateBuilder z_pow_builder; - CNotPowGateBuilder cnot_pow_builder; - HPowGateBuilder h_pow_builder; - I2GateBuilder i2_builder; - Gate gate_x_1, gate_cnot_1, gate_y, gate_z, gate_cnot_2, gate_h, gate_x_2, - gate_ident_1, gate_ident_2; - absl::flat_hash_map arg_map_1q, arg_map_2q, empty_map; - arg_map_1q["global_shift"] = 0.0; - arg_map_1q["exponent"] = 1.0; - arg_map_1q["exponent_scalar"] = 1.0; - - arg_map_2q["global_shift"] = 0.0; - arg_map_2q["exponent"] = 1.0; - arg_map_2q["exponent_scalar"] = 1.0; - - // First fused gate - unsigned int pmaster_time_1 = 0; - locations.push_back(1); - locations.push_back(2); - status = cnot_pow_builder.Build(pmaster_time_1, locations, arg_map_2q, - &gate_cnot_1); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_cnot_1); - real_fused_1.pmaster = &test_circuit.gates.back(); - real_fused_1.gates.push_back(&test_circuit.gates.back()); - real_fused_1.time = pmaster_time_1; - locations.clear(); - - locations.push_back(1); - status = y_pow_builder.Build(1, locations, arg_map_1q, &gate_y); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_y); - real_fused_1.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - locations.push_back(2); - status = z_pow_builder.Build(1, locations, arg_map_1q, &gate_z); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_z); - real_fused_1.gates.push_back(&test_circuit.gates.back()); - 
locations.clear(); - - locations.push_back(2); - status = h_pow_builder.Build(2, locations, arg_map_1q, &gate_h); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_h); - real_fused_1.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - // Second fused gate - locations.push_back(0); - status = x_pow_builder.Build(0, locations, arg_map_1q, &gate_x_1); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_x_1); - real_fused_2.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - unsigned int pmaster_time_2 = 2; - locations.push_back(0); - locations.push_back(1); - status = cnot_pow_builder.Build(pmaster_time_2, locations, arg_map_2q, - &gate_cnot_2); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_cnot_2); - real_fused_2.pmaster = &test_circuit.gates.back(); - real_fused_2.gates.push_back(&test_circuit.gates.back()); - real_fused_2.time = pmaster_time_2; - locations.clear(); - - locations.push_back(1); - status = x_pow_builder.Build(3, locations, arg_map_1q, &gate_x_2); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_x_2); - real_fused_2.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - locations.push_back(0); - locations.push_back(1); - status = i2_builder.Build(4, locations, empty_map, &gate_ident_1); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_ident_1); - real_fused_2.gates.push_back(&test_circuit.gates.back()); - locations.clear(); - - // Third fused gate - unsigned int pmaster_time_3 = 5; - locations.push_back(1); - locations.push_back(2); - status = - i2_builder.Build(pmaster_time_3, locations, empty_map, &gate_ident_2); - ASSERT_EQ(status, Status::OK()); - test_circuit.gates.push_back(gate_ident_2); - real_fused_3.pmaster = &test_circuit.gates.back(); - real_fused_3.gates.push_back(&test_circuit.gates.back()); - real_fused_3.time = pmaster_time_3; - locations.clear(); - - // Check fused gate equality 
- ASSERT_EQ(FuseGates(test_circuit, &test_fused_vec), Status::OK()); - ASSERT_EQ(3, test_fused_vec.size()); - ASSERT_EQ(real_fused_1, test_fused_vec.at(0)); - ASSERT_EQ(real_fused_2, test_fused_vec.at(1)); - ASSERT_EQ(real_fused_3, test_fused_vec.at(2)); -} - -} // namespace -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/mux.cc b/tensorflow_quantum/core/qsim/mux.cc deleted file mode 100644 index 45a2fcf70..000000000 --- a/tensorflow_quantum/core/qsim/mux.cc +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/mux.h" - -#ifdef __AVX2__ -#include "tensorflow_quantum/core/qsim/state_space_avx.h" -#elif __SSE4_1__ -#include "tensorflow_quantum/core/qsim/state_space_sse.h" -#endif - -#include - -#include "absl/memory/memory.h" -#include "tensorflow_quantum/core/qsim/state_space.h" -#include "tensorflow_quantum/core/qsim/state_space_slow.h" - -namespace tfq { -namespace qsim { - -StateSpace* GetStateSpace(const uint64_t num_qubits, - const uint64_t num_threads) { - if (num_qubits <= 3) { - return new StateSpaceSlow(num_qubits, num_threads); - } - -#ifdef __AVX2__ - return new StateSpaceAVX(num_qubits, num_threads); -#elif __SSE4_1__ - return new StateSpaceSSE(num_qubits, num_threads); -#else - return new StateSpaceSlow(num_qubits, num_threads); -#endif -} - -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/mux.h b/tensorflow_quantum/core/qsim/mux.h deleted file mode 100644 index c7fe84bf9..000000000 --- a/tensorflow_quantum/core/qsim/mux.h +++ /dev/null @@ -1,32 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_MUX_H_ -#define TFQ_CORE_QSIM_MUX_H_ - -#include - -#include "tensorflow_quantum/core/qsim/state_space.h" - -namespace tfq { -namespace qsim { - -StateSpace* GetStateSpace(const uint64_t num_qubits, - const uint64_t num_threads); - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_MUX_H_ diff --git a/tensorflow_quantum/core/qsim/mux_test.cc b/tensorflow_quantum/core/qsim/mux_test.cc deleted file mode 100644 index a68b843a0..000000000 --- a/tensorflow_quantum/core/qsim/mux_test.cc +++ /dev/null @@ -1,35 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/mux.h" - -#include - -#include "gtest/gtest.h" -#include "tensorflow_quantum/core/qsim/state_space.h" - -namespace tfq { -namespace qsim { -namespace { - -TEST(MuxTest, GetStateSpace) { - auto simulator = GetStateSpace(1, 1); - EXPECT_FALSE(simulator == nullptr); - delete simulator; -} - -} // namespace -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/state_space.cc b/tensorflow_quantum/core/qsim/state_space.cc deleted file mode 100644 index 5402b3bf3..000000000 --- a/tensorflow_quantum/core/qsim/state_space.cc +++ /dev/null @@ -1,114 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/state_space.h" - -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/qsim/fuser_basic.h" -#include "tensorflow_quantum/core/src/circuit.h" -#include "tensorflow_quantum/core/src/circuit_parser.h" -#include "tensorflow_quantum/core/src/matrix.h" - -namespace tfq { -namespace qsim { - -tensorflow::Status StateSpace::Update(const Circuit& circuit) { - tensorflow::Status status; - // Special case for single qubit; - // derived classes free to return an error. - if (GetDimension() <= 2) { - for (uint64_t i = 0; i < circuit.gates.size(); i++) { - const auto& gate = circuit.gates[i]; - if (gate.num_qubits == 1) { - float matrix[8]; - Matrix2Set(gate.matrix, matrix); - status = ApplyGate1(matrix); - if (!status.ok()) { - return status; - } - } else { - return tensorflow::Status( - tensorflow::error::INVALID_ARGUMENT, - "Got a multi-qubit gate in a 1 qubit circuit."); - } - } - return tensorflow::Status::OK(); - } - - std::vector fused_gates; - status = FuseGates(circuit, &fused_gates); - if (!status.ok()) { - return status; - } - - for (const GateFused& gate : fused_gates) { - float matrix[32]; - CalcMatrix4(gate.qubits[0], gate.qubits[1], gate.gates, matrix); - ApplyGate2(gate.qubits[0], gate.qubits[1], matrix); - } - - return tensorflow::Status::OK(); -} - -tensorflow::Status StateSpace::ComputeExpectation( - const tfq::proto::PauliSum& p_sum, StateSpace* scratch, - float* expectation_value) { - // apply the gates of the pauliterms to a copy of the wavefunction - // and add up expectation value term by term. 
- tensorflow::Status status = tensorflow::Status::OK(); - for (const tfq::proto::PauliTerm& term : p_sum.terms()) { - // catch identity terms - if (term.paulis_size() == 0) { - *expectation_value += term.coefficient_real(); - // TODO(zaqqwerty): error somewhere if identities have any imaginary part - continue; - } - - Circuit measurement_circuit; - - status = CircuitFromPauliTerm(term, num_qubits_, &measurement_circuit); - if (!status.ok()) { - return status; - } - scratch->CopyFrom(*this); - status = scratch->Update(measurement_circuit); - if (!status.ok()) { - return status; - } - *expectation_value += - term.coefficient_real() * GetRealInnerProduct(*scratch); - } - return status; -} - -bool StateSpace::Valid() const { - // TODO: more roubust test? - return state_ != nullptr; -} - -float* StateSpace::GetRawState() const { return state_; }; - -uint64_t StateSpace::GetDimension() const { return size_ / 2; } - -uint64_t StateSpace::GetNumQubits() const { return num_qubits_; } - -uint64_t StateSpace::GetNumThreads() const { return num_threads_; } - -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/state_space.h b/tensorflow_quantum/core/qsim/state_space.h deleted file mode 100644 index 79e897674..000000000 --- a/tensorflow_quantum/core/qsim/state_space.h +++ /dev/null @@ -1,116 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_STATE_SPACE_H_ -#define TFQ_CORE_QSIM_STATE_SPACE_H_ - -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/src/circuit.h" - -namespace tfq { -namespace qsim { - -// Contains the allowed StateSpace labels -enum StateSpaceType { AVX, SLOW, SSE }; - -// Handles simulations of pure states (wavefunctions), not density matrices -class StateSpace { - public: - StateSpace(const uint64_t num_qubits, const uint64_t num_threads) - : state_(NULL), - size_(2 * (uint64_t{1} << num_qubits)), - num_qubits_(num_qubits), - num_threads_(num_threads) {} - - // Updates the state by applying the given circuit. - tensorflow::Status Update(const Circuit& circuit); - - // Computes the expectation value for a given state vector and PauliSum. - // Uses scratch StateSpace for evolving pauli terms forward and computing - // inner products. Assums that scratch has memory allocated, but does not - // require scratch to initialize values. - tensorflow::Status ComputeExpectation(const tfq::proto::PauliSum& p_sum, - StateSpace* scratch, - float* expectation_value); - - // Returns true if memory for the state has been succesfully allocated - bool Valid() const; - - // Pointer to the raw state managed by this StateSpace - float* GetRawState() const; - - // Dimension of the complex Hilbert space represented by this StateSpace - uint64_t GetDimension() const; - - // Number of qubits this StateSpace operates on - uint64_t GetNumQubits() const; - - // Number of threads that can be used by this StateSpace - uint64_t GetNumThreads() const; - - virtual ~StateSpace() {} - - // Get the simulator type. 
- virtual StateSpaceType GetType() const = 0; - - // Reserve the memory associated with the state in this space - virtual void CreateState() = 0; - - // Free the memory associated with the state in this space - virtual void DeleteState() = 0; - - // Return a pointer to a clone of this StateSpace that is unitialized. - // NOTE: user is responsible for deleting the returned copy. - virtual StateSpace* Clone() const = 0; - - // Copy the contents of others state into this state. Will not - // check if state has been initialized. - virtual void CopyFrom(const StateSpace& other) const = 0; - - // Function to apply a two qubit gate to the state on indices q0 and q1. - virtual void ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* matrix) = 0; - - // Function to apply a one-qubit gate if there is only one qubit in the state. - // Implementations are given the option to return an error. - virtual tensorflow::Status ApplyGate1(const float* matrix) = 0; - - // Set all entries in the state to zero - virtual void SetStateZero() = 0; - - // Get the inner product between this state and the state in `other` - virtual float GetRealInnerProduct(const StateSpace& other) const = 0; - - // Get the amplitude at the given state index - virtual std::complex GetAmpl(const uint64_t i) const = 0; - - // Set the amplitude at the given state index - virtual void SetAmpl(const uint64_t i, const std::complex& val) = 0; - - protected: - float* state_; - uint64_t size_; - uint64_t num_qubits_; - uint64_t num_threads_; -}; - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_STATE_SPACE_H_ diff --git a/tensorflow_quantum/core/qsim/state_space_avx.cc b/tensorflow_quantum/core/qsim/state_space_avx.cc deleted file mode 100644 index 0f433340b..000000000 --- a/tensorflow_quantum/core/qsim/state_space_avx.cc +++ /dev/null @@ -1,615 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifdef __AVX2__ - -#include "tensorflow_quantum/core/qsim/state_space_avx.h" - -#include - -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/qsim/util.h" - -namespace tfq { -namespace qsim { - -StateSpaceAVX::StateSpaceAVX(const uint64_t num_qubits, - const uint64_t num_threads) - : StateSpace(num_qubits, num_threads) {} - -StateSpaceAVX::~StateSpaceAVX() { DeleteState(); } - -StateSpaceType StateSpaceAVX::GetType() const { return StateSpaceType::AVX; } - -void StateSpaceAVX::CreateState() { - state_ = (float*)qsim::_aligned_malloc(sizeof(float) * size_); -} - -void StateSpaceAVX::DeleteState() { qsim::_aligned_free(state_); } - -StateSpace* StateSpaceAVX::Clone() const { - StateSpaceAVX* state_copy = - new StateSpaceAVX(GetNumQubits(), GetNumThreads()); - return state_copy; -} - -void StateSpaceAVX::CopyFrom(const StateSpace& other) const { - auto data = GetRawState(); - auto copy_data = other.GetRawState(); - - uint64_t size2 = GetDimension() / 8; - __m256 tmp1, tmp2; - for (uint64_t i = 0; i < size2; ++i) { - tmp1 = _mm256_load_ps(copy_data + 16 * i); - tmp2 = _mm256_load_ps(copy_data + 16 * i + 8); - - _mm256_store_ps(data + 16 * i, tmp1); - _mm256_store_ps(data + 16 * i + 8, tmp2); - } -} - -void StateSpaceAVX::ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* matrix) { - // 
Assume q0 < q1. - if (q0 > 2) { - ApplyGate2HH(q0, q1, matrix); - } else if (q1 > 2) { - ApplyGate2HL(q0, q1, matrix); - } else { - ApplyGate2LL(q0, q1, matrix); - } -} - -tensorflow::Status StateSpaceAVX::ApplyGate1(const float* matrix) { - return tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "AVX simulator doesn't support small circuits."); -} - -void StateSpaceAVX::SetStateZero() { - uint64_t size2 = GetDimension() / 8; - __m256 val0 = _mm256_setzero_ps(); - - auto data = GetRawState(); - - for (uint64_t i = 0; i < size2; ++i) { - _mm256_store_ps(data + 16 * i, val0); - _mm256_store_ps(data + 16 * i + 8, val0); - } - data[0] = 1; -} - -float StateSpaceAVX::GetRealInnerProduct(const StateSpace& other) const { - uint64_t size2 = GetDimension() / 4; - __m256d expv = _mm256_setzero_pd(); - __m256d rs, is; - - auto statea = GetRawState(); - auto stateb = other.GetRawState(); - - // Currently not a thread safe implementation of inner product! - for (uint64_t i = 0; i < size2; ++i) { - rs = _mm256_cvtps_pd(_mm_load_ps(statea + 8 * i)); - is = _mm256_cvtps_pd(_mm_load_ps(stateb + 8 * i)); - expv = _mm256_fmadd_pd(rs, is, expv); - rs = _mm256_cvtps_pd(_mm_load_ps(statea + 8 * i + 4)); - is = _mm256_cvtps_pd(_mm_load_ps(stateb + 8 * i + 4)); - expv = _mm256_fmadd_pd(rs, is, expv); - } - double buffer[4]; - _mm256_storeu_pd(buffer, expv); - return (float)(buffer[0] + buffer[1] + buffer[2] + buffer[3]); -} - -std::complex StateSpaceAVX::GetAmpl(const uint64_t i) const { - uint64_t p = (16 * (i / 8)) + (i % 8); - return std::complex(GetRawState()[p], GetRawState()[p + 8]); -} - -void StateSpaceAVX::SetAmpl(const uint64_t i, const std::complex& val) { - uint64_t p = (16 * (i / 8)) + (i % 8); - GetRawState()[p] = val.real(); - GetRawState()[p + 8] = val.imag(); -} - -void StateSpaceAVX::ApplyGate2HH(const unsigned int q0, const unsigned int q1, - const float* matrix) { - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - uint64_t sizej = uint64_t(1) << (q1 + 
1); - uint64_t sizek = uint64_t(1) << (q0 + 1); - - auto rstate = GetRawState(); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 2 * sizek) { - for (uint64_t k = 0; k < sizek; k += 16) { - uint64_t si = i | j | k; - - __m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in; - - uint64_t p = si; - r0 = _mm256_load_ps(rstate + p); - i0 = _mm256_load_ps(rstate + p + 8); - ru = _mm256_set1_ps(matrix[0]); - iu = _mm256_set1_ps(matrix[1]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - p = si | sizek; - r1 = _mm256_load_ps(rstate + p); - i1 = _mm256_load_ps(rstate + p + 8); - ru = _mm256_set1_ps(matrix[2]); - iu = _mm256_set1_ps(matrix[3]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - p = si | sizej; - r2 = _mm256_load_ps(rstate + p); - i2 = _mm256_load_ps(rstate + p + 8); - ru = _mm256_set1_ps(matrix[4]); - iu = _mm256_set1_ps(matrix[5]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - p |= sizek; - r3 = _mm256_load_ps(rstate + p); - i3 = _mm256_load_ps(rstate + p + 8); - ru = _mm256_set1_ps(matrix[6]); - iu = _mm256_set1_ps(matrix[7]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - p = si; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - - ru = _mm256_set1_ps(matrix[8]); - iu = _mm256_set1_ps(matrix[9]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[10]); - iu = _mm256_set1_ps(matrix[11]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = 
_mm256_fmadd_ps(i1, ru, in); - ru = _mm256_set1_ps(matrix[12]); - iu = _mm256_set1_ps(matrix[13]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[14]); - iu = _mm256_set1_ps(matrix[15]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - p = si | sizek; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - - ru = _mm256_set1_ps(matrix[16]); - iu = _mm256_set1_ps(matrix[17]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[18]); - iu = _mm256_set1_ps(matrix[19]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - ru = _mm256_set1_ps(matrix[20]); - iu = _mm256_set1_ps(matrix[21]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[22]); - iu = _mm256_set1_ps(matrix[23]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - p = si | sizej; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - - ru = _mm256_set1_ps(matrix[24]); - iu = _mm256_set1_ps(matrix[25]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[26]); - iu = _mm256_set1_ps(matrix[27]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - ru = _mm256_set1_ps(matrix[28]); - iu = _mm256_set1_ps(matrix[29]); - rn = 
_mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[30]); - iu = _mm256_set1_ps(matrix[31]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - p |= sizek; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - } - } - } -} - -void StateSpaceAVX::ApplyGate2HL(const unsigned int q0, const unsigned int q1, - const float* matrix) { - __m256 mb; - __m256i ml; - - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - uint64_t sizej = uint64_t(1) << (q1 + 1); - - auto rstate = GetRawState(); - - switch (q0) { - case 0: - ml = _mm256_set_epi32(6, 7, 4, 5, 2, 3, 0, 1); - mb = _mm256_castsi256_ps(_mm256_set_epi32(-1, 0, -1, 0, -1, 0, -1, 0)); - break; - case 1: - ml = _mm256_set_epi32(5, 4, 7, 6, 1, 0, 3, 2); - mb = _mm256_castsi256_ps(_mm256_set_epi32(-1, -1, 0, 0, -1, -1, 0, 0)); - break; - case 2: - ml = _mm256_set_epi32(3, 2, 1, 0, 7, 6, 5, 4); - mb = _mm256_castsi256_ps(_mm256_set_epi32(-1, -1, -1, -1, 0, 0, 0, 0)); - break; - } - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 16) { - uint64_t si = i | j; - - __m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - - uint64_t p = si; - - r0 = _mm256_load_ps(rstate + p); - i0 = _mm256_load_ps(rstate + p + 8); - - r1 = _mm256_permutevar8x32_ps(r0, ml); - i1 = _mm256_permutevar8x32_ps(i0, ml); - - p = si | sizej; - - r2 = _mm256_load_ps(rstate + p); - i2 = _mm256_load_ps(rstate + p + 8); - - r3 = _mm256_permutevar8x32_ps(r2, ml); - i3 = _mm256_permutevar8x32_ps(i2, ml); - - ru = _mm256_set1_ps(matrix[0]); - iu = _mm256_set1_ps(matrix[1]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[2]); - iu = _mm256_set1_ps(matrix[3]); - rn 
= _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - ru = _mm256_set1_ps(matrix[4]); - iu = _mm256_set1_ps(matrix[5]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[6]); - iu = _mm256_set1_ps(matrix[7]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - - ru = _mm256_set1_ps(matrix[8]); - iu = _mm256_set1_ps(matrix[9]); - rm = _mm256_mul_ps(r0, ru); - im = _mm256_mul_ps(r0, iu); - rm = _mm256_fnmadd_ps(i0, iu, rm); - im = _mm256_fmadd_ps(i0, ru, im); - ru = _mm256_set1_ps(matrix[10]); - iu = _mm256_set1_ps(matrix[11]); - rm = _mm256_fmadd_ps(r1, ru, rm); - im = _mm256_fmadd_ps(r1, iu, im); - rm = _mm256_fnmadd_ps(i1, iu, rm); - im = _mm256_fmadd_ps(i1, ru, im); - ru = _mm256_set1_ps(matrix[12]); - iu = _mm256_set1_ps(matrix[13]); - rm = _mm256_fmadd_ps(r2, ru, rm); - im = _mm256_fmadd_ps(r2, iu, im); - rm = _mm256_fnmadd_ps(i2, iu, rm); - im = _mm256_fmadd_ps(i2, ru, im); - ru = _mm256_set1_ps(matrix[14]); - iu = _mm256_set1_ps(matrix[15]); - rm = _mm256_fmadd_ps(r3, ru, rm); - im = _mm256_fmadd_ps(r3, iu, im); - rm = _mm256_fnmadd_ps(i3, iu, rm); - im = _mm256_fmadd_ps(i3, ru, im); - - rm = _mm256_permutevar8x32_ps(rm, ml); - im = _mm256_permutevar8x32_ps(im, ml); - rn = _mm256_blendv_ps(rn, rm, mb); - in = _mm256_blendv_ps(in, im, mb); - p = si; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - - ru = _mm256_set1_ps(matrix[16]); - iu = _mm256_set1_ps(matrix[17]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[18]); - iu = _mm256_set1_ps(matrix[19]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, 
in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - ru = _mm256_set1_ps(matrix[20]); - iu = _mm256_set1_ps(matrix[21]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[22]); - iu = _mm256_set1_ps(matrix[23]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - - ru = _mm256_set1_ps(matrix[24]); - iu = _mm256_set1_ps(matrix[25]); - rm = _mm256_mul_ps(r0, ru); - im = _mm256_mul_ps(r0, iu); - rm = _mm256_fnmadd_ps(i0, iu, rm); - im = _mm256_fmadd_ps(i0, ru, im); - ru = _mm256_set1_ps(matrix[26]); - iu = _mm256_set1_ps(matrix[27]); - rm = _mm256_fmadd_ps(r1, ru, rm); - im = _mm256_fmadd_ps(r1, iu, im); - rm = _mm256_fnmadd_ps(i1, iu, rm); - im = _mm256_fmadd_ps(i1, ru, im); - ru = _mm256_set1_ps(matrix[28]); - iu = _mm256_set1_ps(matrix[29]); - rm = _mm256_fmadd_ps(r2, ru, rm); - im = _mm256_fmadd_ps(r2, iu, im); - rm = _mm256_fnmadd_ps(i2, iu, rm); - im = _mm256_fmadd_ps(i2, ru, im); - ru = _mm256_set1_ps(matrix[30]); - iu = _mm256_set1_ps(matrix[31]); - rm = _mm256_fmadd_ps(r3, ru, rm); - im = _mm256_fmadd_ps(r3, iu, im); - rm = _mm256_fnmadd_ps(i3, iu, rm); - im = _mm256_fmadd_ps(i3, ru, im); - - rm = _mm256_permutevar8x32_ps(rm, ml); - im = _mm256_permutevar8x32_ps(im, ml); - rn = _mm256_blendv_ps(rn, rm, mb); - in = _mm256_blendv_ps(in, im, mb); - p = si | sizej; - _mm256_store_ps(rstate + p, rn); - _mm256_store_ps(rstate + p + 8, in); - } - } -} - -void StateSpaceAVX::ApplyGate2LL(const unsigned int q0, const unsigned int q1, - const float* matrix) { - const unsigned int q = q0 + q1; - - __m256 mb1, mb2, mb3; - __m256i ml1, ml2, ml3; - - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - auto rstate = GetRawState(); - - switch (q) { - case 1: - ml1 = _mm256_set_epi32(7, 6, 4, 5, 3, 2, 0, 1); - ml2 = 
_mm256_set_epi32(7, 4, 5, 6, 3, 0, 1, 2); - ml3 = _mm256_set_epi32(4, 6, 5, 7, 0, 2, 1, 3); - mb1 = _mm256_castsi256_ps(_mm256_set_epi32(0, 0, -1, 0, 0, 0, -1, 0)); - mb2 = _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, 0, 0, -1, 0, 0)); - mb3 = _mm256_castsi256_ps(_mm256_set_epi32(-1, 0, 0, 0, -1, 0, 0, 0)); - break; - case 2: - ml1 = _mm256_set_epi32(7, 6, 5, 4, 2, 3, 0, 1); - ml2 = _mm256_set_epi32(7, 2, 5, 0, 3, 6, 1, 4); - ml3 = _mm256_set_epi32(2, 6, 0, 4, 3, 7, 1, 5); - mb1 = _mm256_castsi256_ps(_mm256_set_epi32(0, 0, 0, 0, -1, 0, -1, 0)); - mb2 = _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, -1, 0, 0, 0, 0)); - mb3 = _mm256_castsi256_ps(_mm256_set_epi32(-1, 0, -1, 0, 0, 0, 0, 0)); - break; - case 3: - ml1 = _mm256_set_epi32(7, 6, 5, 4, 1, 0, 3, 2); - ml2 = _mm256_set_epi32(7, 6, 1, 0, 3, 2, 5, 4); - ml3 = _mm256_set_epi32(1, 0, 5, 4, 3, 2, 7, 6); - mb1 = _mm256_castsi256_ps(_mm256_set_epi32(0, 0, 0, 0, -1, -1, 0, 0)); - mb2 = _mm256_castsi256_ps(_mm256_set_epi32(0, 0, -1, -1, 0, 0, 0, 0)); - mb3 = _mm256_castsi256_ps(_mm256_set_epi32(-1, -1, 0, 0, 0, 0, 0, 0)); - break; - } - - for (uint64_t i = 0; i < sizei; i += 16) { - __m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - - auto p = rstate + i; - - r0 = _mm256_load_ps(p); - i0 = _mm256_load_ps(p + 8); - - r1 = _mm256_permutevar8x32_ps(r0, ml1); - i1 = _mm256_permutevar8x32_ps(i0, ml1); - - r2 = _mm256_permutevar8x32_ps(r0, ml2); - i2 = _mm256_permutevar8x32_ps(i0, ml2); - - r3 = _mm256_permutevar8x32_ps(r0, ml3); - i3 = _mm256_permutevar8x32_ps(i0, ml3); - - ru = _mm256_set1_ps(matrix[0]); - iu = _mm256_set1_ps(matrix[1]); - rn = _mm256_mul_ps(r0, ru); - in = _mm256_mul_ps(r0, iu); - rn = _mm256_fnmadd_ps(i0, iu, rn); - in = _mm256_fmadd_ps(i0, ru, in); - ru = _mm256_set1_ps(matrix[2]); - iu = _mm256_set1_ps(matrix[3]); - rn = _mm256_fmadd_ps(r1, ru, rn); - in = _mm256_fmadd_ps(r1, iu, in); - rn = _mm256_fnmadd_ps(i1, iu, rn); - in = _mm256_fmadd_ps(i1, ru, in); - ru = 
_mm256_set1_ps(matrix[4]); - iu = _mm256_set1_ps(matrix[5]); - rn = _mm256_fmadd_ps(r2, ru, rn); - in = _mm256_fmadd_ps(r2, iu, in); - rn = _mm256_fnmadd_ps(i2, iu, rn); - in = _mm256_fmadd_ps(i2, ru, in); - ru = _mm256_set1_ps(matrix[6]); - iu = _mm256_set1_ps(matrix[7]); - rn = _mm256_fmadd_ps(r3, ru, rn); - in = _mm256_fmadd_ps(r3, iu, in); - rn = _mm256_fnmadd_ps(i3, iu, rn); - in = _mm256_fmadd_ps(i3, ru, in); - - ru = _mm256_set1_ps(matrix[8]); - iu = _mm256_set1_ps(matrix[9]); - rm = _mm256_mul_ps(r0, ru); - im = _mm256_mul_ps(r0, iu); - rm = _mm256_fnmadd_ps(i0, iu, rm); - im = _mm256_fmadd_ps(i0, ru, im); - ru = _mm256_set1_ps(matrix[10]); - iu = _mm256_set1_ps(matrix[11]); - rm = _mm256_fmadd_ps(r1, ru, rm); - im = _mm256_fmadd_ps(r1, iu, im); - rm = _mm256_fnmadd_ps(i1, iu, rm); - im = _mm256_fmadd_ps(i1, ru, im); - ru = _mm256_set1_ps(matrix[12]); - iu = _mm256_set1_ps(matrix[13]); - rm = _mm256_fmadd_ps(r2, ru, rm); - im = _mm256_fmadd_ps(r2, iu, im); - rm = _mm256_fnmadd_ps(i2, iu, rm); - im = _mm256_fmadd_ps(i2, ru, im); - ru = _mm256_set1_ps(matrix[14]); - iu = _mm256_set1_ps(matrix[15]); - rm = _mm256_fmadd_ps(r3, ru, rm); - im = _mm256_fmadd_ps(r3, iu, im); - rm = _mm256_fnmadd_ps(i3, iu, rm); - im = _mm256_fmadd_ps(i3, ru, im); - - rm = _mm256_permutevar8x32_ps(rm, ml1); - im = _mm256_permutevar8x32_ps(im, ml1); - rn = _mm256_blendv_ps(rn, rm, mb1); - in = _mm256_blendv_ps(in, im, mb1); - - ru = _mm256_set1_ps(matrix[16]); - iu = _mm256_set1_ps(matrix[17]); - rm = _mm256_mul_ps(r0, ru); - im = _mm256_mul_ps(r0, iu); - rm = _mm256_fnmadd_ps(i0, iu, rm); - im = _mm256_fmadd_ps(i0, ru, im); - ru = _mm256_set1_ps(matrix[18]); - iu = _mm256_set1_ps(matrix[19]); - rm = _mm256_fmadd_ps(r1, ru, rm); - im = _mm256_fmadd_ps(r1, iu, im); - rm = _mm256_fnmadd_ps(i1, iu, rm); - im = _mm256_fmadd_ps(i1, ru, im); - ru = _mm256_set1_ps(matrix[20]); - iu = _mm256_set1_ps(matrix[21]); - rm = _mm256_fmadd_ps(r2, ru, rm); - im = _mm256_fmadd_ps(r2, iu, im); - rm = 
_mm256_fnmadd_ps(i2, iu, rm); - im = _mm256_fmadd_ps(i2, ru, im); - ru = _mm256_set1_ps(matrix[22]); - iu = _mm256_set1_ps(matrix[23]); - rm = _mm256_fmadd_ps(r3, ru, rm); - im = _mm256_fmadd_ps(r3, iu, im); - rm = _mm256_fnmadd_ps(i3, iu, rm); - im = _mm256_fmadd_ps(i3, ru, im); - - rm = _mm256_permutevar8x32_ps(rm, ml2); - im = _mm256_permutevar8x32_ps(im, ml2); - rn = _mm256_blendv_ps(rn, rm, mb2); - in = _mm256_blendv_ps(in, im, mb2); - - ru = _mm256_set1_ps(matrix[24]); - iu = _mm256_set1_ps(matrix[25]); - rm = _mm256_mul_ps(r0, ru); - im = _mm256_mul_ps(r0, iu); - rm = _mm256_fnmadd_ps(i0, iu, rm); - im = _mm256_fmadd_ps(i0, ru, im); - ru = _mm256_set1_ps(matrix[26]); - iu = _mm256_set1_ps(matrix[27]); - rm = _mm256_fmadd_ps(r1, ru, rm); - im = _mm256_fmadd_ps(r1, iu, im); - rm = _mm256_fnmadd_ps(i1, iu, rm); - im = _mm256_fmadd_ps(i1, ru, im); - ru = _mm256_set1_ps(matrix[28]); - iu = _mm256_set1_ps(matrix[29]); - rm = _mm256_fmadd_ps(r2, ru, rm); - im = _mm256_fmadd_ps(r2, iu, im); - rm = _mm256_fnmadd_ps(i2, iu, rm); - im = _mm256_fmadd_ps(i2, ru, im); - ru = _mm256_set1_ps(matrix[30]); - iu = _mm256_set1_ps(matrix[31]); - rm = _mm256_fmadd_ps(r3, ru, rm); - im = _mm256_fmadd_ps(r3, iu, im); - rm = _mm256_fnmadd_ps(i3, iu, rm); - im = _mm256_fmadd_ps(i3, ru, im); - - rm = _mm256_permutevar8x32_ps(rm, ml3); - im = _mm256_permutevar8x32_ps(im, ml3); - rn = _mm256_blendv_ps(rn, rm, mb3); - in = _mm256_blendv_ps(in, im, mb3); - - _mm256_store_ps(p, rn); - _mm256_store_ps(p + 8, in); - } -} - -} // namespace qsim -} // namespace tfq - -#endif diff --git a/tensorflow_quantum/core/qsim/state_space_avx.h b/tensorflow_quantum/core/qsim/state_space_avx.h deleted file mode 100644 index ccfbc25d1..000000000 --- a/tensorflow_quantum/core/qsim/state_space_avx.h +++ /dev/null @@ -1,86 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_STATE_SPACE_AVX_H_ -#define TFQ_CORE_QSIM_STATE_SPACE_AVX_H_ - -#include - -#include -#include - -#include "tensorflow_quantum/core/qsim/state_space.h" - -namespace tfq { -namespace qsim { - -class StateSpaceAVX : public StateSpace { - public: - StateSpaceAVX(const uint64_t num_qubits, const uint64_t num_threads); - - virtual ~StateSpaceAVX(); - - StateSpaceType GetType() const override; - - // Reserve the memory associated with the state in this space - virtual void CreateState() override; - - // Free the memory associated with the state in this space - virtual void DeleteState() override; - - // Return a pointer to a copy of this StateSpace. - // NOTE: user is responsible for deleting the returned copy. - virtual StateSpace* Clone() const override; - - // Copy the state information from another statespace. - // Assumes the state has been initialized/created. - virtual void CopyFrom(const StateSpace& other) const override; - - // Function to apply a two qubit gate to the state on indices q0 and q1. - virtual void ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* matrix) override; - - // Function to apply a one-qubit gate if there is only one qubit in the state. - // Implementations are given the option to return an error. 
- virtual tensorflow::Status ApplyGate1(const float* matrix) override; - - // Set all entries in the state to zero - virtual void SetStateZero() override; - - // Get the inner product between this state and the state in `other` - virtual float GetRealInnerProduct(const StateSpace& other) const override; - - // Get the amplitude at the given state index - virtual std::complex GetAmpl(const uint64_t i) const override; - - // Set the amplitude at the given state index - virtual void SetAmpl(const uint64_t i, - const std::complex& val) override; - - private: - void ApplyGate2HH(const unsigned int q0, const unsigned int q1, - const float* matrix); - - void ApplyGate2HL(const unsigned int q0, const unsigned int q1, - const float* matrix); - - void ApplyGate2LL(const unsigned int q0, const unsigned int q1, - const float* matrix); -}; - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_STATE_SPACE_AVX_H_ diff --git a/tensorflow_quantum/core/qsim/state_space_slow.cc b/tensorflow_quantum/core/qsim/state_space_slow.cc deleted file mode 100644 index 0baee6d97..000000000 --- a/tensorflow_quantum/core/qsim/state_space_slow.cc +++ /dev/null @@ -1,175 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/state_space_slow.h" - -#include -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/matrix.h" - -namespace tfq { -namespace qsim { - -StateSpaceSlow::StateSpaceSlow(const uint64_t num_qubits, - const uint64_t num_threads) - : StateSpace(num_qubits, num_threads) {} - -StateSpaceSlow::~StateSpaceSlow() { DeleteState(); } - -StateSpaceType StateSpaceSlow::GetType() const { return StateSpaceType::SLOW; } - -void StateSpaceSlow::CreateState() { - state_ = (float*)malloc(sizeof(float) * size_); -} - -void StateSpaceSlow::DeleteState() { free(state_); } - -StateSpace* StateSpaceSlow::Clone() const { - StateSpaceSlow* state_copy = - new StateSpaceSlow(GetNumQubits(), GetNumThreads()); - return state_copy; -} - -void StateSpaceSlow::CopyFrom(const StateSpace& other) const { - auto state = GetRawState(); - auto other_state = other.GetRawState(); - for (uint64_t i = 0; i < size_; i++) { - state[i] = other_state[i]; - } -} - -void StateSpaceSlow::ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* m) { - // Assume q0 < q1. 
- uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - uint64_t sizej = uint64_t(1) << (q1 + 1); - uint64_t sizek = uint64_t(1) << (q0 + 1); - - auto data = GetRawState(); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 2 * sizek) { - for (uint64_t k = 0; k < sizek; k += 2) { - uint64_t si = i | j | k; - - uint64_t p = si; - float s0r = data[p + 0]; - float s0i = data[p + 1]; - p = si | sizek; - float s1r = data[p + 0]; - float s1i = data[p + 1]; - p = si | sizej; - float s2r = data[p + 0]; - float s2i = data[p + 1]; - p |= sizek; - float s3r = data[p + 0]; - float s3i = data[p + 1]; - - p = si; - data[p + 0] = s0r * m[0] - s0i * m[1] + s1r * m[2] - s1i * m[3] + - s2r * m[4] - s2i * m[5] + s3r * m[6] - s3i * m[7]; - data[p + 1] = s0r * m[1] + s0i * m[0] + s1r * m[3] + s1i * m[2] + - s2r * m[5] + s2i * m[4] + s3r * m[7] + s3i * m[6]; - p = si | sizek; - data[p + 0] = s0r * m[8] - s0i * m[9] + s1r * m[10] - s1i * m[11] + - s2r * m[12] - s2i * m[13] + s3r * m[14] - s3i * m[15]; - data[p + 1] = s0r * m[9] + s0i * m[8] + s1r * m[11] + s1i * m[10] + - s2r * m[13] + s2i * m[12] + s3r * m[15] + s3i * m[14]; - p = si | sizej; - data[p + 0] = s0r * m[16] - s0i * m[17] + s1r * m[18] - s1i * m[19] + - s2r * m[20] - s2i * m[21] + s3r * m[22] - s3i * m[23]; - data[p + 1] = s0r * m[17] + s0i * m[16] + s1r * m[19] + s1i * m[18] + - s2r * m[21] + s2i * m[20] + s3r * m[23] + s3i * m[22]; - p |= sizek; - data[p + 0] = s0r * m[24] - s0i * m[25] + s1r * m[26] - s1i * m[27] + - s2r * m[28] - s2i * m[29] + s3r * m[30] - s3i * m[31]; - data[p + 1] = s0r * m[25] + s0i * m[24] + s1r * m[27] + s1i * m[26] + - s2r * m[29] + s2i * m[28] + s3r * m[31] + s3i * m[30]; - } - } - } -} - -tensorflow::Status StateSpaceSlow::ApplyGate1(const float* matrix) { - // Workaround function to apply single qubit gates if the - // circuit only has one qubit. 
- - float r_0, i_0, r_1, i_1; - - auto data = GetRawState(); - - r_0 = data[0] * matrix[0] - data[1] * matrix[1] + data[2] * matrix[2] - - data[3] * matrix[3]; - i_0 = data[0] * matrix[1] + data[1] * matrix[0] + data[2] * matrix[3] + - data[3] * matrix[2]; - - r_1 = data[0] * matrix[4] - data[1] * matrix[5] + data[2] * matrix[6] - - data[3] * matrix[7]; - i_1 = data[0] * matrix[5] + data[1] * matrix[4] + data[2] * matrix[7] + - data[3] * matrix[6]; - - data[0] = r_0; - data[1] = i_0; - data[2] = r_1; - data[3] = i_1; - - return tensorflow::Status::OK(); -} - -void StateSpaceSlow::SetStateZero() { - //#pragma omp parallel for num_threads(num_threads_) - auto data = GetRawState(); - for (uint64_t i = 0; i < size_; ++i) { - data[i] = 0; - } - data[0] = 1; -} - -float StateSpaceSlow::GetRealInnerProduct(const StateSpace& other) const { - uint64_t size2 = GetDimension(); - double result = 0.0; - - // Currently not a thread safe implementation of inner product! - for (uint64_t i = 0; i < size2; ++i) { - const std::complex amp_a = GetAmpl(i); - const std::complex amp_other = other.GetAmpl(i); - - const std::complex amp_a_d = std::complex( - static_cast(amp_a.real()), static_cast(amp_a.imag())); - - const std::complex amp_other_d = - std::complex(static_cast(amp_other.real()), - static_cast(amp_other.imag())); - - result += (std::conj(amp_a_d) * amp_other_d).real(); - } - - return static_cast(result); -} - -std::complex StateSpaceSlow::GetAmpl(const uint64_t i) const { - return std::complex(GetRawState()[2 * i], GetRawState()[2 * i + 1]); -} - -void StateSpaceSlow::SetAmpl(const uint64_t i, const std::complex& val) { - GetRawState()[2 * i] = val.real(); - GetRawState()[2 * i + 1] = val.imag(); -} - -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/state_space_slow.h b/tensorflow_quantum/core/qsim/state_space_slow.h deleted file mode 100644 index 8261098e4..000000000 --- a/tensorflow_quantum/core/qsim/state_space_slow.h +++ /dev/null @@ 
-1,74 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_STATE_SPACE_SLOW_H_ -#define TFQ_CORE_QSIM_STATE_SPACE_SLOW_H_ - -#include -#include - -#include "tensorflow_quantum/core/qsim/state_space.h" - -namespace tfq { -namespace qsim { - -class StateSpaceSlow : public StateSpace { - public: - StateSpaceSlow(const uint64_t num_qubits, const uint64_t num_threads); - - virtual ~StateSpaceSlow(); - - StateSpaceType GetType() const override; - - // Reserve the memory associated with the state in this space - virtual void CreateState() override; - - // Free the memory associated with the state in this space - virtual void DeleteState() override; - - // Return a pointer to a copy of this StateSpace. - // NOTE: user is responsible for deleting the returned copy. - virtual StateSpace* Clone() const override; - - // Copy the state information from another statespace. - // Assumes the state has been initialized/created. - virtual void CopyFrom(const StateSpace& other) const override; - - // Function to apply a two qubit gate to the state on indices q0 and q1. - virtual void ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* matrix) override; - - // Function to apply a one-qubit gate if there is only one qubit in the state. - // Implementations are given the option to return an error. 
- virtual tensorflow::Status ApplyGate1(const float* matrix) override; - - // Set all entries in the state to zero - virtual void SetStateZero() override; - - // Get the inner product between this state and the state in `other` - virtual float GetRealInnerProduct(const StateSpace& other) const override; - - // Get the amplitude at the given state index - virtual std::complex GetAmpl(const uint64_t i) const override; - - // Set the amplitude at the given state index - virtual void SetAmpl(const uint64_t i, - const std::complex& val) override; -}; - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_STATE_SPACE_SLOW_H_ diff --git a/tensorflow_quantum/core/qsim/state_space_sse.cc b/tensorflow_quantum/core/qsim/state_space_sse.cc deleted file mode 100644 index f833091c0..000000000 --- a/tensorflow_quantum/core/qsim/state_space_sse.cc +++ /dev/null @@ -1,4471 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifdef __SSE4_1__ - -#include "tensorflow_quantum/core/qsim/state_space_sse.h" - -#include -#include - -#include -#include - -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/qsim/util.h" - -namespace tfq { -namespace qsim { - -StateSpaceSSE::StateSpaceSSE(const uint64_t num_qubits, - const uint64_t num_threads) - : StateSpace(num_qubits, num_threads) {} - -StateSpaceSSE::~StateSpaceSSE() { DeleteState(); } - -StateSpaceType StateSpaceSSE::GetType() const { return StateSpaceType::SSE; } - -void StateSpaceSSE::CreateState() { - state_ = (float*)qsim::_aligned_malloc(sizeof(float) * size_); -} - -void StateSpaceSSE::DeleteState() { qsim::_aligned_free(state_); } - -StateSpace* StateSpaceSSE::Clone() const { - StateSpaceSSE* state_copy = - new StateSpaceSSE(GetNumQubits(), GetNumThreads()); - return state_copy; -} - -void StateSpaceSSE::CopyFrom(const StateSpace& other) const { - auto data = GetRawState(); - auto copy_data = other.GetRawState(); - - uint64_t size2 = GetDimension() / 8; - __m128 tmp1, tmp2, tmp3, tmp4; - for (uint64_t i = 0; i < size2; ++i) { - tmp1 = _mm_load_ps(copy_data + 16 * i); - tmp2 = _mm_load_ps(copy_data + 16 * i + 4); - tmp3 = _mm_load_ps(copy_data + 16 * i + 8); - tmp4 = _mm_load_ps(copy_data + 16 * i + 12); - - _mm_store_ps(data + 16 * i, tmp1); - _mm_store_ps(data + 16 * i + 4, tmp2); - _mm_store_ps(data + 16 * i + 8, tmp3); - _mm_store_ps(data + 16 * i + 12, tmp4); - } -} - -void StateSpaceSSE::ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* m) { - // Assume q0 < q1. 
- if (q0 > 2) { - ApplyGate2HH(q0, q1, m); - } else if (q1 > 2) { - ApplyGate2HL(q0, q1, m); - } else { - ApplyGate2LL(q0, q1, m); - } -} - -tensorflow::Status StateSpaceSSE::ApplyGate1(const float* matrix) { - return tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "SSE simulator doesn't support small circuits."); -} - -void StateSpaceSSE::SetStateZero() { - uint64_t size2 = GetDimension() / 8; - - //__m256 val0 = _mm256_setzero_ps(); - __m128 val0 = _mm_setzero_ps(); - - auto data = GetRawState(); - - for (uint64_t i = 0; i < size2; ++i) { - //_mm256_store_ps(state.get() + 16 * i, val0); - //_mm256_store_ps(state.get() + 16 * i + 8, val0); - _mm_store_ps(data + 16 * i, val0); - _mm_store_ps(data + 16 * i + 4, val0); - _mm_store_ps(data + 16 * i + 8, val0); - _mm_store_ps(data + 16 * i + 12, val0); - } - - data[0] = 1; -} - -float StateSpaceSSE::GetRealInnerProduct(const StateSpace& other) const { - uint64_t size2 = GetDimension() / 4; - __m128d expv_0 = _mm_setzero_pd(); - __m128d expv_1 = _mm_setzero_pd(); - __m128d temp = _mm_setzero_pd(); - __m128d rs_0, rs_1, is_0, is_1; - - auto statea = GetRawState(); - auto stateb = other.GetRawState(); - - //#pragma omp parallel for num_threads(num_threads_) - // Currently not a thread safe implementation of inner product! 
- for (uint64_t i = 0; i < size2; ++i) { - // rs = _mm256_cvtps_pd(_mm_load_ps(statea + 8 * i)); - rs_0 = _mm_cvtps_pd(_mm_load_ps(statea + 8 * i)); - rs_1 = _mm_cvtps_pd(_mm_load_ps(statea + 8 * i + 2)); - - // is = _mm256_cvtps_pd(_mm_load_ps(stateb + 8 * i)); - is_0 = _mm_cvtps_pd(_mm_load_ps(stateb + 8 * i)); - is_1 = _mm_cvtps_pd(_mm_load_ps(stateb + 8 * i + 2)); - - // expv = _mm256_fmadd_pd(rs, is, expv); - temp = _mm_mul_pd(rs_0, is_0); - expv_0 = _mm_add_pd(expv_0, temp); - temp = _mm_mul_pd(rs_1, is_1); - expv_1 = _mm_add_pd(expv_1, temp); - - // rs = _mm256_cvtps_pd(_mm_load_ps(statea + 8 * i + 4)); - rs_0 = _mm_cvtps_pd(_mm_load_ps(statea + 8 * i + 4)); - rs_1 = _mm_cvtps_pd(_mm_load_ps(statea + 8 * i + 6)); - - // is = _mm256_cvtps_pd(_mm_load_ps(stateb + 8 * i + 4)); - is_0 = _mm_cvtps_pd(_mm_load_ps(stateb + 8 * i + 4)); - is_1 = _mm_cvtps_pd(_mm_load_ps(stateb + 8 * i + 6)); - - // expv = _mm256_fmadd_pd(rs, is, expv); - temp = _mm_mul_pd(rs_0, is_0); - expv_0 = _mm_add_pd(expv_0, temp); - temp = _mm_mul_pd(rs_1, is_1); - expv_1 = _mm_add_pd(expv_1, temp); - } - double buffer[4]; - _mm_storeu_pd(buffer, expv_0); - _mm_storeu_pd(buffer + 2, expv_1); - return (float)(buffer[0] + buffer[1] + buffer[2] + buffer[3]); -} - -std::complex StateSpaceSSE::GetAmpl(const uint64_t i) const { - uint64_t p = (16 * (i / 8)) + (i % 8); - return std::complex(GetRawState()[p], GetRawState()[p + 8]); -} - -void StateSpaceSSE::SetAmpl(const uint64_t i, const std::complex& val) { - uint64_t p = (16 * (i / 8)) + (i % 8); - GetRawState()[p] = val.real(); - GetRawState()[p + 8] = val.imag(); -} - -void StateSpaceSSE::ApplyGate2HH(const unsigned int q0, const unsigned int q1, - const float* matrix) { - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - uint64_t sizej = uint64_t(1) << (q1 + 1); - uint64_t sizek = uint64_t(1) << (q0 + 1); - - auto rstate = GetRawState(); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 2 * 
sizek) { - for (uint64_t k = 0; k < sizek; k += 16) { - uint64_t si = i | j | k; - - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, rn_0, - in_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, rn_1, - in_1; - - // use this for all fmadd and fnmadd replacements. - __m128 temp; - - uint64_t p = si; - // r0 = _mm256_load_ps(rstate + p); - r0_0 = _mm_load_ps(rstate + p); - r0_1 = _mm_load_ps(rstate + p + 4); - - // i0 = _mm256_load_ps(rstate + p + 8); - i0_0 = _mm_load_ps(rstate + p + 8); - i0_1 = _mm_load_ps(rstate + p + 12); - - // Can we get rid of ru duplicates ? - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p = si | sizek; - - // r1 = _mm256_load_ps(rstate + p); - r1_0 = _mm_load_ps(rstate + p); - r1_1 = _mm_load_ps(rstate + p + 4); - - // i1 = _mm256_load_ps(rstate + p + 8); - i1_0 = _mm_load_ps(rstate + p + 8); - i1_1 = _mm_load_ps(rstate + p + 12); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = _mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - 
temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p = si | sizej; - // r2 = _mm256_load_ps(rstate + p); - r2_0 = _mm_load_ps(rstate + p); - r2_1 = _mm_load_ps(rstate + p + 4); - - // i2 = _mm256_load_ps(rstate + p + 8); - i2_0 = _mm_load_ps(rstate + p + 8); - i2_1 = _mm_load_ps(rstate + p + 12); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p |= sizek; - - // r3 = _mm256_load_ps(rstate + p); - r3_0 = _mm_load_ps(rstate + p); - r3_1 = _mm_load_ps(rstate + p + 4); - - // i3 = _mm256_load_ps(rstate + p + 8); - i3_0 = _mm_load_ps(rstate + p + 8); - i3_1 = _mm_load_ps(rstate + p + 12); - - // ru = 
_mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p = si; - - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = 
_mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp 
= _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p = si | sizek; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = 
_mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = 
_mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p = si | sizej; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = 
_mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = _mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = 
_mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - p |= sizek; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - } - } - } -} - -void StateSpaceSSE::ApplyGate2LL(const unsigned int q0, const unsigned int q1, - const float* matrix) { - const unsigned int q = q0 + q1; - - //__m256 mb1, mb2, mb3; - __m128 mb1, mb2, mb3; - //__m256i ml1, ml2, ml3; - - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - auto rstate = GetRawState(); - - switch (q) { - case 1: - // ml1 = _mm256_set_epi32(7, 6, 4, 5, 3, 2, 0, 1); - // ml2 = _mm256_set_epi32(7, 4, 5, 6, 3, 0, 1, 2); - // ml3 = _mm256_set_epi32(4, 6, 5, 7, 0, 2, 1, 3); - mb1 = _mm_castsi128_ps(_mm_set_epi32(0, 0, -1, 0)); - mb2 = _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, 0)); - mb3 = _mm_castsi128_ps(_mm_set_epi32(-1, 0, 0, 0)); - - // mb2 = _mm256_castsi256_ps(_mm256_set_epi32(0, -1, 0, 0, 0, -1, 0, - // 0)); mb3 = _mm256_castsi256_ps(_mm256_set_epi32(-1, 0, 0, 0, -1, 0, - // 0, 0)); - - for (uint64_t i = 0; i < sizei; i += 16) { - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, rn_0, - in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, rn_1, - in_1, rm_1, im_1; - - // holder for fnmadd and fmadd. 
- __m128 temp; - - auto p = rstate + i; - - // r0 = _mm256_load_ps(p); - r0_0 = _mm_load_ps(p); - r0_1 = _mm_load_ps(p + 4); - - // i0 = _mm256_load_ps(p + 8); - i0_0 = _mm_load_ps(p + 8); - i0_1 = _mm_load_ps(p + 8 + 4); - - // r1 = _mm256_permutevar8x32_ps(r0, ml1); - r1_0 = _mm_shuffle_ps(r0_0, r0_0, 225); - r1_1 = _mm_shuffle_ps(r0_1, r0_1, 225); - - // i1 = _mm256_permutevar8x32_ps(i0, ml1); - i1_0 = _mm_shuffle_ps(i0_0, i0_0, 225); - i1_1 = _mm_shuffle_ps(i0_1, i0_1, 225); - - // r2 = _mm256_permutevar8x32_ps(r0, ml2); - r2_0 = _mm_shuffle_ps(r0_0, r0_0, 198); - r2_1 = _mm_shuffle_ps(r0_1, r0_1, 198); - - // i2 = _mm256_permutevar8x32_ps(i0, ml2); - i2_0 = _mm_shuffle_ps(i0_0, i0_0, 198); - i2_1 = _mm_shuffle_ps(i0_1, i0_1, 198); - - // r3 = _mm256_permutevar8x32_ps(r0, ml3); - r3_0 = _mm_shuffle_ps(r0_0, r0_0, 39); - r3_1 = _mm_shuffle_ps(r0_1, r0_1, 39); - - // i3 = _mm256_permutevar8x32_ps(i0, ml3); - i3_0 = _mm_shuffle_ps(i0_0, i0_0, 39); - i3_1 = _mm_shuffle_ps(i0_1, i0_1, 39); - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = _mm_set1_ps(matrix[3]); - - // rn = 
_mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, 
iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = 
_mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = 
_mm256_permutevar8x32_ps(rm, ml1); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 225); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 225); - - // im = _mm256_permutevar8x32_ps(im, ml1); - im_0 = _mm_shuffle_ps(im_0, im_0, 225); - im_1 = _mm_shuffle_ps(im_1, im_1, 225); - - // rn = _mm256_blendv_ps(rn, rm, mb1); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb1); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb1); - - // in = _mm256_blendv_ps(in, im, mb1); - in_0 = _mm_blendv_ps(in_0, im_0, mb1); - in_1 = _mm_blendv_ps(in_1, im_1, mb1); - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, 
temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - 
- // rm = _mm256_permutevar8x32_ps(rm, ml2); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 198); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 198); - - // im = _mm256_permutevar8x32_ps(im, ml2); - im_0 = _mm_shuffle_ps(im_0, im_0, 198); - im_1 = _mm_shuffle_ps(im_1, im_1, 198); - - // rn = _mm256_blendv_ps(rn, rm, mb2); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb2); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb2); - - // in = _mm256_blendv_ps(in, im, mb2); - in_0 = _mm_blendv_ps(in_0, im_0, mb2); - in_1 = _mm_blendv_ps(in_1, im_1, mb2); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = 
_mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = _mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = 
_mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml3); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 39); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 39); - - // im = _mm256_permutevar8x32_ps(im, ml3); - im_0 = _mm_shuffle_ps(im_0, im_0, 39); - im_1 = _mm_shuffle_ps(im_1, im_1, 39); - - // rn = _mm256_blendv_ps(rn, rm, mb3); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb3); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb3); - - // in = _mm256_blendv_ps(in, im, mb3); - in_0 = _mm_blendv_ps(in_0, im_0, mb3); - in_1 = _mm_blendv_ps(in_1, im_1, mb3); - - //_mm256_store_ps(p, rn); - _mm_store_ps(p, rn_0); - _mm_store_ps(p + 4, rn_1); - - //_mm256_store_ps(p + 8, in); - _mm_store_ps(p + 8, in_0); - _mm_store_ps(p + 12, in_1); - } - - break; - case 2: - mb1 = _mm_castsi128_ps(_mm_set_epi32(-1, 0, -1, 0)); - mb2 = _mm_castsi128_ps(_mm_set_epi32(0, -1, 0, -1)); - mb3 = _mm_castsi128_ps(_mm_set_epi32(-1, 0, -1, 0)); - - for (uint64_t i = 0; i < sizei; i += 16) { - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, rn_0, - in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, rn_1, - in_1, rm_1, im_1; - - // holder for fnmadd and fmadd. 
- __m128 temp, temp2; - - auto p = rstate + i; - - // r0 = _mm256_load_ps(p); - r0_0 = _mm_load_ps(p); - r0_1 = _mm_load_ps(p + 4); - - // i0 = _mm256_load_ps(p + 8); - i0_0 = _mm_load_ps(p + 8); - i0_1 = _mm_load_ps(p + 8 + 4); - - // r1 = _mm256_permutevar8x32_ps(r0, ml1); - r1_0 = _mm_shuffle_ps(r0_0, r0_0, 177); - r1_1 = r0_1; - - // i1 = _mm256_permutevar8x32_ps(i0, ml1); - i1_0 = _mm_shuffle_ps(i0_0, i0_0, 177); - i1_1 = i0_1; - - // r2 = _mm256_permutevar8x32_ps(r0, ml2); - r2_0 = _mm_shuffle_ps(r0_0, r0_1, 141); - r2_0 = _mm_shuffle_ps(r2_0, r2_0, 114); - - r2_1 = _mm_shuffle_ps(r0_0, r0_1, 216); - r2_1 = _mm_shuffle_ps(r2_1, r2_1, 216); - - // i2 = _mm256_permutevar8x32_ps(i0, ml2); - i2_0 = _mm_shuffle_ps(i0_0, i0_1, 141); - i2_0 = _mm_shuffle_ps(i2_0, i2_0, 114); - - i2_1 = _mm_shuffle_ps(i0_1, i0_1, 216); - i2_1 = _mm_shuffle_ps(i2_1, i2_1, 216); - - // r3 = _mm256_permutevar8x32_ps(r0, ml3); - r3_0 = _mm_shuffle_ps(r0_0, r0_1, 221); - r3_0 = _mm_shuffle_ps(r3_0, r3_0, 114); - - r3_1 = _mm_shuffle_ps(r0_0, r0_1, 136); - r3_1 = _mm_shuffle_ps(r3_1, r3_1, 114); - - // i3 = _mm256_permutevar8x32_ps(i0, ml3); - i3_0 = _mm_shuffle_ps(i0_0, i0_1, 221); - i3_0 = _mm_shuffle_ps(i3_0, i3_0, 114); - - i3_1 = _mm_shuffle_ps(i0_0, i0_1, 136); - i3_1 = _mm_shuffle_ps(i3_1, i3_1, 114); - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - 
temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = _mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = 
_mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = 
_mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = 
_mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml1); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 177); - // rm_1 = nothing - - // im = _mm256_permutevar8x32_ps(im, ml1); - im_0 = _mm_shuffle_ps(im_0, im_0, 177); - // im_1 = nothing - - // rn = _mm256_blendv_ps(rn, rm, mb1); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb1); - // rn_1 = rn_1; - - // in = _mm256_blendv_ps(in, im, mb1); - in_0 = _mm_blendv_ps(in_0, im_0, mb1); - // in_1 = in_1; - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = 
_mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = 
_mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml2); - temp = _mm_shuffle_ps(rm_0, rm_1, 141); - temp2 = _mm_shuffle_ps(temp, temp, 114); - - temp = _mm_shuffle_ps(rm_0, rm_1, 216); - rm_1 = _mm_shuffle_ps(temp, temp, 216); - rm_0 = temp2; - - // im = _mm256_permutevar8x32_ps(im, ml2); - temp = _mm_shuffle_ps(im_0, im_1, 141); - temp2 = _mm_shuffle_ps(temp, temp, 114); - - temp = _mm_shuffle_ps(im_0, im_1, 216); - im_1 = _mm_shuffle_ps(temp, temp, 216); - im_0 = temp2; - - // rn = _mm256_blendv_ps(rn, rm, mb2); - // rn_0 = rn_0; - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb2); - - // in = _mm256_blendv_ps(in, im, mb2); - // in_0 = in_0; - in_1 = _mm_blendv_ps(in_1, im_1, mb2); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = 
_mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = _mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = 
_mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml3); - temp = _mm_shuffle_ps(rm_0, rm_1, 221); - temp2 = _mm_shuffle_ps(temp, temp, 114); - - temp = _mm_shuffle_ps(rm_0, rm_1, 136); - rm_1 = _mm_shuffle_ps(temp, temp, 114); - rm_0 = temp2; - - // im = _mm256_permutevar8x32_ps(im, ml3); - temp = _mm_shuffle_ps(im_0, im_1, 221); - temp2 = _mm_shuffle_ps(temp, temp, 114); - - temp = _mm_shuffle_ps(im_0, im_1, 136); - im_1 = _mm_shuffle_ps(temp, temp, 114); - im_0 = temp2; - - // rn = _mm256_blendv_ps(rn, rm, mb3); - // rn_0 = rn_0; - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb3); - - // in = _mm256_blendv_ps(in, im, mb3); - // in_0 = in_0; - in_1 = _mm_blendv_ps(in_1, im_1, mb3); - - //_mm256_store_ps(p, rn); - _mm_store_ps(p, rn_0); - _mm_store_ps(p + 4, rn_1); - - //_mm256_store_ps(p + 8, in); - _mm_store_ps(p + 8, in_0); - _mm_store_ps(p + 12, in_1); - } - - break; - case 3: - mb1 = _mm_castsi128_ps(_mm_set_epi32(-1, -1, 0, 0)); - mb2 = _mm_castsi128_ps(_mm_set_epi32(0, 0, -1, -1)); - mb3 = _mm_castsi128_ps(_mm_set_epi32(-1, -1, 0, 0)); - - for (uint64_t i = 0; i < sizei; i += 16) { - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, rn_0, - in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, rn_1, - in_1, rm_1, im_1; - - // holder for fnmadd and fmadd. 
- __m128 temp, temp2; - - auto p = rstate + i; - - // r0 = _mm256_load_ps(p); - r0_0 = _mm_load_ps(p); - r0_1 = _mm_load_ps(p + 4); - - // i0 = _mm256_load_ps(p + 8); - i0_0 = _mm_load_ps(p + 8); - i0_1 = _mm_load_ps(p + 8 + 4); - - // r1 = _mm256_permutevar8x32_ps(r0, ml1); - r1_0 = _mm_shuffle_ps(r0_0, r0_0, 78); - r1_1 = r0_1; - - // i1 = _mm256_permutevar8x32_ps(i0, ml1); - i1_0 = _mm_shuffle_ps(i0_0, i0_0, 78); - i1_1 = i0_1; - - // r2 = _mm256_permutevar8x32_ps(r0, ml2); - r2_0 = _mm_shuffle_ps(r0_0, r0_1, 78); - r2_0 = _mm_shuffle_ps(r2_0, r2_0, 78); - - r2_1 = _mm_shuffle_ps(r0_0, r0_1, 228); - - // i2 = _mm256_permutevar8x32_ps(i0, ml2); - i2_0 = _mm_shuffle_ps(i0_0, i0_1, 78); - i2_0 = _mm_shuffle_ps(i2_0, i2_0, 78); - - i2_1 = _mm_shuffle_ps(i0_0, i0_1, 228); - - // r3 = _mm256_permutevar8x32_ps(r0, ml3); - r3_0 = _mm_shuffle_ps(r0_0, r0_1, 238); - r3_0 = _mm_shuffle_ps(r3_0, r3_0, 78); - - r3_1 = _mm_shuffle_ps(r0_0, r0_1, 68); - r3_1 = _mm_shuffle_ps(r3_1, r3_1, 78); - - // i3 = _mm256_permutevar8x32_ps(i0, ml3); - i3_0 = _mm_shuffle_ps(i0_0, i0_1, 238); - i3_0 = _mm_shuffle_ps(i3_0, i3_0, 78); - - i3_1 = _mm_shuffle_ps(i0_0, i0_1, 68); - i3_1 = _mm_shuffle_ps(i3_1, i3_1, 78); - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = 
_mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = _mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = 
_mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, 
iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); 
- rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml1); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 78); - // rm_1 = nothing - - // im = _mm256_permutevar8x32_ps(im, ml1); - im_0 = _mm_shuffle_ps(im_0, im_0, 78); - // im_1 = nothing - - // rn = _mm256_blendv_ps(rn, rm, mb1); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb1); - // rn_1 = rn_1; - - // in = _mm256_blendv_ps(in, im, mb1); - in_0 = _mm_blendv_ps(in_0, im_0, mb1); - // in_1 = in_1; - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - 
temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = 
_mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml2); - temp = _mm_shuffle_ps(rm_0, rm_1, 78); - temp2 = _mm_shuffle_ps(temp, temp, 78); - // re-order this to save an instruction ? - rm_1 = _mm_shuffle_ps(rm_0, rm_1, 228); - rm_0 = temp2; - - // im = _mm256_permutevar8x32_ps(im, ml2); - temp = _mm_shuffle_ps(im_0, im_1, 78); - temp2 = _mm_shuffle_ps(temp, temp, 78); - // re-order this to save an instruction ? - im_1 = _mm_shuffle_ps(im_0, im_1, 228); - im_0 = temp2; - - // rn = _mm256_blendv_ps(rn, rm, mb2); - // rn_0 = rn_0; - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb2); - - // in = _mm256_blendv_ps(in, im, mb2); - // in_0 = in_0; - in_1 = _mm_blendv_ps(in_1, im_1, mb2); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = 
_mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = _mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = 
_mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml3); - temp = _mm_shuffle_ps(rm_0, rm_1, 238); - temp2 = _mm_shuffle_ps(temp, temp, 78); - - temp = _mm_shuffle_ps(rm_0, rm_1, 68); - rm_1 = _mm_shuffle_ps(temp, temp, 78); - rm_0 = temp2; - - // im = _mm256_permutevar8x32_ps(im, ml3); - temp = _mm_shuffle_ps(im_0, im_1, 238); - temp2 = _mm_shuffle_ps(temp, temp, 78); - - temp = _mm_shuffle_ps(im_0, im_1, 68); - im_1 = _mm_shuffle_ps(temp, temp, 78); - im_0 = temp2; - - // rn = _mm256_blendv_ps(rn, rm, mb3); - // rn_0 = rn_0; - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb3); - - // in = _mm256_blendv_ps(in, im, mb3); - // in_0 = in_0; - in_1 = _mm_blendv_ps(in_1, im_1, mb3); - - //_mm256_store_ps(p, rn); - _mm_store_ps(p, rn_0); - _mm_store_ps(p + 4, rn_1); - - //_mm256_store_ps(p + 8, in); - _mm_store_ps(p + 8, in_0); - _mm_store_ps(p + 12, in_1); - } - - break; - } -} - -void StateSpaceSSE::ApplyGate2HL(const unsigned int q0, const unsigned int q1, - const float* matrix) { - __m128 mb; - - uint64_t sizei = uint64_t(1) << (GetNumQubits() + 1); - uint64_t sizej = uint64_t(1) << (q1 + 1); - - auto rstate = GetRawState(); - - switch (q0) { - case 0: - mb = _mm_castsi128_ps(_mm_set_epi32(-1, 0, -1, 0)); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 16) { - uint64_t si = i | j; - - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, - rn_0, in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, - rn_1, in_1, rm_1, im_1; - - // for fnmadd and fmadd. 
- __m128 temp; - - uint64_t p = si; - - // r0 = _mm256_load_ps(rstate + p); - r0_0 = _mm_load_ps(rstate + p); - r0_1 = _mm_load_ps(rstate + p + 4); - - // i0 = _mm256_load_ps(rstate + p + 8); - i0_0 = _mm_load_ps(rstate + p + 8); - i0_1 = _mm_load_ps(rstate + p + 12); - - // r1 = _mm256_permutevar8x32_ps(r0, ml); - r1_0 = _mm_shuffle_ps(r0_0, r0_0, 177); - r1_1 = _mm_shuffle_ps(r0_1, r0_1, 177); - - // i1 = _mm256_permutevar8x32_ps(i0, ml); - i1_0 = _mm_shuffle_ps(i0_0, i0_0, 177); - i1_1 = _mm_shuffle_ps(i0_1, i0_1, 177); - - p = si | sizej; - - // r2 = _mm256_load_ps(rstate + p); - r2_0 = _mm_load_ps(rstate + p); - r2_1 = _mm_load_ps(rstate + p + 4); - - // i2 = _mm256_load_ps(rstate + p + 8); - i2_0 = _mm_load_ps(rstate + p + 8); - i2_1 = _mm_load_ps(rstate + p + 12); - - // r3 = _mm256_permutevar8x32_ps(r2, ml); - r3_0 = _mm_shuffle_ps(r2_0, r2_0, 177); - r3_1 = _mm_shuffle_ps(r2_1, r2_1, 177); - - // i3 = _mm256_permutevar8x32_ps(i2, ml); - i3_0 = _mm_shuffle_ps(i2_0, i2_0, 177); - i3_1 = _mm_shuffle_ps(i2_1, i2_1, 177); - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = 
_mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, 
temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = 
_mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = 
_mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 177); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 177); - - // im = _mm256_permutevar8x32_ps(im, ml); - im_0 = _mm_shuffle_ps(im_0, im_0, 177); - im_1 = _mm_shuffle_ps(im_1, im_1, 177); - - // rn = _mm256_blendv_ps(rn, rm, mb); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - 
in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = 
_mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = 
_mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 177); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 177); - - // im = _mm256_permutevar8x32_ps(im, ml); - im_0 = _mm_shuffle_ps(im_0, im_0, 177); - im_1 = 
_mm_shuffle_ps(im_1, im_1, 177); - - // rn = _mm256_blendv_ps(rn, rm, mb); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si | sizej; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - } - } - - break; - case 1: - mb = _mm_castsi128_ps(_mm_set_epi32(-1, -1, 0, 0)); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 16) { - uint64_t si = i | j; - - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, - rn_0, in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, - rn_1, in_1, rm_1, im_1; - - // for fnmadd and fmadd. 
- __m128 temp; - - uint64_t p = si; - - // r0 = _mm256_load_ps(rstate + p); - r0_0 = _mm_load_ps(rstate + p); - r0_1 = _mm_load_ps(rstate + p + 4); - - // i0 = _mm256_load_ps(rstate + p + 8); - i0_0 = _mm_load_ps(rstate + p + 8); - i0_1 = _mm_load_ps(rstate + p + 12); - - // r1 = _mm256_permutevar8x32_ps(r0, ml); - r1_0 = _mm_shuffle_ps(r0_0, r0_0, 78); - r1_1 = _mm_shuffle_ps(r0_1, r0_1, 78); - - // i1 = _mm256_permutevar8x32_ps(i0, ml); - i1_0 = _mm_shuffle_ps(i0_0, i0_0, 78); - i1_1 = _mm_shuffle_ps(i0_1, i0_1, 78); - - p = si | sizej; - - // r2 = _mm256_load_ps(rstate + p); - r2_0 = _mm_load_ps(rstate + p); - r2_1 = _mm_load_ps(rstate + p + 4); - - // i2 = _mm256_load_ps(rstate + p + 8); - i2_0 = _mm_load_ps(rstate + p + 8); - i2_1 = _mm_load_ps(rstate + p + 12); - - // r3 = _mm256_permutevar8x32_ps(r2, ml); - r3_0 = _mm_shuffle_ps(r2_0, r2_0, 78); - r3_1 = _mm_shuffle_ps(r2_1, r2_1, 78); - - // i3 = _mm256_permutevar8x32_ps(i2, ml); - i3_0 = _mm_shuffle_ps(i2_0, i2_0, 78); - i3_1 = _mm_shuffle_ps(i2_1, i2_1, 78); - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = 
_mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, 
temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = 
_mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = 
_mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 78); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 78); - - // im = _mm256_permutevar8x32_ps(im, ml); - im_0 = _mm_shuffle_ps(im_0, im_0, 78); - im_1 = _mm_shuffle_ps(im_1, im_1, 78); - - // rn = _mm256_blendv_ps(rn, rm, mb); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 
= _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = 
_mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = 
_mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - rm_0 = _mm_shuffle_ps(rm_0, rm_0, 78); - rm_1 = _mm_shuffle_ps(rm_1, rm_1, 78); - - // im = _mm256_permutevar8x32_ps(im, ml); - im_0 = _mm_shuffle_ps(im_0, im_0, 78); - im_1 = 
_mm_shuffle_ps(im_1, im_1, 78); - - // rn = _mm256_blendv_ps(rn, rm, mb); - rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si | sizej; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - } - } - - break; - case 2: - - mb = _mm_castsi128_ps(_mm_set_epi32(-1, -1, -1, -1)); - - for (uint64_t i = 0; i < sizei; i += 2 * sizej) { - for (uint64_t j = 0; j < sizej; j += 16) { - uint64_t si = i | j; - - //__m256 r0, i0, r1, i1, r2, i2, r3, i3, ru, iu, rn, in, rm, im; - __m128 r0_0, i0_0, r1_0, i1_0, r2_0, i2_0, r3_0, i3_0, ru_0, iu_0, - rn_0, in_0, rm_0, im_0; - __m128 r0_1, i0_1, r1_1, i1_1, r2_1, i2_1, r3_1, i3_1, ru_1, iu_1, - rn_1, in_1, rm_1, im_1; - - // for fnmadd and fmadd. 
- __m128 temp; - - uint64_t p = si; - - // r0 = _mm256_load_ps(rstate + p); - r0_0 = _mm_load_ps(rstate + p); - r0_1 = _mm_load_ps(rstate + p + 4); - - // i0 = _mm256_load_ps(rstate + p + 8); - i0_0 = _mm_load_ps(rstate + p + 8); - i0_1 = _mm_load_ps(rstate + p + 12); - - // r1 = _mm256_permutevar8x32_ps(r0, ml); - r1_0 = r0_1; - r1_1 = r0_0; - - // i1 = _mm256_permutevar8x32_ps(i0, ml); - i1_0 = i0_1; - i1_1 = i0_0; - - p = si | sizej; - - // r2 = _mm256_load_ps(rstate + p); - r2_0 = _mm_load_ps(rstate + p); - r2_1 = _mm_load_ps(rstate + p + 4); - - // i2 = _mm256_load_ps(rstate + p + 8); - i2_0 = _mm_load_ps(rstate + p + 8); - i2_1 = _mm_load_ps(rstate + p + 12); - - // r3 = _mm256_permutevar8x32_ps(r2, ml); - r3_0 = r2_1; - r3_1 = r2_0; - - // i3 = _mm256_permutevar8x32_ps(i2, ml); - i3_0 = i2_1; - i3_1 = i2_0; - - // ru = _mm256_set1_ps(matrix[0]); - ru_0 = _mm_set1_ps(matrix[0]); - ru_1 = _mm_set1_ps(matrix[0]); - - // iu = _mm256_set1_ps(matrix[1]); - iu_0 = _mm_set1_ps(matrix[1]); - iu_1 = _mm_set1_ps(matrix[1]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[2]); - ru_0 = _mm_set1_ps(matrix[2]); - ru_1 = _mm_set1_ps(matrix[2]); - - // iu = _mm256_set1_ps(matrix[3]); - iu_0 = _mm_set1_ps(matrix[3]); - iu_1 = _mm_set1_ps(matrix[3]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, 
iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[4]); - ru_0 = _mm_set1_ps(matrix[4]); - ru_1 = _mm_set1_ps(matrix[4]); - - // iu = _mm256_set1_ps(matrix[5]); - iu_0 = _mm_set1_ps(matrix[5]); - iu_1 = _mm_set1_ps(matrix[5]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[6]); - ru_0 = _mm_set1_ps(matrix[6]); - ru_1 = _mm_set1_ps(matrix[6]); - - // iu = _mm256_set1_ps(matrix[7]); - iu_0 = _mm_set1_ps(matrix[7]); - iu_1 = _mm_set1_ps(matrix[7]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = 
_mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[8]); - ru_0 = _mm_set1_ps(matrix[8]); - ru_1 = _mm_set1_ps(matrix[8]); - - // iu = _mm256_set1_ps(matrix[9]); - iu_0 = _mm_set1_ps(matrix[9]); - iu_1 = _mm_set1_ps(matrix[9]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[10]); - ru_0 = _mm_set1_ps(matrix[10]); - ru_1 = _mm_set1_ps(matrix[10]); - - // iu = _mm256_set1_ps(matrix[11]); - iu_0 = _mm_set1_ps(matrix[11]); - iu_1 = _mm_set1_ps(matrix[11]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = 
_mm256_set1_ps(matrix[12]); - ru_0 = _mm_set1_ps(matrix[12]); - ru_1 = _mm_set1_ps(matrix[12]); - - // iu = _mm256_set1_ps(matrix[13]); - iu_0 = _mm_set1_ps(matrix[13]); - iu_1 = _mm_set1_ps(matrix[13]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[14]); - ru_0 = _mm_set1_ps(matrix[14]); - ru_1 = _mm_set1_ps(matrix[14]); - - // iu = _mm256_set1_ps(matrix[15]); - iu_0 = _mm_set1_ps(matrix[15]); - iu_1 = _mm_set1_ps(matrix[15]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - temp = rm_0; - rm_0 = rm_1; - rm_1 = temp; - - // im = _mm256_permutevar8x32_ps(im, ml); - temp = im_0; - im_0 = im_1; - im_1 = temp; - - // rn = 
_mm256_blendv_ps(rn, rm, mb); - // rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - // in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - - // ru = _mm256_set1_ps(matrix[16]); - ru_0 = _mm_set1_ps(matrix[16]); - ru_1 = _mm_set1_ps(matrix[16]); - - // iu = _mm256_set1_ps(matrix[17]); - iu_0 = _mm_set1_ps(matrix[17]); - iu_1 = _mm_set1_ps(matrix[17]); - - // rn = _mm256_mul_ps(r0, ru); - rn_0 = _mm_mul_ps(r0_0, ru_0); - rn_1 = _mm_mul_ps(r0_1, ru_1); - - // in = _mm256_mul_ps(r0, iu); - in_0 = _mm_mul_ps(r0_0, iu_0); - in_1 = _mm_mul_ps(r0_1, iu_1); - - // rn = _mm256_fnmadd_ps(i0, iu, rn); - temp = _mm_mul_ps(i0_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i0, ru, in); - temp = _mm_mul_ps(i0_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[18]); - ru_0 = _mm_set1_ps(matrix[18]); - ru_1 = _mm_set1_ps(matrix[18]); - - // iu = _mm256_set1_ps(matrix[19]); - iu_0 = _mm_set1_ps(matrix[19]); - iu_1 = _mm_set1_ps(matrix[19]); - - // rn = _mm256_fmadd_ps(r1, ru, rn); - temp = _mm_mul_ps(r1_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r1, iu, in); - temp = _mm_mul_ps(r1_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i1, iu, rn); - temp = _mm_mul_ps(i1_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = 
_mm256_fmadd_ps(i1, ru, in); - temp = _mm_mul_ps(i1_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[20]); - ru_0 = _mm_set1_ps(matrix[20]); - ru_1 = _mm_set1_ps(matrix[20]); - - // iu = _mm256_set1_ps(matrix[21]); - iu_0 = _mm_set1_ps(matrix[21]); - iu_1 = _mm_set1_ps(matrix[21]); - - // rn = _mm256_fmadd_ps(r2, ru, rn); - temp = _mm_mul_ps(r2_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r2_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r2, iu, in); - temp = _mm_mul_ps(r2_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i2, iu, rn); - temp = _mm_mul_ps(i2_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i2, ru, in); - temp = _mm_mul_ps(i2_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = _mm256_set1_ps(matrix[22]); - ru_0 = _mm_set1_ps(matrix[22]); - ru_1 = _mm_set1_ps(matrix[22]); - - // iu = _mm256_set1_ps(matrix[23]); - iu_0 = _mm_set1_ps(matrix[23]); - iu_1 = _mm_set1_ps(matrix[23]); - - // rn = _mm256_fmadd_ps(r3, ru, rn); - temp = _mm_mul_ps(r3_0, ru_0); - rn_0 = _mm_add_ps(rn_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rn_1 = _mm_add_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(r3, iu, in); - temp = _mm_mul_ps(r3_0, iu_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - in_1 = _mm_add_ps(in_1, temp); - - // rn = _mm256_fnmadd_ps(i3, iu, rn); - temp = _mm_mul_ps(i3_0, iu_0); - rn_0 = _mm_sub_ps(rn_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rn_1 = _mm_sub_ps(rn_1, temp); - - // in = _mm256_fmadd_ps(i3, ru, in); - temp = _mm_mul_ps(i3_0, ru_0); - in_0 = _mm_add_ps(in_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - in_1 = _mm_add_ps(in_1, temp); - - // ru = 
_mm256_set1_ps(matrix[24]); - ru_0 = _mm_set1_ps(matrix[24]); - ru_1 = _mm_set1_ps(matrix[24]); - - // iu = _mm256_set1_ps(matrix[25]); - iu_0 = _mm_set1_ps(matrix[25]); - iu_1 = _mm_set1_ps(matrix[25]); - - // rm = _mm256_mul_ps(r0, ru); - rm_0 = _mm_mul_ps(r0_0, ru_0); - rm_1 = _mm_mul_ps(r0_1, ru_1); - - // im = _mm256_mul_ps(r0, iu); - im_0 = _mm_mul_ps(r0_0, iu_0); - im_1 = _mm_mul_ps(r0_1, iu_1); - - // rm = _mm256_fnmadd_ps(i0, iu, rm); - temp = _mm_mul_ps(i0_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i0_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i0, ru, im); - temp = _mm_mul_ps(i0_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i0_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[26]); - ru_0 = _mm_set1_ps(matrix[26]); - ru_1 = _mm_set1_ps(matrix[26]); - - // iu = _mm256_set1_ps(matrix[27]); - iu_0 = _mm_set1_ps(matrix[27]); - iu_1 = _mm_set1_ps(matrix[27]); - - // rm = _mm256_fmadd_ps(r1, ru, rm); - temp = _mm_mul_ps(r1_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r1_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r1, iu, im); - temp = _mm_mul_ps(r1_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r1_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i1, iu, rm); - temp = _mm_mul_ps(i1_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i1_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i1, ru, im); - temp = _mm_mul_ps(i1_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i1_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[28]); - ru_0 = _mm_set1_ps(matrix[28]); - ru_1 = _mm_set1_ps(matrix[28]); - - // iu = _mm256_set1_ps(matrix[29]); - iu_0 = _mm_set1_ps(matrix[29]); - iu_1 = _mm_set1_ps(matrix[29]); - - // rm = _mm256_fmadd_ps(r2, ru, rm); - temp = _mm_mul_ps(r2_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = 
_mm_mul_ps(r2_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r2, iu, im); - temp = _mm_mul_ps(r2_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r2_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i2, iu, rm); - temp = _mm_mul_ps(i2_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i2_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i2, ru, im); - temp = _mm_mul_ps(i2_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i2_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // ru = _mm256_set1_ps(matrix[30]); - ru_0 = _mm_set1_ps(matrix[30]); - ru_1 = _mm_set1_ps(matrix[30]); - - // iu = _mm256_set1_ps(matrix[31]); - iu_0 = _mm_set1_ps(matrix[31]); - iu_1 = _mm_set1_ps(matrix[31]); - - // rm = _mm256_fmadd_ps(r3, ru, rm); - temp = _mm_mul_ps(r3_0, ru_0); - rm_0 = _mm_add_ps(rm_0, temp); - temp = _mm_mul_ps(r3_1, ru_1); - rm_1 = _mm_add_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(r3, iu, im); - temp = _mm_mul_ps(r3_0, iu_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(r3_1, iu_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_fnmadd_ps(i3, iu, rm); - temp = _mm_mul_ps(i3_0, iu_0); - rm_0 = _mm_sub_ps(rm_0, temp); - temp = _mm_mul_ps(i3_1, iu_1); - rm_1 = _mm_sub_ps(rm_1, temp); - - // im = _mm256_fmadd_ps(i3, ru, im); - temp = _mm_mul_ps(i3_0, ru_0); - im_0 = _mm_add_ps(im_0, temp); - temp = _mm_mul_ps(i3_1, ru_1); - im_1 = _mm_add_ps(im_1, temp); - - // rm = _mm256_permutevar8x32_ps(rm, ml); - temp = rm_0; - rm_0 = rm_1; - rm_1 = temp; - - // im = _mm256_permutevar8x32_ps(im, ml); - temp = im_0; - im_0 = im_1; - im_1 = temp; - - // rn = _mm256_blendv_ps(rn, rm, mb); - // rn_0 = _mm_blendv_ps(rn_0, rm_0, mb); - rn_1 = _mm_blendv_ps(rn_1, rm_1, mb); - - // in = _mm256_blendv_ps(in, im, mb); - // in_0 = _mm_blendv_ps(in_0, im_0, mb); - in_1 = _mm_blendv_ps(in_1, im_1, mb); - - p = si | sizej; - //_mm256_store_ps(rstate + p, rn); - _mm_store_ps(rstate + 
p, rn_0); - _mm_store_ps(rstate + p + 4, rn_1); - - //_mm256_store_ps(rstate + p + 8, in); - _mm_store_ps(rstate + p + 8, in_0); - _mm_store_ps(rstate + p + 12, in_1); - } - } - - break; - } -} - -} // namespace qsim -} // namespace tfq - -#endif diff --git a/tensorflow_quantum/core/qsim/state_space_sse.h b/tensorflow_quantum/core/qsim/state_space_sse.h deleted file mode 100644 index f2e58239d..000000000 --- a/tensorflow_quantum/core/qsim/state_space_sse.h +++ /dev/null @@ -1,87 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_STATE_SPACE_SSE_H_ -#define TFQ_CORE_QSIM_STATE_SPACE_SSE_H_ - -#include -#include - -#include -#include - -#include "tensorflow_quantum/core/qsim/state_space.h" - -namespace tfq { -namespace qsim { - -class StateSpaceSSE : public StateSpace { - public: - StateSpaceSSE(const uint64_t num_qubits, const uint64_t num_threads); - - virtual ~StateSpaceSSE(); - - virtual StateSpaceType GetType() const override; - - // Reserve the memory associated with the state in this space - virtual void CreateState() override; - - // Free the memory associated with the state in this space - virtual void DeleteState() override; - - // Return a pointer to a copy of this StateSpace. - // NOTE: user is responsible for deleting the returned copy. 
- virtual StateSpace* Clone() const override; - - // Copy the state information from another statespace. - // Assumes the state has been initialized/created. - virtual void CopyFrom(const StateSpace& other) const override; - - // Function to apply a two qubit gate to the state on indices q0 and q1. - virtual void ApplyGate2(const unsigned int q0, const unsigned int q1, - const float* matrix) override; - - // Function to apply a one-qubit gate if there is only one qubit in the state. - // Implementations are given the option to return an error. - virtual tensorflow::Status ApplyGate1(const float* matrix) override; - - // Set all entries in the state to zero - virtual void SetStateZero() override; - - // Get the inner product between this state and the state in `other` - virtual float GetRealInnerProduct(const StateSpace& other) const override; - - // Get the amplitude at the given state index - virtual std::complex GetAmpl(const uint64_t i) const override; - - // Set the amplitude at the given state index - virtual void SetAmpl(const uint64_t i, - const std::complex& val) override; - - private: - void ApplyGate2HH(const unsigned int q0, const unsigned int q1, - const float* matrix); - - void ApplyGate2HL(const unsigned int q0, const unsigned int q1, - const float* matrix); - - void ApplyGate2LL(const unsigned int q0, const unsigned int q1, - const float* matrix); -}; - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_STATE_SPACE_SSE_H_ diff --git a/tensorflow_quantum/core/qsim/util.cc b/tensorflow_quantum/core/qsim/util.cc deleted file mode 100644 index b0c9df022..000000000 --- a/tensorflow_quantum/core/qsim/util.cc +++ /dev/null @@ -1,37 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow_quantum/core/qsim/util.h" - -#include -#include - -namespace tfq { -namespace qsim { - -void* _aligned_malloc(size_t size) { - // choose 64 bit alignment in case we ever introduce avx 512 support. - const size_t al = 64; - void* initial_mem = malloc(size + al); - void* aligned_mem = reinterpret_cast( - (reinterpret_cast(initial_mem) & ~(al - 1)) + al); - *(reinterpret_cast(aligned_mem) - 1) = initial_mem; - return aligned_mem; -} - -void _aligned_free(void* ptr) { free(*(reinterpret_cast(ptr) - 1)); } - -} // namespace qsim -} // namespace tfq diff --git a/tensorflow_quantum/core/qsim/util.h b/tensorflow_quantum/core/qsim/util.h deleted file mode 100644 index c2daa4a76..000000000 --- a/tensorflow_quantum/core/qsim/util.h +++ /dev/null @@ -1,33 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_UTIL_H_ -#define TFQ_CORE_QSIM_UTIL_H_ - -#include - -namespace tfq { -namespace qsim { - -// Workaround for std::aligned_alloc not working on C++11. -void* _aligned_malloc(size_t size); - -// Workaround for std::alligned_alloc not working on C++11. -void _aligned_free(void* ptr); - -} // namespace qsim -} // namespace tfq - -#endif // TFQ_CORE_QSIM_UTIL_H_ diff --git a/tensorflow_quantum/core/serialize/BUILD b/tensorflow_quantum/core/serialize/BUILD deleted file mode 100644 index 7c12ed7ff..000000000 --- a/tensorflow_quantum/core/serialize/BUILD +++ /dev/null @@ -1,23 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -py_library( - name = "serializer", - srcs = ["serializer.py"], - deps = [ - "//tensorflow_quantum/core/proto:pauli_sum_py_proto", - ], -) - -py_test( - name = "serializer_test", - srcs = ["serializer_test.py"], - python_version = "PY3", - deps = [ - ":serializer", - ], -) diff --git a/tensorflow_quantum/core/serialize/__init__.py b/tensorflow_quantum/core/serialize/__init__.py deleted file mode 100644 index 6ee678723..000000000 --- a/tensorflow_quantum/core/serialize/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Module for tfq.core.serialize.*""" -from tensorflow_quantum.core.serialize.serializer import (serialize_circuit, - deserialize_circuit, - serialize_paulisum, - deserialize_paulisum) diff --git a/tensorflow_quantum/core/serialize/serializer.py b/tensorflow_quantum/core/serialize/serializer.py deleted file mode 100644 index c375757a2..000000000 --- a/tensorflow_quantum/core/serialize/serializer.py +++ /dev/null @@ -1,483 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""A basic serializer used to serialize/deserialize Cirq circuits for tfq.""" -# TODO(pmassey / anyone): determine if this should be kept as globals. -import copy -import numbers -import sympy - -import cirq -import cirq.google.api.v2 as v2 -from tensorflow_quantum.core.proto import pauli_sum_pb2 - -# Needed to allow autograph to crawl AST without erroring. -_CONSTANT_TRUE = lambda x: True - - -def _parse_mul(expr): - """Returns the lhs and rhs of a sympy.Mul. 
This is written - to prevent autograph from going into sympy library code and having - conflicts with the @cacheit decorator.""" - if len(expr.args) == 1: - return sympy.S.One, expr.args[0] - if len(expr.args) == 2: - return expr.args[0], expr.args[1] - - raise ValueError("Arithmetic expression outside of simple " - "scalar multiplication is currently not " - "supported. See serializer.py for more " - "information.") - - -def _scalar_extractor(x): - """This is a workaround to support symbol scalar multiplication. - In the future we should likely get rid of this in favor of proper - expression parsing once cirq supports it. See cirq.op_serializer - and cirq's program protobuf for details. This is needed for things - like cirq.Rx('alpha'). - """ - if not isinstance(x, (numbers.Real, sympy.Expr)): - raise TypeError("Invalid input argument for exponent.") - - if isinstance(x, (numbers.Real, sympy.Symbol)): - return 1.0 - - expr = x.evalf() - if isinstance(expr, sympy.mul.Mul): - lhs_eval, rhs_eval = _parse_mul(expr) - - if isinstance(lhs_eval, sympy.Symbol) and isinstance( - rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)): - # lhs contains symbol rhs contains number. - return float(rhs_eval) - - if isinstance(rhs_eval, sympy.Symbol) and isinstance( - lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)): - # lhs contains number. - return float(lhs_eval) - - raise ValueError("Arithmatic expression outside of simple " - "scalar multiplication is currently not " - "supported. 
See serializer.py for more " - "information.") - - -def _symbol_extractor(x): - """This is the second extractor for above.""" - if not isinstance(x, (numbers.Real, sympy.Expr)): - raise TypeError("Invalid input argument for exponent.") - - if isinstance(x, numbers.Real): - return float(x) - if isinstance(x, sympy.Symbol): - return x - - expr = x.evalf() - if isinstance(expr, sympy.mul.Mul): - lhs_eval, rhs_eval = _parse_mul(expr) - - if isinstance(lhs_eval, sympy.Symbol) and isinstance( - rhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)): - # lhs contains symbol rhs contains number. - return lhs_eval - - if isinstance(rhs_eval, sympy.Symbol) and isinstance( - lhs_eval, (sympy.numbers.Float, sympy.numbers.Integer)): - # lhs contains number. - return rhs_eval - - raise ValueError("Arithmetic expression outside of simple " - "scalar multiplication is currently not " - "supported. See serializer.py for more " - "information.") - - -def _eigen_gate_serializer(gate_type, serialized_id): - """Make standard serializer for eigen gates.""" - - args = [ - cirq.google.SerializingArg( - serialized_name="exponent", - serialized_type=float, - gate_getter=lambda x: _symbol_extractor(x.exponent)), - cirq.google.SerializingArg( - serialized_name="exponent_scalar", - serialized_type=float, - gate_getter=lambda x: _scalar_extractor(x.exponent)), - cirq.google.SerializingArg(serialized_name="global_shift", - serialized_type=float, - gate_getter=lambda x: float(x._global_shift)) - ] - return cirq.google.GateOpSerializer(gate_type=gate_type, - serialized_gate_id=serialized_id, - args=args, - can_serialize_predicate=_CONSTANT_TRUE) - - -def _eigen_gate_deserializer(gate_type, serialized_id): - """Make standard deserializer for eigen gates.""" - - def _scalar_combiner(exponent, global_shift, exponent_scalar): - """This is a workaround to support symbol scalar multiplication. 
- In the future we should likely get rid of this in favor of proper - expression parsing once cirq supports it. See cirq.op_serializer - and cirq's program protobuf for details. This is needed for things - like cirq.Rx('alpha'). - """ - if exponent_scalar == 1.0: - return gate_type(exponent=exponent, global_shift=global_shift) - return gate_type(exponent=exponent * exponent_scalar, - global_shift=global_shift) - - args = [ - cirq.google.DeserializingArg(serialized_name="exponent", - constructor_arg_name="exponent"), - cirq.google.DeserializingArg(serialized_name="global_shift", - constructor_arg_name="global_shift"), - cirq.google.DeserializingArg(serialized_name="exponent_scalar", - constructor_arg_name="exponent_scalar") - ] - return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id, - gate_constructor=_scalar_combiner, - args=args) - - -def _fsim_gate_serializer(): - """Make standard serializer for fsim gate.""" - - args = [ - cirq.google.SerializingArg( - serialized_name="theta", - serialized_type=float, - gate_getter=lambda x: _symbol_extractor(x.theta)), - cirq.google.SerializingArg( - serialized_name="phi", - serialized_type=float, - gate_getter=lambda x: _symbol_extractor(x.phi)), - cirq.google.SerializingArg( - serialized_name="theta_scalar", - serialized_type=float, - gate_getter=lambda x: _scalar_extractor(x.theta)), - cirq.google.SerializingArg( - serialized_name="phi_scalar", - serialized_type=float, - gate_getter=lambda x: _scalar_extractor(x.phi)), - ] - return cirq.google.GateOpSerializer(gate_type=cirq.FSimGate, - serialized_gate_id="FSIM", - args=args, - can_serialize_predicate=_CONSTANT_TRUE) - - -def _fsim_gate_deserializer(): - """Make standard deserializer for fsim gate.""" - - def _scalar_combiner(theta, theta_scalar, phi, phi_scalar): - """This is a workaround to support symbol scalar multiplication. - See `_eigen_gate_deserializer` for details. 
- """ - return cirq.FSimGate(theta=theta * theta_scalar, phi=phi * phi_scalar) - - args = [ - cirq.google.DeserializingArg(serialized_name="theta", - constructor_arg_name="theta"), - cirq.google.DeserializingArg(serialized_name="phi", - constructor_arg_name="phi"), - cirq.google.DeserializingArg(serialized_name="theta_scalar", - constructor_arg_name="theta_scalar"), - cirq.google.DeserializingArg(serialized_name="phi_scalar", - constructor_arg_name="phi_scalar"), - ] - return cirq.google.GateOpDeserializer(serialized_gate_id="FSIM", - gate_constructor=_scalar_combiner, - args=args) - - -def _phased_eigen_gate_serializer(gate_type, serialized_id): - """Make a standard serializer for phased eigen gates.""" - - args = [ - cirq.google.SerializingArg( - serialized_name="phase_exponent", - serialized_type=float, - gate_getter=lambda x: _symbol_extractor(x.phase_exponent)), - cirq.google.SerializingArg( - serialized_name="phase_exponent_scalar", - serialized_type=float, - gate_getter=lambda x: _scalar_extractor(x.phase_exponent)), - cirq.google.SerializingArg( - serialized_name="exponent", - serialized_type=float, - gate_getter=lambda x: _symbol_extractor(x.exponent)), - cirq.google.SerializingArg( - serialized_name="exponent_scalar", - serialized_type=float, - gate_getter=lambda x: _scalar_extractor(x.exponent)), - cirq.google.SerializingArg(serialized_name="global_shift", - serialized_type=float, - gate_getter=lambda x: float(x.global_shift)) - ] - return cirq.google.GateOpSerializer(gate_type=gate_type, - serialized_gate_id=serialized_id, - args=args, - can_serialize_predicate=_CONSTANT_TRUE) - - -def _phased_eigen_gate_deserializer(gate_type, serialized_id): - """Make a standard deserializer for phased eigen gates.""" - - def _scalar_combiner(exponent, global_shift, exponent_scalar, - phase_exponent, phase_exponent_scalar): - """This is a workaround to support symbol scalar multiplication. 
- In the future we should likely get rid of this in favor of proper - expression parsing once cirq supports it. See cirq.op_serializer - and cirq's program protobuf for details. This is needed for things - like cirq.Rx('alpha'). - """ - # Do this to help with rounding. it's ugly. - exponent = exponent if exponent_scalar == 1.0 \ - else exponent * exponent_scalar - phase_exponent = phase_exponent if phase_exponent_scalar == 1.0 \ - else phase_exponent * phase_exponent_scalar - if global_shift != 0: - # needed in case this specific phasedeigengate doesn't - # have a global_phase in constructor. - return gate_type(exponent=exponent, - global_shift=global_shift, - phase_exponent=phase_exponent) - return gate_type(exponent=exponent, phase_exponent=phase_exponent) - - args = [ - cirq.google.DeserializingArg(serialized_name="phase_exponent", - constructor_arg_name="phase_exponent"), - cirq.google.DeserializingArg( - serialized_name="phase_exponent_scalar", - constructor_arg_name="phase_exponent_scalar"), - cirq.google.DeserializingArg(serialized_name="exponent", - constructor_arg_name="exponent"), - cirq.google.DeserializingArg(serialized_name="exponent_scalar", - constructor_arg_name="exponent_scalar"), - cirq.google.DeserializingArg(serialized_name="global_shift", - constructor_arg_name="global_shift"), - ] - return cirq.google.GateOpDeserializer(serialized_gate_id=serialized_id, - gate_constructor=_scalar_combiner, - args=args) - - -EIGEN_GATES_DICT = { - cirq.XPowGate: "XP", - cirq.XXPowGate: "XXP", - cirq.YPowGate: "YP", - cirq.YYPowGate: "YYP", - cirq.ZPowGate: "ZP", - cirq.ZZPowGate: "ZZP", - cirq.HPowGate: "HP", - cirq.CZPowGate: "CZP", - cirq.CNotPowGate: "CNP", - cirq.SwapPowGate: "SP", - cirq.ISwapPowGate: "ISP", -} - -PHASED_EIGEN_GATES_DICT = { - cirq.PhasedXPowGate: "PXP", - cirq.PhasedISwapPowGate: "PISP", -} - -SERIALIZERS = [ - _eigen_gate_serializer(g, g_name) for g, g_name in EIGEN_GATES_DICT.items() -] + [ - _fsim_gate_serializer(), -] + [ - 
_phased_eigen_gate_serializer(g, g_name) - for g, g_name in PHASED_EIGEN_GATES_DICT.items() -] - -DESERIALIZERS = [ - _eigen_gate_deserializer(g, g_name) - for g, g_name in EIGEN_GATES_DICT.items() -] + [ - _fsim_gate_deserializer(), -] + [ - _phased_eigen_gate_deserializer(g, g_name) - for g, g_name in PHASED_EIGEN_GATES_DICT.items() -] - -SERIALIZER = cirq.google.SerializableGateSet(gate_set_name="tfq_gate_set", - serializers=SERIALIZERS, - deserializers=DESERIALIZERS) - - -def serialize_circuit(circuit_inp): - """Returns a `cirq.Program` proto representing the `cirq.Circuit`. - - Note that the circuit must use gates valid in the tfq_gate_set. - Currently we only support scalar multiplication of symbols and - no other more complex arithmetic expressions. This means - we can support things like X**(3*alpha), and Rx(alpha). Because - we use the `cirq.Program` proto, we only support `cirq.GridQubit` instances - during serialization of circuits. - - Note: once serialized terminal measurements are removed. - - Args: - circuit_inp: A `cirq.Circuit`. - - Returns: - A `cirq.google.api.v2.Program` proto. - """ - circuit = copy.deepcopy(circuit_inp) - if not isinstance(circuit, cirq.Circuit): - raise TypeError("serialize requires cirq.Circuit objects." - " Given: " + str(type(circuit))) - - # TODO(peterse): parsing circuits for Identity types is currently broken. - # see https://github.com/quantumlib/Cirq/issues/2520. This will be - # fixed in cirq 0.7. - - # This code is intentionally written to avoid using cirq functions - # as this get analyzed by tensorflow-autograph. - - # Gives a map from moment index to measure qubits in moment - measured_moments = dict() - - # Tracks qubits that have been measured already. - all_measured_qubits = set() - for i, moment in enumerate(circuit.moments): - measured_qubits = set() - for op in moment: - if isinstance(op, cirq.IdentityGate): - raise TypeError( - "Circuits containing cirq.I are currently not supported. 
" - "Remove any instances of cirq.I and try again.") - for qubit in op.qubits: - if not isinstance(qubit, cirq.GridQubit): - raise ValueError( - "Attempted to serialize circuit that don't use " - "only cirq.GridQubits.") - - if isinstance(op.gate, cirq.MeasurementGate): - for qubit in op.qubits: - if qubit in all_measured_qubits: - raise ValueError("Serialization of circuit failed. " - "Circuits with non-terminal " - "measurement operations are not " - "supported.") - measured_qubits.add(qubit) - all_measured_qubits.add(qubit) - - if len(measured_qubits) > 0: - measured_moments[i] = measured_qubits - - # Remove terminal measurements. - for moment_ind in measured_moments: - old_moment = circuit[moment_ind] - measured_qubits = measured_moments[moment_ind] - new_moment = cirq.Moment( - filter(lambda x: not any(y in measured_qubits for y in x.qubits), - old_moment.operations)) - circuit[moment_ind] = new_moment - - return SERIALIZER.serialize(circuit) - - -def deserialize_circuit(proto): - """Constructs a `cirq.Circuit` from a `cirq.Program` proto. - - Note that the proto must use gates valid in the tfq_gate_set. - - Args: - proto: A `cirq.google.api.v2.Program` proto - - Returns: - A `cirq.Circuit`. - """ - if not isinstance(proto, cirq.google.api.v2.program_pb2.Program): - raise TypeError("deserialize requires " - "cirq.google.api.v2.program_pb2.Program object." - " Given: " + str(type(proto))) - - return SERIALIZER.deserialize(proto) - - -def serialize_paulisum(paulisum): - """Constructs a pauli_sum proto from `cirq.PauliSum` or `cirq.PauliString`. - - Args: - paulisum: A `cirq.PauliSum` object. - - Returns: - A pauli_sum proto object. - """ - if isinstance(paulisum, cirq.PauliString): - paulisum = cirq.PauliSum.from_pauli_strings(paulisum) - - if not isinstance(paulisum, cirq.PauliSum): - raise TypeError("serialize requires a cirq.PauliSum object." 
- " Given: " + str(type(paulisum))) - - if any(not isinstance(qubit, cirq.GridQubit) for qubit in paulisum.qubits): - raise ValueError("Attempted to serialize a paulisum that doesn't use " - "only cirq.GridQubits.") - - paulisum_proto = pauli_sum_pb2.PauliSum() - for term in paulisum: - pauliterm_proto = pauli_sum_pb2.PauliTerm() - - pauliterm_proto.coefficient_real = term.coefficient.real - pauliterm_proto.coefficient_imag = term.coefficient.imag - for t in sorted(term.items()): # sort to keep qubits ordered. - pauliterm_proto.paulis.add( - qubit_id=v2.qubit_to_proto_id(t[0]), - pauli_type=str(t[1]), - ) - paulisum_proto.terms.extend([pauliterm_proto]) - - return paulisum_proto - - -def deserialize_paulisum(proto): - """Constructs a `cirq.PauliSum` from pauli_sum proto. - - Args: - proto: A pauli_sum proto object. - - Returns: - A `cirq.PauliSum` object. - """ - if not isinstance(proto, pauli_sum_pb2.PauliSum): - raise TypeError("deserialize requires a pauli_sum_pb2 object." - " Given: " + str(type(proto))) - - res = cirq.PauliSum() - for term_proto in proto.terms: - coef = term_proto.coefficient_real + 1.0j * term_proto.coefficient_imag - term = coef * cirq.PauliString() - for pauli_qubit_pair in term_proto.paulis: - op = _process_pauli_type(pauli_qubit_pair.pauli_type) - term *= op(v2.grid_qubit_from_proto_id(pauli_qubit_pair.qubit_id)) - res += term - - return res - - -def _process_pauli_type(char): - if char == 'Z': - return cirq.Z - if char == 'X': - return cirq.X - if char == 'Y': - return cirq.Y - raise ValueError("Invalid pauli type.") diff --git a/tensorflow_quantum/core/serialize/serializer_test.py b/tensorflow_quantum/core/serialize/serializer_test.py deleted file mode 100644 index 45d2277ba..000000000 --- a/tensorflow_quantum/core/serialize/serializer_test.py +++ /dev/null @@ -1,649 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module to test serialization core.""" -import copy -import numpy as np -import sympy -import tensorflow as tf - -import cirq -from cirq.google.api.v2 import program_pb2 -from absl.testing import parameterized -from tensorflow_quantum.core.proto import pauli_sum_pb2 -from tensorflow_quantum.core.serialize import serializer - - -def _build_gate_proto(gate_id, arg_names, arg_vals, qubit_ids): - """Helper function to generate proto for a given circuit spec. - - Understand how it works from this example: - - _build_gate_proto("HP", - ['exponent', 'global_shift'], - ['alpha', 0.0], - ['0_0']) - - would produce the following: - - language { - gate_set: "tfq_gate_set" - } - circuit { - scheduling_strategy: MOMENT_BY_MOMENT - moments { - operations { - gate { - id: "HP" - } - args { - key: "global_shift" - value { - arg_value { - float_value: 0.0 - } - } - } - args { - key: "exponent" - value { - symbol: "alpha" - } - } - qubits { - id: "0_0" - } - } - } - } - """ - - program_proto = program_pb2.Program() - program_proto.language.gate_set = 'tfq_gate_set' - - circuit_proto = program_proto.circuit - circuit_proto.scheduling_strategy = circuit_proto.MOMENT_BY_MOMENT #'1'. 
- circuit_proto.moments.add(operations=[program_pb2.Operation( - gate = program_pb2.Gate(id=gate_id), - args = {arg_names[i]: (program_pb2.Arg(symbol=arg_vals[i]) \ - if isinstance(arg_vals[i], str) else \ - program_pb2.Arg( - arg_value=cirq.google.api.v2.program_pb2.ArgValue( - float_value=arg_vals[i]))) for i in range(len(arg_vals))}, - qubits=[program_pb2.Qubit( - id=q_id) for q_id in qubit_ids])]) - - return program_proto - - -def _get_valid_circuit_proto_pairs(): - q0 = cirq.GridQubit(0, 0) - q1 = cirq.GridQubit(0, 1) - - pairs = [ - # HPOW and aliases. - (cirq.Circuit(cirq.HPowGate(exponent=0.3)(q0)), - _build_gate_proto("HP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.HPowGate(exponent=sympy.Symbol('alpha'))(q0)), - _build_gate_proto("HP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.HPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0)), - _build_gate_proto("HP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.H(q0)), - _build_gate_proto("HP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0'])), - - # XPOW and aliases. 
- (cirq.Circuit(cirq.XPowGate(exponent=0.3)(q0)), - _build_gate_proto("XP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.XPowGate(exponent=sympy.Symbol('alpha'))(q0)), - _build_gate_proto("XP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.XPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0)), - _build_gate_proto("XP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.X(q0)), - _build_gate_proto("XP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0'])), - - # YPOW and aliases - (cirq.Circuit(cirq.YPowGate(exponent=0.3)(q0)), - _build_gate_proto("YP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.YPowGate(exponent=sympy.Symbol('alpha'))(q0)), - _build_gate_proto("YP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.YPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0)), - _build_gate_proto("YP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.Y(q0)), - _build_gate_proto("YP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0'])), - - # ZPOW and aliases. 
- (cirq.Circuit(cirq.ZPowGate(exponent=0.3)(q0)), - _build_gate_proto("ZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.ZPowGate(exponent=sympy.Symbol('alpha'))(q0)), - _build_gate_proto("ZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.ZPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0)), - _build_gate_proto("ZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0'])), - (cirq.Circuit(cirq.Z(q0)), - _build_gate_proto("ZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0'])), - - # XXPow and aliases - (cirq.Circuit(cirq.XXPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("XXP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.XXPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("XXP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.XXPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("XXP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.XX(q0, q1)), - _build_gate_proto("XXP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # YYPow and aliases - (cirq.Circuit(cirq.YYPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("YYP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.YYPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("YYP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.YYPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("YYP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - 
(cirq.Circuit(cirq.YY(q0, q1)), - _build_gate_proto("YYP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # ZZPow and aliases - (cirq.Circuit(cirq.ZZPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("ZZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.ZZPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("ZZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.ZZPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("ZZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.ZZ(q0, q1)), - _build_gate_proto("ZZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # CZPow and aliases - (cirq.Circuit(cirq.CZPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("CZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.CZPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("CZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.CZPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("CZP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.CZ(q0, q1)), - _build_gate_proto("CZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # CNOTPow and aliases - (cirq.Circuit(cirq.CNotPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("CNP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.CNotPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("CNP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], 
['0_0', '0_1'])), - (cirq.Circuit( - cirq.CNotPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("CNP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.CNOT(q0, q1)), - _build_gate_proto("CNP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # SWAPPow and aliases - (cirq.Circuit(cirq.SwapPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("SP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.SwapPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("SP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.SwapPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("SP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.SWAP(q0, q1)), - _build_gate_proto("SP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # ISWAPPow and aliases - (cirq.Circuit(cirq.ISwapPowGate(exponent=0.3)(q0, q1)), - _build_gate_proto("ISP", - ['exponent', 'exponent_scalar', 'global_shift'], - [0.3, 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.ISwapPowGate(exponent=sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("ISP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 1.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.ISwapPowGate(exponent=3.0 * sympy.Symbol('alpha'))(q0, q1)), - _build_gate_proto("ISP", - ['exponent', 'exponent_scalar', 'global_shift'], - ['alpha', 3.0, 0.0], ['0_0', '0_1'])), - (cirq.Circuit(cirq.ISWAP(q0, q1)), - _build_gate_proto("ISP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, 0.0], ['0_0', '0_1'])), - - # PhasedXPow and aliases - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=0.9, - exponent=0.3, - global_shift=0.2)(q0)), - 
_build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], [0.9, 1.0, 0.3, 1.0, 0.2], ['0_0'])), - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=sympy.Symbol('alpha'), - exponent=0.3)(q0)), - _build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], ['alpha', 1.0, 0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=3.0 * sympy.Symbol('alpha'), - exponent=0.3)(q0)), - _build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], ['alpha', 3.0, 0.3, 1.0, 0.0], ['0_0'])), - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=0.9, - exponent=sympy.Symbol('beta'))(q0)), - _build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], [0.9, 1.0, 'beta', 1.0, 0.0], ['0_0'])), - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=0.9, - exponent=5.0 * sympy.Symbol('beta'))(q0)), - _build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], [0.9, 1.0, 'beta', 5.0, 0.0], ['0_0'])), - (cirq.Circuit( - cirq.PhasedXPowGate(phase_exponent=3.0 * sympy.Symbol('alpha'), - exponent=5.0 * sympy.Symbol('beta'))(q0)), - _build_gate_proto("PXP", [ - 'phase_exponent', 'phase_exponent_scalar', 'exponent', - 'exponent_scalar', 'global_shift' - ], ['alpha', 3.0, 'beta', 5.0, 0.0], ['0_0'])), - - # RX, RY, RZ with symbolization is tested in special cases as the - # string comparison of the float converted sympy.pi does not happen - # smoothly. 
See: test_serialize_deserialize_special_case_one_qubit - (cirq.Circuit(cirq.Rx(np.pi)(q0)), - _build_gate_proto("XP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, -0.5], ['0_0'])), - (cirq.Circuit(cirq.Ry(np.pi)(q0)), - _build_gate_proto("YP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, -0.5], ['0_0'])), - (cirq.Circuit(cirq.Rz(np.pi)(q0)), - _build_gate_proto("ZP", - ['exponent', 'exponent_scalar', 'global_shift'], - [1.0, 1.0, -0.5], ['0_0'])), - - # FSimGate - (cirq.Circuit(cirq.FSimGate(theta=0.1, phi=0.2)(q0, q1)), - _build_gate_proto("FSIM", - ['theta', 'theta_scalar', 'phi', 'phi_scalar'], - [0.1, 1.0, 0.2, 1.0], ['0_0', '0_1'])), - (cirq.Circuit( - cirq.FSimGate(theta=2.0 * sympy.Symbol("alpha"), - phi=1.3 * sympy.Symbol("beta"))(q0, q1)), - _build_gate_proto("FSIM", - ['theta', 'theta_scalar', 'phi', 'phi_scalar'], - ['alpha', 2.0, 'beta', 1.3], ['0_0', '0_1'])), - ] - - return pairs - - -def _get_valid_pauli_proto_pairs(): - """Generate valid paulisum proto pairs.""" - q0 = cirq.GridQubit(0, 0) - q1 = cirq.GridQubit(1, 0) - pairs = [ - (cirq.PauliSum.from_pauli_strings((2.1 + 0.2j) * cirq.Z(q0)), - _build_pauli_proto([2.1 + 0.2j], [['Z']], [['0_0']])), - (cirq.PauliSum.from_pauli_strings((1.0 + 0.0j) * cirq.X(q0)), - _build_pauli_proto([1.0 + 0.0j], [['X']], [['0_0']])), - (cirq.PauliSum.from_pauli_strings((0.0 + 1.0j) * cirq.Y(q0)), - _build_pauli_proto([0.0 + 1.0j], [['Y']], [['0_0']])), - ((0.0 + 1.0j) * cirq.Y(q0) + 1.0 * cirq.Z(q1), - _build_pauli_proto([0.0 + 1.0j, 1.0 + 0.0j], [['Y'], ['Z']], - [['0_0'], ['1_0']])), - (2.0 * cirq.Y(q1) + 1.0 * cirq.Z(q0) + cirq.X(q0) * cirq.X(q1), - _build_pauli_proto([2.0 + 0.0j, 1.0 + 0.0j, 1.0 + 0.0j], - [['Y'], ['Z'], ['X', 'X']], - [['1_0'], ['0_0'], ['0_0', '1_0']])), - ] - - return pairs - - -def _build_pauli_proto(coefs, ops, qubit_ids): - """Construct pauli_sum proto explicitly.""" - terms = [] - for i in range(len(coefs)): - term = pauli_sum_pb2.PauliTerm() - 
term.coefficient_real = coefs[i].real - term.coefficient_imag = coefs[i].imag - for j in range(len(qubit_ids[i])): - term.paulis.add(qubit_id=qubit_ids[i][j], pauli_type=ops[i][j]) - - terms.append(term) - - a = pauli_sum_pb2.PauliSum() - a.terms.extend(terms) - return a - - -class SerializerTest(tf.test.TestCase, parameterized.TestCase): - """Tests basic serializer functionality""" - - @parameterized.parameters([{ - 'circ_proto_pair': v - } for v in _get_valid_circuit_proto_pairs()]) - def test_serialize_circuit_valid(self, circ_proto_pair): - """Test conversion of cirq Circuits to tfq_gate_set proto.""" - self.assertProtoEquals(serializer.serialize_circuit(circ_proto_pair[0]), - circ_proto_pair[1]) - - @parameterized.parameters([{ - 'circ_proto_pair': v - } for v in _get_valid_circuit_proto_pairs()]) - def test_deserialize_circuit_valid(self, circ_proto_pair): - """Test deserialization of protos in tfq_gate_set.""" - - # String casting is done here to round floating point values. - # cirq.testing.assert_same_circuits will call break and think - # cirq.Z^0.30000001 is different from cirq.Z^0.3 - self.assertEqual(circ_proto_pair[0], - serializer.deserialize_circuit(circ_proto_pair[1])) - - @parameterized.parameters([{ - 'circ_proto_pair': v - } for v in _get_valid_circuit_proto_pairs()]) - def test_serialize_deserialize_circuit_consistency(self, circ_proto_pair): - """Ensure that serializing followed by deserializing works.""" - - # String casting is done here to round floating point values. 
- # cirq.testing.assert_same_circuits will call break and think - # cirq.Z^0.30000001 is different from cirq.Z^0.3 - self.assertProtoEquals( - serializer.serialize_circuit( - serializer.deserialize_circuit(circ_proto_pair[1])), - circ_proto_pair[1]) - self.assertEqual( - serializer.deserialize_circuit( - serializer.serialize_circuit(circ_proto_pair[0])), - circ_proto_pair[0]) - - def test_serialize_circuit_unsupported_gate(self): - """Ensure we error on unsupported gates.""" - q0 = cirq.GridQubit(0, 0) - q1 = cirq.GridQubit(0, 1) - unsupported_circuit = cirq.Circuit(cirq.QFT(q0, q1)) - - with self.assertRaises(ValueError): - serializer.serialize_circuit(unsupported_circuit) - - def test_serialize_circuit_with_identity(self): - """A more generous error message for circuits containing cirq.I.""" - q0 = cirq.GridQubit(0, 0) - unsupported_circuit = cirq.Circuit.from_ops(cirq.I(q0)) - - with self.assertRaisesRegex(ValueError, expected_regex="cirq.I"): - serializer.serialize_circuit(unsupported_circuit) - - @parameterized.parameters([ - { - "gate_with_param": g(p) - } - # Use a gate from each category of serializer - for g in [ - # eigen - lambda p: cirq.Circuit( - cirq.HPowGate(exponent=p, global_shift=p) - (cirq.GridQubit(0, 0))), - # phased eigen - lambda p: cirq.Circuit( - cirq.PhasedXPowGate( - phase_exponent=p, exponent=p, global_shift=p) - (cirq.GridQubit(0, 0))), - # fsim - lambda p: cirq.Circuit( - cirq.FSimGate(theta=p, phi=p) - (cirq.GridQubit(0, 0), cirq.GridQubit(0, 1))), - ] - # Attempt parameterization with a variety of numeric types - for p in - [0.35, float(0.35), 35e-2, - np.float32(0.35), - np.float64(0.35), 7] - ]) - def test_serialize_circuit_valid_number_types(self, gate_with_param): - """Tests number datatype support by our serializer.""" - self.assertAllClose( - gate_with_param.unitary(), - serializer.deserialize_circuit( - serializer.serialize_circuit(gate_with_param)).unitary()) - - def test_serialize_circuit_unsupported_value(self): - """Ensure 
we error on unsupported arithmetic expressions.""" - q0 = cirq.GridQubit(0, 0) - unsupported_circuit = cirq.Circuit( - cirq.HPowGate()(q0)**(sympy.Symbol('alpha') + 1)) - - q1 = cirq.NamedQubit('wont work') - unsupported_circuit2 = cirq.Circuit(cirq.H(q1)) - - with self.assertRaises(ValueError): - serializer.serialize_circuit(unsupported_circuit) - - with self.assertRaises(ValueError): - serializer.serialize_circuit(unsupported_circuit2) - - @parameterized.parameters([{'inp': v} for v in ['wrong', 1.0, None, []]]) - def test_serialize_circuit_wrong_type(self, inp): - """Attempt to serialize invalid objects types.""" - with self.assertRaises(TypeError): - serializer.serialize_circuit(input) - - @parameterized.parameters([{'inp': v} for v in ['wrong', 1.0, None, []]]) - def test_deserialize_circuit_wrong_type(self, inp): - """Attempt to deserialize invalid objects types.""" - with self.assertRaises(TypeError): - serializer.deserialize_circuit(input) - - @parameterized.parameters([{'inp': v} for v in ['wrong', 1.0, None, []]]) - def test_serialize_paulisum_wrong_type(self, inp): - """Attempt to serialize invalid object types.""" - with self.assertRaises(TypeError): - serializer.serialize_paulisum(inp) - - @parameterized.parameters([{'inp': v} for v in ['wrong', 1.0, None, []]]) - def test_deserialize_paulisum_wrong_type(self, inp): - """Attempt to deserialize invalid object types.""" - with self.assertRaises(TypeError): - serializer.deserialize_paulisum(inp) - - def test_serialize_paulisum_invalid(self): - """Ensure we don't support anything but GridQubits.""" - q0 = cirq.NamedQubit('wont work') - a = 3.0 * cirq.Z(q0) - 2.0 * cirq.X(q0) - with self.assertRaises(ValueError): - serializer.serialize_paulisum(a) - - @parameterized.parameters([{ - 'sum_proto_pair': v - } for v in _get_valid_pauli_proto_pairs()]) - def test_serialize_paulisum_simple(self, sum_proto_pair): - """Ensure serialization is correct.""" - self.assertProtoEquals(sum_proto_pair[1], - 
serializer.serialize_paulisum(sum_proto_pair[0])) - - @parameterized.parameters([{ - 'sum_proto_pair': v - } for v in _get_valid_pauli_proto_pairs()]) - def test_deserialize_paulisum_simple(self, sum_proto_pair): - """Ensure deserialization is correct.""" - self.assertEqual(serializer.deserialize_paulisum(sum_proto_pair[1]), - sum_proto_pair[0]) - - @parameterized.parameters([{ - 'sum_proto_pair': v - } for v in _get_valid_pauli_proto_pairs()]) - def test_serialize_deserialize_paulisum_consistency(self, sum_proto_pair): - """Serialize and deserialize and ensure nothing changed.""" - self.assertEqual( - serializer.serialize_paulisum( - serializer.deserialize_paulisum(sum_proto_pair[1])), - sum_proto_pair[1]) - - self.assertEqual( - serializer.deserialize_paulisum( - serializer.serialize_paulisum(sum_proto_pair[0])), - sum_proto_pair[0]) - - @parameterized.parameters([ - { - 'gate': cirq.Rx(3.0 * sympy.Symbol('alpha')) - }, - { - 'gate': cirq.Ry(-1.0 * sympy.Symbol('alpha')) - }, - { - 'gate': cirq.Rz(sympy.Symbol('alpha')) - }, - ]) - def test_serialize_deserialize_special_case_one_qubit(self, gate): - """Check output state equality.""" - q0 = cirq.GridQubit(0, 0) - c = cirq.Circuit(gate(q0)) - - c = c._resolve_parameters_(cirq.ParamResolver({"alpha": 0.1234567})) - before = c.unitary() - c2 = serializer.deserialize_circuit(serializer.serialize_circuit(c)) - after = c2.unitary() - self.assertAllClose(before, after) - - def test_terminal_measurement_support(self): - """Test that non-terminal measurements error during serialization.""" - q0 = cirq.GridQubit(0, 0) - q1 = cirq.GridQubit(0, 1) - simple_circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0), cirq.H(q1), - cirq.Z(q1), cirq.measure(q1)) - - simple_circuit_before_call = copy.deepcopy(simple_circuit) - - expected_circuit = cirq.Circuit(cirq.Moment([cirq.H(q0), - cirq.H(q1)]), - cirq.Moment([cirq.Z(q1)]), - cirq.Moment([])) - - self.assertEqual(serializer.serialize_circuit(simple_circuit), - 
serializer.serialize_circuit(expected_circuit)) - - # Check that serialization didn't modify existing circuit. - self.assertEqual(simple_circuit, simple_circuit_before_call) - - invalid_circuit = cirq.Circuit(cirq.H(q0), cirq.measure(q0), - cirq.measure(q0)) - - with self.assertRaisesRegex(ValueError, expected_regex="non-terminal"): - serializer.serialize_circuit(invalid_circuit) - - def test_serialize_deserialize_identity(self): - """Confirm that identity gates can be serialized and deserialized.""" - q0 = cirq.GridQubit(0, 0) - q1 = cirq.GridQubit(0, 1) - paulisum_with_identity = cirq.PauliSum.from_pauli_strings([ - cirq.PauliString(cirq.I(q0)), - cirq.PauliString(cirq.Z(q0), cirq.Z(q1)), - ]) - self.assertEqual( - paulisum_with_identity, - serializer.deserialize_paulisum( - serializer.serialize_paulisum(paulisum_with_identity))) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/core/src/BUILD b/tensorflow_quantum/core/src/BUILD deleted file mode 100644 index 030fd459b..000000000 --- a/tensorflow_quantum/core/src/BUILD +++ /dev/null @@ -1,126 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -cc_library( - name = "circuit", - srcs = ["circuit.cc"], - hdrs = ["circuit.h"], - deps = [ - ":gates_def", - ], -) - -cc_test( - name = "circuit_test", - size = "small", - srcs = ["circuit_test.cc"], - linkstatic = 1, - deps = [ - ":circuit", - "@com_google_googletest//:gtest_main", - ], -) - -cc_library( - name = "circuit_parser", - srcs = ["circuit_parser.cc"], - hdrs = ["circuit_parser.h"], - deps = [ - ":circuit", - ":gates_def", - "//tensorflow_quantum/core/proto:pauli_sum_cc_proto", - "//tensorflow_quantum/core/proto:program_cc_proto", - "@com_google_absl//absl/container:flat_hash_map", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_test( - name = "circuit_parser_test", - size = "small", - srcs = ["circuit_parser_test.cc"], - linkstatic = 1, - deps = [ - ":circuit_parser", - ":gates_def", - "//tensorflow_quantum/core/proto:program_cc_proto", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/strings", - "@com_google_googletest//:gtest_main", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_library( - name = "gates_def", - srcs = ["gates_def.cc"], - hdrs = ["gates_def.h"], - deps = [ - ":matrix", - "@com_google_absl//absl/container:flat_hash_map", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_test( - name = "gates_def_test", - size = "small", - srcs = ["gates_def_test.cc"], - deps = [ - ":gates_def", - "@com_google_googletest//:gtest_main", - ], -) - -cc_library( - name = "matrix", - hdrs = ["matrix.h"], - deps = [], -) - -cc_test( - name = "matrix_test", - size = "small", - srcs = ["matrix_test.cc"], - deps = [ - ":gates_def", - ":matrix", - "@com_google_googletest//:gtest_main", - ], -) - -cc_library( - name = "program_resolution", - srcs = ["program_resolution.cc"], - hdrs = ["program_resolution.h"], - deps = [ - 
"//tensorflow_quantum/core/proto:pauli_sum_cc_proto", - "//tensorflow_quantum/core/proto:program_cc_proto", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_absl//absl/container:flat_hash_set", - "@com_google_absl//absl/strings", - "@local_config_tf//:libtensorflow_framework", - "@local_config_tf//:tf_header_lib", - ], -) - -cc_test( - name = "program_resolution_test", - size = "small", - srcs = ["program_resolution_test.cc"], - linkstatic = 1, - deps = [ - ":program_resolution", - "//tensorflow_quantum/core/proto:program_cc_proto", - "@com_google_absl//absl/container:flat_hash_map", - "@com_google_googletest//:gtest_main", - "@local_config_tf//:tf_header_lib", - ], -) diff --git a/tensorflow_quantum/core/src/circuit.cc b/tensorflow_quantum/core/src/circuit.cc deleted file mode 100644 index e3f6f99e1..000000000 --- a/tensorflow_quantum/core/src/circuit.cc +++ /dev/null @@ -1,45 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/circuit.h" - -#include - -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { - -Circuit::Circuit() : num_qubits(0) {} -Circuit::Circuit(unsigned int num_qubits, std::vector& gates) - : num_qubits(num_qubits), gates(gates) {} - -bool Circuit::operator==(const Circuit& r) const { - if (this->num_qubits != r.num_qubits) { - return false; - } - if (this->gates.size() != r.gates.size()) { - return false; - } - for (size_t i = 0; i < this->gates.size(); i++) { - if (this->gates.at(i) != r.gates.at(i)) { - return false; - } - } - return true; -} - -bool Circuit::operator!=(const Circuit& r) const { return !(*this == r); } - -} // namespace tfq diff --git a/tensorflow_quantum/core/src/circuit.h b/tensorflow_quantum/core/src/circuit.h deleted file mode 100644 index d17453a02..000000000 --- a/tensorflow_quantum/core/src/circuit.h +++ /dev/null @@ -1,39 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_SRC_CIRCUIT_H_ -#define TFQ_CORE_SRC_CIRCUIT_H_ - -#include - -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { - -class Circuit { - public: - unsigned int num_qubits; - std::vector gates; - - Circuit(); - Circuit(unsigned int num_qubits, std::vector& gates); - - bool operator==(const Circuit& r) const; - bool operator!=(const Circuit& r) const; -}; - -} // namespace tfq - -#endif // TFQ_CORE_SRC_CIRCUIT_H_ diff --git a/tensorflow_quantum/core/src/circuit_parser.cc b/tensorflow_quantum/core/src/circuit_parser.cc deleted file mode 100644 index 46c39cc82..000000000 --- a/tensorflow_quantum/core/src/circuit_parser.cc +++ /dev/null @@ -1,162 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/circuit_parser.h" - -#include -#include -#include -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/strings/numbers.h" -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/src/circuit.h" -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { -namespace { - -using ::cirq::google::api::v2::Moment; -using ::cirq::google::api::v2::Operation; -using ::cirq::google::api::v2::Program; -using ::cirq::google::api::v2::Qubit; -using ::tensorflow::Status; - -// Adds the operation as a Gate in the circuit. The index is the moment number. -// TODO(pmassey): Remove duplciate code between this and ../src/moment -Status ParseOperation(const Operation& op, const int num_qubits, - const int index, Gate* gate) { - std::vector locations; - unsigned int location; - for (const Qubit& qubit : op.qubits()) { - if (!absl::SimpleAtoi(qubit.id(), &location)) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Could not parse Qubit id: " + qubit.ShortDebugString()); - } - locations.push_back(num_qubits - location - 1); - } - - // Control and target are swapped relative to cirq convention - std::reverse(locations.begin(), locations.end()); - - absl::flat_hash_map arg_map; - for (const auto& pair : op.args()) { - arg_map[pair.first] = pair.second.arg_value().float_value(); - } - - const std::string& gate_name = op.gate().id(); - return InitGate(gate_name, index, locations, arg_map, gate); -} - -} // namespace - -Status CircuitFromProgram(const Program& program, const int num_qubits, - Circuit* circuit) { - circuit->num_qubits = num_qubits; - - const cirq::google::api::v2::Circuit& cirq_circuit = program.circuit(); - if (cirq_circuit.scheduling_strategy() != - 
cirq::google::api::v2::Circuit::MOMENT_BY_MOMENT) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Circuit must be moment by moment."); - } - - int i = 0; - for (const Moment& moment : cirq_circuit.moments()) { - for (const Operation& op : moment.operations()) { - Gate gate; - Status status = ParseOperation(op, num_qubits, i, &gate); - if (!status.ok()) { - return status; - } - - // Assert that q0 is always less than q1. - // Note that gate construction must handle this, so this error should - // only be thrown if a gate implementation is incorrect. - if (gate.num_qubits == 2 && gate.qubits[0] > gate.qubits[1]) { - return Status( - tensorflow::error::INVALID_ARGUMENT, - "Gate has q0 > q1 for operation: " + op.ShortDebugString()); - } - - circuit->gates.push_back(gate); - } - - i++; - } - - // TODO(zaqqwerty): identities hack added to collect orphan gates - if (num_qubits > 1) { - I2GateBuilder builder; - std::vector locations; - absl::flat_hash_map arg_map; - Gate gate; - tensorflow::Status status; - for (int w = 0; w < num_qubits - 1; w += 2) { - locations.clear(); - locations.push_back(w); - locations.push_back(w + 1); - status = builder.Build(i, locations, arg_map, &gate); - if (!status.ok()) { - return status; - } - circuit->gates.push_back(gate); - } - i++; - if (num_qubits % 2 == 1) { - locations.clear(); - locations.push_back(num_qubits - 2); - locations.push_back(num_qubits - 1); - status = builder.Build(i, locations, arg_map, &gate); - if (!status.ok()) { - return status; - } - circuit->gates.push_back(gate); - } - } - - return Status::OK(); -} - -Status CircuitFromPauliTerm(const tfq::proto::PauliTerm& term, - const int num_qubits, Circuit* circuit) { - Program measurement_program; - measurement_program.mutable_circuit()->set_scheduling_strategy( - cirq::google::api::v2::Circuit::MOMENT_BY_MOMENT); - Moment* term_moment = measurement_program.mutable_circuit()->add_moments(); - for (const tfq::proto::PauliQubitPair& pair : term.paulis()) { - 
Operation* new_op = term_moment->add_operations(); - - // create corresponding eigen gate op. - new_op->add_qubits()->set_id(pair.qubit_id()); - new_op->mutable_gate()->set_id(pair.pauli_type() + "P"); - (*new_op->mutable_args())["exponent"].mutable_arg_value()->set_float_value( - 1.0); - (*new_op->mutable_args())["global_shift"] - .mutable_arg_value() - ->set_float_value(0.0); - (*new_op->mutable_args())["exponent_scalar"] - .mutable_arg_value() - ->set_float_value(1.0); - } - - return CircuitFromProgram(measurement_program, num_qubits, circuit); -} - -} // namespace tfq diff --git a/tensorflow_quantum/core/src/circuit_parser.h b/tensorflow_quantum/core/src/circuit_parser.h deleted file mode 100644 index ad0abc4fe..000000000 --- a/tensorflow_quantum/core/src/circuit_parser.h +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#ifndef TFQ_CORE_SRC_CIRCUIT_PARSER_H_ -#define TFQ_CORE_SRC_CIRCUIT_PARSER_H_ - -#include -#include -#include - -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" -#include "tensorflow_quantum/core/src/circuit.h" - -namespace tfq { - -// parse a serialized Cirq program into our internal representation -tensorflow::Status CircuitFromProgram( - const cirq::google::api::v2::Program& program, const int num_qubits, - Circuit* circuit); - -// build the circuit taking the computational basis to the measurement basis -tensorflow::Status CircuitFromPauliTerm(const tfq::proto::PauliTerm& term, - const int num_qubits, Circuit* circuit); - -} // namespace tfq - -#endif // TFQ_CORE_SRC_CIRCUIT_PARSER_H_ diff --git a/tensorflow_quantum/core/src/circuit_parser_test.cc b/tensorflow_quantum/core/src/circuit_parser_test.cc deleted file mode 100644 index a63f7e6c2..000000000 --- a/tensorflow_quantum/core/src/circuit_parser_test.cc +++ /dev/null @@ -1,242 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/circuit_parser.h" - -#include - -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/strings/numbers.h" -#include "cirq/google/api/v2/program.pb.h" -#include "gtest/gtest.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/circuit.h" -#include "tensorflow_quantum/core/src/circuit_parser.h" -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { -namespace { - -using ::cirq::google::api::v2::Program; - -TEST(CircuitParserTest, CircuitFromProgramInvalidSchedule) { - Program program_proto; - ::cirq::google::api::v2::Circuit* circuit_proto = - program_proto.mutable_circuit(); - circuit_proto->set_scheduling_strategy( - circuit_proto->SCHEDULING_STRATEGY_UNSPECIFIED); - Circuit test_circuit; - ASSERT_EQ(CircuitFromProgram(program_proto, 0, &test_circuit), - tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "Circuit must be moment by moment.")); -} - -TEST(CircuitParserTest, CircuitFromProgramInvalidQubitId) { - Program program_proto; - ::cirq::google::api::v2::Circuit* circuit_proto = - program_proto.mutable_circuit(); - circuit_proto->set_scheduling_strategy(circuit_proto->MOMENT_BY_MOMENT); - ::cirq::google::api::v2::Moment* moments_proto = circuit_proto->add_moments(); - - // Add CNOT gate with invalid qubit - ::cirq::google::api::v2::Operation* operations_proto = - moments_proto->add_operations(); - ::cirq::google::api::v2::Gate* gate_proto = operations_proto->mutable_gate(); - gate_proto->set_id("CNOT"); - ::cirq::google::api::v2::Qubit* qubits_proto = operations_proto->add_qubits(); - qubits_proto->set_id("0"); - qubits_proto = operations_proto->add_qubits(); - qubits_proto->set_id("0_0"); - - Circuit test_circuit; - ASSERT_EQ(CircuitFromProgram(program_proto, 2, &test_circuit), - tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "Could not parse Qubit id: " + 
- qubits_proto->ShortDebugString())); -} - -TEST(CircuitParserTest, CircuitFromProgramEmpty) { - Program program_proto; - ::cirq::google::api::v2::Circuit* circuit_proto = - program_proto.mutable_circuit(); - circuit_proto->set_scheduling_strategy(circuit_proto->MOMENT_BY_MOMENT); - - Circuit real_circuit, test_circuit; - // TODO(zaqqwerty): num_qubits <= 1 due to orphan gate collection method - real_circuit.num_qubits = 0; - ASSERT_TRUE(CircuitFromProgram(program_proto, 0, &test_circuit).ok()); - - ASSERT_EQ(test_circuit.num_qubits, real_circuit.num_qubits); - ASSERT_EQ(test_circuit.gates, real_circuit.gates); - - // Test application of orphan gate collection identity gates - // TODO(zaqqwerty): remove when orphan gate collection is moved to fuser - Program program_proto_ident_odd; - ::cirq::google::api::v2::Circuit* circuit_proto_ident_odd = - program_proto_ident_odd.mutable_circuit(); - circuit_proto_ident_odd->set_scheduling_strategy( - circuit_proto_ident_odd->MOMENT_BY_MOMENT); - Program program_proto_ident_even; - ::cirq::google::api::v2::Circuit* circuit_proto_ident_even = - program_proto_ident_even.mutable_circuit(); - circuit_proto_ident_even->set_scheduling_strategy( - circuit_proto_ident_even->MOMENT_BY_MOMENT); - - I2GateBuilder ident_builder; - std::vector locations; - absl::flat_hash_map arg_map; - Gate gate_01, gate_12, gate_23; - locations.clear(); - locations.push_back(0); - locations.push_back(1); - ASSERT_TRUE(ident_builder.Build(0, locations, arg_map, &gate_01).ok()); - locations.clear(); - locations.push_back(1); - locations.push_back(2); - ASSERT_TRUE(ident_builder.Build(1, locations, arg_map, &gate_12).ok()); - locations.clear(); - locations.push_back(2); - locations.push_back(3); - ASSERT_TRUE(ident_builder.Build(0, locations, arg_map, &gate_23).ok()); - - Circuit real_circuit_ident_odd, test_circuit_ident_odd; - real_circuit_ident_odd.num_qubits = 3; - real_circuit_ident_odd.gates.push_back(gate_01); - 
real_circuit_ident_odd.gates.push_back(gate_12); - ASSERT_TRUE( - CircuitFromProgram(program_proto_ident_odd, 3, &test_circuit_ident_odd) - .ok()); - ASSERT_EQ(test_circuit_ident_odd, real_circuit_ident_odd); - - Circuit real_circuit_ident_even, test_circuit_ident_even; - real_circuit_ident_even.num_qubits = 4; - real_circuit_ident_even.gates.push_back(gate_01); - real_circuit_ident_even.gates.push_back(gate_23); - ASSERT_TRUE( - CircuitFromProgram(program_proto_ident_even, 4, &test_circuit_ident_even) - .ok()); - ASSERT_EQ(test_circuit_ident_even, real_circuit_ident_even); -} - -TEST(CircuitParserTest, CircuitFromProgramPaulis) { - Program program_proto; - ::cirq::google::api::v2::Circuit* circuit_proto = - program_proto.mutable_circuit(); - circuit_proto->set_scheduling_strategy(circuit_proto->MOMENT_BY_MOMENT); - ::cirq::google::api::v2::Moment* moments_proto = circuit_proto->add_moments(); - - // Add X gate - ::cirq::google::api::v2::Operation* operations_proto = - moments_proto->add_operations(); - ::cirq::google::api::v2::Gate* gate_proto = operations_proto->mutable_gate(); - gate_proto->set_id("XP"); - ::cirq::google::api::v2::Qubit* qubits_proto = operations_proto->add_qubits(); - qubits_proto->set_id("0"); - - // Create the required Arg protos - ::cirq::google::api::v2::Arg global_shift_arg; - ::cirq::google::api::v2::ArgValue* global_shift_arg_value = - global_shift_arg.mutable_arg_value(); - global_shift_arg_value->set_float_value(0.0); - ::cirq::google::api::v2::Arg exponent_arg; - ::cirq::google::api::v2::ArgValue* exponent_arg_value = - exponent_arg.mutable_arg_value(); - exponent_arg_value->set_float_value(1.0); - ::cirq::google::api::v2::Arg exponent_scalar_arg; - ::cirq::google::api::v2::ArgValue* exponent_scalar_arg_value = - exponent_scalar_arg.mutable_arg_value(); - exponent_scalar_arg_value->set_float_value(1.0); - - // Add Arg protos to the operation arg map - google::protobuf::Map* args_proto = - operations_proto->mutable_args(); - 
(*args_proto)["global_shift"] = global_shift_arg; - (*args_proto)["exponent"] = exponent_arg; - (*args_proto)["exponent_scalar"] = exponent_scalar_arg; - - ASSERT_EQ(program_proto.circuit().moments()[0].operations_size(), 1); - - // Build the corresponding correct circuit - Circuit real_circuit; - std::vector locations; - XPowGateBuilder builder; - Gate gate_x; - ::tensorflow::Status status; - - real_circuit.num_qubits = 1; - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - locations.push_back(real_circuit.num_qubits - 0 - 1); - status = builder.Build(0, locations, arg_map, &gate_x); - ASSERT_EQ(status, tensorflow::Status::OK()); - real_circuit.gates.push_back(gate_x); - locations.clear(); - - // Check conversion - Circuit test_circuit; - status = CircuitFromProgram(program_proto, 1, &test_circuit); - ASSERT_EQ(status, tensorflow::Status::OK()); - ASSERT_EQ(test_circuit, real_circuit); -} - -TEST(CircuitParserTest, CircuitFromPauliTermEmpty) { - tfq::proto::PauliTerm pauli_proto; - tensorflow::Status status; - Circuit test_circuit, real_circuit; - real_circuit.num_qubits = 0; - status = CircuitFromPauliTerm(pauli_proto, 0, &test_circuit); - ASSERT_EQ(status, tensorflow::Status::OK()); - ASSERT_EQ(test_circuit, real_circuit); -} - -TEST(CircuitParserTest, CircuitFromPauliTermPauli) { - tfq::proto::PauliTerm pauli_proto; - // The created circuit should not depend on the coefficient - pauli_proto.set_coefficient_real(3.14); - tfq::proto::PauliQubitPair* pair_proto = pauli_proto.add_paulis(); - pair_proto->set_qubit_id("0"); - pair_proto->set_pauli_type("X"); - - // Build the corresponding correct circuit - Circuit real_circuit; - std::vector locations; - XPowGateBuilder builder; - Gate gate_x; - ::tensorflow::Status status; - - real_circuit.num_qubits = 1; - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - 
locations.push_back(real_circuit.num_qubits - 0 - 1); - status = builder.Build(0, locations, arg_map, &gate_x); - ASSERT_EQ(status, tensorflow::Status::OK()); - real_circuit.gates.push_back(gate_x); - locations.clear(); - - // Check conversion - Circuit test_circuit; - status = CircuitFromPauliTerm(pauli_proto, 1, &test_circuit); - ASSERT_EQ(status, tensorflow::Status::OK()); - ASSERT_EQ(test_circuit, real_circuit); -} - -} // namespace -} // namespace tfq diff --git a/tensorflow_quantum/core/src/circuit_test.cc b/tensorflow_quantum/core/src/circuit_test.cc deleted file mode 100644 index c39c2f2bd..000000000 --- a/tensorflow_quantum/core/src/circuit_test.cc +++ /dev/null @@ -1,77 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/circuit.h" - -#include - -#include "gtest/gtest.h" - -namespace tfq { -namespace { - -float RandomFloat() { - return static_cast(std::rand()) / static_cast(RAND_MAX); -} - -void GetTestGate1q(Gate* test_gate) { - std::array matrix; - std::generate(matrix.begin(), matrix.begin() + 8, RandomFloat); - *test_gate = Gate(18, 3, matrix); -} - -void GetTestGate2q(Gate* test_gate) { - std::array matrix; - std::generate(matrix.begin(), matrix.begin() + 32, RandomFloat); - *test_gate = Gate(19, 3, 4, matrix); -} - -TEST(CircuitTest, CircuitEmpty) { - Circuit true_circuit, test_circuit; - test_circuit.num_qubits = true_circuit.num_qubits = 53; - - // check equality operator overload - test_circuit.num_qubits = true_circuit.num_qubits + 1; - ASSERT_NE(test_circuit, true_circuit); - test_circuit.num_qubits = true_circuit.num_qubits; - ASSERT_EQ(test_circuit, true_circuit); -} - -TEST(CircuitTest, CircuitFull) { - Circuit true_circuit, test_circuit; - test_circuit.num_qubits = true_circuit.num_qubits = 53; - - Gate gate_0, gate_1, gate_2; - GetTestGate1q(&gate_1); - GetTestGate2q(&gate_2); - test_circuit.gates.push_back(gate_0); - test_circuit.gates.push_back(gate_1); - test_circuit.gates.push_back(gate_2); - true_circuit.gates.push_back(gate_0); - true_circuit.gates.push_back(gate_1); - true_circuit.gates.push_back(gate_2); - - // check equality operator overload - test_circuit.num_qubits = true_circuit.num_qubits + 1; - ASSERT_NE(test_circuit, true_circuit); - test_circuit.num_qubits = true_circuit.num_qubits; - test_circuit.gates[2] = true_circuit.gates[0]; - ASSERT_NE(test_circuit, true_circuit); - test_circuit.gates[2] = true_circuit.gates[2]; - ASSERT_EQ(test_circuit, true_circuit); -} - -} // namespace -} // namespace tfq diff --git a/tensorflow_quantum/core/src/gates_def.cc b/tensorflow_quantum/core/src/gates_def.cc deleted file mode 100644 index 
3ebd892cf..000000000 --- a/tensorflow_quantum/core/src/gates_def.cc +++ /dev/null @@ -1,531 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow_quantum/core/src/gates_def.h" - -#define _USE_MATH_DEFINES -#include -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/src/matrix.h" - -namespace tfq { -namespace { - -using ::tensorflow::Status; - -static const float DENOM_PLUS = 4.0f + 2.0f * std::sqrt(2); -static const float DENOM_MINUS = 4.0f - 2.0f * std::sqrt(2); -static const std::complex PRE_00_PLUS = - std::complex((3 + 2 * std::sqrt(2)) / DENOM_PLUS, 0); -static const std::complex PRE_00_MINUS = - std::complex((3 - 2 * std::sqrt(2)) / DENOM_MINUS, 0); -static const std::complex PRE_0110_PLUS = - std::complex((1 + std::sqrt(2)) / DENOM_PLUS, 0); -static const std::complex PRE_0110_MINUS = - std::complex((1 - std::sqrt(2)) / DENOM_MINUS, 0); -static const std::complex PRE_11_PLUS = - std::complex(1 / DENOM_PLUS, 0); -static const std::complex PRE_11_MINUS = - std::complex(1 / DENOM_MINUS, 0); - -static constexpr std::complex I_UNIT = std::complex(0, 1); -static constexpr float pi = static_cast(M_PI); -inline std::complex global_phase(const float exponent, - const float global_shift) { - return std::exp(I_UNIT * pi * 
global_shift * exponent); -} - -// Caller must free returned object -GateBuilder* GateNameMapper(const std::string& gate_name) { - // clang-format off - if (gate_name == "I") {return new IGateBuilder();} - if (gate_name == "HP") {return new HPowGateBuilder();} - if (gate_name == "XP") {return new XPowGateBuilder();} - if (gate_name == "XXP") {return new XXPowGateBuilder();} - if (gate_name == "YP") {return new YPowGateBuilder();} - if (gate_name == "YYP") {return new YYPowGateBuilder();} - if (gate_name == "ZP") {return new ZPowGateBuilder();} - if (gate_name == "ZZP") {return new ZZPowGateBuilder();} - if (gate_name == "CZP") {return new CZPowGateBuilder();} - if (gate_name == "I2") {return new I2GateBuilder();} - if (gate_name == "CNP") {return new CNotPowGateBuilder();} - if (gate_name == "SP") {return new SwapPowGateBuilder();} - if (gate_name == "ISP") {return new ISwapPowGateBuilder();} - if (gate_name == "PXP") {return new PhasedXPowGateBuilder();} - if (gate_name == "FSIM") {return new FSimGateBuilder();} - if (gate_name == "PISP") {return new PhasedISwapPowGateBuilder();} - // clang-format on - return NULL; -} - -} // namespace - -Gate::Gate() : time(0), num_qubits(0) {} - -Gate::Gate(const unsigned int time_in, const unsigned int qubit_in, - const std::array& matrix_in) - : time(time_in), num_qubits(1) { - qubits[0] = qubit_in; - std::copy(matrix_in.begin(), matrix_in.end(), matrix.begin()); -} - -Gate::Gate(const unsigned int time_in, const unsigned int q1, - const unsigned int q2, const std::array& matrix_in) - : time(time_in), num_qubits(2) { - qubits[0] = q1; - qubits[1] = q2; - std::copy(matrix_in.begin(), matrix_in.end(), matrix.begin()); - // The simulators expect qubits to be in ascending order. 
- // To correct for this we swap the qubits, then permute the matrix accordingly - if (q2 < q1) { - std::swap(qubits[0], qubits[1]); - Matrix4Permute(matrix); - } -} - -bool operator==(const Gate& l, const Gate& r) { - if (l.time != r.time) { - return false; - } - if (l.num_qubits != r.num_qubits) { - return false; - } - for (unsigned int i = 0; i < l.num_qubits; i++) { - if (l.qubits.at(i) != r.qubits.at(i)) { - return false; - } - } - if (l.num_qubits > 0) { - // real and imaginary component for each matrix site - const unsigned int true_mat_size = - 2 * (1 << l.num_qubits) * (1 << l.num_qubits); - for (unsigned int i = 0; i < true_mat_size; i++) { - if (std::fabs(l.matrix[i] - r.matrix[i]) > 1e-6) { - return false; - } - } - } - return true; -} - -bool operator!=(const Gate& l, const Gate& r) { return !(l == r); } - -Status InitGate(const std::string& gate_name, const unsigned int time, - const std::vector& locations, - const absl::flat_hash_map& args, - Gate* gate) { - GateBuilder* builder; - builder = GateNameMapper(gate_name); - if (builder == NULL) { - return Status(tensorflow::error::UNIMPLEMENTED, - "The given gate id, " + gate_name + - ", does not match any available TFQ gate."); - } - Status status = builder->Build(time, locations, args, gate); - delete builder; - return status; -} - -Status OneQubitGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 1) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only one qubit location should be provided."); - } - - float exponent; - float global_shift; - const auto itr_exponent = args.find("exponent"); - const auto itr_global_shift = args.find("global_shift"); - // Workaround to support scalar multiplication of symbols. See serialize.py. 
- const auto itr_exponent_scalar = args.find("exponent_scalar"); - if (itr_exponent == args.end() || itr_global_shift == args.end() || - itr_exponent_scalar == args.end()) { - return tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "Eigen gates require exponent and " - "global_shift args."); - } - exponent = itr_exponent->second * itr_exponent_scalar->second; - global_shift = itr_global_shift->second; - *gate = Gate(time, locations[0], GetMatrix(exponent, global_shift)); - return Status::OK(); -} - -Status OneQubitConstantGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 1) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only one qubit location should be provided."); - } - if (!args.empty()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Constant gates take no arguments, " + - std::to_string(args.size()) + " were given."); - } - *gate = Gate(time, locations[0], GetMatrix()); - return Status::OK(); -} - -Status OneQubitPhasedGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 1) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only one qubit location should be provided."); - } - float exponent; - float phase_exponent; - float global_shift; - const auto itr_exponent = args.find("exponent"); - const auto itr_phase_exponent = args.find("phase_exponent"); - const auto itr_global_shift = args.find("global_shift"); - // Workaround to support scalar multiplication of symbols. See serialize.py. 
- const auto itr_exponent_scalar = args.find("exponent_scalar"); - const auto itr_phase_exponent_scalar = args.find("phase_exponent_scalar"); - if (itr_exponent == args.end() || itr_global_shift == args.end() || - itr_exponent_scalar == args.end()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Eigen gates require exponent and global_shift args."); - } - exponent = itr_exponent->second * itr_exponent_scalar->second; - phase_exponent = - itr_phase_exponent->second * itr_phase_exponent_scalar->second; - global_shift = itr_global_shift->second; - *gate = Gate(time, locations[0], - GetMatrix(exponent, phase_exponent, global_shift)); - return Status::OK(); -} - -Status TwoQubitGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only two qubit locations should be provided."); - } - float exponent; - float global_shift; - const auto itr_exponent = args.find("exponent"); - const auto itr_global_shift = args.find("global_shift"); - // Workaround to support scalar multiplication of symbols. See serialize.py. 
- const auto itr_exponent_scalar = args.find("exponent_scalar"); - if (itr_exponent == args.end() || itr_global_shift == args.end() || - itr_exponent_scalar == args.end()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Eigen gates require exponent and global_shift args."); - } - exponent = itr_exponent->second * itr_exponent_scalar->second; - global_shift = itr_global_shift->second; - *gate = - Gate(time, locations[0], locations[1], GetMatrix(exponent, global_shift)); - return Status::OK(); -} - -Status TwoQubitPhasedGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only two qubit locations should be provided."); - } - float exponent; - float global_shift; - float phase_exponent; - const auto itr_exponent = args.find("exponent"); - const auto itr_phase_exponent = args.find("phase_exponent"); - const auto itr_global_shift = args.find("global_shift"); - // Workaround to support scalar multiplication of symbols. See serialize.py. 
- const auto itr_exponent_scalar = args.find("exponent_scalar"); - const auto itr_phase_exponent_scalar = args.find("phase_exponent_scalar"); - if (itr_exponent == args.end() || itr_global_shift == args.end() || - itr_exponent_scalar == args.end() || - itr_phase_exponent_scalar == args.end()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Phased Eigen gates require exponent, phase_exponent " - "and global_shift args."); - } - exponent = itr_exponent->second * itr_exponent_scalar->second; - phase_exponent = - itr_phase_exponent->second * itr_phase_exponent_scalar->second; - global_shift = itr_global_shift->second; - *gate = Gate(time, locations[0], locations[1], - GetMatrix(exponent, phase_exponent, global_shift)); - return Status::OK(); -} - -Status TwoQubitConstantGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Only two qubit locations should be provided."); - } - if (!args.empty()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Constant gates take no arguments, " + - std::to_string(args.size()) + " were given."); - } - *gate = Gate(time, locations[0], locations[1], GetMatrix()); - return Status::OK(); -} - -Matrix1q XPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex plus = 0.5f * g * (1.0f + w); - const std::complex minus = 0.5f * g * (1.0f - w); - - return {{plus.real(), plus.imag(), minus.real(), minus.imag(), minus.real(), - minus.imag(), plus.real(), plus.imag()}}; -} - -Matrix1q YPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex plus = 0.5f * g * (1.0f 
+ w); - const std::complex v01 = 0.5f * g * (-1.0f + w) * I_UNIT; - const std::complex v10 = 0.5f * g * (1.0f - w) * I_UNIT; - - return {{plus.real(), plus.imag(), v01.real(), v01.imag(), v10.real(), - v10.imag(), plus.real(), plus.imag()}}; -} - -Matrix1q ZPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex prod = g * w; - - return { - {g.real(), g.imag(), 0.0f, 0.0f, 0.0f, 0.0f, prod.real(), prod.imag()}}; -} - -Matrix1q HPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - - const std::complex v00 = g * (PRE_00_PLUS + PRE_00_MINUS * w); - const std::complex v01_and_10 = - g * (PRE_0110_PLUS + PRE_0110_MINUS * w); - const std::complex v11 = g * (PRE_11_PLUS + PRE_11_MINUS * w); - - return {{v00.real(), v00.imag(), v01_and_10.real(), v01_and_10.imag(), - v01_and_10.real(), v01_and_10.imag(), v11.real(), v11.imag()}}; -} - -Matrix1q IGateBuilder::GetMatrix() { - static constexpr Matrix1q matrix = {{1, 0, 0, 0, 0, 0, 1, 0}}; - return matrix; -} - -Matrix1q PhasedXPowGateBuilder::GetMatrix(const float exponent, - const float phase_exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex w2 = global_phase(phase_exponent, 1); - const std::complex w2_n = global_phase(-1.0f * phase_exponent, 1); - - const std::complex v00_and_11 = 0.5f * g * (1.0f + w); - const std::complex v01 = 0.5f * g * (1.0f - w) * w2_n; - const std::complex v10 = 0.5f * g * (1.0f - w) * w2; - - return {{v00_and_11.real(), v00_and_11.imag(), v01.real(), v01.imag(), - v10.real(), v10.imag(), v00_and_11.real(), v00_and_11.imag()}}; -} - -Matrix2q XXPowGateBuilder::GetMatrix(const float 
exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex pc = 0.5f * g * (1.0f + w); - const std::complex mc = 0.5f * g * (1.0f - w); - - // clang-format off - return {{pc.real(), pc.imag(), 0, 0, 0, 0, mc.real(), mc.imag(), - 0, 0, pc.real(), pc.imag(), mc.real(), mc.imag(), 0, 0, - 0, 0, mc.real(), mc.imag(), pc.real(), pc.imag(), 0, 0, - mc.real(), mc.imag(), 0, 0, 0, 0, pc.real(), pc.imag()}}; - // clang-format on -} - -Matrix2q YYPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex pc = 0.5f * g * (1.0f + w); - const std::complex mc = 0.5f * g * (1.0f - w); - const std::complex n_mc = -1.0f * mc; - - // clang-format off - return {{pc.real(), pc.imag(), 0, 0, 0, 0, n_mc.real(), n_mc.imag(), - 0, 0, pc.real(), pc.imag(), mc.real(), mc.imag(), 0, 0, - 0, 0, mc.real(), mc.imag(), pc.real(), pc.imag(), 0, 0, - n_mc.real(), n_mc.imag(), 0, 0, 0, 0, pc.real(), pc.imag()}}; - // clang-format on -} - -Matrix2q ZZPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex gw = g * w; - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, gw.real(), gw.imag(), 0, 0, 0, 0, - 0, 0, 0, 0, gw.real(), gw.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, g.real(), g.imag()}}; - // clang-format on -} - -Matrix2q CZPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex gw = g * w; - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, g.real(), g.imag(), 0, 0, 
0, 0, - 0, 0, 0, 0, g.real(), g.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, gw.real(), gw.imag()}}; - // clang-format on -} - -Matrix2q CNotPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex plus = 0.5f * g * (1.0f + w); - const std::complex minus = 0.5f * g * (1.0f - w); - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, g.real(), g.imag(), 0, 0, 0, 0, - 0, 0, 0, 0, plus.real(), plus.imag(), minus.real(), minus.imag(), - 0, 0, 0, 0, minus.real(), minus.imag(), plus.real(), plus.imag()}}; - // clang-format on -} - -Matrix2q SwapPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex w = global_phase(exponent, 1); - const std::complex plus = 0.5f * g * (1.0f + w); - const std::complex minus = 0.5f * g * (1.0f - w); - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, plus.real(), plus.imag(), minus.real(), minus.imag(), 0, 0, - 0, 0, minus.real(), minus.imag(), plus.real(), plus.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, g.real(), g.imag()}}; - // clang-format on -} - -Matrix2q ISwapPowGateBuilder::GetMatrix(const float exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex wp = global_phase(exponent, 0.5); - const std::complex wm = global_phase(exponent, -0.5); - const std::complex plus = 0.5f * g * (wp + wm); - const std::complex minus = 0.5f * g * (wp - wm); - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, plus.real(), plus.imag(), minus.real(), minus.imag(), 0, 0, - 0, 0, minus.real(), minus.imag(), plus.real(), plus.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, g.real(), g.imag()}}; - // clang-format on -} - -Matrix2q PhasedISwapPowGateBuilder::GetMatrix(const float 
exponent, - const float phase_exponent, - const float global_shift) { - const std::complex g = global_phase(exponent, global_shift); - const std::complex wp = global_phase(exponent, 0.5); - const std::complex wm = global_phase(exponent, -0.5); - const std::complex plus = 0.5f * g * (wp + wm); - const std::complex minus = 0.5f * g * (wp - wm); - const std::complex f = global_phase(phase_exponent, 2.0); - const std::complex f_star = std::conj(f); - const std::complex ur = minus * f; - const std::complex bl = minus * f_star; - - // clang-format off - return {{g.real(), g.imag(), 0, 0, 0, 0, 0, 0, - 0, 0, plus.real(), plus.imag(), ur.real(), ur.imag(), 0, 0, - 0, 0, bl.real(), bl.imag(), plus.real(), plus.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, g.real(), g.imag()}}; - // clang-format on -} - -Matrix2q I2GateBuilder::GetMatrix() { - // clang-format off - static constexpr Matrix2q matrix = {{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}}; - // clang-format on - return matrix; -} - -Status FSimGateBuilder::Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) { - if (locations.size() != 2) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "Two qubit locations should be provided."); - } - double theta; - double phi; - const auto itr_theta = args.find("theta"); - const auto itr_phi = args.find("phi"); - const auto itr_theta_scalar = args.find("theta_scalar"); - const auto itr_phi_scalar = args.find("phi_scalar"); - if (itr_theta == args.end() || itr_phi == args.end() || - itr_theta_scalar == args.end() || itr_phi_scalar == args.end()) { - return Status(tensorflow::error::INVALID_ARGUMENT, - "FSimGate requires theta and phi args."); - } - theta = itr_theta->second * itr_theta_scalar->second; - phi = itr_phi->second * itr_phi_scalar->second; - *gate = Gate(time, locations[0], locations[1], GetMatrix(theta, phi)); - return Status::OK(); -} - -Matrix2q 
FSimGateBuilder::GetMatrix(const float theta, const float phi) { - const std::complex minus_i_unit = std::complex(0, -1.0); - const std::complex a = std::cos(theta); - const std::complex b = minus_i_unit * std::sin(theta); - const std::complex c = std::exp(minus_i_unit * phi); - // clang-format off - return {{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, a.real(), a.imag(), b.real(), b.imag(), 0, 0, - 0, 0, b.real(), b.imag(), a.real(), a.imag(), 0, 0, - 0, 0, 0, 0, 0, 0, c.real(), c.imag()}}; - // clang-format on -} - -} // namespace tfq diff --git a/tensorflow_quantum/core/src/gates_def.h b/tensorflow_quantum/core/src/gates_def.h deleted file mode 100644 index 9628c8ced..000000000 --- a/tensorflow_quantum/core/src/gates_def.h +++ /dev/null @@ -1,236 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_SRC_GATES_DEF_H_ -#define TFQ_CORE_SRC_GATES_DEF_H_ - -// These gates were originally designed to take a template fp_type variable, but -// using that causes linking problems around the matrix element in the Gate -// class. Only floats are implemented now as a result. -// -// TODO(pmassey): Add tests to assert that the swapped versions of matrices are -// the same as applying swap gates and the original gate. 
- -#include -#include -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "tensorflow/core/lib/core/status.h" - -namespace tfq { - -class Gate { - public: - unsigned int time; - unsigned int num_qubits; - std::array qubits; - std::array matrix; - - // Overload for empty gates (to be assigned to) - Gate(); - - // Overload for one-qubit gates - Gate(const unsigned int time_in, const unsigned int qubit_in, - const std::array& matrix_in); - - // Overload for two-qubit gates - Gate(const unsigned int time_in, const unsigned int q1, const unsigned int q2, - const std::array& matrix_in); - - ~Gate() {} -}; - -bool operator==(const Gate& l, const Gate& r); -bool operator!=(const Gate& l, const Gate& r); - -using Matrix1q = std::array; -using Matrix2q = std::array; - -// Constructor base class that takes in Gate parameters and returns a Gate. -class GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) = 0; - - virtual ~GateBuilder() = default; -}; - -// Fill the pointed-to gate using the passed parameters. -// Returns UNIMPLEMENTED error if the given gate_id matches no TFQuantum gate, -// or INVALID_ARGUMENT error if a parameter is invalid for the given gate_id. -tensorflow::Status InitGate(const std::string& gate_name, - const unsigned int time, - const std::vector& locations, - const absl::flat_hash_map& args, - Gate* gate); - -// ============================================================================ -// GateBuilder Interfaces. 
-// ============================================================================ - -class OneQubitGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix1q GetMatrix(const float exponent, - const float global_shift) = 0; -}; - -class OneQubitConstantGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix1q GetMatrix() = 0; -}; - -class OneQubitPhasedGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix1q GetMatrix(const float exponent, const float phase_exponent, - const float global_shift) = 0; -}; - -class TwoQubitGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix2q GetMatrix(const float exponent, - const float global_shift) = 0; -}; - -class TwoQubitPhasedGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix2q GetMatrix(const float exponent, const float phase_exponent, - const float global_shift) = 0; -}; - -class TwoQubitConstantGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - virtual Matrix2q GetMatrix() = 0; -}; - -// ============================================================================ -// GateBuilder implementations. 
-// ============================================================================ - -class XPowGateBuilder : public OneQubitGateBuilder { - public: - Matrix1q GetMatrix(const float exponent, const float global_shift) override; -}; - -class YPowGateBuilder : public OneQubitGateBuilder { - public: - Matrix1q GetMatrix(const float exponent, const float global_shift) override; -}; - -class ZPowGateBuilder : public OneQubitGateBuilder { - public: - Matrix1q GetMatrix(const float exponent, const float global_shift) override; -}; - -class HPowGateBuilder : public OneQubitGateBuilder { - public: - Matrix1q GetMatrix(const float exponent, const float global_shift) override; -}; - -class IGateBuilder : public OneQubitConstantGateBuilder { - public: - Matrix1q GetMatrix() override; -}; - -class PhasedXPowGateBuilder : public OneQubitPhasedGateBuilder { - public: - Matrix1q GetMatrix(const float exponent, const float phase_exponent, - const float global_shift) override; -}; - -class XXPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class YYPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class ZZPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class CZPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class CNotPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class SwapPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float global_shift) override; -}; - -class ISwapPowGateBuilder : public TwoQubitGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const 
float global_shift) override; -}; - -class PhasedISwapPowGateBuilder : public TwoQubitPhasedGateBuilder { - public: - Matrix2q GetMatrix(const float exponent, const float phase_exponent, - const float global_shift) override; -}; - -class I2GateBuilder : public TwoQubitConstantGateBuilder { - public: - Matrix2q GetMatrix() override; -}; - -class FSimGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, Gate* gate) override; - - Matrix2q GetMatrix(const float theta, const float phi); -}; - -} // namespace tfq - -#endif // TFQ_CORE_QSIM_GATES_DEF_H_ diff --git a/tensorflow_quantum/core/src/gates_def_test.cc b/tensorflow_quantum/core/src/gates_def_test.cc deleted file mode 100644 index a7c321cb2..000000000 --- a/tensorflow_quantum/core/src/gates_def_test.cc +++ /dev/null @@ -1,694 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/gates_def.h" - -#define _USE_MATH_DEFINES -#include -#include -#include -#include -#include - -#include "absl/container/flat_hash_map.h" -#include "gtest/gtest.h" -#include "tensorflow/core/lib/core/status.h" - -namespace tfq { -namespace { - -TEST(GatesDefTest, GateConstructors) { - // Empty gate constructor - Gate gate0q; - EXPECT_EQ(gate0q.time, 0); - EXPECT_EQ(gate0q.num_qubits, 0); - - // One-qubit gate constructor - const unsigned int time1q = 256; - const unsigned int qubits1q = 53; - const std::array matrix1q{0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7}; - Gate gate1q(time1q, qubits1q, matrix1q); - EXPECT_EQ(gate1q.time, time1q); - EXPECT_EQ(gate1q.num_qubits, 1); - EXPECT_EQ(gate1q.qubits[0], qubits1q); - for (int i = 0; i < 8; i++) { - EXPECT_EQ(gate1q.matrix[i], matrix1q[i]); - } - - // Two-qubit gate constructor - const unsigned int time2q = 512; - const unsigned int qubits2q1 = 53; - const unsigned int qubits2q2 = 256; - const std::array matrix2q{ - 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, - 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.20, 0.21, - 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30, 0.31}; - Gate gate2q(time2q, qubits2q1, qubits2q2, matrix2q); - EXPECT_EQ(gate2q.time, time2q); - EXPECT_EQ(gate2q.num_qubits, 2); - EXPECT_EQ(gate2q.qubits[0], qubits2q1); - EXPECT_EQ(gate2q.qubits[1], qubits2q2); - for (int i = 0; i < 32; i++) { - EXPECT_EQ(gate2q.matrix[i], matrix2q[i]); - } - - // Confirm swapping in constructor - // clang-format off - std::array matrix_original{ - 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, - 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, - 8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5, - 12, 12.5, 13, 13.5, 14, 14.5, 15, 15.5}; - const std::array matrix_swapped{ - 0, 0.5, 2, 2.5, 1, 1.5, 3, 3.5, - 8, 8.5, 10, 10.5, 9, 9.5, 11, 11.5, - 4, 4.5, 6, 6.5, 5, 5.5, 7, 7.5, - 12, 12.5, 14, 14.5, 13, 13.5, 15, 15.5}; - // 
clang-format on - Gate gate_original(0, 1, 2, matrix_original); - Gate gate_swapped(0, 2, 1, matrix_swapped); - EXPECT_EQ(gate_original, gate_swapped); -} - -TEST(GatesDefTest, GateEquality) { - // Empty gate - Gate test_gate_0q, real_gate_0q; - - test_gate_0q.time = real_gate_0q.time + 1; - EXPECT_NE(test_gate_0q, real_gate_0q); - test_gate_0q.time = real_gate_0q.time; - - test_gate_0q.num_qubits = real_gate_0q.num_qubits + 1; - EXPECT_NE(test_gate_0q, real_gate_0q); - test_gate_0q.num_qubits = real_gate_0q.num_qubits; - - EXPECT_EQ(test_gate_0q, real_gate_0q); - - // One-qubit gate - const unsigned int time1q = 1256; - const unsigned int qubits1q = 153; - const std::array matrix1q{0.10, 0.11, 0.12, 0.13, - 0.14, 0.15, 0.16, 0.17}; - Gate test_gate_1q(time1q, qubits1q, matrix1q); - Gate real_gate_1q(time1q, qubits1q, matrix1q); - - test_gate_1q.time = real_gate_1q.time + 1; - ASSERT_NE(test_gate_1q, real_gate_1q); - test_gate_1q.time = real_gate_1q.time; - - test_gate_1q.num_qubits = real_gate_1q.num_qubits + 1; - ASSERT_NE(test_gate_1q, real_gate_1q); - test_gate_1q.num_qubits = real_gate_1q.num_qubits; - - test_gate_1q.qubits[0] = real_gate_1q.qubits[0] + 1; - ASSERT_NE(test_gate_1q, real_gate_1q); - test_gate_1q.qubits[0] = real_gate_1q.qubits[0]; - - test_gate_1q.matrix[0] = real_gate_1q.matrix[0] + 1; - ASSERT_NE(test_gate_1q, real_gate_1q); - test_gate_1q.matrix[0] = real_gate_1q.matrix[0]; - - test_gate_1q.matrix[7] = real_gate_1q.matrix[7] + 1; - ASSERT_NE(test_gate_1q, real_gate_1q); - test_gate_1q.matrix[7] = real_gate_1q.matrix[7]; - - ASSERT_EQ(test_gate_1q, real_gate_1q); - - // Two-qubit gate - const unsigned int time2q = 2512; - const unsigned int qubits2q1 = 253; - const unsigned int qubits2q2 = 2256; - const std::array matrix2q{ - 0.20, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, - 0.28, 0.29, 0.210, 0.211, 0.212, 0.213, 0.214, 0.215, - 0.216, 0.217, 0.218, 0.219, 0.220, 0.221, 0.223, 0.224, - 0.225, 0.226, 0.227, 0.228, 0.229, 0.230, 0.231}; - Gate 
test_gate_2q(time2q, qubits2q1, qubits2q2, matrix2q); - Gate real_gate_2q(time2q, qubits2q1, qubits2q2, matrix2q); - - test_gate_2q.time = real_gate_2q.time + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.time = real_gate_2q.time; - - test_gate_2q.num_qubits = real_gate_2q.num_qubits + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.num_qubits = real_gate_2q.num_qubits; - - test_gate_2q.qubits[0] = real_gate_2q.qubits[0] + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.qubits[0] = real_gate_2q.qubits[0]; - - test_gate_2q.qubits[1] = real_gate_2q.qubits[1] + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.qubits[1] = real_gate_2q.qubits[1]; - - test_gate_2q.matrix[0] = real_gate_2q.matrix[0] + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.matrix[0] = real_gate_2q.matrix[0]; - - test_gate_2q.matrix[31] = real_gate_2q.matrix[31] + 1; - ASSERT_NE(test_gate_2q, real_gate_2q); - test_gate_2q.matrix[31] = real_gate_2q.matrix[31]; - - ASSERT_EQ(test_gate_2q, real_gate_2q); -} - -// ============================================================================ -// GateBuilder interface tests. 
-// ============================================================================ - -TEST(GatesDefTest, GateBuilder) { - const unsigned int time_1q = 15; - const unsigned int qubit_1q = 53; - const std::array matrix_1q{0, 1, 2, 3, 4, 5, 6, 7}; - - class ConstantGateBuilder : public GateBuilder { - public: - virtual tensorflow::Status Build( - const unsigned int time, const std::vector& locations, - const absl::flat_hash_map& args, - Gate* gate) override { - const std::array matrix_1q_internal{0, 1, 2, 3, 4, 5, 6, 7}; - *gate = Gate(time_1q, qubit_1q, matrix_1q_internal); - return tensorflow::Status::OK(); - } - }; - - ConstantGateBuilder test_builder; - Gate test_gate; - const unsigned int time_ignored = 4444; - ASSERT_EQ( - test_builder.Build(time_ignored, std::vector(), - absl::flat_hash_map(), &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, Gate(time_1q, qubit_1q, matrix_1q)); -} - -// ============================================================================ -// GateBuilder implementation tests. -// ============================================================================ - -TEST(GatesDefTest, XPow) { - XPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - // cirq X gate is XPowGate at exponent of 1. 
- std::array matrix{0, 0, 1, 0, 1, 0, 0, 0}; - Gate real_gate(time, qubit, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); - - // RX gates are XPow gates with global shift of -0.5 - for (auto const& angle : {0.1234, 5.4321}) { - std::array matrix_rot{ - (float)std::cos(angle / 2.), 0, 0, - (float)-std::sin(angle / 2.), 0, (float)-std::sin(angle / 2.), - (float)std::cos(angle / 2.), 0}; - Gate real_gate_rot(time, qubit, matrix_rot); - absl::flat_hash_map arg_map_rot; - arg_map_rot["global_shift"] = -0.5; - arg_map_rot["exponent"] = angle / M_PI; - arg_map_rot["exponent_scalar"] = 1.0; - Gate test_gate_rot; - ASSERT_EQ(builder.Build(time, locations, arg_map_rot, &test_gate_rot), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate_rot, real_gate_rot); - } -} - -TEST(GatesDefTest, YPow) { - YPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - // cirq Y gate is YPowGate at exponent of 1. 
- std::array matrix{0, 0, 0, -1, 0, 1, 0, 0}; - Gate real_gate(time, qubit, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); - - // RY gates are YPow gates with global shift of -0.5 - for (auto const& angle : {0.1234, 5.4321}) { - std::array matrix_rot{ - (float)std::cos(angle / 2.), 0, (float)-std::sin(angle / 2.), 0, - (float)std::sin(angle / 2.), 0, (float)std::cos(angle / 2.), 0}; - Gate real_gate_rot(time, qubit, matrix_rot); - absl::flat_hash_map arg_map_rot; - arg_map_rot["global_shift"] = -0.5; - arg_map_rot["exponent"] = angle / M_PI; - arg_map_rot["exponent_scalar"] = 1.0; - Gate test_gate_rot; - ASSERT_EQ(builder.Build(time, locations, arg_map_rot, &test_gate_rot), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate_rot, real_gate_rot); - } -} - -TEST(GatesDefTest, ZPow) { - ZPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - // cirq Z gate is ZPowGate at exponent of 1. 
- std::array matrix{1, 0, 0, 0, 0, 0, -1, 0}; - Gate real_gate(time, qubit, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); - - // S gate is ZPowGate with exponent of 0.5 - std::array matrix_s{1.0, 0, 0, 0, 0, 0, 0, 1.0}; - Gate real_gate_s(time, qubit, matrix_s); - absl::flat_hash_map arg_map_s; - arg_map_s["global_shift"] = 0.0; - arg_map_s["exponent"] = 0.5; - arg_map_s["exponent_scalar"] = 1.0; - Gate test_gate_s; - ASSERT_EQ(builder.Build(time, locations, arg_map_s, &test_gate_s), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate_s, real_gate_s); - - // T gate is ZPowGate with exponent of 0.25 - std::array matrix_tg{ - 1.0, 0, 0, 0, 0, 0, 1 / std::sqrt(2), 1 / std::sqrt(2)}; - Gate real_gate_tg(time, qubit, matrix_tg); - absl::flat_hash_map arg_map_tg; - arg_map_tg["global_shift"] = 0.0; - arg_map_tg["exponent"] = 0.25; - arg_map_tg["exponent_scalar"] = 1.0; - Gate test_gate_tg; - ASSERT_EQ(builder.Build(time, locations, arg_map_tg, &test_gate_tg), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate_tg, real_gate_tg); - - // RZ gates are ZPow gates with global shift of -0.5 - for (auto const& angle : {0.1234, 5.4321}) { - std::complex m_00; - m_00 = std::exp(std::complex(0, -1.0 * angle / 2.)); - std::complex m_11; - m_11 = std::exp(std::complex(0, angle / 2.)); - std::array matrix_rot{ - (float)m_00.real(), (float)m_00.imag(), 0, 0, 0, 0, - (float)m_11.real(), (float)m_11.imag()}; - Gate real_gate_rot(time, qubit, matrix_rot); - absl::flat_hash_map arg_map_rot; - arg_map_rot["global_shift"] = -0.5; - arg_map_rot["exponent"] = angle / M_PI; - arg_map_rot["exponent_scalar"] = 1.0; - Gate test_gate_rot; - ASSERT_EQ(builder.Build(time, locations, arg_map_rot, &test_gate_rot), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate_rot, 
real_gate_rot); - } -} - -TEST(GatesDefTest, HPow) { - HPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - // cirq H gate is HPowGate at exponent of 1. - std::array matrix{1 / std::sqrt(2), 0, 1 / std::sqrt(2), 0, - 1 / std::sqrt(2), 0, -1 / std::sqrt(2), 0}; - Gate real_gate(time, qubit, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, IdentityGate) { - IGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - std::array matrix{1, 0, 0, 0, 0, 0, 1, 0}; - Gate real_gate(time, qubit, matrix); - absl::flat_hash_map arg_map; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, PhasedXPow) { - PhasedXPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int qubit{53}; - std::vector locations; - locations.push_back(qubit); - - absl::flat_hash_map arg_map; - arg_map["global_shift"] = -0.2; - arg_map["exponent"] = 1.7; - arg_map["exponent_scalar"] = 1.0; - arg_map["phase_exponent"] = 1.1; - arg_map["phase_exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - - // Associated matrix elements for above parameters extracted using cirq - std::array matrix{0.02798719, -0.89056687, -0.43596421, - 0.12665931, -0.42715093, -0.1537838, - 0.02798719, -0.89056687}; - Gate real_gate(time, qubit, matrix); - - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, XXPow) { - XXPowGateBuilder builder; - const unsigned int time{3}; - 
const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // cirq XX gate is XXPowGate at exponent of 1 - // clang-format off - std::array matrix{0, 0, 0, 0, 0, 0, 1, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, YYPow) { - YYPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // cirq YY gate is YYPowGate at exponent of 1 - // clang-format off - std::array matrix{0, 0, 0, 0, 0, 0, -1, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - -1, 0, 0, 0, 0, 0, 0, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesTest, ZZPow) { - ZZPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // cirq ZZ gate is ZZPowGate at exponent of 1 - // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, -1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] 
= 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, CZPow) { - CZPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // CZ gate is CZPowGate at exponent of 1 - // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, CNotPow) { - CNotPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // CNot gate is CNotPowGate at exponent of 1 - // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0, - 0, 0, 0, 0, 1, 0, 0, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, SwapPow) { - SwapPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // Swap gate is SwapPowGate at exponent of 1 - // 
clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, ISwapPow) { - ISwapPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // ISwap gate is ISwapPowGate at exponent of 1 - // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, PhasedISwapPow) { - PhasedISwapPowGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // ISwap gate is PhasedISwapPowGate at exponent of 1 - // and phase_exponent of 0. 
- // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 1, 0, 0, - 0, 0, 0, 1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - arg_map["global_shift"] = 0.0; - arg_map["exponent"] = 1.0; - arg_map["exponent_scalar"] = 1.0; - arg_map["phase_exponent"] = 0.0; - arg_map["phase_exponent_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, I2) { - I2GateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // clang-format off - std::array matrix{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - // clang-format on - Gate real_gate(time, q1, q2, matrix); - absl::flat_hash_map arg_map; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); -} - -TEST(GatesDefTest, FSim) { - FSimGateBuilder builder; - const unsigned int time{3}; - const unsigned int q1{53}; - const unsigned int q2{55}; - std::vector locations; - locations.push_back(q1); - locations.push_back(q2); - - // FSimGate has limiting forms of iSWAP and CZ, with some relative phasing. 
- const std::array angle_pair_1{M_PI / 2, 0}; - const std::array angle_pair_2{0, M_PI}; - const std::array angle_pair_3{M_PI / 2, M_PI / 6}; - const std::array, 3> angles{angle_pair_1, angle_pair_2, - angle_pair_3}; - - // clang-format off - const std::array matrix_1{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, -1, 0, 0, - 0, 0, 0, -1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 1, 0}; - const std::array matrix_2{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -1, 0}; - const std::array matrix_3{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, -1, 0, 0, - 0, 0, 0, -1, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, std::sqrt(3)/2, -1.0/2}; - // clang-format on - const std::array, 3> matrices{matrix_1, matrix_2, - matrix_3}; - - for (long unsigned int i = 0; i < angles.size(); i++) { - Gate real_gate(time, q1, q2, matrices.at(i)); - absl::flat_hash_map arg_map; - arg_map["theta"] = angles.at(i).at(0); - arg_map["theta_scalar"] = 1.0; - arg_map["phi"] = angles.at(i).at(1); - arg_map["phi_scalar"] = 1.0; - Gate test_gate; - ASSERT_EQ(builder.Build(time, locations, arg_map, &test_gate), - tensorflow::Status::OK()); - ASSERT_EQ(test_gate, real_gate); - } -} - -} // namespace -} // namespace tfq diff --git a/tensorflow_quantum/core/src/matrix.h b/tensorflow_quantum/core/src/matrix.h deleted file mode 100644 index 4aac93c54..000000000 --- a/tensorflow_quantum/core/src/matrix.h +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TFQ_CORE_QSIM_MATRIX_H_ -#define TFQ_CORE_QSIM_MATRIX_H_ - -#include -#include - -namespace tfq { - -// Routines for 2x2 complex matrices. -// Matrices are arrays of floating-point numbers. -// There are no checks for validity of arguments. -// We do not care about performance here. - -template -inline void Matrix2SetZero(Array2& mat) { - for (unsigned i = 0; i < 8; ++i) { - mat[i] = 0; - } -} - -template -inline void Matrix2SetId(Array2& mat) { - Matrix2SetZero(mat); - - mat[0] = 1; - mat[6] = 1; -} - -template -inline void Matrix2Set(const Array1& u, Array2& mat) { - for (unsigned i = 0; i < 8; ++i) { - mat[i] = u[i]; - } -} - -// Multiply two 2x2 matrices. -template -inline void Matrix2Multiply(const Array1& u, Array2& mat) { - typename Array1::value_type mat0[8]; - for (unsigned i = 0; i < 8; ++i) { - mat0[i] = mat[i]; - } - - for (unsigned i = 0; i < 2; ++i) { - for (unsigned j = 0; j < 2; ++j) { - typename Array1::value_type tr = 0; - typename Array1::value_type ti = 0; - - for (unsigned k = 0; k < 2; ++k) { - auto mr0 = mat0[4 * k + 2 * j + 0]; - auto mi0 = mat0[4 * k + 2 * j + 1]; - - auto uik = &u[4 * i + 2 * k]; - - tr += uik[0] * mr0 - uik[1] * mi0; - ti += uik[0] * mi0 + uik[1] * mr0; - } - - mat[4 * i + 2 * j + 0] = tr; - mat[4 * i + 2 * j + 1] = ti; - } - } -} - -// Routines for 4x4 complex matrices. -// Matrices are arrays of floating-point numbers. -// There are no checks for validity of arguments. -// We do not care about performance here. 
- -template -inline void Matrix4SetZero(Array2& mat) { - for (unsigned i = 0; i < 32; ++i) { - mat[i] = 0; - } -} - -template -inline void Matrix4SetId(Array2& mat) { - Matrix4SetZero(mat); - - mat[0] = 1; - mat[10] = 1; - mat[20] = 1; - mat[30] = 1; -} - -template -inline void Matrix4Set(const Array1& u, Array2& mat) { - for (unsigned i = 0; i < 32; ++i) { - mat[i] = u[i]; - } -} - -// Multiply 4x4 matrix by one qubit matrix corresponding to qubit 1. -// First arg is 2x2 matrix, second arg is 4x4 matrix. -// In this function, qubit order is taken to be big-endian. See #936 -template -inline void Matrix4Multiply20(const Array1& u, Array2& mat) { - auto u00 = &u[0]; - auto u01 = &u[2]; - auto u10 = &u[4]; - auto u11 = &u[6]; - - for (unsigned i = 0; i < 4; ++i) { - for (unsigned j = 0; j < 2; ++j) { - auto mr0 = mat[16 * j + 0 + 2 * i]; - auto mi0 = mat[16 * j + 1 + 2 * i]; - auto mr1 = mat[16 * j + 8 + 2 * i]; - auto mi1 = mat[16 * j + 9 + 2 * i]; - - mat[16 * j + 0 + 2 * i] = - u00[0] * mr0 - u00[1] * mi0 + u01[0] * mr1 - u01[1] * mi1; - mat[16 * j + 1 + 2 * i] = - u00[0] * mi0 + u00[1] * mr0 + u01[0] * mi1 + u01[1] * mr1; - mat[16 * j + 8 + 2 * i] = - u10[0] * mr0 - u10[1] * mi0 + u11[0] * mr1 - u11[1] * mi1; - mat[16 * j + 9 + 2 * i] = - u10[0] * mi0 + u10[1] * mr0 + u11[0] * mi1 + u11[1] * mr1; - } - } -} - -// Multiply 4x4 matrix by one qubit matrix corresponding to qubit 0. -// First arg is 2x2 matrix, second arg is 4x4 matrix. -// In this function, qubit order is taken to be big-endian. 
See #936 -template -inline void Matrix4Multiply21(const Array1& u, Array2& mat) { - auto u00 = &u[0]; - auto u01 = &u[2]; - auto u10 = &u[4]; - auto u11 = &u[6]; - - for (unsigned i = 0; i < 4; ++i) { - for (unsigned j = 0; j < 2; ++j) { - auto mr0 = mat[8 * j + 0 + 2 * i]; - auto mi0 = mat[8 * j + 1 + 2 * i]; - auto mr1 = mat[8 * j + 16 + 2 * i]; - auto mi1 = mat[8 * j + 17 + 2 * i]; - - mat[8 * j + 0 + 2 * i] = - u00[0] * mr0 - u00[1] * mi0 + u01[0] * mr1 - u01[1] * mi1; - mat[8 * j + 1 + 2 * i] = - u00[0] * mi0 + u00[1] * mr0 + u01[0] * mi1 + u01[1] * mr1; - mat[8 * j + 16 + 2 * i] = - u10[0] * mr0 - u10[1] * mi0 + u11[0] * mr1 - u11[1] * mi1; - mat[8 * j + 17 + 2 * i] = - u10[0] * mi0 + u10[1] * mr0 + u11[0] * mi1 + u11[1] * mr1; - } - } -} - -// Multiply two 4x4 matrices. -template -inline void Matrix4Multiply(const Array1& u, Array2& mat) { - typename Array1::value_type mat0[32]; - for (unsigned i = 0; i < 32; ++i) { - mat0[i] = mat[i]; - } - - for (unsigned i = 0; i < 4; ++i) { - for (unsigned j = 0; j < 4; ++j) { - typename Array1::value_type tr = 0; - typename Array1::value_type ti = 0; - - for (unsigned k = 0; k < 4; ++k) { - auto mr0 = mat0[8 * k + 2 * j + 0]; - auto mi0 = mat0[8 * k + 2 * j + 1]; - - auto uik = &u[8 * i + 2 * k]; - - tr += uik[0] * mr0 - uik[1] * mi0; - ti += uik[0] * mi0 + uik[1] * mr0; - } - - mat[8 * i + 2 * j + 0] = tr; - mat[8 * i + 2 * j + 1] = ti; - } - } -} - -// Calculate 4x4 fused gate matrix. -template -inline void CalcMatrix4(unsigned q0, unsigned q1, - const std::vector& gates, Array2& mat) { - Matrix4SetId(mat); - - for (auto pgate : gates) { - if (pgate->num_qubits == 1) { - if (pgate->qubits[0] == q0) { - Matrix4Multiply20(pgate->matrix, mat); - } else if (pgate->qubits[0] == q1) { - Matrix4Multiply21(pgate->matrix, mat); - } - } else { - Matrix4Multiply(pgate->matrix, mat); - } - } -} - -// Permute 4x4 matrix to switch between two qubits. 
-template -static void Matrix4Permute(Array2& mat) { - std::swap(mat[2], mat[4]); - std::swap(mat[3], mat[5]); - std::swap(mat[8], mat[16]); - std::swap(mat[9], mat[17]); - std::swap(mat[10], mat[20]); - std::swap(mat[11], mat[21]); - std::swap(mat[12], mat[18]); - std::swap(mat[13], mat[19]); - std::swap(mat[14], mat[22]); - std::swap(mat[15], mat[23]); - std::swap(mat[26], mat[28]); - std::swap(mat[27], mat[29]); -} - -} // namespace tfq - -#endif // TFQ_CORE_QSIM_MATRIX_H_ diff --git a/tensorflow_quantum/core/src/matrix_test.cc b/tensorflow_quantum/core/src/matrix_test.cc deleted file mode 100644 index 2975d9009..000000000 --- a/tensorflow_quantum/core/src/matrix_test.cc +++ /dev/null @@ -1,299 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/matrix.h" - -#include - -#include -#include -#include - -#include "gtest/gtest.h" -#include "tensorflow_quantum/core/src/gates_def.h" - -namespace tfq { - -namespace { - -float RandomFloat() { - float random = static_cast(std::rand()) / static_cast(RAND_MAX); - return random; -} - -TEST(MatrixTest, Matrix2SetZero) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - Matrix2SetZero(matrix); - for (int i = 0; i < 8; i++) { - EXPECT_EQ(matrix[i], 0.0); - } -} - -TEST(MatrixTest, Matrix2SetId) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - Matrix2SetId(matrix); - for (int i = 0; i < 8; i++) { - if (i == 0 || i == 6) { - EXPECT_EQ(matrix[i], 1.0); - continue; - } - EXPECT_EQ(matrix[i], 0.0); - } -} - -TEST(MatrixTest, Matrix2Set) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - std::array target{1, 2, 3, 4, 5, 6, 7, 8}; - - Matrix2Set(target, matrix); - for (int i = 0; i < 8; i++) { - EXPECT_EQ(matrix[i], target[i]); - } -} - -TEST(MatrixTest, Matrix2Multiply) { - std::array a, b; - std::generate(begin(a), end(a), RandomFloat); - std::generate(begin(b), end(b), RandomFloat); - // Just use complex matmul on transformed versions of the above - std::complex c[2][2], d[2][2], f[2][2]; - for (int i = 0; i < 2; i++) { - for (int j = 0; j < 2; j++) { - c[i][j] = std::complex(a[4 * i + 2 * j], a[4 * i + 2 * j + 1]); - d[i][j] = std::complex(b[4 * i + 2 * j], b[4 * i + 2 * j + 1]); - } - } - - for (int i = 0; i < 2; i++) { - for (int j = 0; j < 2; j++) { - for (int k = 0; k < 2; k++) { - f[i][j] += c[i][k] * d[k][j]; - } - } - } - - Matrix2Multiply(a, b); - for (int i = 0; i < 2; i++) { - for (int j = 0; j < 2; j++) { - EXPECT_NEAR(real(f[i][j]), b[4 * i + 2 * j], 1E-6); - EXPECT_NEAR(imag(f[i][j]), b[4 * i + 2 * j + 1], 1E-6); - } - } -} - -TEST(MatrixTest, 
Matrix4SetZero) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - Matrix4SetZero(matrix); - for (int i = 0; i < 32; i++) { - EXPECT_EQ(matrix[i], 0.0); - } -} - -TEST(MatrixTest, Matrix4SetId) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - Matrix4SetId(matrix); - for (int i = 0; i < 32; i++) { - if (i == 0 || i == 10 || i == 20 || i == 30) { - EXPECT_EQ(matrix[i], 1.0); - continue; - } - EXPECT_EQ(matrix[i], 0.0); - } -} - -TEST(MatrixTest, Matrix4Set) { - std::array matrix; - std::generate(begin(matrix), end(matrix), RandomFloat); - std::array target; - for (int i = 0; i < 32; i++) { - target[i] = i; - } - - Matrix4Set(target, matrix); - for (int i = 0; i < 32; i++) { - EXPECT_EQ(matrix[i], target[i]); - } -} - -TEST(MatrixTest, Matrix4Multiply21) { - std::array a; - std::array b; - std::generate(begin(a), end(a), RandomFloat); - std::generate(begin(b), end(b), RandomFloat); - - // Tensor up single-qubit matrix for ordinary complex matmul. - // the matrix below represents C \otimes Id - std::complex c[4][4], f[4][4]; - c[0][0] = std::complex(a[0], a[1]); - c[1][1] = c[0][0]; - c[0][2] = std::complex(a[2], a[3]); - c[1][3] = c[0][2]; - c[2][0] = std::complex(a[4], a[5]); - c[3][1] = c[2][0]; - c[2][2] = std::complex(a[6], a[7]); - c[3][3] = c[2][2]; - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - for (int k = 0; k < 4; k++) { - f[i][j] += c[i][k] * - std::complex(b[8 * k + 2 * j], b[8 * k + 2 * j + 1]); - } - } - } - - Matrix4Multiply21(a, b); - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - EXPECT_NEAR(real(f[i][j]), b[8 * i + 2 * j], 1E-6); - EXPECT_NEAR(imag(f[i][j]), b[8 * i + 2 * j + 1], 1E-6); - } - } -} - -TEST(MatrixTest, Matrix4Multiply20) { - std::array a; - std::array b; - std::generate(begin(a), end(a), RandomFloat); - std::generate(begin(b), end(b), RandomFloat); - - // Use complex matmul on transformed versions of the above. 
- // the matrix below represents Id \otimes C - std::complex c[4][4], f[4][4]; - c[0][0] = std::complex(a[0], a[1]); - c[0][1] = std::complex(a[2], a[3]); - c[1][0] = std::complex(a[4], a[5]); - c[1][1] = std::complex(a[6], a[7]); - c[2][2] = c[0][0]; - c[2][3] = c[0][1]; - c[3][2] = c[1][0]; - c[3][3] = c[1][1]; - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - for (int k = 0; k < 4; k++) { - f[i][j] += c[i][k] * - std::complex(b[8 * k + 2 * j], b[8 * k + 2 * j + 1]); - } - } - } - - Matrix4Multiply20(a, b); - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - EXPECT_NEAR(real(f[i][j]), b[8 * i + 2 * j], 1E-6); - EXPECT_NEAR(imag(f[i][j]), b[8 * i + 2 * j + 1], 1E-6); - } - } -} - -TEST(MatrixTest, Matrix4Multiply) { - std::array a, b; - std::generate(begin(a), end(a), RandomFloat); - std::generate(begin(b), end(b), RandomFloat); - // Just use complex matmul on transformed versions of the above - std::complex c[4][4], d[4][4], f[4][4]; - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - c[i][j] = std::complex(a[8 * i + 2 * j], a[8 * i + 2 * j + 1]); - d[i][j] = std::complex(b[8 * i + 2 * j], b[8 * i + 2 * j + 1]); - } - } - - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - for (int k = 0; k < 4; k++) { - f[i][j] += c[i][k] * d[k][j]; - } - } - } - - Matrix4Multiply(a, b); - for (int i = 0; i < 4; i++) { - for (int j = 0; j < 4; j++) { - EXPECT_NEAR(real(f[i][j]), b[8 * i + 2 * j], 1E-6); - EXPECT_NEAR(imag(f[i][j]), b[8 * i + 2 * j + 1], 1E-6); - } - } -} - -TEST(MatrixTest, Calc4Matrix) { - // Build a test circuit that goes through all three mul types: - // q0 -- X -- -- |CNOT| - // q1 -- -- Z -- |CNOT| - // Associated matrix: - // | 0 1 0 0 | - // | 1 0 0 0 | - // | 0 0 -1 0 | - // | 0 0 0 -1 | - const Matrix1q x_mat{0, 0, 1, 0, 1, 0, 0, 0}; - const Matrix1q z_mat{1, 0, 0, 0, 0, 0, -1, 0}; - // clang-format off - const Matrix2q cnot_mat{1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 
0, 1, 0, - 0, 0, 0, 0, 1, 0, 0, 0}; - const Matrix2q expected_mat{0, 0, 1, 0, 0, 0, 0, 0, - 1, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, -1, 0, 0, 0, - 0, 0, 0, 0, 0, 0, -1, 0}; - // clang-format on - Gate x_gate(0, 0, x_mat); - Gate z_gate(1, 1, z_mat); - Gate cnot_gate(2, 0, 1, cnot_mat); - - std::vector gates; - gates.push_back(&x_gate); - gates.push_back(&z_gate); - gates.push_back(&cnot_gate); - - Matrix2q test_mat; - CalcMatrix4(0, 1, gates, test_mat); - for (int i = 0; i < 32; i++) { - EXPECT_EQ(test_mat[i], expected_mat[i]); - } -} - -TEST(MatrixTest, Matrix4Permute) { - // Conjugation by swap gate: - // | 0 1 2 3 | | 0 2 1 3 | - // | 4 5 6 7 | | 8 10 9 11 | - // | 8 9 10 11 | ---> | 4 6 5 7 | - // | 12 13 14 15 | | 12 14 13 15 | - // clang-format off - std::array matrix{ - 0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, - 4, 4.5, 5, 5.5, 6, 6.5, 7, 7.5, - 8, 8.5, 9, 9.5, 10, 10.5, 11, 11.5, - 12, 12.5, 13, 13.5, 14, 14.5, 15, 15.5}; - const std::array matrix_swapped{ - 0, 0.5, 2, 2.5, 1, 1.5, 3, 3.5, - 8, 8.5, 10, 10.5, 9, 9.5, 11, 11.5, - 4, 4.5, 6, 6.5, 5, 5.5, 7, 7.5, - 12, 12.5, 14, 14.5, 13, 13.5, 15, 15.5}; - // clang-format on - Matrix4Permute(matrix); - for (int i = 0; i < 32; i++) { - EXPECT_EQ(matrix[i], matrix_swapped[i]); - } -} - -} // namespace -} // namespace tfq diff --git a/tensorflow_quantum/core/src/program_resolution.cc b/tensorflow_quantum/core/src/program_resolution.cc deleted file mode 100644 index 0c860ebaa..000000000 --- a/tensorflow_quantum/core/src/program_resolution.cc +++ /dev/null @@ -1,125 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "tensorflow_quantum/core/src/program_resolution.h" - -#include - -#include "absl/container/flat_hash_map.h" -#include "absl/container/flat_hash_set.h" -#include "absl/strings/str_cat.h" -#include "absl/strings/str_split.h" -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/lib/core/error_codes.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" - -namespace tfq { - -using cirq::google::api::v2::Arg; -using cirq::google::api::v2::Moment; -using cirq::google::api::v2::Operation; -using cirq::google::api::v2::Program; -using cirq::google::api::v2::Qubit; -using tensorflow::Status; -using tfq::proto::PauliQubitPair; -using tfq::proto::PauliSum; -using tfq::proto::PauliTerm; - -Status ResolveQubitIds(Program* program, unsigned int* num_qubits, - std::vector* p_sums /*=nullptr*/) { - if (program->circuit().moments().empty()) { - // (#679) Just ignore empty program. - // Number of qubits in empty programs is zero. 
- *num_qubits = 0; - return Status::OK(); - } - - absl::flat_hash_set id_set; - for (const Moment& moment : program->circuit().moments()) { - for (const Operation& operation : moment.operations()) { - for (const Qubit& qubit : operation.qubits()) { - id_set.insert(qubit.id()); - } - } - } - *num_qubits = id_set.size(); - - std::vector ids(id_set.begin(), id_set.end()); - std::sort(ids.begin(), ids.end()); - - absl::flat_hash_map id_to_index; - for (size_t i = 0; i < ids.size(); i++) { - id_to_index[ids[i]] = i; - } - - // Replace the Program Qubit ids with the indices. - for (Moment& moment : *program->mutable_circuit()->mutable_moments()) { - for (Operation& operation : *moment.mutable_operations()) { - for (Qubit& qubit : *operation.mutable_qubits()) { - const int index = id_to_index.at(qubit.id()); - const std::string new_id = absl::StrCat(index); - qubit.set_id(new_id); - } - } - } - - if (p_sums) { - for (size_t i = 0; i < p_sums->size(); i++) { - // Replace the PauliSum Qubit ids with the indices. 
- for (PauliTerm& term : *(p_sums->at(i)).mutable_terms()) { - for (PauliQubitPair& pair : *term.mutable_paulis()) { - const auto result = id_to_index.find(pair.qubit_id()); - if (result == id_to_index.end()) { - return Status( - tensorflow::error::INVALID_ARGUMENT, - "Found a Pauli sum operating on qubits not found in circuit."); - } - const int index = result->second; - const std::string new_id = absl::StrCat(index); - pair.set_qubit_id(new_id); - } - } - } - } - - return Status::OK(); -} - -Status ResolveSymbols( - const absl::flat_hash_map>& param_map, - Program* program) { - for (Moment& moment : *program->mutable_circuit()->mutable_moments()) { - for (Operation& operation : *moment.mutable_operations()) { - for (auto& kv : *operation.mutable_args()) { - Arg& arg = kv.second; - if (!arg.symbol().empty()) { - auto iter = param_map.find(arg.symbol()); - if (iter == param_map.end()) { - return Status( - tensorflow::error::INVALID_ARGUMENT, - "Could not find symbol in parameter map: " + arg.symbol()); - } - - arg.mutable_arg_value()->set_float_value(iter->second.second); - } - } - } - } - - return Status::OK(); -} - -} // namespace tfq diff --git a/tensorflow_quantum/core/src/program_resolution.h b/tensorflow_quantum/core/src/program_resolution.h deleted file mode 100644 index 4e458af7a..000000000 --- a/tensorflow_quantum/core/src/program_resolution.h +++ /dev/null @@ -1,54 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -// Utilties around parsing Cirq Programs into forms that the TensorFlow op can -// better understand. - -#ifndef TFQ_CORE_SRC_PROGRAM_RESOLUTION -#define TFQ_CORE_SRC_PROGRAM_RESOLUTION - -#include - -#include "absl/container/flat_hash_map.h" -#include "cirq/google/api/v2/program.pb.h" -#include "tensorflow/core/lib/core/status.h" -#include "tensorflow_quantum/core/proto/pauli_sum.pb.h" - -namespace tfq { - -// Renames the ids of Qubits to be ordered from 0 to n, where n is the number -// of qubits. if p_sum is provided, we will also resolve ordering based on how -// we resolved program. All qubit types are supported, as long as the qubit ids -// are strings; all ids are extracted and lexicographically ordered, then simply -// replaced with their location in that ordering. -// -// The number of qubits in the program is recorded in `num_qubits`. -tensorflow::Status ResolveQubitIds( - cirq::google::api::v2::Program* program, unsigned int* num_qubits, - std::vector* p_sums = nullptr); - -// Resolves all of the symbols present in the Program. Iterates through all -// operations in all moments, and if any Args have a symbol, replaces the one-of -// with an ArgValue representing the value in the parameter map keyed by the -// symbol. Returns an error if the parameter value cannot be found. -// TODO(pmassey): Consider returning an error if a value in the parameter map -// isn't used. 
-tensorflow::Status ResolveSymbols( - const absl::flat_hash_map>& param_map, - cirq::google::api::v2::Program* program); - -} // namespace tfq - -#endif // TFQ_CORE_SRC_PROGRAM_RESOLUTION diff --git a/tensorflow_quantum/core/src/program_resolution_test.cc b/tensorflow_quantum/core/src/program_resolution_test.cc deleted file mode 100644 index ef746f8f5..000000000 --- a/tensorflow_quantum/core/src/program_resolution_test.cc +++ /dev/null @@ -1,307 +0,0 @@ -/* Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "tensorflow_quantum/core/src/program_resolution.h" - -#include - -#include - -#include "absl/container/flat_hash_map.h" -#include "cirq/google/api/v2/program.pb.h" -#include "gtest/gtest.h" -#include "tensorflow/core/lib/core/status.h" - -namespace tfq { -namespace { - -using cirq::google::api::v2::Program; - -TEST(ProgramResolutionTest, ResolveQubitIdsInvalidArg) { - const std::string text = R"( - circuit { - moments { - operations { - qubits { - id: "0_0" - } - qubits { - id: "1_0" - } - } - } - } - )"; - - const std::string text_good_p_sum = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "0_0" - pauli_type: "Z" - } - } - )"; - - const std::string text_bad_p_sum = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "0_1" - pauli_type: "X" - } - } - )"; - - std::vector p_sums; - tfq::proto::PauliSum p_sum_good, p_sum_bad; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text_good_p_sum, - &p_sum_good)); - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text_bad_p_sum, - &p_sum_bad)); - p_sums.push_back(p_sum_good); - p_sums.push_back(p_sum_bad); - - Program program; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text, &program)); - - unsigned int num_qubits; - EXPECT_EQ(ResolveQubitIds(&program, &num_qubits, &p_sums), - tensorflow::Status( - tensorflow::error::INVALID_ARGUMENT, - "Found a Pauli sum operating on qubits not found in circuit.")); -} - -TEST(ProgramResolutionTest, ResolveQubitIds) { - const std::string text = R"( - circuit { - moments { - operations { - qubits { - id: "0_0" - } - qubits { - id: "1_0" - } - } - } - moments { - operations { - qubits { - id: "0_0" - } - qubits { - id: "0_1" - } - } - } - } - )"; - - const std::string text_p_sum_0 = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "0_0" - pauli_type: "Z" - } 
- } - )"; - - const std::string text_p_sum_1 = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "1_0" - pauli_type: "X" - } - } - )"; - - const std::string text_alphabet = R"( - circuit { - moments { - operations { - qubits { - id: "C" - } - qubits { - id: "D" - } - } - } - moments { - operations { - qubits { - id: "X" - } - qubits { - id: "A" - } - } - } - } - )"; - - const std::string text_alphabet_p_sum_0 = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "D" - pauli_type: "Z" - } - } - )"; - - const std::string text_alphabet_p_sum_1 = R"( - terms { - coefficient_real: 1.0 - coefficient_imag: 0.0 - paulis { - qubit_id: "C" - pauli_type: "X" - } - } - )"; - - const std::string text_empty = R"( - circuit { - } - )"; - - std::vector p_sums, p_sums_alphabet; - tfq::proto::PauliSum p_sum_0, p_sum_1; - ASSERT_TRUE( - google::protobuf::TextFormat::ParseFromString(text_p_sum_0, &p_sum_0)); - ASSERT_TRUE( - google::protobuf::TextFormat::ParseFromString(text_p_sum_1, &p_sum_1)); - p_sums.push_back(p_sum_0); - p_sums.push_back(p_sum_1); - tfq::proto::PauliSum alphabet_p_sum_0, alphabet_p_sum_1; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString( - text_alphabet_p_sum_0, &alphabet_p_sum_0)); - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString( - text_alphabet_p_sum_1, &alphabet_p_sum_1)); - p_sums_alphabet.push_back(alphabet_p_sum_0); - p_sums_alphabet.push_back(alphabet_p_sum_1); - - Program program, empty_program, alphabet_program; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text, &program)); - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text_empty, - &empty_program)); - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text_alphabet, - &alphabet_program)); - - unsigned int num_qubits, num_qubits_empty, num_qubits_alphabet; - EXPECT_TRUE(ResolveQubitIds(&program, &num_qubits, &p_sums).ok()); - EXPECT_TRUE(ResolveQubitIds(&empty_program, 
&num_qubits_empty).ok()); - EXPECT_TRUE( - ResolveQubitIds(&alphabet_program, &num_qubits_alphabet, &p_sums_alphabet) - .ok()); - - EXPECT_EQ(program.circuit().moments(0).operations(0).qubits(0).id(), "0"); - EXPECT_EQ(program.circuit().moments(0).operations(0).qubits(1).id(), "2"); - EXPECT_EQ(program.circuit().moments(1).operations(0).qubits(0).id(), "0"); - EXPECT_EQ(program.circuit().moments(1).operations(0).qubits(1).id(), "1"); - - EXPECT_EQ(alphabet_program.circuit().moments(0).operations(0).qubits(0).id(), - "1"); - EXPECT_EQ(alphabet_program.circuit().moments(0).operations(0).qubits(1).id(), - "2"); - EXPECT_EQ(alphabet_program.circuit().moments(1).operations(0).qubits(0).id(), - "3"); - EXPECT_EQ(alphabet_program.circuit().moments(1).operations(0).qubits(1).id(), - "0"); - - EXPECT_EQ(p_sums.at(0).terms(0).paulis(0).qubit_id(), "0"); - EXPECT_EQ(p_sums.at(1).terms(0).paulis(0).qubit_id(), "2"); - - EXPECT_EQ(p_sums_alphabet.at(0).terms(0).paulis(0).qubit_id(), "2"); - EXPECT_EQ(p_sums_alphabet.at(1).terms(0).paulis(0).qubit_id(), "1"); - - EXPECT_EQ(num_qubits, 3); - EXPECT_EQ(num_qubits_empty, 0); - EXPECT_EQ(num_qubits_alphabet, 4); -} - -TEST(ProgramResolutionTest, ResolveSymbolsInvalidArg) { - const std::string text = R"( - circuit { - scheduling_strategy: MOMENT_BY_MOMENT - moments { - operations { - args { - key: "exponent" - value { - symbol: "junk" - } - } - } - } - } - )"; - - Program program; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text, &program)); - - const absl::flat_hash_map> param_map = { - {"v1", {0, 1.0}}}; - - EXPECT_EQ(ResolveSymbols(param_map, &program), - tensorflow::Status(tensorflow::error::INVALID_ARGUMENT, - "Could not find symbol in parameter map: junk")); -} - -TEST(ProgramResolutionTest, ResolveSymbols) { - const std::string text = R"( - circuit { - scheduling_strategy: MOMENT_BY_MOMENT - moments { - operations { - args { - key: "exponent" - value { - symbol: "v1" - } - } - } - } - } - )"; - - Program 
program; - ASSERT_TRUE(google::protobuf::TextFormat::ParseFromString(text, &program)); - - const absl::flat_hash_map> param_map = { - {"v1", {0, 1.0}}}; - - EXPECT_TRUE(ResolveSymbols(param_map, &program).ok()); - EXPECT_EQ(program.circuit() - .moments(0) - .operations(0) - .args() - .at("exponent") - .arg_value() - .float_value(), - 1.0); -} - -} // namespace -} // namespace tfq diff --git a/tensorflow_quantum/datasets/BUILD b/tensorflow_quantum/datasets/BUILD deleted file mode 100644 index 41f591538..000000000 --- a/tensorflow_quantum/datasets/BUILD +++ /dev/null @@ -1,20 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -py_library( - name = "cluster_state", - srcs = ["cluster_state.py"], -) - -py_test( - name = "cluster_state_test", - srcs = ["cluster_state_test.py"], - python_version = "PY3", - deps = [ - ":cluster_state", - ], -) diff --git a/tensorflow_quantum/datasets/__init__.py b/tensorflow_quantum/datasets/__init__.py deleted file mode 100644 index 931487c1b..000000000 --- a/tensorflow_quantum/datasets/__init__.py +++ /dev/null @@ -1,17 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Experimental location for interesting quantum datasets.""" -# Import to the tensorflow_quantum.datasets.* level.""" -from tensorflow_quantum.datasets.cluster_state import excited_cluster_states diff --git a/tensorflow_quantum/datasets/cluster_state.py b/tensorflow_quantum/datasets/cluster_state.py deleted file mode 100644 index 2c7f5a909..000000000 --- a/tensorflow_quantum/datasets/cluster_state.py +++ /dev/null @@ -1,93 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Toy dataset showing boilerplate code for a cluster state example.""" -import numpy as np -import cirq - - -def excited_cluster_states(qubits): - """Return a tuple of potentially excited cluster states and their labels. - - For every qubit in `qubits` this method will create a cluster state circuit - on `qubits`, apply a `cirq.X` on that qubit along with a label of 1 and add - it to the return dataset. Finally a cluster state circuit on `qubits` that - doesn't contain any `cirq.X` gates with a label of -1 will be added to the - returned dataset. - - Note: This is a toy dataset that can serve as guidance for the community - to contribute new datasets to TensorFlow Quantum. - - - >>> circuits, labels = tfq.datasets.excited_cluster_states( - ... 
cirq.GridQubit.rect(1, 3) - ... ) - >>> print(circuits[0]) - (0, 0): ───H───@───────@───X─── - │ │ - (0, 1): ───H───@───@───┼─────── - │ │ - (0, 2): ───H───────@───@─────── - >>> labels[0] - 1 - >>> print(circuits[-1]) - (0, 0): ───H───@───────@─── - │ │ - (0, 1): ───H───@───@───┼─── - │ │ - (0, 2): ───H───────@───@─── - >>> labels[-1] - -1 - - - Circuits that feature a `cirq.X` gate on one of the qubits are labeled 1, - while the circuit that doesn't feature a `cirq.X` anywhere has the label -1. - - - Args: - qubits: Python `list` of `cirq.GridQubit`s on which the excited cluster - state dataset will be created. - - Returns: - A `tuple` of `cirq.Circuit`s and Python `int` labels. - - """ - if not isinstance(qubits, (tuple, list, np.ndarray)): - raise TypeError('qubits must be a list or np.ndarray. Given: '.format( - type(qubits))) - - for qubit in qubits: - if not isinstance(qubit, cirq.GridQubit): - raise ValueError('qubits must contain cirq.GridQubit only.') - - if len(qubits) <= 2: - raise ValueError('Cluster state dataset must be defined on more than ' - 'two qubits.') - - ref_circuit = cirq.Circuit() - ref_circuit.append(cirq.H.on_each(qubits)) - for this_bit, next_bit in zip(qubits, qubits[1:] + [qubits[0]]): - ref_circuit.append(cirq.CZ(this_bit, next_bit)) - - circuits = () - labels = () - - for qubit in qubits: - circuits += (ref_circuit + cirq.Circuit(cirq.X(qubit)),) - labels += (1,) - - circuits += (ref_circuit,) - labels += (-1,) - - return circuits, labels diff --git a/tensorflow_quantum/datasets/cluster_state_test.py b/tensorflow_quantum/datasets/cluster_state_test.py deleted file mode 100644 index 195081a35..000000000 --- a/tensorflow_quantum/datasets/cluster_state_test.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test the cluster state dataset.""" -import tensorflow as tf -import cirq - -from tensorflow_quantum.datasets import cluster_state - - -class ClusterStateDataTest(tf.test.TestCase): - """Small test to make sure dataset for ClusterState works.""" - - def test_errors(self): - """Test that it errors on invalid qubits.""" - with self.assertRaisesRegex(TypeError, expected_regex='must be a list'): - cluster_state.excited_cluster_states('junk') - - with self.assertRaisesRegex(ValueError, - expected_regex='cirq.GridQubit'): - cluster_state.excited_cluster_states([cirq.NamedQubit('bob')]) - - with self.assertRaisesRegex(ValueError, - expected_regex='more than two qubits.'): - cluster_state.excited_cluster_states(cirq.GridQubit.rect(1, 2)) - - def test_creation(self): - """Test that it returns the correct number of circuits.""" - qubits = cirq.GridQubit.rect(1, 5) - circuits, labels = cluster_state.excited_cluster_states(qubits) - - self.assertEqual(len(circuits), 6) - self.assertEqual(len(labels), 6) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/BUILD b/tensorflow_quantum/python/BUILD deleted file mode 100644 index 0a917ccc6..000000000 --- a/tensorflow_quantum/python/BUILD +++ /dev/null @@ -1,24 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_library( - name = "util", - srcs = ["util.py"], - deps = [ - "//tensorflow_quantum/core/serialize:serializer", - ], -) - -py_test( - name = "util_test", - srcs = ["util_test.py"], - python_version = "PY3", - deps = [ - ":util", - "//tensorflow_quantum/core/serialize:serializer", - ], -) diff --git a/tensorflow_quantum/python/__init__.py b/tensorflow_quantum/python/__init__.py deleted file mode 100644 index d4e4b5c82..000000000 --- a/tensorflow_quantum/python/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module definitions for tensorflow_quantum.python.util.*""" -from tensorflow_quantum.python.util import ( - # Utility functions. - get_supported_gates, - exponential, -) diff --git a/tensorflow_quantum/python/differentiators/BUILD b/tensorflow_quantum/python/differentiators/BUILD deleted file mode 100644 index 4801c79d8..000000000 --- a/tensorflow_quantum/python/differentiators/BUILD +++ /dev/null @@ -1,133 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_library( - name = "differentiator", - srcs = ["differentiator.py"], -) - -py_library( - name = "linear_combination", - srcs = ["linear_combination.py"], - deps = [ - ":differentiator", - ], -) - -py_library( - name = "parameter_shift", - srcs = ["parameter_shift.py"], - deps = [ - ":differentiator", - ":parameter_shift_util", - ], -) - -py_library( - name = "parameter_shift_util", - srcs = ["parameter_shift_util.py"], - deps = [ - "//tensorflow_quantum/core/ops:tfq_ps_util_ops_py", - ], -) - -py_test( - name = "differentiator_test", - srcs = ["differentiator_test.py"], - python_version = "PY3", - deps = [ - ":differentiator", - ], -) - -py_test( - name = "linear_combination_test", - srcs = ["linear_combination_test.py"], - python_version = "PY3", - deps = [ - ":linear_combination", - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "parameter_shift_test", - timeout = "long", - srcs = ["parameter_shift_test.py"], - python_version = "PY3", - deps = [ - ":parameter_shift", - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/core/ops:tfq_utility_ops_py", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "parameter_shift_util_test", - srcs = ["parameter_shift_util_test.py"], - python_version = "PY3", - deps = [ - ":parameter_shift_util", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "gradient_test", - timeout = "eternal", - srcs = ["gradient_test.py"], - python_version = "PY3", - deps = [ - ":linear_combination", - ":parameter_shift", - ":stochastic_differentiator", - "//tensorflow_quantum/core/ops:batch_util", - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "stochastic_differentiator", - srcs = ["stochastic_differentiator.py"], - deps = [ - ":differentiator", - ":stochastic_differentiator_util", - ], -) - -py_test( - 
name = "stochastic_differentiator_test", - srcs = ["stochastic_differentiator_test.py"], - python_version = "PY3", - deps = [ - ":stochastic_differentiator", - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "stochastic_differentiator_util", - srcs = ["stochastic_differentiator_util.py"], - deps = [ - ":parameter_shift_util", - "//tensorflow_quantum/core/ops:cirq_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "stochastic_differentiator_util_test", - srcs = ["stochastic_differentiator_util_test.py"], - python_version = "PY3", - deps = [ - ":stochastic_differentiator_util", - ], -) diff --git a/tensorflow_quantum/python/differentiators/__init__.py b/tensorflow_quantum/python/differentiators/__init__.py deleted file mode 100644 index 26a982ad2..000000000 --- a/tensorflow_quantum/python/differentiators/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Module functions for tfq.differentiators.*""" - -from tensorflow_quantum.python.differentiators.linear_combination import ( - ForwardDifference, - CentralDifference, - LinearCombination, -) - -from tensorflow_quantum.python.differentiators.parameter_shift import ( - ParameterShift,) - -from tensorflow_quantum.python.differentiators.stochastic_differentiator \ - import (SGDifferentiator,) - -from tensorflow_quantum.python.differentiators.differentiator import ( - Differentiator,) diff --git a/tensorflow_quantum/python/differentiators/differentiator.py b/tensorflow_quantum/python/differentiators/differentiator.py deleted file mode 100644 index df4a9c979..000000000 --- a/tensorflow_quantum/python/differentiators/differentiator.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Testing consistency in values across differentiation methods.""" -import abc -import inspect - -import tensorflow as tf - - -class Differentiator(metaclass=abc.ABCMeta): - """Interface that defines how to specify gradients for a quantum circuit. - - This abstract class allows for the creation of gradient calculation - procedures for (expectation values from) quantum circuits, with - respect to a set of input parameter values. 
This allows one - to backpropagate through a quantum circuit. - """ - - def generate_differentiable_op(self, *, sampled_op=None, analytic_op=None): - """Generate a differentiable op by attaching self to an op. - - This function returns a `tf.function` that passes values through to - `forward_op` during the forward pass and this differentiator (`self`) to - backpropagate through the op during the backward pass. If sampled_op - is provided the differentiators `differentiate_sampled` method will - be invoked (which requires sampled_op to be a sample based expectation - op with num_samples input tensor). If analytic_op is provided the - differentiators `differentiate_analytic` method will be invoked (which - requires analytic_op to be an analytic based expectation op that does - NOT have num_samples as an input). If both sampled_op and analytic_op - are provided an exception will be raised. - - ***CAUTION*** - - This `generate_differentiable_op()` can be called only ONCE because - of the `one differentiator per op` policy. You need to call `refresh()` - to reuse this differentiator with another op. - - Args: - sampled_op: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. - analytic_op: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - Returns: - A `callable` op that who's gradients are now registered to be - a call to this differentiators `differentiate_*` function. - - """ - if hasattr(self, 'expectation_op'): - raise TypeError('This differentiator is already used for other ' - 'op by calling generate_differentiable_op before. 
' - 'You need to call `refresh()` to reuse this ' - 'differentiator with another op.') - - if sampled_op is None and analytic_op is None: - raise ValueError('generate_differentiable_op requires a sample ' - 'based expectation op to be provided with arg ' - '\'sampled_op\', or an analytically ' - 'calculated expectation op to be provided with ' - 'arg \'analytic_op\'.') - - if sampled_op is not None and analytic_op is not None: - raise ValueError('generate_differentiable_op was given both a ' - 'sampled_op and analytic_op. ' - 'Please provide analytic_op if the ' - 'operation you wish to make differentiable is ' - 'analytical. Otherwise provide ' - 'sampled_op if the operation you want ' - 'to make differentiable is sample based.') - - if not callable(sampled_op) and not callable(analytic_op): - raise TypeError('Provided arguments must be callable tensorflow ' - 'ops.') - - # TODO (mbbrough): find a better workaround than this to ensure - # that the correct sample based expectation wasn't accidentally - # put inside of the analytical_op argument or vice versa. - # right all that is checked is that the desire op signatures - # are substrings of the given op signature. - if analytic_op is not None: - signature = inspect.signature(analytic_op).parameters - expected_signature = [ - 'programs', 'symbol_names', 'symbol_values', 'pauli_sums' - ] - for key in expected_signature: - if not any(key in s for s in signature): - raise ValueError('unexpected signature for analytic_op. ' - 'Given arg: {}.'.format(str(key)) + '' - 'The signature should contain: {}.'.format( - list(expected_signature)) + '' - ' Given: {}'.format(list(signature))) - - if 'num_samples' in signature: - raise ValueError('found num_samples in analytic_op. 
Please ' - 'ensure that you are providing an analytical ' - 'expectation op in the analytic_op arg.') - - if sampled_op is not None: - signature = inspect.signature(sampled_op).parameters - expected_signature = [ - 'programs', 'symbol_names', 'symbol_values', 'pauli_sums', - 'num_samples' - ] - for key in expected_signature: - if not any(key in s for s in signature): - raise ValueError('unexpected signature for sampled_op. ' - 'Given arg: {}.'.format(str(key)) + '' - 'The signature should contain: {}.'.format( - list(expected_signature))) - - @tf.custom_gradient - def op_wrapper_analytic(programs, symbol_names, symbol_values, - pauli_sums): - forward_pass_vals = analytic_op(programs, symbol_names, - symbol_values, pauli_sums) - - def gradient(grad): - return self._differentiate_ana(programs, symbol_names, - symbol_values, pauli_sums, - forward_pass_vals, grad) - - return forward_pass_vals, gradient - - @tf.custom_gradient - def op_wrapper_sampled(programs, symbol_names, symbol_values, - pauli_sums, num_samples): - forward_pass_vals = sampled_op(programs, symbol_names, - symbol_values, pauli_sums, - num_samples) - - def gradient(grad): - return self._differentiate_sam(programs, symbol_names, - symbol_values, pauli_sums, - num_samples, forward_pass_vals, - grad) - - return forward_pass_vals, gradient - - self.expectation_op = analytic_op - return_func = op_wrapper_analytic - if analytic_op is None: - self.expectation_op = sampled_op - return_func = op_wrapper_sampled - - return return_func - - def _differentiate_ana(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - return None, None, self.differentiate_analytic( - programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad), \ - None - - def _differentiate_sam(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): - return None, None, self.differentiate_sampled( - programs, symbol_names, symbol_values, - pauli_sums, 
num_samples, forward_pass_vals, grad), \ - None, None - - def refresh(self): - """Refresh this differentiator in order to use it with other ops.""" - # Now that self.expectation_op is removed, users can call - # generate_differentiable_op() again. - if hasattr(self, 'expectation_op'): - del self.expectation_op - return self - - @abc.abstractmethod - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - """Specify how to differentiate a circuit with analytical expectation. - - This is called at graph runtime by TensorFlow. `differentiate_analytic` - should calculate the gradient of a batch of circuits and return it - formatted as indicated below. See - `tfq.differentiators.ForwardDifference` for an example. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - forward_pass_vals: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. - grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. 
- - Returns: - A `tf.Tensor` with the same shape as `symbol_values` representing - the gradient backpropageted to the `symbol_values` input of the op - you are differentiating through. - """ - - @abc.abstractmethod - def differentiate_sampled(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): - """Specify how to differentiate a circuit with sampled expectation. - - This is called at graph runtime by TensorFlow. `differentiate_sampled` - should calculate the gradient of a batch of circuits and return it - formatted as indicated below. See - `tfq.differentiators.ForwardDifference` for an example. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - num_samples: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. - forward_pass_vals: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. - grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. 
- - Returns: - A `tf.Tensor` with the same shape as `symbol_values` representing - the gradient backpropageted to the `symbol_values` input of the op - you are differentiating through. - """ diff --git a/tensorflow_quantum/python/differentiators/differentiator_test.py b/tensorflow_quantum/python/differentiators/differentiator_test.py deleted file mode 100644 index 52e4bcf3d..000000000 --- a/tensorflow_quantum/python/differentiators/differentiator_test.py +++ /dev/null @@ -1,91 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for the differentiator abstract class.""" -import tensorflow as tf -from tensorflow_quantum.python.differentiators import differentiator - - -class WorkingDifferentiator(differentiator.Differentiator): - """test.""" - - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - """test.""" - - def differentiate_sampled(self, programs, symbol_names, symbol_values, - num_samples, pauli_sums, forward_pass_vals, grad): - """test.""" - - -class DifferentiatorTest(tf.test.TestCase): - """Test that we can properly subclass differentiator.""" - - def test_subclass(self): - """Test that the BaseDifferentiator can be subclassed.""" - WorkingDifferentiator() - - def test_subclass_missing_differentiate(self): - """Test that BaseDifferentiator enforces abstract method existance.""" - - class BrokenDifferentiator(differentiator.Differentiator): - """test.""" - - with self.assertRaisesRegex(TypeError, expected_regex="instantiate"): - BrokenDifferentiator() - - def test_generate_differentiable_op(self): - """test the type checking on this method.""" - WorkingDifferentiator().generate_differentiable_op( - analytic_op=lambda programs, symbol_names, symbol_values, - pauli_sums: 1) - WorkingDifferentiator().generate_differentiable_op( - sampled_op=lambda programs, symbol_names, symbol_values, pauli_sums, - num_samples: 1) - with self.assertRaisesRegex(TypeError, expected_regex='callable'): - WorkingDifferentiator().generate_differentiable_op(analytic_op=1) - with self.assertRaisesRegex(ValueError, expected_regex='given both'): - WorkingDifferentiator().generate_differentiable_op( - analytic_op=lambda: 1, sampled_op=lambda: 1) - with self.assertRaisesRegex(ValueError, expected_regex='analytic_op'): - WorkingDifferentiator().generate_differentiable_op( - analytic_op=lambda programs, symbol_names, symbol_values: 1) - with 
self.assertRaisesRegex( - ValueError, expected_regex='num_samples in analytic_op'): - WorkingDifferentiator().generate_differentiable_op( - analytic_op=lambda programs, symbol_names, symbol_values, - pauli_sums, num_samples: 1) - with self.assertRaisesRegex(ValueError, expected_regex='sampled_op'): - WorkingDifferentiator().generate_differentiable_op( - sampled_op=lambda programs, symbol_names, pauli_sums: 1) - - def test_single_op_link(self): - """Tests if the `one-differentiator-per-op` policy is working well.""" - wd = WorkingDifferentiator() - wd.generate_differentiable_op(analytic_op=lambda programs, symbol_names, - symbol_values, pauli_sums: 1) - with self.assertRaisesRegex(TypeError, expected_regex='already used'): - wd.generate_differentiable_op( - analytic_op=lambda programs, symbol_names, symbol_values, - pauli_sums: 1) - wd.generate_differentiable_op( - sampled_op=lambda programs, symbol_names, symbol_values, - pauli_sums: 1) - wd.refresh() - wd.generate_differentiable_op(analytic_op=lambda programs, symbol_names, - symbol_values, pauli_sums: 1) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/gradient_test.py b/tensorflow_quantum/python/differentiators/gradient_test.py deleted file mode 100644 index e18e97e79..000000000 --- a/tensorflow_quantum/python/differentiators/gradient_test.py +++ /dev/null @@ -1,331 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Testing for gradient calculation consistency in TFQ.""" -import copy - -import numpy as np -import sympy -import tensorflow as tf -from absl.testing import parameterized - -import cirq -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import linear_combination -from tensorflow_quantum.python.differentiators import parameter_shift -from tensorflow_quantum.python.differentiators import stochastic_differentiator -from tensorflow_quantum.core.ops import circuit_execution_ops, batch_util - -DIFFS = [ - linear_combination.ForwardDifference(grid_spacing=0.0001), - linear_combination.ForwardDifference(error_order=2, grid_spacing=0.0001), - linear_combination.CentralDifference(grid_spacing=0.0001), - linear_combination.CentralDifference(error_order=4, grid_spacing=0.0001), - parameter_shift.ParameterShift(), -] - -STOCHASTIC_DIFFS = [ - stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=False, - stochastic_cost=False), - stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=False, - stochastic_cost=False), - stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=True, - stochastic_cost=False), - stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=False), -] - -OPS = [ - circuit_execution_ops.get_expectation_op(cirq.sim.Simulator()), # WF - circuit_execution_ops.get_expectation_op( - cirq.DensityMatrixSimulator()), # DM - circuit_execution_ops.get_expectation_op() # C++ -] - - -def _cirq_simple_finite_difference(circuit_batch, - resolvers, - symbol_names, - op_batch, - grid_spacing=0.0001): - """A simple finite difference code that calculates the gradient of a 
- batch of circuits using cirq.""" - simulator = cirq.sim.Simulator() - - init_vals = batch_util.batch_calculate_expectation(circuit_batch, resolvers, - op_batch, simulator) - grad_circuits = [] - grad_resolvers = [] - grad_pauli_sums = [] - for this_program, this_pauli_sums, this_resolver in \ - zip(circuit_batch, op_batch, resolvers): - for symbol in symbol_names: - perturbed_resolver = copy.deepcopy(this_resolver) - perturbed_resolver.param_dict[symbol] += grid_spacing - grad_circuits.append(this_program) - grad_pauli_sums.append(this_pauli_sums) - grad_resolvers.append(perturbed_resolver) - - # shape: [n_programs * len(symbol_names), n_pauli_sums] - results = np.array( - batch_util.batch_calculate_expectation(circuits=grad_circuits, - param_resolvers=grad_resolvers, - ops=grad_pauli_sums, - simulator=simulator)) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - gradient_generator = results.transpose().reshape( - (len(op_batch[0]), len(circuit_batch), len(symbol_names))) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - forward_pass_vals = np.transpose( - np.vstack([np.expand_dims(init_vals, axis=0)] * len(symbol_names)), - (2, 1, 0)) - - return np.sum(1 / grid_spacing * (gradient_generator - forward_pass_vals), - axis=0) - - -class GradientCorrectnessTest(tf.test.TestCase, parameterized.TestCase): - """Test correctness of the differentiators to reference cirq algorithm.""" - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator': DIFFS + STOCHASTIC_DIFFS, - 'op': OPS, - 'stochastic_cost': [False, True] - }))) - def test_backprop(self, differentiator, op, stochastic_cost): - """Test that gradients are correctly backpropagated through a quantum - circuit via comparison to analytical results. - """ - # hack to add stoachastic cost. TODO (jaeyoo): remove this hack. 
- differentiator.stochastic_cost = stochastic_cost - differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) - - def exact_grad(theta): - new_theta = 2 * np.pi * theta - return -2 * np.pi * np.sin(new_theta) * np.exp(np.cos(new_theta)) - - bit = cirq.GridQubit(0, 0) - circuits = util.convert_to_tensor( - [cirq.Circuit(cirq.X(bit)**sympy.Symbol('rx')) for _ in range(2)]) - pstring = util.convert_to_tensor([[ - cirq.PauliSum.from_pauli_strings([cirq.PauliString({bit: cirq.Z})]) - ] for _ in circuits]) - base_rot_angles = tf.constant([[0.25], [0.125]]) - with tf.GradientTape() as g: - g.watch(base_rot_angles) - input_angles = 2 * base_rot_angles - exp_res = tf.exp(op(circuits, ['rx'], input_angles, pstring)) - - grad = g.gradient(exp_res, base_rot_angles) - exact = [[exact_grad(0.25)], [exact_grad(0.125)]] - - # will this be too tight? time will tell. - self.assertAllClose(exact, grad.numpy(), rtol=0.01, atol=0.01) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator': DIFFS, - 'op': OPS, - 'n_qubits': [5], - 'n_programs': [3], - 'n_ops': [3], - 'symbol_names': [['a', 'b']] - }))) - def test_gradients_vs_cirq_finite_difference(self, differentiator, op, - n_qubits, n_programs, n_ops, - symbol_names): - """Compare TFQ differentiators to fine-grained noiseless cirq finite - differencing. - DISCLAIMER : the consistency of STOCHASTIC_DIFFS is hard to be checked. - Its expectation value should be checked, but it takes long time because - SGDifferentiator is not optimized. Until optimized, the consistency - will be performed in benchmarks/scripts/differentiators:convergence_test - TODO(jaeyoo) : move convergence_test here once SGDifferentiator is - optimized. 
- """ - differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs) - - psums = [ - util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch - ] - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - # calculate tfq gradient - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - ops = util.convert_to_tensor(psums) - with tf.GradientTape() as g: - g.watch(symbol_values_tensor) - expectations = op(programs, symbol_names, symbol_values_tensor, ops) - tfq_grads = g.gradient(expectations, symbol_values_tensor) - - # calculate gradients in cirq using a very simple forward differencing - # scheme - cirq_grads = _cirq_simple_finite_difference(circuit_batch, - resolver_batch, - symbol_names, psums) - - # will this be too tight? time will tell. - self.assertAllClose(cirq_grads, tfq_grads, rtol=1e-2, atol=1e-2) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator': DIFFS + STOCHASTIC_DIFFS, - 'op': OPS, - 'stochastic_cost': [False, True] - }))) - def test_analytic_value_with_simple_circuit(self, differentiator, op, - stochastic_cost): - """Test the value of differentiator with simple circuit. - Since there are only one symbol, one gate and one op, there is only one - samling result, STOCHATIC_DIFFS shows the same result with that of - deterministic differentiators.""" - # Get an expectation op, with this differentiator attached. 
- differentiator.refresh() - differentiator.stochastic_cost = stochastic_cost - op = differentiator.generate_differentiable_op(analytic_op=op) - qubit = cirq.GridQubit(0, 0) - circuit = util.convert_to_tensor( - [cirq.Circuit(cirq.X(qubit)**sympy.Symbol('alpha'))]) - psums = util.convert_to_tensor([[cirq.Z(qubit)]]) - symbol_values_array = np.array([[0.123]], dtype=np.float32) - # Calculate tfq gradient. - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - with tf.GradientTape() as g: - g.watch(symbol_values_tensor) - expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) - grads = g.gradient(expectations, symbol_values_tensor) - ground_truth_grads = np.array([[-1.1839752]]) - self.assertAllClose(ground_truth_grads, grads, rtol=1e-2, atol=1e-2) - - -class StochasticDifferentiatorCorrectnessTest(tf.test.TestCase, - parameterized.TestCase): - """Test correctness of the stochastic differentiators to reference cirq - algorithm. - DISCLAIMER: this test allows for a larger margin of error and as long - as convergence is happening then it passes""" - - # TODO(zaqqwerty): only this test was failing after adding cirq.I - # support, so it is disabled pending diagnosis - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator': STOCHASTIC_DIFFS, - 'op': OPS, - 'n_qubits': [5], - 'n_programs': [3], - 'n_ops': [3], - 'symbol_names': [['a', 'b']], - 'stochastic_cost_eps': [(False, 5e-1), (True, 7e-1)], - }))) - def gradients_vs_cirq_finite_difference(self, differentiator, op, n_qubits, - n_programs, n_ops, symbol_names, - stochastic_cost_eps): - """Compare TFQ differentiators to fine-grained noiseless cirq finite - differencing with a larger margin of error.""" - - # TODO (jaeyoo): cleanup this hacky wordkaround so variable - # assignment doesn't need to take place like this. 
- differentiator.stochastic_cost, eps = stochastic_cost_eps - differentiator.refresh() - op = differentiator.generate_differentiable_op(analytic_op=op) - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs) - - psums = [ - util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch - ] - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - # calculate tfq gradient - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - ops = util.convert_to_tensor(psums) - - # calculate gradients in cirq using a very simple forward differencing - # scheme - cirq_grads = _cirq_simple_finite_difference(circuit_batch, - resolver_batch, - symbol_names, psums) - - def _get_gradient(): - with tf.GradientTape() as g: - g.watch(symbol_values_tensor) - expectations = op(programs, symbol_names, symbol_values_tensor, - ops) - return g.gradient(expectations, symbol_values_tensor) - - def _abs_diff(grad, mask): - return np.sum(np.abs(grad - cirq_grads * mask)) - - def _get_nonzero_mask(grad): - return (grad.numpy() != 0.0).astype(np.float32) - - # Get the non-zero mask because a few initial gradients have not sampled - # zero values. - tfq_grads_1 = _get_gradient() - mask_1 = _get_nonzero_mask(tfq_grads_1) - - if not np.allclose(tfq_grads_1, cirq_grads * mask_1, atol=eps): - tfq_grads_2 = 0.5 * (tfq_grads_1 + _get_gradient()) - mask_2 = _get_nonzero_mask(tfq_grads_2) - # Check if the 2nd error becomes smaller that 1st one. 
- if not _abs_diff(tfq_grads_1, mask_1) > _abs_diff( - tfq_grads_2, mask_2): - cnt = 2 - tfq_grads = (cnt * tfq_grads_2 + _get_gradient()) / (cnt + 1) - while (cnt < 10 and - not np.allclose(cirq_grads, tfq_grads, atol=eps)): - cnt += 1 - tfq_grads = (cnt * tfq_grads + _get_gradient()) / (cnt + 1) - self.assertAllClose(cirq_grads, tfq_grads, atol=eps) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/linear_combination.py b/tensorflow_quantum/python/differentiators/linear_combination.py deleted file mode 100644 index f8e4366a7..000000000 --- a/tensorflow_quantum/python/differentiators/linear_combination.py +++ /dev/null @@ -1,516 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Compute gradients by combining function values linearly.""" -import numbers - -import numpy as np -import tensorflow as tf - -from tensorflow_quantum.python.differentiators import differentiator - - -class LinearCombination(differentiator.Differentiator): - """Differentiate a circuit with respect to its inputs by - linearly combining values obtained by evaluating the op using parameter - values perturbed about their forward-pass values. 
- - - >>> my_op = tfq.get_expectation_op() - >>> weights = [5, 6, 7] - >>> perturbations = [0, 0.5, 0.25] - >>> linear_differentiator = tfq.differentiators.LinearCombination( - ... weights, perturbations - ... ) - >>> # Get an expectation op, with this differentiator attached. - >>> op = linear_differentiator.generate_differentiable_op( - ... analytic_op=my_op - ... ) - >>> qubit = cirq.GridQubit(0, 0) - >>> circuit = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) - ... ]) - >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) - >>> symbol_values_array = np.array([[0.123]], dtype=np.float32) - >>> # Calculate tfq gradient. - >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - >>> with tf.GradientTape() as g: - ... g.watch(symbol_values_tensor) - ... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums - ... ) - >>> # Gradient would be: 5 * f(x+0) + 6 * f(x+0.5) + 7 * f(x+0.25) - >>> grads = g.gradient(expectations, symbol_values_tensor) - >>> # Note: this gradient visn't correct in value, but showcases - >>> # the principle of how gradients can be defined in a very flexible - >>> # fashion. - >>> grads - tf.Tensor([[5.089467]], shape=(1, 1), dtype=float32) - - """ - - def __init__(self, weights, perturbations): - """Instantiate this differentiator. - - Create a LinearComobinationDifferentiator. Pass in weights and - perturbations as described below. - - Args: - weights: Python `list` of real numbers representing linear - combination coeffecients for each perturbed function - evaluation. - perturbations: Python `list` of real numbers representing - perturbation values. - """ - if not isinstance(weights, (np.ndarray, list, tuple)): - raise TypeError("weights must be a numpy array, list or tuple." 
- "Got {}".format(type(weights))) - if not all([isinstance(weight, numbers.Real) for weight in weights]): - raise TypeError("Each weight in weights must be a real number.") - if not isinstance(perturbations, (np.ndarray, list, tuple)): - raise TypeError("perturbations must be a numpy array," - " list or tuple. Got {}".format(type(weights))) - if not all([ - isinstance(perturbation, numbers.Real) - for perturbation in perturbations - ]): - raise TypeError("Each perturbation in perturbations must be a" - " real number.") - if not len(weights) == len(perturbations): - raise ValueError("weights and perturbations must have the same " - "length.") - if not len(list(set(perturbations))) == len(perturbations): - raise ValueError("All values in perturbations must be unique.") - self.weights = tf.constant(weights) - self.n_perturbations = tf.constant(len(perturbations)) - self.perturbations = tf.constant(perturbations) - - @tf.function - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - - # these get used a lot - n_symbols = tf.gather(tf.shape(symbol_names), 0) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - - # STEP 1: Generate required inputs for executor - # in this case I can do this with existing tensorflow ops if i'm clever - - # don't do any computation for a perturbation of zero, just use - # forward pass values - mask = tf.not_equal(self.perturbations, - tf.zeros_like(self.perturbations)) - non_zero_perturbations = tf.boolean_mask(self.perturbations, mask) - non_zero_weights = tf.boolean_mask(self.weights, mask) - n_non_zero_perturbations = tf.gather(tf.shape(non_zero_perturbations), - 0) - - # tile up symbols to [n_non_zero_perturbations, n_programs, n_symbols] - perturbation_tiled_symbols = tf.tile( - tf.expand_dims(symbol_values, 0), - tf.stack([n_non_zero_perturbations, 1, 1])) - - def create_3d_perturbation(i, perturbation_values): - """Generate a 
tensor the same shape as perturbation_tiled_symbols - containing the perturbations specified by perturbation_values.""" - ones = tf.cast( - tf.concat([ - tf.zeros(tf.stack([n_non_zero_perturbations, n_programs, i - ])), - tf.ones(tf.stack([n_non_zero_perturbations, n_programs, 1 - ])), - tf.zeros( - tf.stack([ - n_non_zero_perturbations, n_programs, - tf.subtract(n_symbols, tf.add(i, 1)) - ])) - ], - axis=2), perturbation_values.dtype) - return tf.einsum('kij,k->kij', ones, perturbation_values) - - def generate_perturbation(i): - """Perturb each value in the ith column of - perturbation_tiled_symbols. - """ - return tf.add( - perturbation_tiled_symbols, - tf.cast(create_3d_perturbation(i, non_zero_perturbations), - perturbation_tiled_symbols.dtype)) - - # create a 4d tensor with the following dimensions: - # [n_symbols, n_perturbations, n_programs, n_symbols] - # the zeroth dimension represents the fact that we have to apply - # a perturbation in the direction of every parameter individually. 
- # the first dimension represents the number of perturbations that we - # have to apply, and the inner 2 dimensions represent the standard - # input format to the expectation ops - all_perturbations = tf.map_fn(generate_perturbation, - tf.range(n_symbols), - dtype=tf.float32) - - # reshape everything to fit into expectation op correctly - total_programs = tf.multiply( - tf.multiply(n_programs, n_non_zero_perturbations), n_symbols) - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape( - tf.tile( - tf.expand_dims(programs, 0), - tf.stack([tf.multiply(n_symbols, n_non_zero_perturbations), - 1])), [total_programs]) - flat_perturbations = tf.reshape(all_perturbations, [ - tf.multiply(tf.multiply(n_symbols, n_non_zero_perturbations), - n_programs), n_symbols - ]) - # tile up and then reshape to order ops correctly - flat_ops = tf.reshape( - tf.tile( - tf.expand_dims(pauli_sums, 0), - tf.stack( - [tf.multiply(n_symbols, n_non_zero_perturbations), 1, 1])), - [total_programs, n_ops]) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, symbol_names, - flat_perturbations, flat_ops) - - # STEP 3: generate gradients according to the results - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - expectations, - [n_symbols, - tf.multiply(n_non_zero_perturbations, n_programs), -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [tf.multiply(i, n_programs), 0], - [n_programs, n_ops]) - - return tf.map_fn(split_vertically, - tf.range(n_non_zero_perturbations), - dtype=tf.float32) - - # reshape so that expectations calculated on different programs are - # separated by a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - 
grouped_expectations) - - # now we will calculate all of the partial derivatives - - nonzero_partials = tf.einsum( - 'spco,p->sco', rearranged_expectations, - tf.cast(non_zero_weights, rearranged_expectations.dtype)) - - # now add the contribution of a zero term if required - - # find any zero terms - mask = tf.equal(self.perturbations, tf.zeros_like(self.perturbations)) - zero_weight = tf.boolean_mask(self.weights, mask) - n_zero_perturbations = tf.gather(tf.shape(zero_weight), 0) - - # this will have shape [n_symbols, n_programs, n_ops] - partials = tf.cond( - tf.equal(n_zero_perturbations, 0), lambda: nonzero_partials, - lambda: nonzero_partials + tf.multiply( - tf.tile(tf.expand_dims(forward_pass_vals, axis=0), - tf.stack([n_symbols, 1, 1])), - tf.cast(tf.gather(zero_weight, 0), forward_pass_vals.dtype))) - - # now apply the chain rule - return tf.einsum('sco,co -> cs', partials, grad) - - @tf.function - def differentiate_sampled(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): - - # these get used a lot - n_symbols = tf.gather(tf.shape(symbol_names), 0) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - - # STEP 1: Generate required inputs for executor - # in this case I can do this with existing tensorflow ops if i'm clever - - # don't do any computation for a perturbation of zero, just use - # forward pass values - mask = tf.not_equal(self.perturbations, - tf.zeros_like(self.perturbations)) - non_zero_perturbations = tf.boolean_mask(self.perturbations, mask) - non_zero_weights = tf.boolean_mask(self.weights, mask) - n_non_zero_perturbations = tf.gather(tf.shape(non_zero_perturbations), - 0) - - # tile up symbols to [n_non_zero_perturbations, n_programs, n_symbols] - perturbation_tiled_symbols = tf.tile( - tf.expand_dims(symbol_values, 0), - tf.stack([n_non_zero_perturbations, 1, 1])) - - def create_3d_perturbation(i, perturbation_values): - """Generate a tensor the 
same shape as perturbation_tiled_symbols - containing the perturbations specified by perturbation_values.""" - ones = tf.cast( - tf.concat([ - tf.zeros(tf.stack([n_non_zero_perturbations, n_programs, i - ])), - tf.ones(tf.stack([n_non_zero_perturbations, n_programs, 1 - ])), - tf.zeros( - tf.stack([ - n_non_zero_perturbations, n_programs, - tf.subtract(n_symbols, tf.add(i, 1)) - ])) - ], - axis=2), perturbation_values.dtype) - return tf.einsum('kij,k->kij', ones, perturbation_values) - - def generate_perturbation(i): - """Perturb each value in the ith column of - perturbation_tiled_symbols. - """ - return tf.add( - perturbation_tiled_symbols, - tf.cast(create_3d_perturbation(i, non_zero_perturbations), - perturbation_tiled_symbols.dtype)) - - # create a 4d tensor with the following dimensions: - # [n_symbols, n_perturbations, n_programs, n_symbols] - # the zeroth dimension represents the fact that we have to apply - # a perturbation in the direction of every parameter individually. - # the first dimension represents the number of perturbations that we - # have to apply, and the inner 2 dimensions represent the standard - # input format to the expectation ops - all_perturbations = tf.map_fn(generate_perturbation, - tf.range(n_symbols), - dtype=tf.float32) - - # reshape everything to fit into expectation op correctly - total_programs = tf.multiply( - tf.multiply(n_programs, n_non_zero_perturbations), n_symbols) - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape( - tf.tile( - tf.expand_dims(programs, 0), - tf.stack([tf.multiply(n_symbols, n_non_zero_perturbations), - 1])), [total_programs]) - flat_perturbations = tf.reshape(all_perturbations, [ - tf.multiply(tf.multiply(n_symbols, n_non_zero_perturbations), - n_programs), n_symbols - ]) - # tile up and then reshape to order ops correctly - flat_ops = tf.reshape( - tf.tile( - tf.expand_dims(pauli_sums, 0), - tf.stack( - [tf.multiply(n_symbols, n_non_zero_perturbations), 1, 1])), - 
[total_programs, n_ops]) - flat_num_samples = tf.reshape( - tf.tile( - tf.expand_dims(num_samples, 0), - tf.stack( - [tf.multiply(n_symbols, n_non_zero_perturbations), 1, 1])), - [total_programs, n_ops]) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, symbol_names, - flat_perturbations, flat_ops, - flat_num_samples) - - # STEP 3: generate gradients according to the results - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - expectations, - [n_symbols, - tf.multiply(n_non_zero_perturbations, n_programs), -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [tf.multiply(i, n_programs), 0], - [n_programs, n_ops]) - - return tf.map_fn(split_vertically, - tf.range(n_non_zero_perturbations), - dtype=tf.float32) - - # reshape so that expectations calculated on different programs are - # separated by a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - grouped_expectations) - - # now we will calculate all of the partial derivatives - - nonzero_partials = tf.einsum( - 'spco,p->sco', rearranged_expectations, - tf.cast(non_zero_weights, rearranged_expectations.dtype)) - - # now add the contribution of a zero term if required - - # find any zero terms - mask = tf.equal(self.perturbations, tf.zeros_like(self.perturbations)) - zero_weight = tf.boolean_mask(self.weights, mask) - n_zero_perturbations = tf.gather(tf.shape(zero_weight), 0) - - # this will have shape [n_symbols, n_programs, n_ops] - partials = tf.cond( - tf.equal(n_zero_perturbations, 0), lambda: nonzero_partials, - lambda: nonzero_partials + tf.multiply( - tf.tile(tf.expand_dims(forward_pass_vals, axis=0), - tf.stack([n_symbols, 1, 1])), - tf.cast(tf.gather(zero_weight, 0), 
forward_pass_vals.dtype))) - - # now apply the chain rule - return tf.einsum('sco,co -> cs', partials, grad) - - -class ForwardDifference(LinearCombination): - """Differentiate a circuit using forward differencing. - - Forward differencing computes a derivative at a point x using only - points larger than x (in this way, it is 'one sided'). A closed form for - the coefficients of this derivative for an arbitrary positive error order - is used here, which is described in the following article: - https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - - >>> my_op = tfq.get_expectation_op() - >>> linear_differentiator = tfq.differentiators.ForwardDifference(2, 0.01) - >>> # Get an expectation op, with this differentiator attached. - >>> op = linear_differentiator.generate_differentiable_op( - ... analytic_op=my_op - ... ) - >>> qubit = cirq.GridQubit(0, 0) - >>> circuit = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) - ... ]) - >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) - >>> symbol_values_array = np.array([[0.123]], dtype=np.float32) - >>> # Calculate tfq gradient. - >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - >>> with tf.GradientTape() as g: - ... g.watch(symbol_values_tensor) - ... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) - >>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) - >>> grads = g.gradient(expectations, symbol_values_tensor) - >>> grads - tf.Tensor([[-1.184372]], shape=(1, 1), dtype=float32) - - """ - - def __init__(self, error_order=1, grid_spacing=0.001): - """Instantiate a ForwardDifference. - - Create a ForwardDifference differentiator, passing along an error order - and grid spacing to be used to contstruct differentiator coeffecients. - - Args: - error_order: A positive `int` specifying the error order of this - differentiator. 
This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. - grid_spacing: A positive `float` specifying how large of a - grid to use in calculating this finite difference. - """ - if not (isinstance(error_order, numbers.Integral) and error_order > 0): - raise ValueError("error_order must be a positive integer.") - if not (isinstance(grid_spacing, numbers.Real) and grid_spacing > 0): - raise ValueError("grid_spacing must be a positive real number.") - self.error_order = error_order - self.grid_spacing = grid_spacing - grid_points_to_eval = np.arange(0, error_order + 1) - weights = [] - for point in grid_points_to_eval: - if point == 0: - weight = -1 * np.sum( - [1 / j for j in np.arange(1, error_order + 1)]) - else: - weight = ((-1) ** (point+1) * np.math.factorial(error_order))/\ - (point * np.math.factorial(error_order-point) - * np.math.factorial(point)) - weights.append(weight / grid_spacing) - super().__init__(weights, grid_points_to_eval * grid_spacing) - - -class CentralDifference(LinearCombination): - """Differentiates a circuit using Central Differencing. - - Central differencing computes a derivative at point x using an equal - number of points before and after x. A closed form for - the coefficients of this derivative for an arbitrary positive error order - is used here, which is described in the following article: - https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - - >>> my_op = tfq.get_expectation_op() - >>> linear_differentiator = tfq.differentiators.CentralDifference(2, 0.01) - >>> # Get an expectation op, with this differentiator attached. - >>> op = linear_differentiator.generate_differentiable_op( - ... analytic_op=my_op - ... ) - >>> qubit = cirq.GridQubit(0, 0) - >>> circuit = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) - ... 
]) - >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) - >>> symbol_values_array = np.array([[0.123]], dtype=np.float32) - >>> # Calculate tfq gradient. - >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - >>> with tf.GradientTape() as g: - ... g.watch(symbol_values_tensor) - ... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) - >>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) - >>> grads = g.gradient(expectations, symbol_values_tensor) - >>> grads - tf.Tensor([[-1.1837807]], shape=(1, 1), dtype=float32) - - - """ - - def __init__(self, error_order=2, grid_spacing=0.001): - """Instantiate a CentralDifference. - - Create a CentralDifference differentaitor, passing along an error order - and grid spacing to be used to contstruct differentiator coeffecients. - - Args: - error_order: A positive, even `int` specifying the error order - of this differentiator. This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. - grid_spacing: A positive `float` specifying how large of a - grid to use in calculating this finite difference. 
- """ - if not (isinstance(error_order, numbers.Integral) and - error_order > 0 and error_order % 2 == 0): - raise ValueError("error_order must be a positive, even integer.") - if not (isinstance(grid_spacing, numbers.Real) and grid_spacing > 0): - raise ValueError("grid_spacing must be a positive real number.") - grid_points_to_eval = np.concatenate([ - np.arange(-1 * error_order / 2, 0), - np.arange(1, error_order / 2 + 1) - ]) - weights = [] - n = error_order / 2 - for k in grid_points_to_eval: - numerator = (-1)**(k + 1) * np.math.factorial(n)**2 - denom = k * np.math.factorial(n - k) * np.math.factorial(n + k) - weights.append(numerator / (denom * grid_spacing)) - super().__init__(weights, grid_points_to_eval * grid_spacing) diff --git a/tensorflow_quantum/python/differentiators/linear_combination_test.py b/tensorflow_quantum/python/differentiators/linear_combination_test.py deleted file mode 100644 index f294e7077..000000000 --- a/tensorflow_quantum/python/differentiators/linear_combination_test.py +++ /dev/null @@ -1,196 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Basic tests for the LinearCombinationDifferentiator""" -import numpy as np -from absl.testing import parameterized -import tensorflow as tf -import sympy -import cirq - -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import linear_combination - - -def _simple_op_inputs(): - qubit = cirq.GridQubit(0, 0) - symbol = 'alpha' - circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol(symbol)) - op = cirq.X(qubit) - value = 0.3 - n_samples = 2000 - - # Return inputs prepped for expectation ops. - # circuit, symbol_names, values, ops, n_samples - # along with expected feedforward expectation - # and expected gradient. - return (util.convert_to_tensor([circuit]), tf.convert_to_tensor([symbol]), - tf.convert_to_tensor([[value]]), util.convert_to_tensor([[op]]), - tf.convert_to_tensor([[n_samples]]), - tf.convert_to_tensor([[np.sin(np.pi * value)]]), - tf.convert_to_tensor([[np.pi * np.cos(np.pi * value)]])) - - -class LinearCombinationTest(tf.test.TestCase, parameterized.TestCase): - """Test the LinearCombination based Differentiators.""" - - def test_linear_combination_instantiate(self): - """Test LinearCombinationDifferentiator type checking.""" - linear_combination.LinearCombination([1, 1], [1, 0]) - with self.assertRaisesRegex(TypeError, - expected_regex="weights must be"): - linear_combination.LinearCombination("junk", [1, 0]) - with self.assertRaisesRegex(TypeError, - expected_regex="perturbations must be"): - linear_combination.LinearCombination([1, 1], "junk") - with self.assertRaisesRegex(TypeError, - expected_regex="weight in weights"): - linear_combination.LinearCombination([1, "junk"], [1, 0]) - with self.assertRaisesRegex( - TypeError, expected_regex="perturbation in perturbations"): - linear_combination.LinearCombination([1, 1], [1, "junk"]) - with 
self.assertRaisesRegex(ValueError, expected_regex="length"): - linear_combination.LinearCombination([1, 1, 1], [1, 0]) - with self.assertRaisesRegex(ValueError, expected_regex="unique"): - linear_combination.LinearCombination([1, 1], [1, 1]) - - def test_forward_instantiate(self): - """Test ForwardDifference type checking.""" - linear_combination.ForwardDifference() - linear_combination.ForwardDifference(1, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive integer"): - linear_combination.ForwardDifference(0.1, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive integer"): - linear_combination.ForwardDifference(-1, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive integer"): - linear_combination.ForwardDifference(0, 0.1) - with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"): - linear_combination.ForwardDifference(1, -0.1) - with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"): - linear_combination.ForwardDifference(1, 1j) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'order_coef_perturbs': [(1, (-1, 1), ( - 0, 1)), (2, (-3 / 2, 2, -1 / 2), (0, 1, 2))], - 'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05] - }))) - def test_forward_coeffecients(self, order_coef_perturbs, grid_spacing): - """Test that ForwardDifference produces the right coeffecients for - common first and second order cases.""" - order = order_coef_perturbs[0] - expected_std_coeffs = order_coef_perturbs[1] - expected_perturbations = order_coef_perturbs[2] - forward = linear_combination.ForwardDifference(order, grid_spacing) - self.assertAllClose( - np.array(expected_std_coeffs) / grid_spacing, forward.weights) - self.assertAllClose( - np.array(expected_perturbations) * grid_spacing, - forward.perturbations) - - def test_central_instantiate(self): - """Test CentralDifference type checking.""" - linear_combination.CentralDifference() - 
linear_combination.CentralDifference(2, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive, even"): - linear_combination.CentralDifference(0.1, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive, even"): - linear_combination.CentralDifference(-1, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive, even"): - linear_combination.CentralDifference(0, 0.1) - with self.assertRaisesRegex(ValueError, - expected_regex="positive, even"): - linear_combination.CentralDifference(1, 0.1) - with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"): - linear_combination.CentralDifference(2, -0.1) - with self.assertRaisesRegex(ValueError, expected_regex="grid_spacing"): - linear_combination.CentralDifference(2, 1j) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'order_coef_perturbs': [(2, (-1 / 2, 1 / 2), (-1, 1)), - (4, (1 / 12, -8 / 12, 8 / 12, - -1 / 12), (-2, -1, 1, 2))], - 'grid_spacing': [0.1, 0.01, 0.5, 1, 0.05] - }))) - def test_central_coefficients(self, order_coef_perturbs, grid_spacing): - """Test that CentralDifference produces the right coefficients for - common first and second order cases.""" - order = order_coef_perturbs[0] - expected_std_coeffs = order_coef_perturbs[1] - expected_perturbations = order_coef_perturbs[2] - forward = linear_combination.CentralDifference(order, grid_spacing) - self.assertAllClose( - np.array(expected_std_coeffs) / grid_spacing, forward.weights) - self.assertAllClose( - np.array(expected_perturbations) * grid_spacing, - forward.perturbations) - - @parameterized.parameters([{ - 'diff': linear_combination.ForwardDifference() - }, { - 'diff': linear_combination.CentralDifference() - }]) - def test_analytic_functional(self, diff): - """Test that the differentiate_analytic function WORKS.""" - differentiable_op = diff.generate_differentiable_op( - analytic_op=circuit_execution_ops.get_expectation_op()) - circuit, names, values, 
ops, _, true_f, true_g = _simple_op_inputs() - with tf.GradientTape() as g: - g.watch(values) - res = differentiable_op(circuit, names, values, ops) - - # Just check that it computes without failing. - self.assertAllClose(true_f, res, atol=1e-2, rtol=1e-2) - self.assertAllClose(true_g, - g.gradient(res, values), - atol=1e-2, - rtol=1e-2) - - @parameterized.parameters([{ - 'diff': linear_combination.ForwardDifference() - }, { - 'diff': linear_combination.CentralDifference() - }]) - def test_sampled_functional(self, diff): - """Test that the differentiate_sampled function WORKS.""" - differentiable_op = diff.generate_differentiable_op( - sampled_op=circuit_execution_ops.get_sampled_expectation_op()) - circuit, names, values, ops, n_samples, true_f, true_g = \ - _simple_op_inputs() - with tf.GradientTape() as g: - g.watch(values) - res = differentiable_op(circuit, names, values, ops, n_samples) - - # Just check that it computes without failing. - self.assertAllClose(true_f, res, atol=1e-1, rtol=1e-1) - self.assertAllClose(true_g, - g.gradient(res, values), - atol=1e-1, - rtol=1e-1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/parameter_shift.py b/tensorflow_quantum/python/differentiators/parameter_shift.py deleted file mode 100644 index 85bf0a7e9..000000000 --- a/tensorflow_quantum/python/differentiators/parameter_shift.py +++ /dev/null @@ -1,341 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Compute analytic gradients by using general parameter-shift rule. """ -import tensorflow as tf - -from tensorflow_quantum.python.differentiators import differentiator -from tensorflow_quantum.python.differentiators import parameter_shift_util - - -class ParameterShift(differentiator.Differentiator): - """Calculate the general version of parameter-shift rule based gradients. - - This ParameterShift is the gradient estimator of the following paper: - - [arXiv:1905.13311](https://arxiv.org/abs/1905.13311), Gavin E. Crooks. - - This ParameterShift is used for any programs with parameterized gates. - It internally decomposes any programs into array of gates with at most - two distinct eigenvalues. - - >>> non_diff_op = tfq.get_expectation_op() - >>> linear_differentiator = tfq.differentiators.ParameterShift() - >>> # Get an expectation op, with this differentiator attached. - >>> op = linear_differentiator.generate_differentiable_op( - ... analytic_op=non_diff_op - ... ) - >>> qubit = cirq.GridQubit(0, 0) - >>> circuit = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) - ... ]) - >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) - >>> symbol_values_array = np.array([[0.123]], dtype=np.float32) - >>> # Calculate tfq gradient. - >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - >>> with tf.GradientTape() as g: - ... g.watch(symbol_values_tensor) - ... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) - >>> # This value is now computed via the ParameterShift rule. 
- >>> # https://arxiv.org/abs/1905.13311 - >>> grads = g.gradient(expectations, symbol_values_tensor) - >>> grads - tf.Tensor([[-1.1839752]], shape=(1, 1), dtype=float32) - - """ - - @tf.function - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - """Calculate the gradient. - - The gradient calculations follows the following steps: - - 1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) - 2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. - 3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - - **CAUTION** - Analytic gradient measurements based on this ParameterShift generally - run at least K(=2) times SLOWER than the original circuit. - On top of it, since all parameters of gates are shifted individually, - the time complexity is linear in the number of parameterized gates L. - So, you will see O(KL) slower time & space complexity than the original - forward pass measurements. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. 
- forward_pass_vals: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. - grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - Returns: - Backward gradient values for each program & each pauli sum. It has - the shape of [batch_size, n_symbols]. - """ - - # these get used a lot - n_symbols = tf.gather(tf.shape(symbol_names), 0) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - # Assume cirq.decompose() generates gates with at most two distinct - # eigenvalues, which results in two parameter shifts. - n_shifts = 2 - - # STEP 1: Generate required inputs for executor - # Deserialize programs and parse the whole parameterized gates - # new_programs has [n_symbols, n_param_gates, n_shifts, n_programs]. - # These new_programs has programs that parameter-shift rule is applied, - # so those programs has - new_programs, weights, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values, n_symbols) - - # Reshape & transpose new_programs, weights and shifts to fit into - # the input format of tensorflow_quantum simulator. 
- # [n_symbols, n_param_gates, n_shifts, n_programs] - new_programs = tf.transpose(new_programs, [0, 2, 3, 1]) - weights = tf.transpose(weights, [0, 2, 3, 1]) - shifts = tf.transpose(shifts, [0, 2, 3, 1]) - - # reshape everything to fit into expectation op correctly - total_programs = n_programs * n_shifts * n_param_gates * n_symbols - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape(new_programs, [total_programs]) - flat_shifts = tf.reshape(shifts, [total_programs]) - - # tile up and then reshape to order ops correctly - n_tile = n_shifts * n_param_gates * n_symbols - flat_perturbations = tf.concat([ - tf.reshape( - tf.tile(tf.expand_dims(symbol_values, 0), - tf.stack([n_tile, 1, 1])), [total_programs, n_symbols]), - tf.expand_dims(flat_shifts, axis=1) - ], - axis=1) - flat_ops = tf.reshape( - tf.tile(tf.expand_dims(pauli_sums, 0), tf.stack([n_tile, 1, 1])), - [total_programs, n_ops]) - # Append impurity symbol into symbol name - new_symbol_names = tf.concat([ - symbol_names, - tf.expand_dims(tf.constant( - parameter_shift_util._PARAMETER_IMPURITY_NAME), - axis=0) - ], - axis=0) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, new_symbol_names, - flat_perturbations, flat_ops) - - # STEP 3: generate gradients according to the results - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - expectations, - [n_symbols, n_shifts * n_programs * n_param_gates, -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [i * n_programs, 0], - [n_programs, n_ops]) - - return tf.map_fn(split_vertically, - tf.range(n_param_gates * n_shifts), - dtype=tf.float32) - - # reshape so that expectations calculated on different programs are - # separated by 
a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - grouped_expectations) - - # now we will calculate all of the partial derivatives - partials = tf.einsum( - 'spco,spc->sco', rearranged_expectations, - tf.cast( - tf.reshape(weights, - [n_symbols, n_param_gates * n_shifts, n_programs]), - rearranged_expectations.dtype)) - - # now apply the chain rule - return tf.einsum('sco,co -> cs', partials, grad) - - @tf.function - def differentiate_sampled(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): - """Calculate the gradient. - - The gradient calculations follows the following steps: - - 1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) - 2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. - 3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - - **CAUTION** - Analytic gradient measurements based on this ParameterShift generally - run at least K(=2) times SLOW than the original circuit. - On top of it, since all parameters of gates are shifted individually, - the time complexity is linear in the number of parameterized gates L. - So, you will see O(KL) slower time & space complexity than the original - forward pass measurements. - - Args: - programs: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. 
- pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - num_samples: `tf.Tensor` of positiver integers indicating the number - of samples used per term to calculate the expectation value - in the forward pass. - forward_pass_vals: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. - grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - Returns: - Backward gradient values for each program & each pauli sum. It has - the shape of [batch_size, n_symbols]. - """ - - # these get used a lot - n_symbols = tf.gather(tf.shape(symbol_names), 0) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - # Assume cirq.decompose() generates gates with at most two distinct - # eigenvalues, which results in two parameter shifts. - n_shifts = 2 - - # STEP 1: Generate required inputs for executor - # Deserialize programs and parse the whole parameterized gates - # new_programs has [n_symbols, n_param_gates, n_shifts, n_programs]. - # These new_programs has programs that parameter-shift rule is applied, - # so those programs has - new_programs, weights, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values, n_symbols) - - # Reshape & transpose new_programs, weights and shifts to fit into - # the input format of tensorflow_quantum simulator. 
- # [n_symbols, n_param_gates, n_shifts, n_programs] - new_programs = tf.transpose(new_programs, [0, 2, 3, 1]) - weights = tf.transpose(weights, [0, 2, 3, 1]) - shifts = tf.transpose(shifts, [0, 2, 3, 1]) - - # reshape everything to fit into expectation op correctly - total_programs = n_programs * n_shifts * n_param_gates * n_symbols - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape(new_programs, [total_programs]) - flat_shifts = tf.reshape(shifts, [total_programs]) - - # tile up and then reshape to order ops correctly - n_tile = n_shifts * n_param_gates * n_symbols - flat_perturbations = tf.concat([ - tf.reshape( - tf.tile(tf.expand_dims(symbol_values, 0), - tf.stack([n_tile, 1, 1])), [total_programs, n_symbols]), - tf.expand_dims(flat_shifts, axis=1) - ], - axis=1) - flat_ops = tf.reshape( - tf.tile(tf.expand_dims(pauli_sums, 0), tf.stack([n_tile, 1, 1])), - [total_programs, n_ops]) - flat_num_samples = tf.reshape( - tf.tile(tf.expand_dims(num_samples, 0), tf.stack([n_tile, 1, 1])), - [total_programs, n_ops]) - # Append impurity symbol into symbol name - new_symbol_names = tf.concat([ - symbol_names, - tf.expand_dims(tf.constant( - parameter_shift_util._PARAMETER_IMPURITY_NAME), - axis=0) - ], - axis=0) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, new_symbol_names, - flat_perturbations, flat_ops, - flat_num_samples) - - # STEP 3: generate gradients according to the results - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - expectations, - [n_symbols, n_shifts * n_programs * n_param_gates, -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [i * n_programs, 0], - [n_programs, n_ops]) - - return 
tf.map_fn(split_vertically, - tf.range(n_param_gates * n_shifts), - dtype=tf.float32) - - # reshape so that expectations calculated on different programs are - # separated by a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - grouped_expectations) - - # now we will calculate all of the partial derivatives - partials = tf.einsum( - 'spco,spc->sco', rearranged_expectations, - tf.cast( - tf.reshape(weights, - [n_symbols, n_param_gates * n_shifts, n_programs]), - rearranged_expectations.dtype)) - - # now apply the chain rule - return tf.einsum('sco,co -> cs', partials, grad) diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_test.py b/tensorflow_quantum/python/differentiators/parameter_shift_test.py deleted file mode 100644 index 34789af3a..000000000 --- a/tensorflow_quantum/python/differentiators/parameter_shift_test.py +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Basic tests for the ParameterShift differentiator""" -import numpy as np -from absl.testing import parameterized -import tensorflow as tf -import sympy -import cirq - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import parameter_shift -from tensorflow_quantum.core.ops import circuit_execution_ops - - -def _simple_op_inputs(): - qubit = cirq.GridQubit(0, 0) - symbol = 'alpha' - circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol(symbol)) - op = cirq.X(qubit) - value = 0.3 - n_samples = 2000 - - # Return inputs prepped for expectation ops. - # circuit, symbol_names, values, ops, n_samples - # along with expected feedforward expectation - # and expected gradient. - return (util.convert_to_tensor([circuit]), tf.convert_to_tensor([symbol]), - tf.convert_to_tensor([[value]]), util.convert_to_tensor([[op]]), - tf.convert_to_tensor([[n_samples]]), - tf.convert_to_tensor([[np.sin(np.pi * value)]]), - tf.convert_to_tensor([[np.pi * np.cos(np.pi * value)]])) - - -class ParameterShiftTest(tf.test.TestCase, parameterized.TestCase): - """Test the ParameterShift Differentiator will run end to end.""" - - def test_parameter_shift_analytic(self): - """Test if ParameterShift.differentiate_analytical doesn't crash before - running.""" - programs, names, values, ops, _, true_f, true_g = \ - _simple_op_inputs() - - ps = parameter_shift.ParameterShift() - op = ps.generate_differentiable_op( - analytic_op=circuit_execution_ops.get_expectation_op()) - - with tf.GradientTape() as g: - g.watch(values) - expectations = op(programs, names, values, ops) - grads = g.gradient(expectations, values) - self.assertAllClose(expectations, true_f, atol=1e-2, rtol=1e-2) - self.assertAllClose(grads, true_g, atol=1e-2, rtol=1e-2) - - def test_parameter_shift_sampled(self): - """Test if ParameterShift.differentiate_sampled doesn't crash before - running.""" - programs, names, 
values, ops, n_samples, true_f, true_g = \ - _simple_op_inputs() - ps = parameter_shift.ParameterShift() - op = ps.generate_differentiable_op( - sampled_op=circuit_execution_ops.get_sampled_expectation_op()) - - with tf.GradientTape() as g: - g.watch(values) - expectations = op(programs, names, values, ops, n_samples) - grads = g.gradient(expectations, values) - self.assertAllClose(expectations, true_f, atol=1e-1, rtol=1e-1) - self.assertAllClose(grads, true_g, atol=1e-1, rtol=1e-1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_util.py b/tensorflow_quantum/python/differentiators/parameter_shift_util.py deleted file mode 100644 index 5e7b143f8..000000000 --- a/tensorflow_quantum/python/differentiators/parameter_shift_util.py +++ /dev/null @@ -1,100 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Util functions for general parameter-shift rule. """ -import numpy as np -import tensorflow as tf - -from tensorflow_quantum.core.ops import tfq_ps_util_ops - -_PARAMETER_IMPURITY_NAME = '_param_shift' - - -@tf.function -def parse_programs(programs, - symbol_names, - symbol_values, - n_symbols, - n_shifts=2): - """Helper function to get parameter-shifted programs after parsing programs. - - - It follows: - 1. 
Decomposes given programs with `tfq_ps_decompose` c++ op. - 2. Construct new_programs with parameter-shifted copies of decomposed - programs by `tfq_ps_symbol_replace` c++ op. - 3. Weights and shifts are also obtained by `tfq_ps_weights_from_symbols` - 3. Transpose the results to fed them into TensorFlow Quantum simulator. - - Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - n_symbols: `tf.Tensor` of a positive integer representing the number of - symbols. - n_shifts: `tf.Tensor` of a positive integer representing the number of - parameter-shift terms. Defaults to 2. - - Returns: - new_programs: the new programs whose program has only one gate with - impurity parameter-shift symbol name. - [n_symbols, n_programs, n_param_gates, n_shifts] - weights: parameter-shift coefficients of estimated observables. - [n_symbols, n_programs, n_param_gates, n_shifts] - shifts: parameter-shifted values (= matrix of symbol_values +/-shift) - [n_symbols, n_programs, n_param_gates, n_shifts] - n_param_gates: bypass of input n_param_gates to export it outside - """ - decomposed_programs = tfq_ps_util_ops.tfq_ps_decompose(programs) - delta_eig = 2.0 - - # Collecting doped programs with impurity sympy.Symbol from all programs - # with parameterized gates. - impurity = tf.tile(tf.convert_to_tensor([_PARAMETER_IMPURITY_NAME]), - [n_symbols]) - symbols = tf.convert_to_tensor(symbol_names) - - # Doping impurity sympy.Symbol into programs per gate per symbol. 
- new_programs = tf.tile( - tf.expand_dims(tf.transpose( - tfq_ps_util_ops.tfq_ps_symbol_replace(decomposed_programs, symbols, - impurity), [1, 0, 2]), - axis=-1), [1, 1, 1, n_shifts]) - n_param_gates = tf.cast(tf.gather(tf.shape(new_programs), 2), - dtype=tf.int32) - - coeff = tf.expand_dims(tf.transpose( - tfq_ps_util_ops.tfq_ps_weights_from_symbols(decomposed_programs, - symbols), [1, 0, 2]), - axis=-1) - - weights_plus = coeff * np.pi * 0.5 * 0.5 * delta_eig - weights = tf.concat([weights_plus, -weights_plus], axis=-1) - shifts_plus = tf.math.divide_no_nan(tf.math.divide(1.0, delta_eig), coeff) - - val = tf.tile( - tf.expand_dims(tf.expand_dims(tf.transpose(symbol_values, [1, 0]), - axis=-1), - axis=-1), [1, 1, n_param_gates, n_shifts]) - - shifts = val + tf.concat([shifts_plus, -shifts_plus], axis=-1) - - return new_programs, weights, shifts, n_param_gates diff --git a/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py b/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py deleted file mode 100644 index ee6e03f4b..000000000 --- a/tensorflow_quantum/python/differentiators/parameter_shift_util_test.py +++ /dev/null @@ -1,105 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Basic tests for utility functions for ParameterShift""" -import numpy as np -from absl.testing import parameterized -import tensorflow as tf -import sympy -import cirq - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import parameter_shift_util - - -class ParameterShiftUtilTest(tf.test.TestCase, parameterized.TestCase): - """Test the parameter_shift_util module.""" - - def test_parse_programs(self): - """Input & output check for parse_programs().""" - n_qubits = 5 - n_programs = 3 - n_shifts = 2 - symbol_names = ['a', 'b'] - n_symbols = len(symbol_names) - sympy_symbols = [sympy.Symbol(s) for s in symbol_names] - coeff = [1.0, -2.0, 3.0, -4.0, 5.0] - # Test circuit. - # (0, 0): ───Rz(1.0*a)──── - # - # (0, 1): ───Rz(-2.0*b)─── - # - # (0, 2): ───Rz(3.0*a)──── - # - # (0, 3): ───Rz(-4.0*b)─── - # - # (0, 4): ───Rz(5.0*a)──── - q = cirq.GridQubit.rect(1, n_qubits) - c = cirq.Circuit() - c.append([ - cirq.Rz(coeff[i] * sympy_symbols[i % 2]).on(q[i]) - for i in range(n_qubits) - ]) - circuit_batch = [c] * n_programs - symbol_values_array = np.array( - [[i for i, _ in enumerate(symbol_names)] for _ in range(n_programs) - ], - dtype=np.float32) - - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - - new_programs, weights, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values_tensor, n_symbols) - - # shape check - ground_truth_shape = [n_symbols, n_programs, n_param_gates, n_shifts] - tf.assert_equal(ground_truth_shape, tf.shape(new_programs)) - tf.assert_equal(ground_truth_shape, tf.shape(weights)) - tf.assert_equal(ground_truth_shape, tf.shape(shifts)) - - # value check (1) weights - # the first 1x3x3x2 are +/- coefficients of Rz gates with symbol 'a'. - # they are divided by 2 in Rz. 
- # [:,:,:,0] have original coefficient and [:,:,:,1] are their negatives. - # the second 1x3x3x2 are with symbol 'b'. As we know, there are only - # 2 'b' symbols, which makes [1,:,2,:] are zeros. (padded) - ground_truth_weights = np.array([[[[0.5, -0.5], [1.5, -1.5], - [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], - [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], - [2.5, -2.5]]], - [[[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]]]]) - self.assertAllClose(ground_truth_weights, weights) - # value check (2) shifts - # Please ignore this divide-by-zero warning because it is intended. - ground_truth_shifts = np.divide(1, ground_truth_weights) / 4.0 * np.pi - new_symbol_values_array = np.tile( - np.expand_dims(np.expand_dims(np.transpose(symbol_values_array, - [1, 0]), - axis=-1), - axis=-1), [1, 1, 3, 2]) - # All inf's should be 0.0. This happens inside parse_programs() - # with tf.math.divide_no_nan() without any warning. - ground_truth_shifts[np.where(np.isinf(ground_truth_shifts))] = 0.0 - ground_truth_shifts = new_symbol_values_array + ground_truth_shifts - self.assertAllClose(ground_truth_shifts, shifts) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/stochastic_differentiator.py b/tensorflow_quantum/python/differentiators/stochastic_differentiator.py deleted file mode 100644 index 5e6b14c33..000000000 --- a/tensorflow_quantum/python/differentiators/stochastic_differentiator.py +++ /dev/null @@ -1,456 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Compute gradients by using stochastic generator. -For the test of this SGDifferentiator's consistency & convergence, please see: -//benchmarks/scripts/differentiators:convergence_test -""" -import tensorflow as tf - -from tensorflow_quantum.python.differentiators import differentiator, \ - parameter_shift_util, stochastic_differentiator_util as sd_util - - -class SGDifferentiator(differentiator.Differentiator): - """Stochastic generator based differentiator class. - SGDifferentiator allows you to get the sampled gradient value from three - different stochastic processes: - - parameter coordinate sampling - Choose one of the symbols of the given programs and perform coordinate - descent optimization. - e.g. if a program has parameters ['a','b','c'], choose 'a' w.r.t given - probability and get the partial derivative of the direction 'a' only - - parameter-shift rule generators sampling - e.g. Given symbols, there could be many operators sharing the same - symbol, X**'a', Y**'a', Z**'a'. Choose Y**'a' w.r.t given - probability and get the partial derivative of the generator. - - cost Hamiltonian sampling - e.g. if there are cost Hamiltonians such as ['Z1',Z2',Z3'], then choose - 'Z2' w.r.t given probability and get the partial derivative of the - Hamiltonian observable only. - and the expectation value of the sampled gradient value converges into - the true ground truth gradient value. 
- This Stochastic Generator Differentiator is the modified gradient estimator - of the following two papers: - - [arXiv:1901.05374](https://arxiv.org/abs/1901.05374), Harrow et al. - - [arXiv:1910.01155](https://arxiv.org/abs/1910.01155), Sweke et al. - - >>> # Get an expectation op. - >>> my_op = tfq.get_expectation_op() - >>> # Attach a differentiator. - >>> my_dif = tfq.differentiators.SGDifferentiator() - >>> op = my_dif.generate_differentiable_op( - ... analytic_op=my_op - ... ) - >>> qubit = cirq.GridQubit(0, 0) - >>> circuit = tfq.convert_to_tensor([ - ... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) - ... ]) - >>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) - >>> symbol_values_array = np.array([[0.123]], dtype=np.float32) - >>> # Calculate tfq gradient. - >>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - >>> with tf.GradientTape() as g: - ... g.watch(symbol_values_tensor) - ... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) - >>> # This value is now computed via the stochastic processes described in: - >>> # https://arxiv.org/abs/1901.05374 - >>> # https://arxiv.org/abs/1910.01155 - >>> grads = g.gradient(expectations, symbol_values_tensor) - >>> # the result is non-deterministic in general, but in this special case, - >>> # it has only one result. - >>> grads - - - """ - - def __init__(self, - stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=True, - uniform_sampling=False): - """Instantiate this differentiator. - Create a SGDifferentiator. - Args: - stochastic_coordinate: Python `bool` to determine if - sampling on coordinate is performed or not. Default to True. - stochastic_generator: Python `bool` to determine if - sampling on generator is performed or not. Default to True. - stochastic_cost: Python `bool` to determine if sampling on - cost Hamiltonian is performed or not. Default to True. 
- uniform_sampling: Python `bool` to determine the - probabilistic distributions on the sampling targets. - Default to False. - """ - - def _boolean_type_check(variable, variable_name): - if variable != True and variable != False: - raise TypeError("{} must be boolean: Got {} {}".format( - variable_name, variable, type(variable))) - - _boolean_type_check(stochastic_coordinate, "stochastic_coordinate") - _boolean_type_check(stochastic_generator, "stochastic_generator") - _boolean_type_check(stochastic_cost, "stochastic_cost") - _boolean_type_check(uniform_sampling, "uniform_sampling") - - self.stochastic_coordinate = stochastic_coordinate - self.stochastic_generator = stochastic_generator - self.stochastic_cost = stochastic_cost - self.uniform_sampling = uniform_sampling - - @tf.function - def differentiate_analytic(self, programs, symbol_names, symbol_values, - pauli_sums, forward_pass_vals, grad): - """Compute the sampled gradient with cascaded stochastic processes. - The gradient calculations follows the following steps: - 1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) - 2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums - 3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. - Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. 
- symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] - Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - """ - n_symbols = tf.gather(tf.shape(symbol_values), 1) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - n_shifts = 2 - - # STEP 1: Generate required inputs for executor by using parsers - - # Deserialize programs and parse the whole parameterized gates - # new_programs has [n_symbols, n_programs, n_param_gates, n_shifts]. - new_programs, weights, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values, n_symbols) - - if self.stochastic_generator: - # Result : [n_symbols, n_programs, n_param_gates=1, n_shifts]. - new_programs, weights, shifts, n_param_gates = \ - sd_util.stochastic_generator_preprocessor( - new_programs, weights, shifts, n_programs, n_symbols, - n_param_gates, n_shifts, self.uniform_sampling) - - # Reshape & transpose new_programs, weights and shifts to fit into - # the input format of tensorflow_quantum simulator. 
- # [n_symbols, n_param_gates, n_shifts, n_programs] - new_programs = tf.transpose(new_programs, [0, 2, 3, 1]) - weights = tf.transpose(weights, [0, 2, 3, 1]) - shifts = tf.transpose(shifts, [0, 2, 3, 1]) - - if self.stochastic_cost: - # Result : pauli_sums [n_programs, n_ops] -> [n_programs, n_ops=1] - pauli_sums, cost_relocator, n_ops = \ - sd_util.stochastic_cost_preprocessor( - pauli_sums, n_programs, n_ops, self.uniform_sampling) - - if self.stochastic_coordinate: - flat_programs, flat_perturbations, flat_ops, _, weights, \ - coordinate_relocator = sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values, pauli_sums, weights, shifts, - n_programs, n_symbols, n_param_gates, n_shifts, n_ops, - self.uniform_sampling) - else: - # reshape everything to fit into expectation op correctly - total_programs = n_programs * n_shifts * n_symbols * n_param_gates - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape(new_programs, [total_programs]) - flat_shifts = tf.reshape(shifts, [total_programs]) - - # tile up and then reshape to order ops correctly - n_tile = n_shifts * n_symbols * n_param_gates - flat_perturbations = tf.concat([ - tf.reshape( - tf.tile(tf.expand_dims(symbol_values, 0), - tf.stack([n_tile, 1, 1])), - [total_programs, n_symbols]), - tf.expand_dims(flat_shifts, axis=1) - ], - axis=1) - flat_ops = tf.reshape( - tf.tile(tf.expand_dims(pauli_sums, 0), - tf.stack([n_tile, 1, 1])), [total_programs, n_ops]) - - # Append impurity symbol into symbol name - new_symbol_names = tf.concat([ - symbol_names, - tf.expand_dims(tf.constant( - parameter_shift_util._PARAMETER_IMPURITY_NAME), - axis=0) - ], - axis=0) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, new_symbol_names, - flat_perturbations, flat_ops) - - # STEP 3: generate gradients according to the results - if self.stochastic_coordinate: - # Transpose to the original shape - # [n_symbols, n_programs, 
n_param_gates, n_shifts] - # - # coordinate_relocator has [sub_total_programs, n_symbols](=ij) - # expectations has [sub_total_programs, n_ops](=ik) - # einsum -> [n_ops, n_symbols, sub_total_programs](=kji) - expectations = tf.einsum( - 'ij,ik->kji', tf.cast(coordinate_relocator, dtype=tf.float64), - tf.cast(expectations, dtype=tf.float64)) - # Transpose to [n_symbols, sub_total_programs, n_ops] - expectations = tf.transpose(expectations, [1, 2, 0]) - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - tf.cast(expectations, dtype=tf.float64), - [n_symbols, n_shifts * n_programs * n_param_gates, -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [i * n_programs, 0], - [n_programs, n_ops]) - - return tf.map_fn(split_vertically, - tf.range(n_param_gates * n_shifts), - dtype=tf.float64) - - # reshape so that expectations calculated on different programs are - # separated by a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - grouped_expectations, - dtype=tf.float64) - - # now we will calculate all of the partial derivatives - # s: symbol, p: perturbation, c: circuit, o: ops - partials = tf.einsum( - 'spco,spc->sco', rearranged_expectations, - tf.cast(tf.reshape( - weights, [n_symbols, n_param_gates * n_shifts, n_programs]), - dtype=tf.float64)) - - if self.stochastic_cost: - # Reshape to the original n_ops shape - # partials: [n_symbols, n_programs, n_ops=1] - # cost_relocator: [n_programs, original_n_ops] - # Result: [n_symbols, n_programs, original_n_ops] - partials = partials * tf.stop_gradient( - tf.cast(cost_relocator, dtype=tf.float64)) - - # now apply the chain rule - # cast partials back to float32 - return tf.cast( - tf.einsum('sco,co -> cs', partials, - tf.cast(grad, dtype=tf.float64)), 
tf.float32) - - @tf.function - def differentiate_sampled(self, programs, symbol_names, symbol_values, - pauli_sums, num_samples, forward_pass_vals, grad): - """Compute the sampled gradient with cascaded stochastic processes. - The gradient calculations follows the following steps: - 1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) - 2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums - 3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. - Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. 
- forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] - Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - """ - n_symbols = tf.gather(tf.shape(symbol_values), 1) - n_programs = tf.gather(tf.shape(programs), 0) - n_ops = tf.gather(tf.shape(pauli_sums), 1) - n_shifts = 2 - - # STEP 1: Generate required inputs for executor by using parsers - - # Deserialize programs and parse the whole parameterized gates - # new_programs has [n_symbols, n_programs, n_param_gates, n_shifts]. - new_programs, weights, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values, n_symbols) - - if self.stochastic_generator: - # Result : [n_symbols, n_programs, n_param_gates=1, n_shifts]. - new_programs, weights, shifts, n_param_gates = \ - sd_util.stochastic_generator_preprocessor( - new_programs, weights, shifts, n_programs, n_symbols, - n_param_gates, n_shifts, self.uniform_sampling) - - # Reshape & transpose new_programs, weights and shifts to fit into - # the input format of tensorflow_quantum simulator. 
- # [n_symbols, n_param_gates, n_shifts, n_programs] - new_programs = tf.transpose(new_programs, [0, 2, 3, 1]) - weights = tf.transpose(weights, [0, 2, 3, 1]) - shifts = tf.transpose(shifts, [0, 2, 3, 1]) - - if self.stochastic_cost: - # Result : pauli_sums [n_programs, n_ops] -> [n_programs, n_ops=1] - pauli_sums, cost_relocator, n_ops = \ - sd_util.stochastic_cost_preprocessor( - pauli_sums, n_programs, n_ops, self.uniform_sampling) - - if self.stochastic_coordinate: - flat_programs, flat_perturbations, flat_ops, flat_num_samples, \ - weights, coordinate_relocator = \ - sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values, pauli_sums, weights, shifts, - n_programs, n_symbols, n_param_gates, n_shifts, n_ops, - self.uniform_sampling, num_samples=num_samples) - else: - # reshape everything to fit into expectation op correctly - total_programs = n_programs * n_shifts * n_symbols * n_param_gates - # tile up and then reshape to order programs correctly - flat_programs = tf.reshape(new_programs, [total_programs]) - flat_shifts = tf.reshape(shifts, [total_programs]) - - # tile up and then reshape to order ops correctly - n_tile = n_shifts * n_symbols * n_param_gates - flat_perturbations = tf.concat([ - tf.reshape( - tf.tile(tf.expand_dims(symbol_values, 0), - tf.stack([n_tile, 1, 1])), - [total_programs, n_symbols]), - tf.expand_dims(flat_shifts, axis=1) - ], - axis=1) - flat_ops = tf.reshape( - tf.tile(tf.expand_dims(pauli_sums, 0), - tf.stack([n_tile, 1, 1])), [total_programs, n_ops]) - flat_num_samples = tf.reshape( - tf.tile(tf.expand_dims(num_samples, 0), - tf.stack([n_tile, 1, 1])), [total_programs, n_ops]) - - # Append impurity symbol into symbol name - new_symbol_names = tf.concat([ - symbol_names, - tf.expand_dims(tf.constant( - parameter_shift_util._PARAMETER_IMPURITY_NAME), - axis=0) - ], - axis=0) - - # STEP 2: calculate the required expectation values - expectations = self.expectation_op(flat_programs, new_symbol_names, - 
flat_perturbations, flat_ops, - flat_num_samples) - - # STEP 3: generate gradients according to the results - if self.stochastic_coordinate: - # Transpose to the original shape - # [n_symbols, n_programs, n_param_gates, n_shifts] - # - # coordinate_relocator has [sub_total_programs, n_symbols](=ij) - # expectations has [sub_total_programs, n_ops](=ik) - # einsum -> [n_ops, n_symbols, sub_total_programs](=kji) - expectations = tf.einsum( - 'ij,ik->kji', tf.cast(coordinate_relocator, dtype=tf.float64), - tf.cast(expectations, dtype=tf.float64)) - # Transpose to [n_symbols, sub_total_programs, n_ops] - expectations = tf.transpose(expectations, [1, 2, 0]) - - # we know the rows are grouped according to which parameter - # was perturbed, so reshape to reflect that - grouped_expectations = tf.reshape( - tf.cast(expectations, dtype=tf.float64), - [n_symbols, n_shifts * n_programs * n_param_gates, -1]) - - # now we can calculate the partial of the circuit output with - # respect to each perturbed parameter - def rearrange_expectations(grouped): - - def split_vertically(i): - return tf.slice(grouped, [i * n_programs, 0], - [n_programs, n_ops]) - - return tf.map_fn(split_vertically, - tf.range(n_param_gates * n_shifts), - dtype=tf.float64) - - # reshape so that expectations calculated on different programs are - # separated by a dimension - rearranged_expectations = tf.map_fn(rearrange_expectations, - grouped_expectations, - dtype=tf.float64) - - # now we will calculate all of the partial derivatives - # s: symbol, p: perturbation, c: circuit, o: ops - partials = tf.einsum( - 'spco,spc->sco', rearranged_expectations, - tf.cast(tf.reshape( - weights, [n_symbols, n_param_gates * n_shifts, n_programs]), - dtype=tf.float64)) - - if self.stochastic_cost: - # Reshape to the original n_ops shape - # partials: [n_symbols, n_programs, n_ops=1] - # cost_relocator: [n_programs, original_n_ops] - # Result: [n_symbols, n_programs, original_n_ops] - partials = partials * tf.stop_gradient( 
- tf.cast(cost_relocator, dtype=tf.float64)) - - # now apply the chain rule - # cast partials back to float32 - return tf.cast( - tf.einsum('sco,co -> cs', partials, - tf.cast(grad, dtype=tf.float64)), tf.float32) diff --git a/tensorflow_quantum/python/differentiators/stochastic_differentiator_test.py b/tensorflow_quantum/python/differentiators/stochastic_differentiator_test.py deleted file mode 100644 index 58077070b..000000000 --- a/tensorflow_quantum/python/differentiators/stochastic_differentiator_test.py +++ /dev/null @@ -1,131 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Basic tests for SGDifferentiator.""" -import numpy as np -from absl.testing import parameterized -import tensorflow as tf -import sympy -import cirq - -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python.differentiators import stochastic_differentiator -from tensorflow_quantum.python import util - - -def _simple_op_inputs(): - qubit = cirq.GridQubit(0, 0) - symbol = 'alpha' - circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol(symbol)) - op = cirq.X(qubit) - value = 0.3 - n_samples = 2000 - - # Return inputs prepped for expectation ops. - # circuit, symbol_names, values, ops, n_samples - # along with expected feedforward expectation - # and expected gradient. 
- return (util.convert_to_tensor([circuit]), tf.convert_to_tensor([symbol]), - tf.convert_to_tensor([[value]]), util.convert_to_tensor([[op]]), - tf.convert_to_tensor([[n_samples]]), - tf.convert_to_tensor([[np.sin(np.pi * value)]]), - tf.convert_to_tensor([[np.pi * np.cos(np.pi * value)]])) - - -class SGDifferentiatorTest(tf.test.TestCase, parameterized.TestCase): - """Test the SGDifferentiator will run end to end.""" - - def test_stochastic_differentiator_instantiate(self): - """Test SGDifferentiator type checking.""" - stochastic_differentiator.SGDifferentiator() - with self.assertRaisesRegex( - TypeError, expected_regex="stochastic_coordinate must be"): - stochastic_differentiator.SGDifferentiator(stochastic_coordinate=1) - stochastic_differentiator.SGDifferentiator( - stochastic_coordinate=0.1) - stochastic_differentiator.SGDifferentiator( - stochastic_coordinate=[1]) - stochastic_differentiator.SGDifferentiator( - stochastic_coordinate="junk") - with self.assertRaisesRegex( - TypeError, expected_regex="stochastic_generator must be"): - stochastic_differentiator.SGDifferentiator(stochastic_generator=1) - stochastic_differentiator.SGDifferentiator(stochastic_generator=0.1) - stochastic_differentiator.SGDifferentiator(stochastic_generator=[1]) - stochastic_differentiator.SGDifferentiator( - stochastic_generator="junk") - with self.assertRaisesRegex(TypeError, - expected_regex="stochastic_cost must be"): - stochastic_differentiator.SGDifferentiator(stochastic_cost=1) - stochastic_differentiator.SGDifferentiator(stochastic_cost=0.1) - stochastic_differentiator.SGDifferentiator(stochastic_cost=[1]) - stochastic_differentiator.SGDifferentiator(stochastic_cost="junk") - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'coordinate': [True, False], - 'generator': [True, False], - 'cost': [True, False], - 'uniform': [True, False] - }))) - def test_stochastic_differentiator_call_analytic(self, coordinate, - generator, cost, uniform): - 
"""Test if SGDifferentiator.differentiate_analytical doesn't crash - before running.""" - programs, names, values, ops, _, true_f, true_g = \ - _simple_op_inputs() - diff = stochastic_differentiator.SGDifferentiator( - coordinate, generator, cost, uniform) - op = diff.generate_differentiable_op( - analytic_op=circuit_execution_ops.get_expectation_op()) - - with tf.GradientTape() as g: - g.watch(values) - expectations = op(programs, names, values, ops) - grads = g.gradient(expectations, values) - self.assertAllClose(expectations, true_f, atol=1e-2, rtol=1e-2) - self.assertAllClose(grads, true_g, atol=1e-2, rtol=1e-2) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'coordinate': [True, False], - 'generator': [True, False], - 'cost': [True, False], - 'uniform': [True, False] - }))) - def test_stochastic_differentiator_call_sampled(self, coordinate, generator, - cost, uniform): - """Test if SGDifferentiator.differentiate_sampled doesn't crash before - running.""" - programs, names, values, ops, n_samples, true_f, true_g = \ - _simple_op_inputs() - diff = stochastic_differentiator.SGDifferentiator( - coordinate, generator, cost, uniform) - op = diff.generate_differentiable_op( - sampled_op=circuit_execution_ops.get_sampled_expectation_op()) - - with tf.GradientTape() as g: - g.watch(values) - expectations = op(programs, names, values, ops, n_samples) - grads = g.gradient(expectations, values) - self.assertAllClose(expectations, true_f, atol=1e-1, rtol=1e-1) - self.assertAllClose(grads, true_g, atol=1e-1, rtol=1e-1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/differentiators/stochastic_differentiator_util.py b/tensorflow_quantum/python/differentiators/stochastic_differentiator_util.py deleted file mode 100644 index 395132b03..000000000 --- a/tensorflow_quantum/python/differentiators/stochastic_differentiator_util.py +++ /dev/null @@ -1,442 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility functions for stochastic generator differentiator.""" -import tensorflow as tf - -from tensorflow_quantum.python import util - - -def _get_pdist_shifts(weights, uniform_sampling): - """Helper function to calculate probabilistic distributions of sampling - shifts. - correction_factor: `tf.Tensor` of real numbers for correction factors - ${1\over{prob(k)}}={{\sum_k |\gamma_k|}\over{|\gamma_k|}}$ where - $prob(k)$ is sampling probability of k-th parameter-shift sampling - term among `n_param_gates` possible samples. - If uniform_sampling = True, it has integers equal to or less than - `n_param_gates` because only non-zero terms are considered. - It has the shape of [sub_total_programs, n_total_samples]. - Args: - weights: `tf.Tensor` of real numbers for parameter-shift weights. - [sub_total_programs, n_param_gates] - uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - Returns: - corrected_weights: `tf.Tensor` of real numbers for new weights used in - sampling process. It is multiplied by correction factors as above. - [sub_total_programs, 1]. - pdist: `tf.Tensor` of probabilistic distribution of given terms - $prob(k)$ - [sub_total_programs, 1]. 
- """ - if uniform_sampling: - non_zeros = tf.cast(tf.not_equal(weights, 0.0), dtype=tf.float32) - correction_factor = tf.reduce_sum(non_zeros, axis=1, keepdims=True) - pdist = tf.math.divide_no_nan(non_zeros, correction_factor) - else: - weights_abs = tf.abs(weights) - pdist = tf.math.divide_no_nan( - weights_abs, tf.reduce_sum(weights_abs, axis=1, keepdims=True)) - correction_factor = tf.math.divide_no_nan( - 1.0, tf.cast(pdist, dtype=tf.float32)) - corrected_weights = correction_factor * weights - return corrected_weights, pdist - - -def _sampling_helper_from_pdist_shifts(pdist_shifts, sub_total_programs, - n_param_gates): - pdist_shifts = tf.reshape(pdist_shifts, [-1, 2 * n_param_gates]) - sampled_idx = tf.random.categorical( - tf.math.log(pdist_shifts[:, :n_param_gates]), 1) - sampled_idx = tf.reshape(tf.tile(sampled_idx, [1, 2]), - [sub_total_programs, 1]) - return sampled_idx - - -# TODO(jaeyoo) : this will be c++ op -def stochastic_generator_preprocessor(new_programs, weights, shifts, n_programs, - n_symbols, n_param_gates, n_shifts, - uniform_sampling): - """Helper function to sample parameter shift rule terms. - It can be two of the followings: - - uniform distribution prob(k) = 1/n_param_gates - - parameter-shift weight-based probability distributions - Args: - new_programs: `tf.Tensor' of deserialized parameter-shifted - program strings with the shape of - [n_symbols, n_programs, n_param_gates, n_shifts]. - weights: `tf.Tensor` of real numbers for parameter-shift weights. - [n_symbols, n_programs, n_param_gates, n_shifts] - shifts: `tf.Tensor` of real numbers for shift values. - [n_symbols, n_programs, n_param_gates, n_shifts] - n_programs: `tf.Tensor` of the number of programs. - n_symbols: `tf.Tensor` of the number of symbols. - n_param_gates: `tf.Tensor` of the number of maximum parameter gates - given all programs. - n_shifts: `tf.Tensor` of the number of shift terms. 
- uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - Returns: - newly sampled new_programs, weights, shifts, whose are `tf.Tensor` with - the shape of [n_symbols, n_programs, n_param_gates=1, n_shifts]. - n_param_gates: this is used at the post-processing. - """ - sub_total_programs = n_symbols * n_programs * n_shifts - - # Transpose to [n_symbols, n_programs, n_shifts, n_param_gates] - new_programs = tf.transpose(new_programs, [0, 1, 3, 2]) - weights = tf.transpose(weights, [0, 1, 3, 2]) - shifts = tf.transpose(shifts, [0, 1, 3, 2]) - - new_programs = tf.reshape(new_programs, [sub_total_programs, n_param_gates]) - weights = tf.reshape(weights, [sub_total_programs, n_param_gates]) - shifts = tf.reshape(shifts, [sub_total_programs, n_param_gates]) - - corrected_weights, pdist_shifts = _get_pdist_shifts(weights, - uniform_sampling) - - sampled_idx = _sampling_helper_from_pdist_shifts(pdist_shifts, - sub_total_programs, - n_param_gates) - # TODO(jaeyoo) : make sure all symbols appear in circuit. - # not appearing symbols make probability distribution with 0.0 logits. - # tf.random.categorical outputs out-of-index value if all logits are 0.0 - # this makes tf.gather_nd fail due to ouf-of-index error. - # for now, it was fixed by adding a dummy additional column. - # BUT, find the way to mask no-show symbols, and reduce n_symbols. 
- new_programs = tf.concat([new_programs, new_programs[:, :1]], axis=-1) - corrected_weights = tf.concat( - [corrected_weights, - tf.zeros((sub_total_programs, 1))], axis=-1) - shifts = tf.concat([shifts, tf.zeros([sub_total_programs, 1])], axis=-1) - - new_programs = tf.gather_nd(new_programs, sampled_idx, batch_dims=1) - weights = tf.gather_nd(corrected_weights, sampled_idx, batch_dims=1) - shifts = tf.gather_nd(shifts, sampled_idx, batch_dims=1) - - n_param_gates = 1 - new_programs = tf.reshape(new_programs, - [n_symbols, n_programs, n_shifts, n_param_gates]) - weights = tf.reshape(weights, - [n_symbols, n_programs, n_shifts, n_param_gates]) - shifts = tf.reshape(shifts, - [n_symbols, n_programs, n_shifts, n_param_gates]) - - # Return back to [n_symbols, n_programs, n_param_gates=1, n_shifts]. - new_programs = tf.transpose(new_programs, [0, 1, 3, 2]) - weights = tf.transpose(weights, [0, 1, 3, 2]) - shifts = tf.transpose(shifts, [0, 1, 3, 2]) - - return new_programs, weights, shifts, n_param_gates - - -def _get_pdist_symbols(weights, uniform_sampling): - """Helper function to calculate probabilistic distributions of sampling - symbols. - correction_factor: `tf.Tensor` of real numbers for correction factors - ${1\over{prob(k)}}={{\sum_k |\gamma_k|}\over{|\gamma_k|}}$ where - $prob(k)$ is sampling probability of k-th parameter-shift sampling - term among `n_symbols` possible samples. - If uniform_sampling = True, it has integers equal to or less than - `n_symbols` because only non-zero terms are considered. - It has the shape of [sub_total_programs, n_symbols]. - Args: - weights: `tf.Tensor` of real numbers for parameter-shift weights. - [sub_total_programs, n_total_samples] - uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - Returns: - corrected_weights: `tf.Tensor` of real numbers for new weights used in - sampling process. It is multiplied by correction factors as above. - [1, n_symbols]. 
- pdist: `tf.Tensor` of probabilistic distribution of given terms - $prob(k)$ - [1, n_symbols]. - """ - if uniform_sampling: - non_zeros = tf.cast(tf.not_equal(weights, 0.0), dtype=tf.float32) - pdist = tf.reduce_sum(non_zeros, axis=0, - keepdims=True) / tf.reduce_sum(non_zeros) - else: - weights_abs = tf.abs(weights) - pdist = tf.reduce_sum(weights_abs, axis=0, - keepdims=True) / tf.reduce_sum(weights_abs) - - correction_factor = tf.math.divide_no_nan( - tf.ones_like(pdist, dtype=tf.float32), tf.cast(pdist, dtype=tf.float32)) - corrected_weights = correction_factor * weights - return corrected_weights, pdist - - -# TODO(jaeyoo) : this will be c++ op -def stochastic_coordinate_preprocessor(new_programs, - symbol_values, - pauli_sums, - weights, - shifts, - n_programs, - n_symbols, - n_param_gates, - n_shifts, - n_ops, - uniform_sampling, - num_samples=None): - """Helper function to sample symbols. - It can be two of the followings: - - uniform distribution prob(k) = 1/n_symbols - - parameter-shift weight-based probability distributions - Args: - new_programs: `tf.Tensor' of deserialized parameter-shifted - program strings with the shape of - [n_symbols, n_programs, n_param_gates, n_shifts]. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - weights: `tf.Tensor` of real numbers for parameter-shift weights. - [n_symbols, n_programs, n_param_gates, n_shifts] - shifts: `tf.Tensor` of real numbers for shift values. - [n_symbols, n_programs, n_param_gates, n_shifts] - n_programs: `tf.Tensor` of the number of programs. - n_symbols: `tf.Tensor` of the number of symbols. - n_param_gates: `tf.Tensor` of the number of maximum parameter gates - given all programs. - n_shifts: `tf.Tensor` of the number of shift terms. 
- n_ops: `tf.Tensor` of the number of pauli sums. - uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - num_samples : Optional `tf.Tensor` of the numbers of samples. - Defaults to None. - Returns: - flat_programs: `tf.Tensor' of the programs of the newly sampled symbols. - [n_programs * n_param_gates * n_shifts]. - flat_perturbations: `tf.Tensor' of real numbers of perturbations of the - newly sampled symbols. - [n_programs * n_param_gates * n_shifts, (n_symbols + 1)]. - flat_ops : `tf.Tensor` of strings of the newly sampled output - observables. - [n_programs * n_param_gates * n_shifts, n_ops]. - flat_num_samples : `tf.Tensor` of int32 of the numbers of samples. - [n_programs * n_param_gates * n_shifts, n_ops]. - weights: `tf.Tensor` of real numbers of re-sampled weights. - this is used at the post-processing. - [n_symbols, n_param_gates, n_shifts, n_programs] - coordinate_relocator: `tf.Tensor` of one-hot matrix with real numbers. - This is used to restore squeezed symbol dimension at the - post-processing. 
- [n_programs * n_param_gates * n_shifts, n_symbols] - """ - sub_total_programs = n_programs * n_shifts * n_param_gates - # [n_param_gates, n_shifts, n_programs, n_symbols] - new_programs = tf.transpose(new_programs, [1, 2, 3, 0]) - weights = tf.transpose(weights, [1, 2, 3, 0]) - shifts = tf.transpose(shifts, [1, 2, 3, 0]) - - new_programs = tf.reshape(new_programs, [sub_total_programs, n_symbols]) - weights = tf.reshape(weights, [sub_total_programs, n_symbols]) - shifts = tf.reshape(shifts, [sub_total_programs, n_symbols]) - - corrected_weights, pdist_symbols = _get_pdist_symbols( - weights, uniform_sampling) - - sampled_idx = tf.transpose( - tf.random.categorical(tf.math.log(pdist_symbols), sub_total_programs), - [1, 0]) - - flat_programs = tf.gather_nd(new_programs, sampled_idx, batch_dims=1) - flat_shifts = tf.gather_nd(shifts, sampled_idx, batch_dims=1) - # It doesn't change n_symbols because it loses locations info of - # symbol_names. Rather we use one_hot matrix to restore the - # locations. 
- weights = tf.gather_nd(corrected_weights, sampled_idx, batch_dims=1) - coordinate_relocator = tf.reshape(tf.one_hot(sampled_idx, depth=n_symbols), - [-1, n_symbols]) - - # Return back to [n_param_gates, n_shifts, n_programs, n_symbols]) - weights = tf.reshape(tf.einsum('ij,i->ij', coordinate_relocator, weights), - [n_param_gates, n_shifts, n_programs, n_symbols]) - - # Transpose to the original shape - # [n_symbols, n_param_gates, n_shifts, n_programs] - weights = tf.transpose(weights, [3, 0, 1, 2]) - - n_sub_tile = n_shifts * n_param_gates - flat_perturbations = tf.concat([ - tf.reshape( - tf.tile(tf.expand_dims(symbol_values, 0), - tf.stack([n_sub_tile, 1, 1])), - [sub_total_programs, n_symbols]), - tf.expand_dims(flat_shifts, axis=1) - ], - axis=1) - flat_ops = tf.reshape( - tf.tile(tf.expand_dims(pauli_sums, 0), tf.stack([n_sub_tile, 1, 1])), - [sub_total_programs, n_ops]) - - flat_num_samples = None - if num_samples is not None: - flat_num_samples = tf.reshape( - tf.tile(tf.expand_dims(num_samples, 0), - tf.stack([n_sub_tile, 1, 1])), [sub_total_programs, n_ops]) - - return flat_programs, flat_perturbations, flat_ops, flat_num_samples, \ - weights, coordinate_relocator - - -def _get_parse_pauli_sums(): - """Helper function to obtain the generator of the sampled list of the pauli - sum coefficients after parsing pauli sums.""" - - # TODO(jaeyoo) : this will be c++ op - def _parse_pauli_sums(pauli_sums, n_programs, n_ops): - """Helper function to parse given pauli sums to collect observable - coefficients. - Currently `cirq.PauliSum` is not subscriptable, which means it is not - possible to construct a uniform-shape tensor whose elements are - consistently matched to `cirq.PauliString` inside of given `PauliSum` - because the order of `PauliString`'s can be different whenever accessed. - So, the current version of _parse_pauli_sums only consider a `PauliSum` - to be sampled, not a `PauliString`. 
The observable coefficients are then - sum of the absolute value of coefficients of `PauliString`'s in the - `PauliSum`. - Args: - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - n_programs: `tf.Tensor` of the number of programs. - n_ops: `tf.Tensor` of the number of pauli sums. - Returns: - observable_coeff_: `tf.Tensor` of real numbers. This involves the - coefficients of Pauli sum terms of the first PauliString. - It is directly used to calculate probabilities. - [n_programs, n_ops] - """ - pauli_sums = util.from_tensor(pauli_sums) - - def get_pauli_sum_coeff(i): - - def get_i_pauli_sum_coeff(j): - - # Because PauliSum object is not subscriptable, use for-loop. - # pauli_sums[i][j] : j-th `PauliSum` of i-th program. - return tf.reduce_sum( - tf.abs([ - pstring.coefficient.real for pstring in pauli_sums[i][j] - ])) - - return tf.map_fn(get_i_pauli_sum_coeff, - tf.range(n_ops), - dtype=tf.float32) - - observable_coeff = tf.map_fn(get_pauli_sum_coeff, - tf.range(n_programs), - dtype=tf.float32) - - return observable_coeff - - def parse_pauli_sums_generator(pauli_sums, n_programs, n_ops): - """tf.py_function wrapper generator of _parse_programs().""" - # observable_coeff has the shape of [n_programs, n_ops] - observable_coeff = tf.py_function(func=_parse_pauli_sums, - inp=[ - tf.stop_gradient(pauli_sums), - tf.stop_gradient(n_programs), - tf.stop_gradient(n_ops), - ], - Tout=tf.float32) - return observable_coeff - - return parse_pauli_sums_generator - - -def _get_pdist_cost(op_coeff, uniform_sampling): - """Helper function to calculate probabilistic distributions of sampling - `PauliSum`'s. - correction_factor: `tf.Tensor` of real numbers for correction factors - ${1\over{prob(k)}}={{\sum_k |\gamma_k|}\over{|\gamma_k|}}$ where - $prob(k)$ is sampling probability of k-th observable PauliSum among `n_ops` - possible samples. 
- If uniform_sampling = True, it has integers equal to or less than - `n_ops` because only non-zero terms are considered. - It has the shape of [n_programs, n_ops]. - Args: - op_coeff: `tf.Tensor` of real numbers for cost Hamiltonian coefficients. - [n_programs, n_total_samples] - uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - Returns: - correction_factor_ops: `tf.Tensor` of real numbers for new observables - used in sampling process. - [1, n_ops]. - pdist: `tf.Tensor` of probabilistic distribution of given terms - $prob(k)$ - [1, n_ops]. - """ - if uniform_sampling: - ones = tf.ones_like(op_coeff) - pdist = tf.reduce_sum(ones, axis=0, keepdims=True) / tf.reduce_sum(ones) - else: - pdist = tf.reduce_sum(op_coeff, axis=0, - keepdims=True) / tf.reduce_sum(op_coeff) - - correction_factor_ops = tf.math.divide_no_nan( - tf.ones_like(pdist, dtype=tf.float32), tf.cast(pdist, dtype=tf.float32)) - return correction_factor_ops, pdist - - -# TODO(jaeyoo) : this will be c++ op -def stochastic_cost_preprocessor(pauli_sums, n_programs, n_ops, - uniform_sampling): - """Helper function to sample pauli_sums. - It can be two of the followings: - - uniform distribution prob(k) = 1/n_ops - - PauliSum coefficient-based probability distributions - Args: - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - n_programs: `tf.Tensor` of the number of programs. - n_ops: `tf.Tensor` of the number of pauli sums. - uniform_sampling: `tf.Tensor` of a boolean value to decide - probabilistic distribution of samplers inside. - Returns: - new_pauli_sums : `tf.Tensor` of strings of the newly sampled output - observables. - [n_programs, n_ops=1]. - cost_relocator: `tf.Tensor` of one-hot matrix with real numbers. - This is used to restore squeezed pauli_sums dimension at the - post-processing. - [n_programs, n_ops] - n_ops: `tf.Tensor` of the new number of pauli sums = 1. 
- """ - parser = _get_parse_pauli_sums() - op_coeff = parser(pauli_sums, n_programs, n_ops) - correction_factor_ops, pdist_ops = _get_pdist_cost(op_coeff, - uniform_sampling) - - sampled_idx = tf.transpose( - tf.random.categorical(tf.math.log(pdist_ops), n_programs), [1, 0]) - - # Construct one_hot matrix to restore the locations at the post-processing. - new_pauli_sums = tf.reshape( - tf.gather_nd(pauli_sums, sampled_idx, batch_dims=1), [n_programs, 1]) - cost_relocator = tf.reshape(tf.one_hot(sampled_idx, depth=n_ops), - [n_programs, n_ops]) - - # Set the output tensor shapes - correction_factor_ops = tf.reshape(correction_factor_ops, [1, n_ops]) - cost_relocator = cost_relocator * correction_factor_ops - n_ops = 1 - - return new_pauli_sums, cost_relocator, n_ops diff --git a/tensorflow_quantum/python/differentiators/stochastic_differentiator_util_test.py b/tensorflow_quantum/python/differentiators/stochastic_differentiator_util_test.py deleted file mode 100644 index 3eba3986f..000000000 --- a/tensorflow_quantum/python/differentiators/stochastic_differentiator_util_test.py +++ /dev/null @@ -1,489 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Basic tests for utility functions for SGDifferentiator""" - -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import sympy -import cirq - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import \ - stochastic_differentiator_util as sd_util -from tensorflow_quantum.python.differentiators import parameter_shift_util - - -def _example_circuit_helper(n_qubits, n_programs): - n_shifts = 2 - symbol_names = ['a', 'b'] - n_symbols = len(symbol_names) - sympy_symbols = [sympy.Symbol(s) for s in symbol_names] - coeff = [1.0, -2.0, 3.0, -4.0, 5.0] - q = cirq.GridQubit.rect(1, n_qubits) - c = cirq.Circuit([ - cirq.Rz(coeff[i] * sympy_symbols[i % 2]).on(q[i]) - for i in range(n_qubits) - ]) - circuit_batch = [c] * n_programs - symbol_values_array = np.array( - [[i for i, _ in enumerate(symbol_names)] for _ in range(n_programs)], - dtype=np.float32) - - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - return programs, symbol_values_tensor, n_symbols, n_shifts - - -def _example_ops_helper(n_programs, n_ops): - coeffs = [[1.0, -2.0, 3.0], [-4.0, 5.0]] - n_qubits = 3 - q = cirq.GridQubit.rect(1, n_qubits) - cirq_op_list = [cirq.X, cirq.Y, cirq.Z] - - def get_term_with_coefficient(coeff_list): - # Test with multiple `cirq.PauliString`'s - return [ - cirq.PauliString({ - q[i]: cirq_op_list[i], - }, coefficient=coeff) for i, coeff in enumerate(coeff_list) - ] - - psums = [[ - cirq.PauliSum.from_pauli_strings(get_term_with_coefficient(coeffs[i])) - for i in range(n_ops) - ] - for _ in range(n_programs)] - ops = util.convert_to_tensor(psums) - return ops, psums, coeffs - - -class SGDifferentiatorUtilTest(tf.test.TestCase, parameterized.TestCase): - """Test the stochastic_differentiator_util module.""" - - @parameterized.parameters([{'eps': 1e-7}]) - def 
test_get_parse_pauli_sums(self, eps): - """Input & output check for _get_parse_pauli_sums().""" - n_programs = 3 - n_ops = 2 - ops, psums, coeffs = _example_ops_helper(n_programs, n_ops) - - parser = sd_util._get_parse_pauli_sums() - - # input should be tensorflow tensor. - with self.assertRaises(ValueError): - # psums is used instead of ops. - parser(psums, n_programs, n_ops) - - observable_coeff = parser(ops, n_programs, n_ops) - # shape check - tf.assert_equal([n_programs, n_ops], tf.shape(observable_coeff)) - # value check - true_coeff = np.array( - [np.sum(np.abs(coeff_list)) for coeff_list in coeffs]) - self.assertAllClose(np.ones([n_programs, n_ops]) * true_coeff, - observable_coeff, - atol=eps, - rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [1e-6] - }))) - def test_get_pdist_cost(self, uniform_sampling, eps): - """Input & output check for _get_pdist_cost().""" - n_programs = 3 - n_ops = 2 - ops, psums, _ = _example_ops_helper(n_programs, n_ops) - - parser = sd_util._get_parse_pauli_sums() - - # input should be tensorflow tensor. - with self.assertRaises(ValueError): - # psums is used instead of ops. - parser(psums, n_programs, n_ops) - - observable_coeff = parser(ops, n_programs, n_ops) - - correction_factor_ops, pdist = \ - sd_util._get_pdist_cost(observable_coeff, uniform_sampling) - if uniform_sampling: - ground_truth_correction_factor = np.array([[2.0, 2.0]]) - ground_truth_pdist = np.array([[0.5, 0.5]]) - else: - ground_truth_correction_factor = np.array([[2.5, 5.0 / 3.0]]) - # pdist is weighted by each coefficients. 
- ground_truth_pdist = np.array([[0.4, 0.6]]) - - self.assertAllClose(ground_truth_correction_factor, - correction_factor_ops, - atol=eps, - rtol=eps) - self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [0.1] - }))) - def test_stochastic_cost_preprocessor(self, uniform_sampling, eps): - """Input & output check for stochastic_cost_preprocessor(). - The consistency of the estimated average gradient is checked by: - //benchmarks/scripts/differentiators:convergence_test""" - n_programs = 3 - n_ops = 2 - ops, psums, _ = _example_ops_helper(n_programs, n_ops) - - # all inputs should be tensorflow tensors. - with self.assertRaises(ValueError): - # psums is used instead of ops. - new_pauli_sums, cost_relocator, n_ops = \ - sd_util.stochastic_cost_preprocessor( - psums, n_programs, n_ops, uniform_sampling) - - new_pauli_sums, cost_relocator, new_n_ops = \ - sd_util.stochastic_cost_preprocessor( - ops, n_programs, n_ops, uniform_sampling) - # n_ops should be 1 because the only one op is sampled. - self.assertEqual(new_n_ops, 1, "n_ops should be 1") - ground_truth_shape = np.array([n_programs, new_n_ops], dtype=np.int32) - tf.assert_equal(ground_truth_shape, tf.shape(new_pauli_sums)) - ground_truth_shape = np.array([n_programs, n_ops], dtype=np.int32) - tf.assert_equal(ground_truth_shape, tf.shape(cost_relocator)) - - if uniform_sampling: - ground_truth_pdist = [[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]] - ground_truth_cost_relocator = [[2.0, 0.0], [0.0, 2.0]] - - else: - ground_truth_pdist = [[0.4, 0.6], [0.4, 0.6], [0.4, 0.6]] - ground_truth_cost_relocator = [[2.5, 0.0], [0.0, 5 / 3.0]] - - # Sampling ops and estimate probabilistic distribution of them. 
- cost_relocator_hist = np.zeros((n_programs, n_ops)) - n_samples = 700 - for _ in range(n_samples): - _, cost_relocator, _ = sd_util.stochastic_cost_preprocessor( - ops, n_programs, n_ops, uniform_sampling) - for i, cost_per_program in enumerate(cost_relocator): - loc = np.where( - np.isclose(ground_truth_cost_relocator, - cost_per_program))[0][0] - cost_relocator_hist[i][loc] += 1.0 - - pdist = cost_relocator_hist / n_samples - self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [1e-6] - }))) - def test_get_pdist_shifts(self, uniform_sampling, eps): - """value check of _get_pdist_shifts()""" - weights = np.array([[[[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]]], - [[[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]]]]) - # Transpose to [n_symbols, n_programs, n_shifts, n_param_gates] - weights = np.transpose(weights, [0, 1, 3, 2]) - # Reshape to [sub_total_programs, n_param_gates] - sub_total_programs = np.prod(weights.shape[:-1]) - n_param_gates = weights.shape[-1] - weights = np.reshape(weights, [sub_total_programs, n_param_gates]) - - corrected_weights, pdist = \ - sd_util._get_pdist_shifts(weights, uniform_sampling) - if uniform_sampling: - ground_truth_corrected_weights = np.array([[1.5, 4.5, 7.5], - [-1.5, -4.5, -7.5], - [1.5, 4.5, 7.5], - [-1.5, -4.5, -7.5], - [1.5, 4.5, 7.5], - [-1.5, -4.5, -7.5], - [-2.0, -4.0, 0.0], - [2.0, 4.0, -0.0], - [-2.0, -4.0, 0.0], - [2.0, 4.0, -0.0], - [-2.0, -4.0, 0.0], - [2.0, 4.0, -0.0]]) - ground_truth_pdist = np.array([[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0], - [0.5, 0.5, 0.0], 
[0.5, 0.5, 0.0], - [0.5, 0.5, 0.0], [0.5, 0.5, 0.0], - [0.5, 0.5, 0.0], [0.5, 0.5, 0.0]]) - else: - ground_truth_corrected_weights = np.array([[4.5, 4.5, 4.5], - [-4.5, -4.5, -4.5], - [4.5, 4.5, 4.5], - [-4.5, -4.5, -4.5], - [4.5, 4.5, 4.5], - [-4.5, -4.5, -4.5], - [-3.0, -3.0, 0.0], - [3.0, 3.0, -0.0], - [-3.0, -3.0, 0.0], - [3.0, 3.0, -0.0], - [-3.0, -3.0, 0.0], - [3.0, 3.0, -0.0]]) - # pdist is weighted by each coefficients. - ground_truth_pdist = np.array( - [[1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], - [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], - [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], - [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], - [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], - [1.0 / 9.0, 1.0 / 3.0, 5.0 / 9.0], [1.0 / 3.0, 2.0 / 3.0, 0.0], - [1.0 / 3.0, 2.0 / 3.0, 0.0], [1.0 / 3.0, 2.0 / 3.0, 0.0], - [1.0 / 3.0, 2.0 / 3.0, 0.0], [1.0 / 3.0, 2.0 / 3.0, 0.0], - [1.0 / 3.0, 2.0 / 3.0, 0.0]], - dtype=np.float32) - - self.assertAllClose(ground_truth_corrected_weights, - corrected_weights, - atol=eps, - rtol=eps) - self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [0.1] - }))) - def test_stochastic_generator_preprocessor(self, uniform_sampling, eps): - """Input & output check for stochastic_generator_preprocessor(). 
- The consistency of the estimated average gradient is checked by: - //benchmarks/scripts/differentiators:convergence_test""" - n_qubits = 5 - n_programs = 3 - symbol_names = ['a', 'b'] - - programs, symbol_values_tensor, n_symbols, n_shifts = \ - _example_circuit_helper(n_qubits, n_programs) - - new_programs_before, weights_before, shifts_before, \ - n_param_gates_before = parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values_tensor, n_symbols) - - new_programs, weights, shifts, n_param_gates = \ - sd_util.stochastic_generator_preprocessor( - new_programs_before, weights_before, shifts_before, n_programs, - n_symbols, n_param_gates_before, n_shifts, uniform_sampling) - - # n_param_gates should be 1 because the only one generator is sampled. - self.assertEqual(n_param_gates, 1, "n_param_gates should be 1") - ground_truth_shape = np.array( - [n_symbols, n_programs, n_param_gates, n_shifts], dtype=np.int32) - tf.assert_equal(ground_truth_shape, tf.shape(new_programs)) - tf.assert_equal(ground_truth_shape, tf.shape(weights)) - tf.assert_equal(ground_truth_shape, tf.shape(shifts)) - - # Estimate probability of sampling each shifts - ground_truth_shifts = [[[1.5707964, -1.5707964], - [0.5235988, -0.5235988], - [0.31415927, -0.31415927]], - [[0.21460181, 1.7853982], [0.6073009, 1.3926991], - [1.0, 1.0]]] - if uniform_sampling: - ground_truth_pdist = [[0.333333, 0.333333, 0.333333], - [0.5, 0.5, 0.0]] - else: - ground_truth_pdist = [[0.111111, 0.333333, 0.555555], - [0.333333, 0.666666, 0.0]] - - shifts_hist = np.zeros((n_symbols, n_programs)) - n_samples = 700 - for _ in range(n_samples): - _, _, shifts, _ = \ - sd_util.stochastic_generator_preprocessor( - new_programs_before, weights_before, shifts_before, - n_programs, n_symbols, n_param_gates_before, n_shifts, - uniform_sampling) - for i, shifts_per_symbol in enumerate(shifts): - for s in shifts_per_symbol: # per program - loc = np.where(np.isclose(ground_truth_shifts, s))[1][0] - 
shifts_hist[i][loc] += 1.0 - - shifts_pdist = shifts_hist / n_samples / n_programs - self.assertAllClose(ground_truth_pdist, - shifts_pdist, - atol=eps, - rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [1e-6] - }))) - def test_get_pdist_symbols(self, uniform_sampling, eps): - """value check of _get_pdist_symbols()""" - weights = np.array([[[[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]], - [[0.5, -0.5], [1.5, -1.5], [2.5, -2.5]]], - [[[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]], - [[-1., 1.], [-2., 2.], [0., -0.]]]]) - # Transpose to [n_param_gates, n_shifts, n_programs, n_symbols] - weights = np.transpose(weights, [1, 2, 3, 0]) - # Reshape to [sub_total_programs, n_param_gates] - sub_total_programs = np.prod(weights.shape[:-1]) - n_symbols = weights.shape[-1] - weights = np.reshape(weights, [sub_total_programs, n_symbols]) - - corrected_weights, pdist = sd_util._get_pdist_symbols( - weights, uniform_sampling) - # In this case, both pdist's of uniform_sampling=True & False are equal. 
- ground_truth_corrected_weights = np.array([[0.8333333, - -2.5], [-0.8333333, 2.5], - [2.5, -5.0], [-2.5, 5.0], - [4.1666665, 0.0], - [-4.1666665, -0.0], - [0.8333333, - -2.5], [-0.8333333, 2.5], - [2.5, -5.0], [-2.5, 5.0], - [4.1666665, 0.0], - [-4.1666665, -0.0], - [0.8333333, -2.5], - [-0.8333333, 2.5], - [2.5, -5.0], [-2.5, 5.0], - [4.1666665, 0.0], - [-4.1666665, -0.0]]) - ground_truth_pdist = np.array([[0.6, 0.4]]) - - self.assertAllClose(ground_truth_corrected_weights, - corrected_weights, - atol=eps, - rtol=eps) - self.assertAllClose(ground_truth_pdist, pdist, atol=eps, rtol=eps) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(**{ - 'uniform_sampling': [True, False], - 'eps': [0.1] - }))) - def test_stochastic_coordinate_preprocessor(self, uniform_sampling, eps): - """Input & output check for stochastic_coordinate_preprocessor(). - The consistency of the estimated average gradient is checked by: - //benchmarks/scripts/differentiators:convergence_test""" - n_qubits = 5 - n_programs = 3 - symbol_names = ['a', 'b'] - - programs, symbol_values_tensor, n_symbols, n_shifts = \ - _example_circuit_helper(n_qubits, n_programs) - - n_ops = 2 - ops, psums, _ = _example_ops_helper(n_programs, n_ops) - - new_programs, weights_before, shifts, n_param_gates = \ - parameter_shift_util.parse_programs( - programs, symbol_names, symbol_values_tensor, n_symbols) - - # all inputs should be tensorflow tensors. - with self.assertRaises(ValueError): - # symbol_values_array is used instead of symbol_values_tensor. - sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values_tensor.numpy(), ops, weights_before, - shifts, n_programs, n_symbols, n_param_gates, n_shifts, n_ops, - uniform_sampling) - # psums is used instead of ops. 
- sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values_tensor, psums, weights_before, - shifts, n_programs, n_symbols, n_param_gates, n_shifts, n_ops, - uniform_sampling) - - flat_programs, flat_perturbations, flat_ops, _, weights, \ - coordinate_relocator = \ - sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values_tensor, ops, weights_before, - shifts, n_programs, n_symbols, n_param_gates, n_shifts, - n_ops, uniform_sampling) - - # n_symbols should not be 1 because it doesn't fit the input format of - # expectation_op or sampling_op. - total_programs = n_programs * n_param_gates * n_shifts - # flat_programs should have n_programs * n_param_gates * n_shifts * 1 - # because only one symbol is sampled now. - self.assertAllClose([total_programs], - tf.shape(flat_programs), - atol=eps, - rtol=eps) - # perturbation symbol is added, so the number of symbol should be - # n_symbol+1 - self.assertAllClose([total_programs, n_symbols + 1], - tf.shape(flat_perturbations), - atol=eps, - rtol=eps) - # shape check on flat_ops. 
- self.assertAllClose([total_programs, n_ops], - tf.shape(flat_ops), - atol=eps, - rtol=eps) - # resampled weights is in - # [n_symbols, n_param_gates, n_shifts, n_programs] - self.assertAllClose([n_symbols, n_param_gates, n_shifts, n_programs], - tf.shape(weights), - atol=eps, - rtol=eps) - # resampled coordinate_relocator is in [total_programs, n_symbols] - self.assertAllClose([total_programs, n_symbols], - tf.shape(coordinate_relocator), - atol=eps, - rtol=eps) - - # Estimate probability of sampling each shifts - ground_truth_shifts = [[ - 1.5707964, -1.5707964, 0.5235988, -0.5235988, 0.31415927, - -0.31415927 - ], [0.21460181, 1.7853982, 0.6073009, 1.3926991, 1.0, 1.0]] - - ground_truth_pdist = [0.6, 0.4] - - shifts_hist = np.zeros((n_symbols,)) - n_samples = 700 - cnt = 0.0 - for _ in range(n_samples): - _, flat_perturbations, _, _, _, _ = \ - sd_util.stochastic_coordinate_preprocessor( - new_programs, symbol_values_tensor, ops, weights_before, - shifts, n_programs, n_symbols, n_param_gates, n_shifts, - n_ops, uniform_sampling) - - for s in flat_perturbations[:, -1]: # See only shift symbols. - sym = np.where(np.isclose(ground_truth_shifts, s))[0][0] - shifts_hist[sym] += 1.0 - cnt += 1.0 - - shifts_pdist = shifts_hist / cnt - self.assertAllClose(ground_truth_pdist, - shifts_pdist, - atol=eps, - rtol=eps) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/layers/BUILD b/tensorflow_quantum/python/layers/BUILD deleted file mode 100644 index fd4e75c2e..000000000 --- a/tensorflow_quantum/python/layers/BUILD +++ /dev/null @@ -1,6 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) diff --git a/tensorflow_quantum/python/layers/__init__.py b/tensorflow_quantum/python/layers/__init__.py deleted file mode 100644 index f3ffbd65b..000000000 --- a/tensorflow_quantum/python/layers/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module definitions for tensorflow_quantum.python.layers.*""" -# Utility layers. -from tensorflow_quantum.python.layers.circuit_construction import ( - AddCircuit,) -# Executor layers. -from tensorflow_quantum.python.layers.circuit_executors import ( - Expectation, - Sample, - State, - SampledExpectation, -) -# High level layers. -from tensorflow_quantum.python.layers.high_level import ( - ControlledPQC, - PQC, -) diff --git a/tensorflow_quantum/python/layers/circuit_construction/BUILD b/tensorflow_quantum/python/layers/circuit_construction/BUILD deleted file mode 100644 index 4a1a511a7..000000000 --- a/tensorflow_quantum/python/layers/circuit_construction/BUILD +++ /dev/null @@ -1,25 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_library( - name = "elementary", - srcs = ["elementary.py"], - deps = [ - "//tensorflow_quantum/core/ops:tfq_utility_ops_py", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "elementary_test", - srcs = ["elementary_test.py"], - python_version = "PY3", - deps = [ - ":elementary", - "//tensorflow_quantum/python:util", - ], -) diff --git a/tensorflow_quantum/python/layers/circuit_construction/__init__.py b/tensorflow_quantum/python/layers/circuit_construction/__init__.py deleted file mode 100644 index 0c5ded5c5..000000000 --- a/tensorflow_quantum/python/layers/circuit_construction/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module for tfq.python.layers.circuit_construction.*""" - -# pylint: disable=line-too-long -from tensorflow_quantum.python.layers.circuit_construction.elementary import AddCircuit -# pylint: enable=line-too-long \ No newline at end of file diff --git a/tensorflow_quantum/python/layers/circuit_construction/elementary.py b/tensorflow_quantum/python/layers/circuit_construction/elementary.py deleted file mode 100644 index 2d3b7f82b..000000000 --- a/tensorflow_quantum/python/layers/circuit_construction/elementary.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Elementary layers, such as the AddCircuit layer.""" -import numpy as np -import tensorflow as tf -import cirq - -from tensorflow_quantum.core.ops import tfq_utility_ops -from tensorflow_quantum.python import util - - -class AddCircuit(tf.keras.layers.Layer): - """A layer that pre/appends a sequence of gates to the input circuit tensor. - - This layer allows for an arbitrary `cirq.Circuit` (or list of circuits of - equal length to the input) to be appended or prepended to the list of input - circuits. - - - >>> qubits = cirq.GridQubit.rect(1, 4) - >>> add = tfq.layers.AddCircuit() - >>> output = add( - ... [cirq.Circuit(cirq.Y(qubits[0])), cirq.Circuit(cirq.Z(qubits[0]))] - ... append = cirq.Circuit(cirq.Y(qubits[0])) - ... ) - >>> # Now we have a layer that would append a single Y gate to any inputs. 
- >>> tfq.from_tensor(output) - [cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - ]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - ])] - - - Note: When specifying a new layer for a *compiled* `tf.keras.Model` using - something like - `tfq.layers.AddCircuit()(cirq.Circuit(...), append/prepend=cirq.Circuit())` - please be sure to instead use - `tfq.layers.AddCircuit()(circuit_input, append/prepend=cirq.Circuit())` - where `circuit_input` is a `tf.keras.Input` that is filled with - `tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime - (`append/prepend` can still remain a `cirq.Circuit` object). This - is because compiled Keras models require non keyword layer `call` inputs to - be traceable back to a `tf.keras.Input`. - - """ - - def __init__(self, **kwargs): - """Instantiate this layer.""" - super().__init__(**kwargs) - - def call(self, inputs, *, append=None, prepend=None): - """Keras call method. - - Input options: - - 1. `inputs` can be a single `cirq.Circuit`, a Python `list` of - `cirq.Circuit`s or a pre-converted `tf.Tensor` of - `cirq.Circuit`s. - - 2. `append` can be a Python `list` of `cirq.Circuit`s or a - pre-converted `tf.Tensor` of type `str` (containing circuits). - - 3. `prepend` can be a Python `list` of `cirq.Circuit`s or a - pre-converted `tf.Tensor` of type `str` (containing circuits). - - Output shape: - `tf.Tensor` of shape [input size] containing circuits with append - circuits appended or prepend circuits prepended. - - """ - # inputs is circuit. - - if append is None and prepend is None: - raise ValueError("Values must be provided for append or prepend.") - - if append is not None and prepend is not None: - raise ValueError( - "Values cannot be given for both append and prepend.") - - # Ingest input circuit(s). 
- if isinstance(inputs, cirq.Circuit): - inputs = util.convert_to_tensor([inputs]) - - if isinstance(inputs, (tuple, list, np.ndarray)): - inputs = util.convert_to_tensor(inputs) - - if not tf.is_tensor(inputs): - raise TypeError("Circuits cannot be parsed with given input:" - " ".format(inputs)) - - batch_dim = tf.gather(tf.shape(inputs), 0) - - # Ingest append circuit(s): - if append is not None: - if isinstance(append, cirq.Circuit): - append = tf.tile(util.convert_to_tensor([append]), [batch_dim]) - if isinstance(append, (tuple, list, np.ndarray)): - append = util.convert_to_tensor(append) - if not tf.is_tensor(append): - raise TypeError( - "Append circuits cannot be parsed with given input:" - " ".format(append)) - - return tfq_utility_ops.tfq_append_circuit(inputs, append) - - # Otherwise ingest prepend circuits. - if isinstance(prepend, cirq.Circuit): - prepend = tf.tile(util.convert_to_tensor([prepend]), [batch_dim]) - if isinstance(prepend, (tuple, list, np.ndarray)): - prepend = util.convert_to_tensor(prepend) - if not tf.is_tensor(prepend): - raise TypeError( - "Prepend circuits cannot be parsed with given input:" - " ".format(prepend)) - - return tfq_utility_ops.tfq_append_circuit(prepend, inputs) diff --git a/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py b/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py deleted file mode 100644 index afb7650e2..000000000 --- a/tensorflow_quantum/python/layers/circuit_construction/elementary_test.py +++ /dev/null @@ -1,120 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for the elementary layers.""" -import tensorflow as tf -import cirq -import sympy - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.layers.circuit_construction import elementary - - -class AddCircuitTest(tf.test.TestCase): - """Test AddCircuit works with various inputs.""" - - def test_addcircuit_instantiate(self): - """Test that a addcircuit layer can be instantiated correctly.""" - elementary.AddCircuit() - - def test_addcircuit_keras_error(self): - """Test that addcircuit layer errors in keras call.""" - add = elementary.AddCircuit() - circuit = cirq.Circuit(cirq.X(cirq.GridQubit(0, 0))) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - add(circuit, append='junk') - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - add(circuit, prepend='junk') - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - add('junk', prepend=circuit) - - with self.assertRaisesRegex(ValueError, - expected_regex="append or prepend"): - add(circuit) - - with self.assertRaisesRegex(ValueError, - expected_regex="append and prepend"): - add(circuit, append=circuit, prepend=circuit) - - def test_addcircuit_op_error(self): - """Test that addcircuit will error inside of ops correctly.""" - add = elementary.AddCircuit() - circuit = cirq.Circuit(cirq.X(cirq.GridQubit(0, 0))) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex="matching sizes"): - # 
append is wrong shape. - add(circuit, append=[circuit, circuit]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex="matching sizes"): - # prepend is wrong shape. - add(circuit, prepend=[circuit, circuit]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex="rank 1"): - # prepend is wrong shape. - add(circuit, prepend=[[circuit]]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex="rank 1"): - # append is wrong shape. - add(circuit, append=[[circuit]]) - - with self.assertRaisesRegex(tf.errors.InvalidArgumentError, - expected_regex="rank 1"): - # circuit is wrong shape. - add([[circuit]], append=[circuit]) - - def test_addcircuit_simple_inputs(self): - """Test the valid cases.""" - add = elementary.AddCircuit() - circuit = cirq.Circuit( - cirq.X(cirq.GridQubit(0, 0))**(sympy.Symbol('alpha') * sympy.pi)) - add([circuit, circuit], append=circuit) - add([circuit, circuit], prepend=circuit) - add(circuit, append=circuit) - add(circuit, prepend=circuit) - - def test_addcircuit_modify(self): - """Test that a addcircuit layer correctly modifies input circuits.""" - bits = cirq.GridQubit.rect(1, 20) - circuit_a = cirq.testing.random_circuit(bits, 10, 0.9, - util.get_supported_gates()) - circuit_b = cirq.testing.random_circuit(bits, 10, 0.9, - util.get_supported_gates()) - - expected_append = util.convert_to_tensor([circuit_a + circuit_b]) - expected_prepend = util.convert_to_tensor([circuit_b + circuit_a]) - - append_layer = elementary.AddCircuit() - prepend_layer = elementary.AddCircuit() - - actual_append = util.convert_to_tensor( - util.from_tensor(append_layer(circuit_a, append=circuit_b))) - actual_prepend = util.convert_to_tensor( - util.from_tensor(prepend_layer(circuit_a, prepend=circuit_b))) - - self.assertEqual(expected_append.numpy()[0], actual_append.numpy()[0]) - self.assertEqual(expected_prepend.numpy()[0], actual_prepend.numpy()[0]) - - -if __name__ == "__main__": - 
tf.test.main() diff --git a/tensorflow_quantum/python/layers/circuit_executors/BUILD b/tensorflow_quantum/python/layers/circuit_executors/BUILD deleted file mode 100644 index 0491ef137..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/BUILD +++ /dev/null @@ -1,90 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -py_library( - name = "state", - srcs = ["state.py"], - deps = [ - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_library( - name = "expectation", - srcs = ["expectation.py"], - deps = [ - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:differentiator", - "//tensorflow_quantum/python/differentiators:linear_combination", - ], -) - -py_library( - name = "sampled_expectation", - srcs = ["sampled_expectation.py"], - deps = [ - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:differentiator", - "//tensorflow_quantum/python/differentiators:linear_combination", - "//tensorflow_quantum/python/differentiators:parameter_shift", - ], -) - -py_library( - name = "sample", - srcs = ["sample.py"], - deps = [ - "//tensorflow_quantum/core/ops:circuit_execution_ops", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "state_test", - srcs = ["state_test.py"], - python_version = "PY3", - deps = [ - ":state", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "expectation_test", - srcs = ["expectation_test.py"], - python_version = "PY3", - deps = [ - ":expectation", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:linear_combination", - ], -) - -py_test( - name = "sampled_expectation_test", - timeout = "eternal", - srcs = 
["sampled_expectation_test.py"], - python_version = "PY3", - deps = [ - ":sampled_expectation", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:linear_combination", - ], -) - -py_test( - name = "sample_test", - srcs = ["sample_test.py"], - python_version = "PY3", - deps = [ - ":sample", - "//tensorflow_quantum/python:util", - ], -) diff --git a/tensorflow_quantum/python/layers/circuit_executors/__init__.py b/tensorflow_quantum/python/layers/circuit_executors/__init__.py deleted file mode 100644 index 4d445d470..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Module for tfq.python.layers.circuit_executors.*""" - -# pylint: disable=line-too-long -from tensorflow_quantum.python.layers.circuit_executors.expectation import Expectation -from tensorflow_quantum.python.layers.circuit_executors.sample import Sample -from tensorflow_quantum.python.layers.circuit_executors.state import State -from tensorflow_quantum.python.layers.circuit_executors.sampled_expectation import SampledExpectation -# pylint: enable=line-too-long diff --git a/tensorflow_quantum/python/layers/circuit_executors/expectation.py b/tensorflow_quantum/python/layers/circuit_executors/expectation.py deleted file mode 100644 index be014a081..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/expectation.py +++ /dev/null @@ -1,357 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A tf.keras.layer that ingests programs and outputs expectation values.""" -import numpy as np -import sympy -import tensorflow as tf - -import cirq -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python.differentiators import linear_combination -from tensorflow_quantum.python.differentiators import differentiator as diff -from tensorflow_quantum.python import util - - -class Expectation(tf.keras.layers.Layer): - """A Layer that calculates an expectation value. - - Given an input circuit and set of parameter values, prepare a quantum state - and output expectation values taken on that state with respect to some - observables to the tensorflow graph. - - - First define a simple helper function for generating a parametrized - quantum circuit that we will use throughout: - - >>> def _gen_single_bit_rotation_problem(bit, symbols): - ... \"""Generate a toy problem on 1 qubit.\""" - ... starting_state = [0.123, 0.456, 0.789] - ... circuit = cirq.Circuit( - ... cirq.Rx(starting_state[0])(bit), - ... cirq.Ry(starting_state[1])(bit), - ... cirq.Rz(starting_state[2])(bit), - ... cirq.Rz(symbols[2])(bit), - ... cirq.Ry(symbols[1])(bit), - ... cirq.Rx(symbols[0])(bit) - ... ) - ... return circuit - - - In quantum machine learning there are two very common use cases that - align with keras layer constructs. The first is where the circuits - represent the input data points (see the note at the bottom about - using compiled models): - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] - >>> circuit_list = [ - ... _gen_single_bit_rotation_problem(bit, symbols), - ... cirq.Circuit( - ... cirq.Z(bit) ** symbols[0], - ... cirq.X(bit) ** symbols[1], - ... cirq.Z(bit) ** symbols[2] - ... ), - ... cirq.Circuit( - ... cirq.X(bit) ** symbols[0], - ... 
cirq.Z(bit) ** symbols[1], - ... cirq.X(bit) ** symbols[2] - ... ) - ... ] - >>> expectation_layer = tfq.layers.Expectation() - >>> output = expectation_layer( - ... circuit_list, symbol_names=symbols, operators = ops) - >>> # Here output[i][j] corresponds to the expectation of all the ops - >>> # in ops w.r.t circuits[i] where keras managed variables are - >>> # placed in the symbols 'x', 'y', 'z'. - >>> tf.shape(output) - tf.Tensor([3 2], shape=(2,), dtype=int32) - - - Here, different `cirq.Circuit` instances sharing the common symbols 'x', - 'y' and 'z' are used as input. Keras uses the `symbol_names` - argument to map Keras managed variables to these circuits constructed - with `sympy.Symbol`s. Note that you used a Python `list` containing your - circuits, you could also specify a `tf.keras.Input` layer or any - tensorlike object to specify the circuits you would like fed to the layer - at runtime. - - - Another common use case is where there is a fixed circuit and the - expectation operators vary: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] - >>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> expectation_layer = tfq.layers.Expectation() - >>> output = expectation_layer( - ... fixed_circuit, - ... symbol_names=symbols, - ... operators=ops, - ... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) - >>> # Here output[i][j] corresponds to - >>> # the expectation of operators[i][j] w.r.t the circuit where - >>> # variable values are managed by keras and store numbers in - >>> # the symbols 'x', 'y', 'z'. - >>> tf.shape(output) - tf.Tensor([1 2], shape=(2,), dtype=int32) - - - Note that in the above examples you used a `cirq.Circuit` object and a list - of `cirq.PauliSum` objects as inputs to your layer. 
To allow for varying - inputs your could change the line in the above code to: - `expectation_layer(circuit_inputs, symbol_names=symbols, operators=ops)` - with `circuit_inputs` is `tf.keras.Input(shape=(), dtype=tf.dtypes.string)` - to allow you to pass in different circuits in a compiled model. Lastly - you also supplied a `tf.keras.initializer` to the `initializer` argument. - This argument is optional in the case that the layer itself will be managing - the symbols of the circuit and not have them fed in from somewhere else in - the model. - - - There are also some more complex use cases. Notably these use cases all - make use of the `symbol_values` parameter that causes the - `Expectation` layer to stop managing the `sympy.Symbol`s in the quantum - circuits for the user and instead require them to supply input - values themselves. Lets look at the case where there - is a single fixed circuit, some fixed operators and symbols that must be - common to all circuits: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> ops = [cirq.Z(bit), cirq.X(bit)] - >>> circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> values = [[1,1,1], [2,2,2], [3,3,3]] - >>> expectation_layer = tfq.layers.Expectation() - >>> output = expectation_layer( - ... circuit, - ... symbol_names=symbols, - ... symbol_values=values, - ... operators=ops) - >>> # output[i][j] = The expectation of operators[j] with - >>> # values[i] placed into the symbols of the circuit - >>> # with the order specified by symbol_names. - >>> # so output[1][2] = The expectation of your circuit with parameter - >>> # values [2,2,2] w.r.t Pauli X. 
- >>> output - tf.Tensor( - [[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - - - Here is a simple model that uses this particular input signature of - `tfq.layers.Expectation`, that learns to undo the random rotation - of the qubit: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> control_input = tf.keras.Input(shape=(1,)) - >>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - >>> d1 = tf.keras.layers.Dense(10)(control_input) - >>> d2 = tf.keras.layers.Dense(3)(d1) - >>> expectation = tfq.layers.Expectation()( - ... circuit_inputs, # See note below! - ... symbol_names=symbols, - ... symbol_values=d2, - ... operators=cirq.Z(bit)) - >>> data_in = np.array([[1], [0]], dtype=np.float32) - >>> data_out = np.array([[1], [-1]], dtype=np.float32) - >>> model = tf.keras.Model( - ... inputs=[circuit_inputs, control_input], outputs=expectation) - >>> model.compile( - ... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), - ... loss=tf.keras.losses.mean_squared_error) - >>> history = model.fit( - ... x=[tfq.convert_to_tensor([circuit] * 2), data_in], - ... y=data_out, - ... epochs=100) - - - For an example featuring this layer, please check out `Quantum sensing` - in our dev website http://www.tensorflow.org/quantum/tutorials. - - Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed - Python `list` objects. In addition to this they can also be fed `tf.Tensor` - inputs, meaning that you can input all of these things from other Tensor - objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). 
- - - Note: When specifying a new layer for a *compiled* `tf.keras.Model` using - something like `tfq.layers.Expectation()(cirq.Circuit(...), ...)` please - be sure to instead use `tfq.layers.Expectation()(circuit_input, ...)` - where `circuit_input` is a `tf.keras.Input` that is filled with - `tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This - is because compiled Keras models require non keyword layer `call` inputs to - be traceable back to a `tf.keras.Input`. - - """ - - def __init__(self, backend=None, differentiator=None, **kwargs): - """Instantiate this Layer. - - Create a layer that will output expectation values gained from - simulating a quantum circuit. - - Args: - backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. - differentiator: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit `tfq.differentiators.Differentiator` and - implements `differentiate_analytic` method. Defaults to None, - which uses `linear_combination.ForwardDifference()`. - - """ - super().__init__(**kwargs) - - # Ingest backend. - if not isinstance(backend, cirq.SimulatesFinalState) and \ - isinstance(backend, cirq.Sampler): - raise TypeError("Backend implements cirq.Sampler but not" - " cirq.SimulatesFinalState. Please use " - "SampledExpectation instead.") - - # Ingest differentiator. 
- if differentiator is None: - differentiator = linear_combination.ForwardDifference() - - if not isinstance(differentiator, diff.Differentiator): - raise TypeError("Differentiator must inherit from " - "tfq.differentiators.Differentiator") - - self._expectation_op = differentiator.generate_differentiable_op( - analytic_op=circuit_execution_ops.get_expectation_op( - backend=backend)) - - self._w = None - - def call(self, - inputs, - *, - symbol_names=None, - symbol_values=None, - operators=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)): - """Keras call function.""" - - # inputs is the circuit(s). - values_empty = False - if symbol_names is None: - symbol_names = [] - if symbol_values is None: - values_empty = True - symbol_values = [[]] - - # Ingest and promote symbol_names. - if isinstance(symbol_names, (list, tuple, np.ndarray)): - if not all( - isinstance(x, (str, sympy.Symbol)) for x in symbol_names): - raise TypeError("Each element in symbol_names" - " must be a string or sympy.Symbol.") - symbol_names = [str(s) for s in symbol_names] - if not len(symbol_names) == len(list(set(symbol_names))): - raise ValueError("All elements of symbol_names must be unique.") - symbol_names = tf.identity( - tf.convert_to_tensor(symbol_names, dtype=tf.dtypes.string)) - - if not tf.is_tensor(symbol_names): - raise TypeError("symbol_names cannot be parsed to string" - " tensor given input: ".format(symbol_names)) - - # Ingest and promote symbol_values. - if isinstance(symbol_values, (list, tuple, np.ndarray)): - symbol_values = tf.convert_to_tensor(symbol_values, - dtype=tf.dtypes.float32) - - if not tf.is_tensor(symbol_values): - raise TypeError("symbol_values cannot be parsed to float32" - " tensor given input: ".format(symbol_values)) - - symbol_batch_dim = tf.gather(tf.shape(symbol_values), 0) - - # Ingest and promote circuits. - # Would be nice to support python circuits *fully* in this layer. 
- if isinstance(inputs, cirq.Circuit): - # process single circuit. - inputs = tf.tile(util.convert_to_tensor([inputs]), - [symbol_batch_dim]) - - elif isinstance(inputs, (list, tuple, np.ndarray)): - # process list of circuits. - inputs = util.convert_to_tensor(inputs) - - if not tf.is_tensor(inputs): - raise TypeError("circuits cannot be parsed with given input:" - " ".format(inputs)) - - circuit_batch_dim = tf.gather(tf.shape(inputs), 0) - - # Ingest and promote operators. - if operators is None: - raise RuntimeError("Value for operators not provided. operators " - "must be one of cirq.PauliSum, cirq.PauliString" - ", or a list/tensor/tuple containing " - "cirq.PauliSum or cirq.PauliString.") - - op_needs_tile = False - if isinstance(operators, (cirq.PauliSum, cirq.PauliString)): - # If we are given a single operator promote it to a list and tile - # it up to size. - operators = [[operators]] - op_needs_tile = True - - if isinstance(operators, (list, tuple, np.ndarray)): - if not isinstance(operators[0], (list, tuple, np.ndarray)): - # If we are given a flat list of operators. tile them up - # to match the batch size of circuits. - operators = [operators] - op_needs_tile = True - operators = util.convert_to_tensor(operators) - - if op_needs_tile: - # Don't tile up if the user gave a python list that was precisely - # the correct size to match circuits outer batch dim. - operators = tf.tile(operators, [circuit_batch_dim, 1]) - - if not tf.is_tensor(operators): - raise TypeError("operators cannot be parsed to string tensor" - " given input: ".format(operators)) - - if values_empty: - # No symbol_values were provided. So we assume the user wants us - # to create and manage variables for them. We will do so by - # creating a weights variable and tiling it up to appropriate - # size of [batch, num_symbols]. - - if self._w is None: - # don't re-add variable. 
- self._w = self.add_weight(name='circuit_learnable_parameters', - shape=[len(symbol_names)], - initializer=initializer) - - symbol_values = tf.tile(tf.expand_dims(self._w, axis=0), - tf.stack([circuit_batch_dim, 1])) - - return self._expectation_op(inputs, symbol_names, symbol_values, - operators) diff --git a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py deleted file mode 100644 index 4f5a7384f..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/expectation_test.py +++ /dev/null @@ -1,374 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for tensorflow_quantum.layers.circuit_executors.expectation.""" -import numpy as np -import sympy -import tensorflow as tf - -import cirq -from tensorflow_quantum.python.layers.circuit_executors import expectation -from tensorflow_quantum.python.differentiators import linear_combination -from tensorflow_quantum.python import util - - -def _gen_single_bit_rotation_problem(bit, symbols): - """Generate a toy problem on 1 qubit.""" - starting_state = np.random.uniform(0, 2 * np.pi, 3) - circuit = cirq.Circuit( - cirq.Rx(starting_state[0])(bit), - cirq.Ry(starting_state[1])(bit), - cirq.Rz(starting_state[2])(bit), - cirq.Rz(symbols[2])(bit), - cirq.Ry(symbols[1])(bit), - cirq.Rx(symbols[0])(bit)) - - return circuit - - -class ExpectationTest(tf.test.TestCase): - """Basic tests for the expectation layer.""" - - def test_expectation_instantiate(self): - """Test that Expectation instantiates correctly.""" - expectation.Expectation() - expectation.Expectation(backend=cirq.Simulator()) - expectation.Expectation( - differentiator=linear_combination.ForwardDifference()) - - def test_expectation_instantiate_error(self): - """Test that Expectation errors with bad inputs.""" - - class MySampler(cirq.Sampler): - """Class to test sampler detection in Expectation.""" - - def run_sweep(self): - """do nothing.""" - return - - with self.assertRaisesRegex(TypeError, - expected_regex="SampledExpectation"): - expectation.Expectation(backend=MySampler()) - - with self.assertRaisesRegex( - TypeError, expected_regex="SimulatesFinalState or None"): - expectation.Expectation(backend='junk') - - with self.assertRaisesRegex( - TypeError, expected_regex="tfq.differentiators.Differentiator"): - expectation.Expectation(differentiator='junk') - - def test_expectation_type_inputs_error(self): - """Test that expectation errors within Keras call.""" - - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') 
- test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - with self.assertRaisesRegex(TypeError, - expected_regex="string or sympy.Symbol"): - expectation.Expectation()(symb_circuit, - symbol_names=[symbol, 5.0], - operators=test_psum) - - with self.assertRaisesRegex(ValueError, - expected_regex="must be unique."): - expectation.Expectation()(symb_circuit, - symbol_names=[symbol, symbol], - operators=test_psum) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - expectation.Expectation()(symb_circuit, - symbol_names='junk', - operators=test_psum) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - expectation.Expectation()(symb_circuit, - symbol_names=[symbol], - symbol_values='junk', - operators=test_psum) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - expectation.Expectation()('junk', - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum) - - with self.assertRaisesRegex(RuntimeError, - expected_regex="operators not provided"): - expectation.Expectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]]) - - with self.assertRaisesRegex(Exception, - expected_regex="Unknown initializer"): - expectation.Expectation()(reg_circuit, - operators=test_psum, - initializer='junk') - - def test_expectation_op_error(self): - """Test that expectation errors within underlying ops correctly.""" - - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') - test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - with self.assertRaisesRegex(Exception, - expected_regex="Could not find symbol"): - # No symbol matchups. 
- expectation.Expectation()([symb_circuit], operators=test_psum) - - with self.assertRaisesRegex(Exception, - expected_regex="Unparseable proto"): - # Proto is unparseable. - expectation.Expectation()([reg_circuit], - operators=tf.convert_to_tensor( - [['bad_operator']])) - - with self.assertRaisesRegex(Exception, expected_regex="rank 2"): - # Operators has wrong rank. - expectation.Expectation()([reg_circuit], - operators=util.convert_to_tensor( - [test_psum])) - - with self.assertRaisesRegex(Exception, expected_regex="rank 2"): - # symbol_values has wrong rank. - expectation.Expectation()([symb_circuit], - symbol_names=[symbol], - symbol_values=[0.5], - operators=test_psum) - - with self.assertRaisesRegex(Exception, expected_regex="do not match."): - # Wrong batch size for pauli operators. - expectation.Expectation()(symb_circuit, - symbol_names=[symbol], - operators=[[test_psum], [test_psum]]) - - def test_static_cases(self): - """Run inputs through in complex cases.""" - - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') - test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - # Passing a 2d operators input requires a 1d circuit input. - expectation.Expectation()([reg_circuit, reg_circuit], - operators=[[test_psum, test_psum], - [test_psum, test_psum]]) - - # Passing 2d operators along with other inputs. - expectation.Expectation()([symb_circuit, symb_circuit], - symbol_names=[symbol], - operators=[[test_psum, test_psum], - [test_psum, test_psum]]) - expectation.Expectation()([symb_circuit, symb_circuit], - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=[[test_psum, test_psum], - [test_psum, test_psum]]) - - # Ensure tiling up of circuits works as expected. 
- expectation.Expectation()(reg_circuit, operators=test_psum) - expectation.Expectation()(reg_circuit, operators=[test_psum, test_psum]) - - # Ensure tiling up of symbol_values works as expected. - expectation.Expectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=test_psum) - expectation.Expectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum) - - def test_expectation_simple_tf_train(self): - """Train a layer using standard tf (not keras). - This is a subtle test that will work since we don't use keras compile. - """ - bit = cirq.GridQubit(0, 0) - circuit = \ - cirq.Circuit(cirq.Rx(sympy.Symbol('theta'))(bit)) - op = cirq.Z(bit) - layer = expectation.Expectation() - optimizer = tf.optimizers.Adam(learning_rate=0.05) - for _ in range(200): - with tf.GradientTape() as tape: - circuit_out = layer(circuit, - symbol_names=['theta'], - operators=op) - mse = tf.square(tf.reduce_sum(tf.subtract(circuit_out, -1))) - grads = tape.gradient(mse, layer.trainable_weights) - optimizer.apply_gradients(zip(grads, layer.trainable_weights)) - self.assertAllClose(mse.numpy(), 0, atol=1e-3) - - -class ExpectationFunctionalTests(tf.test.TestCase): - """Test hybrid/integrated models that include an expectation layer.""" - - def test_simple_param_value_input(self): - """Train a densely connected hybrid model. - - This model will put a qubit in the zero or one state from a random state - given the input zero or one. This tests the input signature: - Expectation([input_value_batch]). 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x y z') - circuit = _gen_single_bit_rotation_problem(bit, symbols) - - inputs = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float64) - datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - l1 = tf.keras.layers.Dense(10)(inputs) - l2 = tf.keras.layers.Dense(3)(l1) - outputs = expectation.Expectation()(datum, - symbol_names=symbols, - operators=cirq.Z(bit), - symbol_values=l2) - model = tf.keras.Model(inputs=[datum, inputs], outputs=outputs) - - data_in = np.array([[1], [0]], dtype=np.float32) - data_out = np.array([[1], [-1]], dtype=np.float32) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - - circuits = util.convert_to_tensor([circuit, circuit]) - - history = model.fit(x=[circuits, data_in], y=data_out, epochs=100) - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-3) - - def test_simple_op_input(self): - """Test a simple operator input - - Learn qubit in the z+ state using two different measurement operators. 
- This tests input signature Expectation([operator_batch]) - """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x, y, z') - - circuits = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - - data_out = tf.convert_to_tensor(np.array([[1], [1]])) - ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]]) - - circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - op_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string) - - output = expectation.Expectation()( - circuit_input, - symbol_names=symbols, - operators=op_input, - initializer=tf.keras.initializers.RandomNormal()) - - model = tf.keras.Model(inputs=[circuit_input, op_input], outputs=output) - - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error, - ) - history = model.fit(x=[circuits, ops], - y=data_out, - batch_size=2, - epochs=200) - - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-3) - - def test_simple_op_and_param_input(self): - """Test a simple operator and parameter input. - - Train a NN to put a qubit in the z+ or x+ states based on a classical - binary input. This tests the input signature: - Expectation([value_batch, operator_batch]). 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x, y, z') - ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.X(bit)]]) - circuits = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - data_in = np.array([[1], [0]]) - data_out = np.array([[1], [1]]) - - data_inp = tf.keras.Input(shape=(1), dtype=tf.dtypes.float32) - op_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string) - circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - dense_1 = tf.keras.layers.Dense(10)(data_inp) - dense_2 = tf.keras.layers.Dense(3)(dense_1) - circuit_output = expectation.Expectation(backend=cirq.Simulator())( - circuit_inp, - symbol_names=symbols, - symbol_values=dense_2, - operators=op_inp) - - functional_model = tf.keras.Model( - inputs=[data_inp, op_inp, circuit_inp], outputs=[circuit_output]) - - functional_model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - history = functional_model.fit(x=[data_in, ops, circuits], - y=data_out, - batch_size=2, - epochs=100) - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-3) - - def test_dnn_qnn_dnn(self): - """Train a fully hybrid network using an Expectation layer. - - Train the network to output +-5 given an input of 1 or 0. This tests - that everything works when Expectation layer is a middle layers. 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x, y, z') - circuits = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - data_in = np.array([[1], [0]], dtype=np.float32) - data_out = np.array([[5], [-5]], dtype=np.float32) - - classical_input = tf.keras.Input(shape=(1,)) - circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - d1 = tf.keras.layers.Dense(10)(classical_input) - d2 = tf.keras.layers.Dense(3)(d1) - quantum = expectation.Expectation()(circuit_input, - symbol_names=symbols, - symbol_values=d2, - operators=cirq.Z(bit)) - d3 = tf.keras.layers.Dense(1)(quantum) - - model = tf.keras.Model(inputs=[circuit_input, classical_input], - outputs=d3) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - history = model.fit(x=[circuits, data_in], - y=data_out, - batch_size=2, - epochs=300) - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-3) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/layers/circuit_executors/sample.py b/tensorflow_quantum/python/layers/circuit_executors/sample.py deleted file mode 100644 index 81423bdd6..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/sample.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A tf.keras.layer that ingests programs and outputs bitstring samples.""" -import numbers - -import numpy as np -import sympy -import tensorflow as tf - -import cirq - -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python import util - - -class Sample(tf.keras.layers.Layer): - """A Layer that samples from a quantum circuit. - - Given an input circuit and set of parameter values, output samples - taken from the end of the circuit. - - First lets define a simple circuit to sample from: - - >>> def get_circuit(): - ... q0 = cirq.GridQubit(0, 0) - ... q1 = cirq.GridQubit(1, 0) - ... circuit = cirq.Circuit( - ... cirq.X(q0), - ... cirq.CNOT(q1) - ... ) - ... - ... return circuit - - When printed: - - >>> get_circuit() - (0, 0): ───X───@─── - │ - (1, 0): ───────X─── - - Using `tfq.layers.Sample`, it's possible to sample outputs from a given - circuit. The circuit above will put both qubits in the |1> state. - - To retrieve samples of the output state: - - >>> sample_layer = tfq.layers.Sample() - >>> output = sample_layer(get_circuit(), repetitions=4) - >>> output - - - Notice above that there were no parameters passed as input into the - layer, because the circuit wasn't parameterized. If instead the circuit - had parameters, e.g. - - >>> def get_parameterized_circuit(symbols): - ... q0 = cirq.GridQubit(0, 0) - ... q1 = cirq.GridQubit(1, 0) - ... circuit = cirq.Circuit( - ... cirq.X(q0) ** symbols[0], - ... cirq.CNOT(q1) - ... ) - ... - ... return circuit - - Then it becomes necessary to provide a value for the symbol using - `symbol_names` and `symbol_values`. - - >>> symbols = sympy.symbols(['x']) - >>> sample_layer = tfq.layers.Sample() - >>> output = sample_layer(get_parameterized_circuit(), - ... 
symbol_names=symbols, symbol_values=[[0.5]], repetitions=4) - >>> tf.shape(output.to_tensor()) - tf.Tensor([1 4 2], shape=(3,), dtype=int32) - - Note that using multiple sets of parameters returns multiple - independent samples on the same circuit. - - >>> symbols = sympy.symbols(['x']) - >>> sample_layer = tfq.layers.Sample() - >>> params = tf.convert_to_tensor([[0.5], [0.4]], - ... dtype=tf.dtypes.float32) - >>> output = sample_layer(get_parameterized_circuit(), - ... symbol_names=symbols, symbol_values=params, repetitions=4) - >>> tf.shape(output.to_tensor()) - tf.Tensor([2 4 2], shape=(3,), dtype=int32) - - The sample layer can also be used without explicitly passing in a - circuit, but instead using the layer with a batch of circuits. This layer - will then sample the circuits provided in the batch with multiple sets of - parameters, at the same time. Note that the parameters will not be - crossed along all circuits, the circuit at index i will be run with the - parameters at index i. - - >>> symbols = sympy.symbols(['x']) - >>> sample_layer = tfq.layers.Sample() - - With the sample layer defined, just define both the circuit and - parameter inputs. - - >>> q0 = cirq.GridQubit(0, 0) - >>> q1 = cirq.GridQubit(1, 0) - >>> circuits = tfq.convert_to_tensor([ - ... cirq.Circuit( - ... cirq.X(q0) ** s[0], - ... cirq.CNOT(q0, q1), - ... ), - ... cirq.Circuit( - ... cirq.Y(q0) ** s[0], - ... cirq.CNOT(q0, q1), - ... ) - ... ]) - >>> params = tf.convert_to_tensor([[0.5], [0.4]], - ... dtype=tf.dtypes.float32) - - The layer can be used as usual: - - >>> output = sample_layer(circuits, - ... 
symbol_names=symbols, symbol_values = params, repetitions=4) - >>> tf.shape(output.to_tensor()) - tf.Tensor([2 4 2], shape=(3,), dtype=int32) - - - Note: When specifying a new layer for a *compiled* `tf.keras.Model` using - something like `tfq.layers.Sample()(cirq.Circuit(...), ...)` please - be sure to instead use `tfq.layers.Sample()(circuit_input, ...)` - where `circuit_input` is a `tf.keras.Input` that is filled with - `tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This - is because compiled Keras models require non keyword layer `call` inputs to - be traceable back to a `tf.keras.Input`. - - """ - - def __init__(self, backend=None, **kwargs): - """Instantiate this Layer. - - Create a layer that will output bitstring samples taken from either a - simulated quantum state or a real quantum computer - - Args: - backend: Optional Backend to use to simulate this state. Defaults - to the native Tensorflow simulator (None), however users may - also specify a preconfigured cirq execution object to use - instead, which must inherit `cirq.SimulatesSamples` or a - `cirq.Sampler`. - """ - super().__init__(**kwargs) - self.sample_op = circuit_execution_ops.get_sampling_op(backend) - - def call(self, - inputs, - *, - symbol_names=None, - symbol_values=None, - repetitions=None): - """Keras call function. - - Reference of options that are shown in examples above. - - Input options: - - 1. `inputs` can be a single `cirq.Circuit`, a Python `list` of - `cirq.Circuit`s or a pre-converted `tf.Tensor` of - `cirq.Circuit`s. - - 2. `symbol_names` can be a Python `list` of `str` or `sympy.Symbols` - or a pre-converted `tf.Tensor` of type `str`. - - 3. `symbol_values` can be a Python `list` of floating point values - or `np.ndarray` or pre-converted `tf.Tensor` of floats. - - 4. `repetitions` can be a Python `int` or a pre-converted - `tf.Tensor` containing a single `int` entry. 
- - Output shape: - `tf.RaggedTensor` with shape: - [batch size of symbol_values, repetitions, ] - or - [number of circuits, repetitions, ] - - """ - # inputs is the circuit(s). - symbols_empty = False - if symbol_names is None: - symbol_names = [] - if symbol_values is None: - symbols_empty = True - symbol_values = [[]] - - if repetitions is None: - raise ValueError("Number of repetitions not specified.") - - # Ingest and promote repetitions. - if isinstance(repetitions, numbers.Integral): - if not repetitions > 0: - raise ValueError("Repetitions must be greater than zero.") - repetitions = tf.convert_to_tensor([repetitions], dtype=tf.int32) - - if not tf.is_tensor(repetitions): - raise TypeError("repetitions cannot be parsed to int32 tensor" - " tensor given input: ".format(repetitions)) - - # Ingest and promote symbol_names. - if isinstance(symbol_names, (list, tuple, np.ndarray)): - if symbol_names and not all( - [isinstance(x, (str, sympy.Symbol)) for x in symbol_names]): - raise TypeError("Each element in symbol_names" - " must be a string or sympy.Symbol.") - symbol_names = [str(s) for s in symbol_names] - if not len(symbol_names) == len(list(set(symbol_names))): - raise ValueError("All elements of symbol_names must be unique.") - symbol_names = tf.convert_to_tensor(symbol_names, - dtype=tf.dtypes.string) - if not tf.is_tensor(symbol_names): - raise TypeError("symbol_names cannot be parsed to string" - " tensor given input: ".format(symbol_names)) - - # Ingest and promote symbol_values. - if isinstance(symbol_values, (list, tuple, np.ndarray)): - symbol_values = tf.convert_to_tensor(symbol_values, - dtype=tf.dtypes.float32) - if not tf.is_tensor(symbol_values): - raise TypeError("symbol_values cannot be parsed to float32" - " tensor given input: ".format(symbol_values)) - - symbol_batch_dim = tf.gather(tf.shape(symbol_values), 0) - - # Ingest and promote circuit. - if isinstance(inputs, cirq.Circuit): - # process single circuit. 
- inputs = tf.tile(util.convert_to_tensor([inputs]), - [symbol_batch_dim]) - - elif isinstance(inputs, (list, tuple, np.ndarray)): - # process list of circuits. - inputs = util.convert_to_tensor(inputs) - - if not tf.is_tensor(inputs): - raise TypeError("circuits cannot be parsed with given input:" - " ".format(inputs)) - - if symbols_empty: - # No symbol_values were provided. so we must tile up the - # symbol values so that symbol_values = [[]] * number of circuits - # provided. - circuit_batch_dim = tf.gather(tf.shape(inputs), 0) - symbol_values = tf.tile(symbol_values, - tf.stack([circuit_batch_dim, 1])) - - return self.sample_op(inputs, symbol_names, symbol_values, repetitions) diff --git a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py b/tensorflow_quantum/python/layers/circuit_executors/sample_test.py deleted file mode 100644 index 0a453de1e..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/sample_test.py +++ /dev/null @@ -1,214 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for the sample layer.""" -import numpy as np -from absl.testing import parameterized -import sympy -import tensorflow as tf -import cirq - -from tensorflow_quantum.python.layers.circuit_executors import sample -from tensorflow_quantum.python import util - - -class SampleTest(tf.test.TestCase, parameterized.TestCase): - """Tests for the Sample layer.""" - - def test_sample_create(self): - """Test that sample instantiates correctly.""" - sample.Sample(backend=cirq.Simulator()) - sample.Sample() - with self.assertRaisesRegex(TypeError, - expected_regex="junk is invalid"): - sample.Sample(backend='junk') - - def test_sample_invalid_type_inputs(self): - """Test that sample rejects bad inputs.""" - sampler = sample.Sample() - with self.assertRaisesRegex(TypeError, - expected_regex="circuits cannot be parsed"): - sampler('junk_circuit', repetitions=10) - - with self.assertRaisesRegex( - TypeError, expected_regex="symbol_values cannot be parsed"): - sampler(cirq.Circuit(), symbol_values='junk', repetitions=10) - - with self.assertRaisesRegex( - TypeError, expected_regex="symbol_names cannot be parsed"): - sampler(cirq.Circuit(), - symbol_values=[], - symbol_names='junk', - repetitions=10) - - with self.assertRaisesRegex(TypeError, expected_regex="Cannot convert"): - sampler(cirq.Circuit(), - symbol_values=[['bad']], - symbol_names=['name'], - repetitions=10) - - with self.assertRaisesRegex(TypeError, - expected_regex="must be a string."): - sampler(cirq.Circuit(), - symbol_values=[[0.5]], - symbol_names=[0.33333], - repetitions=10) - - with self.assertRaisesRegex(ValueError, - expected_regex="must be unique."): - sampler(cirq.Circuit(), - symbol_values=[[0.5]], - symbol_names=['duplicate', 'duplicate'], - repetitions=10) - - with self.assertRaisesRegex(ValueError, - expected_regex="repetitions not specified"): - sampler(cirq.Circuit()) - - with self.assertRaisesRegex(ValueError, - 
expected_regex="greater than zero"): - sampler(cirq.Circuit(), repetitions=-1) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed to int32"): - sampler(cirq.Circuit(), repetitions='junk') - - def test_sample_invalid_shape_inputs(self): - """Test that sample rejects bad input shapes.""" - sampler = sample.Sample() - with self.assertRaisesRegex(TypeError, - expected_regex="string or sympy.Symbol"): - sampler(cirq.Circuit(), - symbol_values=[[0.5]], - symbol_names=[[]], - repetitions=10) - - with self.assertRaisesRegex(ValueError, - expected_regex="rank 2 but is rank 1"): - sampler(cirq.Circuit(), - symbol_values=[0.5], - symbol_names=['name'], - repetitions=10) - - with self.assertRaisesRegex(ValueError, - expected_regex="rank 1 but is rank 2"): - sampler([[cirq.Circuit()]], - symbol_values=[[0.5]], - symbol_names=['name'], - repetitions=10) - - with self.assertRaisesRegex( - TypeError, expected_regex="cannot be parsed to int32 tensor"): - sampler([cirq.Circuit()], repetitions=[10]) - - @parameterized.parameters([{ - 'backend': None - }, { - 'backend': cirq.Simulator() - }, { - 'backend': cirq.DensityMatrixSimulator() - }]) - def test_sample_invalid_combinations(self, backend): - """Test with valid type inputs and valid value, but incorrect combo.""" - sampler = sample.Sample(backend) - symbol = sympy.Symbol('alpha') - circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))**symbol) - with self.assertRaisesRegex(Exception, expected_regex=""): - # no value provided. - sampler([circuit, circuit], symbol_names=[symbol], repetitions=5) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # no name provided. - sampler([circuit, circuit], - symbol_names=[], - symbol_values=[[2.0], [3.0]], - repetitions=5) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # deceptive, but the circuit shouldn't be in a list. otherwise fine. 
- sampler([circuit], - symbol_names=['alpha'], - symbol_values=[[2.0], [3.0]], - repetitions=5) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # wrong symbol name. - sampler([circuit], - symbol_names=['alphaaaa'], - symbol_values=[[2.0], [3.0]], - repetitions=5) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # too many symbol values provided. - sampler(circuit, - symbol_names=['alpha'], - symbol_values=[[2.0, 4.0], [3.0, 5.0]], - repetitions=5) - - def test_sample_basic_inputs(self): - """Test that sample ingests inputs correctly in simple settings.""" - sampler = sample.Sample() - sampler(cirq.Circuit(), repetitions=10) - sampler([cirq.Circuit()], repetitions=10) - sampler(cirq.Circuit(), - symbol_names=['name'], - symbol_values=[[0.5]], - repetitions=10) - sampler(cirq.Circuit(), - symbol_names=[sympy.Symbol('name')], - symbol_values=[[0.5]], - repetitions=10) - - def test_sample_outputs_simple(self): - """Test the simplest call where nothing but circuits are provided.""" - sampler = sample.Sample() - circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))) - output = sampler([circuit, circuit], repetitions=5) - self.assertShapeEqual(np.empty((2, 5, 1)), output.to_tensor()) - - # TODO(trevormccrt): add QuantumEngineSampler to this once it is available - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - backend=[None, - cirq.Simulator(), - cirq.DensityMatrixSimulator()], - all_n_qubits=[[3], [8], [3, 4], [3, 4, 10]], - n_samples=[1, 10, 100], - symbol_names=[[], ['a', 'b']]))) - def test_sample_output(self, backend, all_n_qubits, n_samples, - symbol_names): - """Test that expected output format is preserved. - - Check that any pre or post processing done inside the layers does not - cause what is output from the layer to structurally deviate from what - is expected. 
- """ - sampler = sample.Sample(backend=backend) - bits = cirq.GridQubit.rect(1, max(all_n_qubits)) - programs = [] - expected_outputs = [] - for n_qubits in all_n_qubits: - programs.append(cirq.Circuit(*cirq.X.on_each(*bits[0:n_qubits]))) - expected_outputs.append([[1] * n_qubits for _ in range(n_samples)]) - symbol_values = np.random.random((len(all_n_qubits), len(symbol_names))) - layer_output = sampler(programs, - symbol_names=symbol_names, - symbol_values=symbol_values, - repetitions=n_samples).to_list() - self.assertEqual(expected_outputs, layer_output) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py deleted file mode 100644 index 1f8560ec0..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py +++ /dev/null @@ -1,394 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A tf.keras.layer that ingests programs and outputs sampled expectation values -.""" -import numbers - -import numpy as np -import sympy -import tensorflow as tf - -import cirq -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python.differentiators import differentiator as diff -from tensorflow_quantum.python.differentiators import parameter_shift -from tensorflow_quantum.python import util - - -class SampledExpectation(tf.keras.layers.Layer): - """A layer that calculates a sampled expectation value. - - Given an input circuit and set of parameter values, output expectation - values of observables computed using measurement results sampled from - the input circuit. - - - First define a simple helper function for generating a parametrized - quantum circuit that we will use throughout: - - >>> def _gen_single_bit_rotation_problem(bit, symbols): - ... \"""Generate a toy problem on 1 qubit.\""" - ... starting_state = [0.123, 0.456, 0.789] - ... circuit = cirq.Circuit( - ... cirq.Rx(starting_state[0])(bit), - ... cirq.Ry(starting_state[1])(bit), - ... cirq.Rz(starting_state[2])(bit), - ... cirq.Rz(symbols[2])(bit), - ... cirq.Ry(symbols[1])(bit), - ... cirq.Rx(symbols[0])(bit) - ... ) - ... return circuit - - - In quantum machine learning there are two very common use cases that - align with keras layer constructs. The first is where the circuits - represent the input data points: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x y z') - >>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] - >>> num_samples = [100, 200] - >>> circuit_list = [ - ... _gen_single_bit_rotation_problem(bit, symbols), - ... cirq.Circuit( - ... cirq.Z(bit) ** symbols[0], - ... cirq.X(bit) ** symbols[1], - ... cirq.Z(bit) ** symbols[2] - ... ), - ... cirq.Circuit( - ... cirq.X(bit) ** symbols[0], - ... cirq.Z(bit) ** symbols[1], - ... 
cirq.X(bit) ** symbols[2] - ... ) - ... ] - >>> sampled_expectation_layer = tfq.layers.SampledExpectation() - >>> output = sampled_expectation_layer( - ... circuit_list, - ... symbol_names=symbols, - ... operators=ops, - ... repetitions=num_samples) - >>> # Here output[i][j] corresponds to the sampled expectation - >>> # of all the ops in ops w.r.t circuits[i] where Keras managed - >>> # variables are placed in the symbols 'x', 'y', 'z'. - >>> tf.shape(output) - tf.Tensor([3 2], shape=(2,), dtype=int32) - - - Here, different `cirq.Circuit` instances sharing the common symbols 'x', - 'y' and 'z' are used as input. Keras uses the `symbol_names` - argument to map Keras managed variables to these circuits constructed - with `sympy.Symbol`s. The shape of `num_samples` is equal to that of `ops`. - - - The second most common use case is where there is a fixed circuit and - the expectation operators vary: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] - >>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> expectation_layer = tfq.layers.SampledExpectation() - >>> output = expectation_layer( - ... fixed_circuit, - ... symbol_names=symbols, - ... operators=ops, - ... repetitions=5000, - ... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) - >>> # Here output[i][j] corresponds to - >>> # the sampled expectation of operators[i][j] using 5000 samples w.r.t - >>> # the circuit where variable values are managed by keras and store - >>> # numbers in the symbols 'x', 'y', 'z'. - >>> tf.shape(output) - tf.Tensor([1 2], shape=(2,), dtype=int32) - - - Here different `cirq.PauliSum` or `cirq.PauliString` instances can be - used as input to calculate the expectation on the fixed circuit that - the layer was initially constructed with. - - - There are also some more complex use cases that provide greater flexibility. 
- Notably these configurations all make use of the `symbol_values` parameter - that causes the `SampledExpectation` layer to stop managing the - `sympy.Symbol`s in the quantum circuits and instead requires the user to - supply inputs themselves. Lets look at the case where there - is a single fixed circuit, some fixed operators and symbols that must be - common to all circuits: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x y z') - >>> ops = [cirq.Z(bit), cirq.X(bit)] - >>> num_samples = [100, 200] - >>> circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> values = [[1,1,1], [2,2,2], [3,3,3]] - >>> sampled_expectation_layer = tfq.layers.SampledExpectation() - >>> output = sampled_expectation_layer( - ... circuit, - ... symbol_names=symbols, - ... symbol_values=values, - ... operators=ops, - ... repetitions=num_samples) - >>> # output[i][j] = The sampled expectation of ops[j] with - >>> # values_tensor[i] placed into the symbols of the circuit - >>> # with the order specified by feed_in_params. - >>> # so output[1][2] = The sampled expectation of a circuit with parameter - >>> # values [2,2,2] w.r.t Pauli X, estimated using 200 samples per term. - >>> output # Non-deterministic result. It can vary every time. - tf.Tensor( - [[0.52, 0.72], - [0.34, 1. 
], - [0.78, 0.48]], shape=(3, 2), dtype=float32) - - - Tip: you can compare the above result with that of `Expectation`: - tf.Tensor( - [[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - - - Here is a simple model that uses this particular input signature of - `tfq.layers.SampledExpectation`, that learns to undo the random rotation - of the qubit: - - - >>> bit = cirq.GridQubit(0, 0) - >>> symbols = sympy.symbols('x, y, z') - >>> circuit = _gen_single_bit_rotation_problem(bit, symbols) - >>> control_input = tf.keras.Input(shape=(1,)) - >>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - >>> d1 = tf.keras.layers.Dense(10)(control_input) - >>> d2 = tf.keras.layers.Dense(3)(d1) - >>> expectation = tfq.layers.SampledExpectation()( - ... circuit_inputs, # See note below! - ... symbol_names=symbols, - ... symbol_values=d2, - ... operators=cirq.Z(bit), - ... repetitions=5000) - >>> data_in = np.array([[1], [0]], dtype=np.float32) - >>> data_out = np.array([[1], [-1]], dtype=np.float32) - >>> model = tf.keras.Model( - ... inputs=[circuit_inputs, control_input], outputs=expectation) - >>> model.compile( - ... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), - ... loss=tf.keras.losses.mean_squared_error) - >>> history = model.fit( - ... x=[tfq.convert_to_tensor([circuit] * 2), data_in], - ... y=data_out, - ... epochs=100) - - - For an example featuring this layer, please check out `Taking gradients` - in our dev website http://www.tensorflow.org/quantum/tutorials. - - Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed - Python `list` objects. In addition to this they can also be fed `tf.Tensor` - inputs, meaning that you can input all of these things from other Tensor - objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). 
- - - Note: When specifying a new layer for a *compiled* `tf.keras.Model` using - something like `tfq.layers.SampledExpectation()(cirq.Circuit(...), ...)` - please be sure to instead use - `tfq.layers.SampledExpectation()(circuit_input, ...)` where - `circuit_input` is a `tf.keras.Input` that is filled with - `tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This - is because compiled Keras models require non keyword layer `call` inputs to - be traceable back to a `tf.keras.Input`. - - """ - - def __init__(self, backend=None, differentiator=None, **kwargs): - """Instantiate this Layer. - - Create a layer that will output expectation values gained from - simulating a quantum circuit. - - Args: - backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. - differentiator: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit `tfq.differentiators.Differentiator`. - Defaults to None, which uses `parameter_shift.ParameterShift()`. - - """ - super().__init__(**kwargs) - - # Ingest backend. - if not isinstance(backend, cirq.Sampler) and \ - isinstance(backend, cirq.SimulatesFinalState): - raise TypeError("Backend implements cirq.SimulatesFinalState but " - "not cirq.Sampler. Please use Expectation instead.") - - # Ingest differentiator. 
- if differentiator is None: - differentiator = parameter_shift.ParameterShift() - - if not isinstance(differentiator, diff.Differentiator): - raise TypeError("Differentiator must inherit from " - "tfq.differentiators.Differentiator") - - self._expectation_op = differentiator.generate_differentiable_op( - sampled_op=circuit_execution_ops.get_sampled_expectation_op( - backend=backend)) - - self._w = None - - def call(self, - inputs, - *, - symbol_names=None, - symbol_values=None, - operators=None, - repetitions=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)): - """Keras call function.""" - - # inputs is the circuit(s). - values_empty = False - if symbol_names is None: - symbol_names = [] - if symbol_values is None: - values_empty = True - symbol_values = [[]] - - # Ingest and promote symbol_names. - if isinstance(symbol_names, (list, tuple, np.ndarray)): - if not all( - isinstance(x, (str, sympy.Symbol)) for x in symbol_names): - raise TypeError("Each element in symbol_names" - " must be a string or sympy.Symbol.") - symbol_names = [str(s) for s in symbol_names] - if not len(symbol_names) == len(list(set(symbol_names))): - raise ValueError("All elements of symbol_names must be unique.") - symbol_names = tf.identity( - tf.convert_to_tensor(symbol_names, dtype=tf.dtypes.string)) - - if not tf.is_tensor(symbol_names): - raise TypeError("symbol_names cannot be parsed to string" - " tensor given input: ".format(symbol_names)) - - # Ingest and promote symbol_values. - if isinstance(symbol_values, (list, tuple, np.ndarray)): - symbol_values = tf.convert_to_tensor(symbol_values, - dtype=tf.dtypes.float32) - - if not tf.is_tensor(symbol_values): - raise TypeError("symbol_values cannot be parsed to float32" - " tensor given input: ".format(symbol_values)) - - symbol_batch_dim = tf.gather(tf.shape(symbol_values), 0) - - # Ingest and promote circuits. - # Would be nice to support python circuits *fully* in this layer. 
- if isinstance(inputs, cirq.Circuit): - # process single circuit. - inputs = tf.tile(util.convert_to_tensor([inputs]), - [symbol_batch_dim]) - - elif isinstance(inputs, (list, tuple, np.ndarray)): - # process list of circuits. - inputs = util.convert_to_tensor(inputs) - - if not tf.is_tensor(inputs): - raise TypeError("circuits cannot be parsed with given input:" - " ".format(inputs)) - - circuit_batch_dim = tf.gather(tf.shape(inputs), 0) - - # Ingest and promote operators. - if operators is None: - raise RuntimeError("Value for operators not provided. operators " - "must be one of cirq.PauliSum, cirq.PauliString" - ", or a list/tensor/tuple containing " - "cirq.PauliSum or cirq.PauliString.") - - op_needs_tile = False - if isinstance(operators, (cirq.PauliSum, cirq.PauliString)): - # If we are given a single operator promote it to a list and tile - # it up to size. - operators = [[operators]] - op_needs_tile = True - - if isinstance(operators, (list, tuple, np.ndarray)): - if not isinstance(operators[0], (list, tuple, np.ndarray)): - # If we are given a flat list of operators. tile them up - # to match the batch size of circuits. - operators = [operators] - op_needs_tile = True - operators = util.convert_to_tensor(operators) - - if op_needs_tile: - # Don't tile up if the user gave a python list that was precisely - # the correct size to match circuits outer batch dim. - operators = tf.tile(operators, [circuit_batch_dim, 1]) - - if not tf.is_tensor(operators): - raise TypeError("operators cannot be parsed to string tensor" - " given input: ".format(operators)) - - # Ingest and promote repetitions. - if repetitions is None: - raise RuntimeError("Value for repetitions not provided.") - - reps_need_tile = False - if isinstance(repetitions, numbers.Integral): - # Must tile it up to size to match operators if many operators - # were provided but only one number was provided. 
- repetitions = tf.ones(tf.shape(operators), - dtype=tf.dtypes.int32) * repetitions - - if isinstance(repetitions, (list, tuple, np.ndarray)): - if not isinstance(repetitions[0], (list, tuple, np.ndarray)): - repetitions = [repetitions] - reps_need_tile = True - - repetitions = tf.convert_to_tensor(repetitions, - dtype=tf.dtypes.int32) - - if reps_need_tile: - # Don't tile up if the user gave a python list that was precisely - # the correct size to match circuits outer batch dim. - repetitions = tf.tile(repetitions, [circuit_batch_dim, 1]) - - if not tf.is_tensor(repetitions): - raise TypeError("repetitions cannot be parsed to int32 tensor" - " given input: ".format(repetitions)) - - if values_empty: - # No symbol_values were provided. So we assume the user wants us - # to create and manage variables for them. We will do so by - # creating a weights variable and tiling it up to appropriate - # size of [batch, num_symbols]. - - if self._w is None: - # don't re-add variable. - self._w = self.add_weight(name='circuit_learnable_parameters', - shape=[len(symbol_names)], - initializer=initializer) - - symbol_values = tf.tile(tf.expand_dims(self._w, axis=0), - tf.stack([circuit_batch_dim, 1])) - - num_samples = repetitions - - return self._expectation_op(inputs, symbol_names, symbol_values, - operators, num_samples) diff --git a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py b/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py deleted file mode 100644 index 0044fb8d0..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py +++ /dev/null @@ -1,437 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Tests for tensorflow_quantum.layers.circuit_executors.sampled_expectation.""" - -import numpy as np -import sympy -import tensorflow as tf - -import cirq -from tensorflow_quantum.python.layers.circuit_executors import \ - sampled_expectation -from tensorflow_quantum.python.differentiators import linear_combination -from tensorflow_quantum.python import util - - -def _gen_single_bit_rotation_problem(bit, symbols): - """Generate a toy problem on 1 qubit.""" - starting_state = np.random.uniform(0, 2 * np.pi, 3) - circuit = cirq.Circuit( - cirq.Rx(starting_state[0])(bit), - cirq.Ry(starting_state[1])(bit), - cirq.Rz(starting_state[2])(bit), - cirq.Rz(symbols[2])(bit), - cirq.Ry(symbols[1])(bit), - cirq.Rx(symbols[0])(bit)) - - return circuit - - -class SampledExpectationTest(tf.test.TestCase): - """Basic tests for the SampledExpectation layer.""" - - def test_sampled_expectation_symbol_input(self): - """Test that SampledExpectation only accepts valid permutations of - symbols.""" - sampled_expectation.SampledExpectation() - sampled_expectation.SampledExpectation(backend=cirq.Simulator()) - sampled_expectation.SampledExpectation( - differentiator=linear_combination.ForwardDifference()) - - def test_sampled_expectation_instantiate_error(self): - """Test that SampledExpectation errors with bad inputs.""" - - class MySim(cirq.SimulatesFinalState): - """Class to test sampler detection in Expectation.""" - - def simulate_sweep(self): - """Do nothing.""" - return - - with 
self.assertRaisesRegex(TypeError, expected_regex="Expectation"): - sampled_expectation.SampledExpectation(backend=MySim()) - - with self.assertRaisesRegex(TypeError, - expected_regex="Sampler or None"): - sampled_expectation.SampledExpectation(backend='junk') - - with self.assertRaisesRegex( - TypeError, expected_regex="tfq.differentiators.Differentiator"): - sampled_expectation.SampledExpectation(differentiator='junk') - - def test_sampled_expectation_type_inputs_error(self): - """Test that SampledExpectation errors within Keras call.""" - - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') - test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - with self.assertRaisesRegex(TypeError, - expected_regex="string or sympy.Symbol"): - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol, 5.0], - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(ValueError, - expected_regex="must be unique."): - sampled_expectation.SampledExpectation()( - symb_circuit, - symbol_names=[symbol, symbol], - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names='junk', - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - symbol_values='junk', - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(TypeError, - expected_regex="cannot be parsed"): - sampled_expectation.SampledExpectation()('junk', - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(RuntimeError, - expected_regex="operators not provided"): - 
sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - repetitions=1) - - with self.assertRaisesRegex(RuntimeError, - expected_regex="repetitions not provided"): - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum) - - with self.assertRaisesRegex(Exception, - expected_regex="Unknown initializer"): - sampled_expectation.SampledExpectation()(reg_circuit, - operators=test_psum, - initializer='junk', - repetitions=1) - - with self.assertRaisesRegex(Exception, - expected_regex="cannot be parsed"): - sampled_expectation.SampledExpectation()(reg_circuit, - operators=test_psum, - repetitions='junk') - - def test_sampled_expectation_op_error(self): - """Test that expectation errors within underlying ops correctly.""" - # Note the expected_regex is left blank here since there is a - # discrepancy between the error strings provided between backends. - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') - test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - with self.assertRaisesRegex(Exception, expected_regex="bytes-like"): - # Operators has wrong rank. Parse error. - sampled_expectation.SampledExpectation()( - [reg_circuit], - operators=util.convert_to_tensor([test_psum]), - repetitions=1) - - with self.assertRaisesRegex( - Exception, expected_regex="must match second dimension"): - # symbol_values has wrong rank. - sampled_expectation.SampledExpectation()([symb_circuit], - symbol_names=[symbol], - symbol_values=[0.5], - operators=test_psum, - repetitions=1) - - with self.assertRaisesRegex(Exception, expected_regex="same batch"): - # Wrong batch size for pauli operators. 
- sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - operators=[[test_psum], - [test_psum]], - repetitions=1) - - with self.assertRaisesRegex(Exception, expected_regex="same batch"): - # Wrong batch size for pauli operators. - sampled_expectation.SampledExpectation()(reg_circuit, - operators=[[test_psum], - [test_psum]], - repetitions=1) - - with self.assertRaisesRegex(Exception, expected_regex="<= 0"): - # Wrong repetitions. - sampled_expectation.SampledExpectation()(reg_circuit, - operators=test_psum, - repetitions=-1) - - with self.assertRaisesRegex(Exception, - expected_regex="same shape as pauli_sums"): - # Wrong second dimension size for repetitions & pauli operators. - sampled_expectation.SampledExpectation()(reg_circuit, - operators=test_psum, - repetitions=[5, 4, 3]) - - def test_static_cases(self): - """Run inputs through in complex cases.""" - - bit = cirq.GridQubit(0, 0) - symbol = sympy.Symbol('alpha') - test_pstring = cirq.Z(bit) - test_psum = cirq.PauliSum.from_pauli_strings([test_pstring]) - symb_circuit = cirq.Circuit(cirq.H(bit)**symbol) - reg_circuit = cirq.Circuit(cirq.H(bit)) - - # Passing a 2d operators input requires a 1d circuit input. - sampled_expectation.SampledExpectation()( - [reg_circuit, reg_circuit], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) - - # Passing 2d operators along with other inputs. - sampled_expectation.SampledExpectation()( - [symb_circuit, symb_circuit], - symbol_names=[symbol], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) - sampled_expectation.SampledExpectation()( - [symb_circuit, symb_circuit], - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=[[test_psum, test_psum], [test_psum, test_psum]], - repetitions=1) - - # Ensure tiling up of circuits works as expected. 
- sampled_expectation.SampledExpectation()(reg_circuit, - operators=test_psum, - repetitions=1) - sampled_expectation.SampledExpectation()( - reg_circuit, operators=[test_psum, test_psum], repetitions=1) - - # Ensure tiling up of symbol_values works as expected. - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5], [0.8]], - operators=test_psum, - repetitions=1) - sampled_expectation.SampledExpectation()(symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=test_psum, - repetitions=1) - - # Test multiple operators with integer valued repetition. - sampled_expectation.SampledExpectation()( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=[-1.0 * cirq.Z(bit), - cirq.X(bit) + 2.0 * cirq.Z(bit)], - repetitions=1) - sampled_expectation.SampledExpectation()( - symb_circuit, - symbol_names=[symbol], - symbol_values=[[0.5]], - operators=[-1.0 * cirq.Z(bit), - cirq.X(bit) + 2.0 * cirq.Z(bit)], - repetitions=[5, 1]) - - def test_sampled_expectation_simple_tf_train(self): - """Train a layer using standard tf (not keras).""" - bit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.Rx(sympy.Symbol('theta'))(bit)) - layer = sampled_expectation.SampledExpectation() - optimizer = tf.optimizers.Adam(learning_rate=0.05) - for _ in range(10): - with tf.GradientTape() as tape: - circuit_out = layer(circuit, - symbol_names=['theta'], - operators=cirq.Z(bit), - repetitions=100) - mse = tf.square(tf.reduce_sum(tf.subtract(circuit_out, -1))) - grads = tape.gradient(mse, layer.trainable_weights) - optimizer.apply_gradients(zip(grads, layer.trainable_weights)) - self.assertAllClose(mse.numpy(), 0, atol=1e-3) - - -class SampledExpectationFunctionalTests(tf.test.TestCase): - """Test hybrid/integrated models that include a SampledExpectation layer.""" - - def test_simple_param_value_input(self): - """Train a densely connected hybrid model. 
- - This model will put a qubit in the zero or one state from a random state - given the input zero or one. - """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x y z') - circuit = _gen_single_bit_rotation_problem(bit, symbols) - - inputs = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float64) - datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - l1 = tf.keras.layers.Dense(10)(inputs) - l2 = tf.keras.layers.Dense(3)(l1) - outputs = sampled_expectation.SampledExpectation()( - datum, - symbol_names=symbols, - operators=cirq.Z(bit), - symbol_values=l2, - repetitions=5000) - model = tf.keras.Model(inputs=[datum, inputs], outputs=outputs) - - data_in = np.array([[1], [0]], dtype=np.float32) - data_out = np.array([[1], [-1]], dtype=np.float32) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - - circuits = util.convert_to_tensor([circuit, circuit]) - - history = model.fit(x=[circuits, data_in], y=data_out, epochs=30) - self.assertAllClose(history.history['loss'][-1], 0, atol=0.3) - - def test_simple_op_input(self): - """Test a simple operator input - - Learn qubit in the z+ state using two different measurement operators. 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x y z') - ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]]) - n = tf.convert_to_tensor([[5000], [5000]], dtype=tf.int32) - - circuit = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - - data_out = tf.convert_to_tensor(np.array([[1], [1]])) - op_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string) - n_inp = tf.keras.Input(shape=(1,), dtype=tf.dtypes.int32) - circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - circuit_output = sampled_expectation.SampledExpectation()( - circuit_inp, - symbol_names=symbols, - operators=op_inp, - repetitions=n_inp) - model = tf.keras.Model(inputs=[circuit_inp, op_inp, n_inp], - outputs=[circuit_output]) - - model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error, - ) - history = model.fit(x=[circuit, ops, n], - y=data_out, - batch_size=1, - epochs=3) - - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-2) - - def test_simple_op_and_param_input(self): - """Test a simple operator and parameter input. - - Train a NN to put a qubit in the z+ or x+ states based on a classical - binary input. 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x y z') - ops = util.convert_to_tensor([[cirq.Z(bit)], [cirq.Z(bit)]]) - n = tf.convert_to_tensor([[5000], [5000]], dtype=tf.int32) - circuits = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - data_in = np.array([[1], [0]]) - data_out = np.array([[1], [1]]) - - data_inp = tf.keras.layers.Input(shape=(1), dtype=tf.dtypes.float32) - op_inp = tf.keras.layers.Input(shape=(1,), dtype=tf.dtypes.string) - n_inp = tf.keras.layers.Input(shape=(1,), dtype=tf.dtypes.int32) - circuit_inp = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - dense_1 = tf.keras.layers.Dense(10)(data_inp) - dense_2 = tf.keras.layers.Dense(3)(dense_1) - circuit_output = sampled_expectation.SampledExpectation()( - circuit_inp, - symbol_names=symbols, - symbol_values=dense_2, - operators=op_inp, - repetitions=n_inp) - - functional_model = tf.keras.Model( - inputs=[circuit_inp, data_inp, op_inp, n_inp], - outputs=[circuit_output]) - - functional_model.compile( - optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - history = functional_model.fit(x=[circuits, data_in, ops, n], - y=data_out, - batch_size=2, - epochs=20) - self.assertAllClose(history.history['loss'][-1], 0, atol=3) - - def test_dnn_qnn_dnn(self): - """Train a fully hybrid network using an SampledExpectation layer. - - Train the network to output +-5 given an input of 1 or 0. This tests - that everything works when SampledExpectation layer is a middle layers. 
- """ - bit = cirq.GridQubit(0, 0) - symbols = sympy.symbols('x, y, z') - circuits = util.convert_to_tensor( - [_gen_single_bit_rotation_problem(bit, symbols)] * 2) - data_in = np.array([[1], [0]], dtype=np.float32) - data_out = np.array([[5], [-5]], dtype=np.float32) - - classical_input = tf.keras.Input(shape=(1,)) - circuit_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - d1 = tf.keras.layers.Dense(10)(classical_input) - d2 = tf.keras.layers.Dense(3)(d1) - quantum = sampled_expectation.SampledExpectation()( - circuit_input, - symbol_names=symbols, - symbol_values=d2, - operators=cirq.Z(bit), - repetitions=5000) - d3 = tf.keras.layers.Dense(1)(quantum) - - model = tf.keras.Model(inputs=[circuit_input, classical_input], - outputs=d3) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - history = model.fit(x=[circuits, data_in], - y=data_out, - batch_size=2, - epochs=75) - self.assertAllClose(history.history['loss'][-1], 0, atol=4) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/layers/circuit_executors/state.py b/tensorflow_quantum/python/layers/circuit_executors/state.py deleted file mode 100644 index 4205fcc6c..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/state.py +++ /dev/null @@ -1,216 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A tf.keras.layer that ingests programs and parameters and outputs a state.""" -import numpy as np -import sympy -import tensorflow as tf - -import cirq - -from tensorflow_quantum.core.ops import circuit_execution_ops -from tensorflow_quantum.python import util - - -class State(tf.keras.layers.Layer): - """A Layer that simulates a quantum state. - - Given an input circuit and set of parameter values, Simulate a quantum state - and output it to the Tensorflow graph. - - - A more common application is for determining the set of states produced - by a parametrized circuit where the values of the parameters vary. Suppose - we want to generate a family of states with varying degrees of entanglement - ranging from separable to maximally entangled. We first define a - parametrized circuit that can accomplish this - - >>> q0, q1 = cirq.GridQubit.rect(1, 2) - >>> alpha = sympy.Symbol('alpha') # degree of entanglement between q0, q1 - >>> parametrized_bell_circuit = cirq.Circuit( - ... cirq.H(q0), cirq.CNOT(q0, q1) ** alpha) - - Now pass all of the alpha values desired to `tfq.layers.State` to compute - a tensor of states corresponding to these preparation angles. - - >>> state_layer = tfq.layers.State() - >>> alphas = tf.reshape(tf.range(0, 1.1, delta=0.5), (3, 1)) # FIXME: #805 - >>> state_layer(parametrized_bell_circuit, - ... symbol_names=[alpha], symbol_values=alphas) - - - - This use case can be simplified to compute the wavefunction produced by a - fixed circuit where the values of the parameters vary. For example, this - layer produces a Bell state. 
- - >>> q0, q1 = cirq.GridQubit.rect(1, 2) - >>> bell_circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1)) - >>> state_layer = tfq.layers.State() - >>> state_layer(bell_circuit) - - - Not specifying `symbol_names` or `symbol_values` indicates that the - circuit(s) does not contain any `sympy.Symbols` inside of it and tfq won't - look for any symbols to resolve. - - - `tfq.layers.State` also allows for a more complicated input signature - wherein a different (possibly parametrized) circuit is used to prepare - a state for each batch of input parameters. This might be useful when - the State layer is being used to generate entirely different families - of states. Suppose we want to generate a stream of states that are - either computational basis states or 'diagonal' basis states (as in the - BB84 QKD protocol). The circuits to prepare these states are: - - >>> q0 = cirq.GridQubit(0, 0) - >>> bitval = sympy.Symbol('bitval') - >>> computational_circuit = cirq.Circuit(cirq.X(q0) ** bitval) - >>> diagonal_circuit = cirq.Circuit(cirq.X(q0) ** bitval, cirq.H(q0)) - - Now a stream of random classical bit values can be encoded into one of - these bases by preparing a state layer and passing in the bit values - accompanied by their preparation circuits - - >>> qkd_layer = tfq.layers.State() - >>> bits = [[1], [1], [0], [0]] - >>> states_to_send = [computational_circuit, - ... diagonal_circuit, - ... diagonal_circuit, - ... computational_circuit] - >>> qkd_states = qkd_layer( - ... states_to_send, symbol_names=[bitval], symbol_values=bits) - >>> # The third state was a '0' prepared in the diagonal basis: - >>> qkd_states - - - - Note: When specifying a new layer for a *compiled* `tf.keras.Model` using - something like `tfq.layers.State()(cirq.Circuit(...), ...)` please - be sure to instead use `tfq.layers.State()(circuit_input, ...)` - where `circuit_input` is a `tf.keras.Input` that is filled with - `tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. 
This - is because compiled keras models require non keyword layer `call` inputs to - be traceable back to a `tf.keras.Input`. - - """ - - def __init__(self, backend=None, **kwargs): - """Instantiate a State Layer. - - Create a layer that will simulate a quantum state and output it into - the TensorFlow graph given a correct set of inputs. - - Args: - backend: Optional Backend to use to simulate this state. Defaults - to the native TensorFlow Quantum state vector simulator, - however users may also specify a preconfigured cirq execution - object to use instead, which must inherit - `cirq.SimulatesFinalState`. Note that C++ Density Matrix - simulation is not yet supported so to do Density Matrix - simulation please use `cirq.DensityMatrixSimulator`. - """ - super().__init__(**kwargs) - self.state_op = circuit_execution_ops.get_state_op(backend) - - def call(self, inputs, *, symbol_names=None, symbol_values=None): - """Keras call function. - - Reference of options that are shown in examples above. - - Input options: - - 1. `inputs` can be a single `cirq.Circuit`, a Python `list` of - `cirq.Circuit`s or a pre-converted `tf.Tensor` of - `cirq.Circuit`s. - - 2. `symbol_names` can be a Python `list` of `str` or `sympy.Symbols` - or a pre-converted `tf.Tensor` of type `str`. - - 3. `symbol_values` can be a Python `list` of floating point values - or `np.ndarray` or pre-converted `tf.Tensor` of floats. - - Output shape: - `tf.RaggedTensor` with shape: - [batch size of symbol_values, ] - or - [number of circuits, ] - - """ - # inputs is the circuit(s). - symbols_empty = False - if symbol_names is None: - symbol_names = [] - if symbol_values is None: - symbols_empty = True - symbol_values = [[]] - - # Ingest and promote symbol_names. 
- if isinstance(symbol_names, (list, tuple, np.ndarray)): - if symbol_names and not all( - [isinstance(x, (str, sympy.Symbol)) for x in symbol_names]): - raise TypeError("Each element in symbol_names" - " must be a string or sympy.Symbol.") - symbol_names = [str(s) for s in symbol_names] - if not len(symbol_names) == len(list(set(symbol_names))): - raise ValueError("All elements of symbol_names must be unique.") - symbol_names = tf.convert_to_tensor(symbol_names, - dtype=tf.dtypes.string) - if not tf.is_tensor(symbol_names): - raise TypeError("symbol_names cannot be parsed to string" - " tensor given input: ".format(symbol_names)) - - # Ingest and promote symbol_values. - if isinstance(symbol_values, (list, tuple, np.ndarray)): - symbol_values = tf.convert_to_tensor(symbol_values, - dtype=tf.dtypes.float32) - if not tf.is_tensor(symbol_values): - raise TypeError("symbol_values cannot be parsed to float32" - " tensor given input: ".format(symbol_values)) - - symbol_batch_dim = tf.gather(tf.shape(symbol_values), 0) - - # Ingest and promote circuit. - if isinstance(inputs, cirq.Circuit): - # process single circuit. - inputs = tf.tile(util.convert_to_tensor([inputs]), - [symbol_batch_dim]) - - elif isinstance(inputs, (list, tuple, np.ndarray)): - # process list of circuits. - inputs = util.convert_to_tensor(inputs) - - if not tf.is_tensor(inputs): - raise TypeError("circuits cannot be parsed with given input:" - " ".format(inputs)) - - if symbols_empty: - # No symbol_values were provided. so we must tile up the - # symbol values so that symbol_values = [[]] * number of circuits - # provided. 
- circuit_batch_dim = tf.gather(tf.shape(inputs), 0) - symbol_values = tf.tile(symbol_values, - tf.stack([circuit_batch_dim, 1])) - - return self.state_op(inputs, symbol_names, symbol_values) diff --git a/tensorflow_quantum/python/layers/circuit_executors/state_test.py b/tensorflow_quantum/python/layers/circuit_executors/state_test.py deleted file mode 100644 index 6a9d24124..000000000 --- a/tensorflow_quantum/python/layers/circuit_executors/state_test.py +++ /dev/null @@ -1,186 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for tensorflow_quantum.layers.circuit_executors.state.""" -import numpy as np -from absl.testing import parameterized -import sympy -import tensorflow as tf -import cirq - -from tensorflow_quantum.python.layers.circuit_executors import state -from tensorflow_quantum.python import util - -WF_OUTPUT = [1 / np.sqrt(2), 0, 0, 1 / np.sqrt(2)] -DM_OUTPUT = np.outer(WF_OUTPUT, WF_OUTPUT) - - -class StateTest(parameterized.TestCase, tf.test.TestCase): - """Basic tests for the State layer.""" - - def test_state_create(self): - """Test that State layers can be created.""" - state.State() - state.State(backend=cirq.Simulator()) - with self.assertRaisesRegex(TypeError, - expected_regex="junk is invalid"): - state.State('junk') - - def test_state_invalid_type_inputs(self): - """Test that state rejects bad inputs.""" - state_calc = state.State() - with self.assertRaisesRegex(TypeError, - expected_regex="circuits cannot be parsed"): - state_calc('junk_circuit') - - with self.assertRaisesRegex( - TypeError, expected_regex="symbol_values cannot be parsed"): - state_calc(cirq.Circuit(), symbol_values='junk') - - with self.assertRaisesRegex( - TypeError, expected_regex="symbol_names cannot be parsed"): - state_calc(cirq.Circuit(), symbol_values=[], symbol_names='junk') - - with self.assertRaisesRegex(TypeError, expected_regex="Cannot convert"): - state_calc(cirq.Circuit(), - symbol_values=[['bad']], - symbol_names=['name']) - - with self.assertRaisesRegex(TypeError, - expected_regex="must be a string."): - state_calc(cirq.Circuit(), - symbol_values=[[0.5]], - symbol_names=[0.33333]) - - with self.assertRaisesRegex(ValueError, - expected_regex="must be unique."): - state_calc(cirq.Circuit(), - symbol_values=[[0.5]], - symbol_names=['duplicate', 'duplicate']) - - def test_state_invalid_shape_inputs(self): - """Test that state rejects bad input shapes.""" - state_calc = state.State() - with 
self.assertRaisesRegex(TypeError, - expected_regex="string or sympy.Symbol"): - state_calc(cirq.Circuit(), symbol_values=[[0.5]], symbol_names=[[]]) - - with self.assertRaisesRegex(Exception, expected_regex="rank 1"): - state_calc(cirq.Circuit(), - symbol_values=[0.5], - symbol_names=['name']) - - with self.assertRaisesRegex(Exception, expected_regex="rank 2"): - state_calc([[cirq.Circuit()]], - symbol_values=[[0.5]], - symbol_names=['name']) - - @parameterized.parameters([{ - 'backend': None - }, { - 'backend': cirq.Simulator() - }, { - 'backend': cirq.DensityMatrixSimulator() - }]) - def test_state_invalid_combinations(self, backend): - """Test with valid type inputs and valid value, but incorrect combo.""" - state_calc = state.State(backend) - symbol = sympy.Symbol('alpha') - circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))**symbol) - with self.assertRaisesRegex(Exception, expected_regex=""): - # no value provided. - state_calc([circuit, circuit], symbol_names=[symbol], repetitions=5) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # no name provided. - state_calc([circuit, circuit], - symbol_names=[], - symbol_values=[[2.0], [3.0]]) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # deceptive, but the circuit shouldn't be in a list. otherwise fine. - state_calc([circuit], - symbol_names=['alpha'], - symbol_values=[[2.0], [3.0]]) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # wrong symbol name. - state_calc([circuit], - symbol_names=['alphaaaa'], - symbol_values=[[2.0], [3.0]]) - - with self.assertRaisesRegex(Exception, expected_regex=""): - # too many symbol values provided. 
- state_calc(circuit, - symbol_names=['alpha'], - symbol_values=[[2.0, 4.0], [3.0, 5.0]]) - - def test_state_basic_inputs(self): - """Test that state ingests inputs correctly in simple settings.""" - state_calc = state.State() - state_calc(cirq.Circuit()) - state_calc([cirq.Circuit()]) - state_calc(cirq.Circuit(), symbol_names=['name'], symbol_values=[[0.5]]) - state_calc(cirq.Circuit(), - symbol_names=[sympy.Symbol('name')], - symbol_values=[[0.5]]) - - def test_sample_outputs_simple(self): - """Test the simplest call where nothing but circuits are provided.""" - state_calc = state.State() - circuit = cirq.Circuit(cirq.H(cirq.GridQubit(0, 0))) - output = state_calc([circuit, circuit]) - self.assertShapeEqual(np.empty((2, 2)), output.to_tensor()) - - @parameterized.parameters([ - { - 'backend_output': (None, WF_OUTPUT) - }, - { - 'backend_output': (cirq.sim.sparse_simulator.Simulator(), WF_OUTPUT) - }, - { - 'backend_output': - (cirq.sim.density_matrix_simulator.DensityMatrixSimulator(), - DM_OUTPUT) - }, - ]) - def test_state_output(self, backend_output): - """Check that any output type is as expected. - - This layer only allows for 2 different outputs, depending on whether a - wavefuntion or density matrix simulator is used. Therefore any pre or - post processing done inside the layers should not cause output from the - layer to structurally deviate from what is expected. 
- """ - backend = backend_output[0] - output = backend_output[1] - state_executor = state.State(backend=backend) - bits = cirq.GridQubit.rect(1, 2) - circuit = cirq.Circuit() - circuit.append(cirq.H.on(bits[0])) - circuit.append(cirq.CNOT(bits[0], bits[1])) - programs = util.convert_to_tensor([circuit, circuit]) - layer_output = state_executor(programs).to_list() - self.assertAllClose(layer_output, [output, output]) - - def test_state_one_circuit(self): - """Test that State behaves when a single layer is specified.""" - state_calc = state.State() - state_calc(cirq.Circuit(), - symbol_values=tf.zeros((5, 0), dtype=tf.dtypes.float32)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/tensorflow_quantum/python/layers/high_level/BUILD b/tensorflow_quantum/python/layers/high_level/BUILD deleted file mode 100644 index 3163a03b6..000000000 --- a/tensorflow_quantum/python/layers/high_level/BUILD +++ /dev/null @@ -1,48 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_library( - name = "controlled_pqc", - srcs = ["controlled_pqc.py"], - deps = [ - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/layers/circuit_construction:elementary", - "//tensorflow_quantum/python/layers/circuit_executors:expectation", - "//tensorflow_quantum/python/layers/circuit_executors:sampled_expectation", - ], -) - -py_library( - name = "pqc", - srcs = ["pqc.py"], - deps = [ - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/layers/circuit_construction:elementary", - "//tensorflow_quantum/python/layers/circuit_executors:expectation", - "//tensorflow_quantum/python/layers/circuit_executors:sampled_expectation", - ], -) - -py_test( - name = "controlled_pqc_test", - srcs = ["controlled_pqc_test.py"], - python_version = "PY3", - deps = [ - ":controlled_pqc", - "//tensorflow_quantum/python:util", - ], -) - -py_test( - name = "pqc_test", - srcs = ["pqc_test.py"], - python_version = "PY3", - deps = [ - ":pqc", - "//tensorflow_quantum/python:util", - ], -) diff --git a/tensorflow_quantum/python/layers/high_level/__init__.py b/tensorflow_quantum/python/layers/high_level/__init__.py deleted file mode 100644 index 8e0359840..000000000 --- a/tensorflow_quantum/python/layers/high_level/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copyright 2019 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Module for tfq.python.layers.high_level.*""" - -# pylint: disable=line-too-long -from tensorflow_quantum.python.layers.high_level.controlled_pqc import ControlledPQC -from tensorflow_quantum.python.layers.high_level.pqc import PQC -# pylint: enable=line-too-long diff --git a/tensorflow_quantum/python/layers/high_level/controlled_pqc.py b/tensorflow_quantum/python/layers/high_level/controlled_pqc.py deleted file mode 100644 index 81c0b5657..000000000 --- a/tensorflow_quantum/python/layers/high_level/controlled_pqc.py +++ /dev/null @@ -1,255 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Module for tfq.python.layers.high_level.controlled_pqc layer.""" -import numbers -import numpy as np -import tensorflow as tf -import cirq - -from tensorflow_quantum.python.layers.circuit_executors import \ - expectation, sampled_expectation -from tensorflow_quantum.python.layers.circuit_construction import elementary -from tensorflow_quantum.python import util - - -class ControlledPQC(tf.keras.layers.Layer): - """Controlled Parametrized Quantum Circuit (PQC) Layer. - - The `ControlledPQC` layer is very similar to the regular `PQC` layer, but - with one major difference. 
The `ControlledPQC` layer requires the caller - of the layer to provide the control parameter inputs for `model_circuit`. - You can see how this works through a simple example: - - - >>> bit = cirq.GridQubit(0, 0) - >>> model = cirq.Circuit( - ... cirq.X(bit) ** sympy.Symbol('alpha'), - ... cirq.Z(bit) ** sympy.Symbol('beta') - ... ) - >>> outputs = tfq.layers.ControlledPQC(model, cirq.Z(bit)) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(bit)) - ... ]) - >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) - >>> res = outputs([quantum_data, model_params]) - >>> res - tf.Tensor( - [[-1.4901161e-08] - [-7.0710683e-01]], shape=(2, 1), dtype=float32) - - - Just like with the `PQC` it is *very important* that the quantum datapoint - circuits do not contain any `sympy.Symbols` themselves (This can be - supported with advanced usage of the `tfq.layers.Expectation` layer). Just - like `PQC` it is possible to specify multiple readout operations and - switch to sample based expectation calculation: - - - >>> bit = cirq.GridQubit(0, 0) - >>> model = cirq.Circuit( - ... cirq.X(bit) ** sympy.Symbol('alpha'), - ... cirq.Z(bit) ** sympy.Symbol('beta') - ... ) - >>> outputs = tfq.layers.ControlledPQC( - ... model, - ... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], - ... repetitions=5000) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(bit)) - ... ]) - >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) - >>> res = outputs([quantum_data, model_params]) - >>> res - tf.Tensor( - [[-0.0028 1. -0.0028] - [-0.6956 -0.498 -0.498 ]], shape=(2, 3), dtype=float32) - - - A value for `backend` can also be supplied in the layer constructor - arguments to indicate which supported backend you would like to use. - A value for `differentiator` can also be supplied in the constructor - to indicate the differentiation scheme this `ControlledPQC` layer - should use. 
Here's how you would take the gradients of the - above example using a `cirq.Simulator` backend (which is slower - than `backend=None` which uses C++): - - - >>> bit = cirq.GridQubit(0, 0) - >>> model = cirq.Circuit( - ... cirq.X(bit) ** sympy.Symbol('alpha'), - ... cirq.Z(bit) ** sympy.Symbol('beta') - ... ) - >>> outputs = tfq.layers.ControlledPQC( - ... model, - ... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], - ... repetitions=5000, - ... backend=cirq.Simulator(), - ... differentiator=tfq.differentiators.ParameterShift()) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(bit)) - ... ]) - >>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) - >>> with tf.GradientTape() as g: - ... g.watch(model_params) - ... res = outputs([quantum_data, model_params]) - >>> grads = g.gradient(res, model_params) - >>> grads - tf.Tensor( - [[-3.1415927 3.1415927 ] - [-0.9211149 0.02764606]], shape=(2, 2), dtype=float32)] - - - Lastly, like all layers in TensorFlow the `ControlledPQC` layer can be - called on any `tf.Tensor` as long as it is the right shape. This means - you could replace `model_params` in the above example with the outputs - from a `tf.keras.Dense` layer or replace `quantum_data` with values fed - in from a `tf.keras.Input`. - """ - - def __init__(self, - model_circuit, - operators, - *, - repetitions=None, - backend=None, - differentiator=None, - **kwargs): - """Instantiate this layer. - - Create a layer that will output expectation values of the given - operators when fed quantum data to it's input layer. This layer will - take two input tensors, one representing a quantum data source (these - circuits must not contain any symbols) and the other representing - control parameters for the model circuit that gets appended to the - datapoints. - - model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. 
- operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. - repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. - backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit `cirq.SimulatesFinalState` - if `sampled_based` is True or it must inherit `cirq.Sampler` if - `sample_based` is False. - differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. - """ - super().__init__(**kwargs) - # Ingest model_circuit. - if not isinstance(model_circuit, cirq.Circuit): - raise TypeError("model_circuit must be a cirq.Circuit object." - " Given: ".format(model_circuit)) - - self._symbols = tf.constant( - list(sorted(util.get_circuit_symbols(model_circuit)))) - self._circuit = util.convert_to_tensor([model_circuit]) - - if len(self._symbols) == 0: - raise ValueError("model_circuit has no sympy.Symbols. Please " - "provide a circuit that contains symbols so " - "that their values can be trained.") - - # Ingest operators. - if isinstance(operators, (cirq.PauliString, cirq.PauliSum)): - operators = [operators] - - if not isinstance(operators, (list, np.ndarray, tuple)): - raise TypeError("operators must be a cirq.PauliSum or " - "cirq.PauliString, or a list, tuple, " - "or np.array containing them. " - "Got {}.".format(type(operators))) - if not all([ - isinstance(op, (cirq.PauliString, cirq.PauliSum)) - for op in operators - ]): - raise TypeError("Each element in operators to measure " - "must be a cirq.PauliString" - " or cirq.PauliSum") - - self._operators = util.convert_to_tensor([operators]) - - # Ingest and promote reptitions. 
- self._analytic = False - if repetitions is None: - self._analytic = True - - if not self._analytic and not isinstance(repetitions, numbers.Integral): - raise TypeError("repetitions must be a positive integer value." - " Given: ".format(repetitions)) - - if not self._analytic and repetitions <= 0: - raise ValueError("Repetitions must be greater than zero.") - - if not self._analytic: - self._repetitions = tf.constant( - [[repetitions for _ in range(len(operators))]], - dtype=tf.dtypes.int32) - - if not isinstance(backend, cirq.Sampler - ) and repetitions is not None and backend is not None: - raise TypeError("provided backend does not inherit cirq.Sampler " - "and repetitions!=None. Please provide a backend " - "that inherits cirq.Sampler or set " - "repetitions=None.") - - if not isinstance(backend, cirq.SimulatesFinalState - ) and repetitions is None and backend is not None: - raise TypeError("provided backend does not inherit " - "cirq.SimulatesFinalState and repetitions=None. " - "Please provide a backend that inherits " - "cirq.SimulatesFinalState.") - - # Ingest backend and differentiator. - if self._analytic: - self._layer = expectation.Expectation(backend=backend, - differentiator=differentiator) - else: - self._layer = sampled_expectation.SampledExpectation( - backend=backend, differentiator=differentiator) - - self._append_layer = elementary.AddCircuit() - - def call(self, inputs): - """Keras call function.""" - circuit_batch_dim = tf.gather(tf.shape(inputs[0]), 0) - tiled_up_model = tf.tile(self._circuit, [circuit_batch_dim]) - model_appended = self._append_layer(inputs[0], append=tiled_up_model) - tiled_up_operators = tf.tile(self._operators, [circuit_batch_dim, 1]) - - # this is disabled to make autograph compilation easier. 
- # pylint: disable=no-else-return - if self._analytic: - return self._layer(model_appended, - symbol_names=self._symbols, - symbol_values=inputs[1], - operators=tiled_up_operators) - else: - tiled_up_repetitions = tf.tile(self._repetitions, - [circuit_batch_dim, 1]) - return self._layer(model_appended, - symbol_names=self._symbols, - symbol_values=inputs[1], - operators=tiled_up_operators, - repetitions=tiled_up_repetitions) - - # pylint: enable=no-else-return diff --git a/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py b/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py deleted file mode 100644 index f61830bc4..000000000 --- a/tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2019 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Test module for tfq.python.layers.high_level.controlled_pqc layer.""" -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import cirq -import sympy - -from tensorflow_quantum.python.layers.high_level import controlled_pqc -from tensorflow_quantum.python import util - - -class ControlledPQCTest(tf.test.TestCase, parameterized.TestCase): - """Tests for the ControlledPQC layer.""" - - def test_controlled_pqc_instantiate(self): - """Basic creation test.""" - symbol = sympy.Symbol('alpha') - bit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(bit)**symbol) - controlled_pqc.ControlledPQC(learnable_flip, cirq.Z(bit)) - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - repetitions=500) - - def test_controlled_pqc_backend_error(self): - """Test that invalid backends error properly.""" - symbol = sympy.Symbol('alpha') - bit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(bit)**symbol) - - class MyState(cirq.SimulatesFinalState): - """My state simulator.""" - - def simulate_sweep(self): - """do nothing.""" - return - - class MySample(cirq.Sampler): - """My state simulator.""" - - def run_sweep(self): - """do nothing.""" - return - - with self.assertRaisesRegex(TypeError, - expected_regex="cirq.SimulatesFinalState"): - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - backend='junk') - - with self.assertRaisesRegex(TypeError, - expected_regex="cirq.SimulatesFinalState"): - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - repetitions=None, - backend=MySample) - - with self.assertRaisesRegex(TypeError, expected_regex="cirq.Sampler"): - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - repetitions=500, - backend=MyState) - - def test_controlled_pqc_model_circuit_error(self): - """Test that invalid circuits error properly.""" - bit = cirq.GridQubit(0, 0) - no_symbols = 
cirq.Circuit(cirq.X(bit)) - - with self.assertRaisesRegex(TypeError, expected_regex="cirq.Circuit"): - controlled_pqc.ControlledPQC('junk', cirq.Z(bit)) - - with self.assertRaisesRegex(ValueError, - expected_regex="no sympy.Symbols"): - controlled_pqc.ControlledPQC(no_symbols, cirq.Z(bit)) - - def test_controlled_pqc_operators_error(self): - """Test that invalid operators error properly.""" - symbol = sympy.Symbol('alpha') - bit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(bit)**symbol) - - with self.assertRaisesRegex( - TypeError, expected_regex="cirq.PauliSum or cirq.PauliString"): - controlled_pqc.ControlledPQC(learnable_flip, 'junk') - - with self.assertRaisesRegex(TypeError, expected_regex="Each element"): - controlled_pqc.ControlledPQC(learnable_flip, [[cirq.Z(bit)]]) - - with self.assertRaisesRegex(TypeError, expected_regex="Each element"): - controlled_pqc.ControlledPQC(learnable_flip, [cirq.Z(bit), 'bad']) - - def test_controlled_pqc_repetitions_error(self): - """Test that invalid repetitions error properly.""" - symbol = sympy.Symbol('alpha') - bit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(bit)**symbol) - - with self.assertRaisesRegex(ValueError, - expected_regex="greater than zero."): - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - repetitions=-100) - - with self.assertRaisesRegex(TypeError, - expected_regex="positive integer value"): - controlled_pqc.ControlledPQC(learnable_flip, - cirq.Z(bit), - repetitions='junk') - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(repetitions=[None, 5000], - backend=[None, cirq.Simulator()]))) - def test_controlled_pqc_simple_learn(self, backend, repetitions): - """Test a simple learning scenario using analytic and sample expectation - on many backends.""" - bit = cirq.GridQubit(0, 0) - circuit = \ - cirq.Circuit(cirq.Rx(sympy.Symbol('theta'))(bit)) - - inputs = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float32) - quantum_datum = 
tf.keras.Input(shape=(), dtype=tf.dtypes.string) - l1 = tf.keras.layers.Dense(10)(inputs) - l2 = tf.keras.layers.Dense(1)(l1) - outputs = controlled_pqc.ControlledPQC(circuit, - cirq.Z(bit), - repetitions=repetitions, - backend=backend)( - [quantum_datum, l2]) - model = tf.keras.Model(inputs=[quantum_datum, inputs], outputs=outputs) - - data_in = np.array([[1], [0]], dtype=np.float32) - data_out = np.array([[1], [-1]], dtype=np.float32) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.05), - loss=tf.keras.losses.mean_squared_error) - - data_circuits = util.convert_to_tensor( - [cirq.Circuit(cirq.X(bit)), - cirq.Circuit()]) - - history = model.fit(x=[data_circuits, data_in], y=data_out, epochs=30) - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/layers/high_level/pqc.py b/tensorflow_quantum/python/layers/high_level/pqc.py deleted file mode 100644 index b654e7322..000000000 --- a/tensorflow_quantum/python/layers/high_level/pqc.py +++ /dev/null @@ -1,287 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Module for tfq.python.layers.high_level.pqc layer.""" -import numbers -import numpy as np -import tensorflow as tf - -import cirq -from tensorflow_quantum.python.layers.circuit_executors import \ - expectation, sampled_expectation -from tensorflow_quantum.python.layers.circuit_construction import elementary -from tensorflow_quantum.python import util - - -class PQC(tf.keras.layers.Layer): - """Parametrized Quantum Circuit (PQC) Layer. - - This layer is for training parameterized quantum models. - Given a parameterized circuit, this layer initializes the parameters - and manages them in a Keras native way. - - We start by defining a simple quantum circuit on one qubit. - This circuit parameterizes an arbitrary rotation on the Bloch sphere in - terms of the three angles a, b, and c: - - - >>> q = cirq.GridQubit(0, 0) - >>> (a, b, c) = sympy.symbols("a b c") - >>> circuit = cirq.Circuit( - ... cirq.Rz(a)(q), - ... cirq.Rx(b)(q), - ... cirq.Rz(c)(q), - ... cirq.Rx(-b)(q), - ... cirq.Rz(-a)(q) - ... ) - - - In order to extract information from our circuit, we must apply measurement - operators. For now we choose to make a Z measurement. In order to observe - an output, we must also feed our model quantum data (NOTE: quantum data - means quantum circuits with no free parameters). Though the output values - will depend on the default random initialization of the angles in our model, - one will be the negative of the other since `cirq.X(q)` causes a bit flip: - - - >>> outputs = tfq.layers.PQC(circuit, cirq.Z(q)) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(q)) - ... 
]) - >>> res = outputs(quantum_data) - >>> res - - - - We can also choose to measure the three pauli matrices, sufficient to - fully characterize the operation of our model, or choose to simulate - sampled expectation values by specifying a number of measurement shots - (repetitions) to average over. Notice that using only 200 repetitions - introduces variation between the two rows of data, due to the - probabilistic nature of measurement. - - - >>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] - >>> outputs = tfq.layers.PQC(circuit, measurement, repetitions=200) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(q)) - ... ]) - >>> res = outputs(quantum_data) - >>> res - - - - A value for `backend` can also be supplied in the layer constructor - arguments to indicate which supported backend you would like to use. - A value for `differentiator` can also be supplied in the constructor - to indicate the differentiation scheme this `PQC` layer should use. - Here's how you would take the gradients of the above example using a - `cirq.Simulator` backend (which is slower than the default - `backend=None` which uses C++): - - - >>> q = cirq.GridQubit(0, 0) - >>> (a, b, c) = sympy.symbols("a b c") - >>> circuit = cirq.Circuit( - ... cirq.Rz(a)(q), - ... cirq.Rx(b)(q), - ... cirq.Rz(c)(q), - ... cirq.Rx(-b)(q), - ... cirq.Rz(-a)(q) - ... ) - >>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] - >>> outputs = tfq.layers.PQC( - ... circuit, - ... measurement, - ... repetitions=5000, - ... backend=cirq.Simulator(), - ... differentiator=tfq.differentiators.ParameterShift()) - >>> quantum_data = tfq.convert_to_tensor([ - ... cirq.Circuit(), - ... cirq.Circuit(cirq.X(q)) - ... ]) - >>> res = outputs(quantum_data) - >>> res - - - - Lastly, like all layers in TensorFlow the `PQC` layer can be called on any - `tf.Tensor` as long as it is the right shape. 
This means you could replace - replace `quantum_data` with values fed in from a `tf.keras.Input`. - """ - - def __init__( - self, - model_circuit, - operators, - *, - repetitions=None, - backend=None, - differentiator=None, - initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi), - regularizer=None, - constraint=None, - **kwargs, - ): - """Instantiate this layer. - - Create a layer that will output expectation values of the given - operators when fed quantum data to it's input layer. This layer will - accept one input tensor representing a quantum data source (these - circuits must not contain any symbols) and append the model_circuit to - them, execute them and then finally output the expectation values. - - - model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. - operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. - repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. - backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit either - `cirq.SimulatesFinalState` if analytic expectations are desired or - `cirq.Sampler` if sampled expectations are desired. - differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. - initializer: Optional `tf.keras.initializer` object to specify how the - symbols in `model_circuit` should be initialized when creating - the managed variables. - regularizer: Optional `tf.keras.regularizer` object applied to the - managed variables parameterizing `model_circuit`. 
- constraint: Optional `tf.keras.constraint` object applied to the - managed variables parameterizing `model_circuit`. - """ - super().__init__(**kwargs) - - # Ingest model_circuit. - if not isinstance(model_circuit, cirq.Circuit): - raise TypeError("model_circuit must be a cirq.Circuit object." - " Given: {}".format(model_circuit)) - self._symbols = tf.constant( - list(sorted(util.get_circuit_symbols(model_circuit)))) - self._model_circuit = util.convert_to_tensor([model_circuit]) - if len(self._symbols) == 0: - raise ValueError("model_circuit has no sympy.Symbols. Please " - "provide a circuit that contains symbols so " - "that their values can be trained.") - - # Ingest operators. - if isinstance(operators, (cirq.PauliString, cirq.PauliSum)): - operators = [operators] - if not isinstance(operators, (list, np.ndarray, tuple)): - raise TypeError("operators must be a cirq.PauliSum or " - "cirq.PauliString, or a list, tuple, " - "or np.array containing them. " - "Got {}.".format(type(operators))) - if not all([ - isinstance(op, (cirq.PauliString, cirq.PauliSum)) - for op in operators - ]): - raise TypeError("Each element in operators to measure " - "must be a cirq.PauliString" - " or cirq.PauliSum") - self._operators = util.convert_to_tensor([operators]) - - # Ingest and promote repetitions. - self._analytic = False - if repetitions is None: - self._analytic = True - if not self._analytic and not isinstance(repetitions, numbers.Integral): - raise TypeError("repetitions must be a positive integer value." - " Given: ".format(repetitions)) - if not self._analytic and repetitions <= 0: - raise ValueError("Repetitions must be greater than zero.") - if not self._analytic: - self._repetitions = tf.constant( - [[repetitions for _ in range(len(operators))]], - dtype=tf.dtypes.int32) - - # Set backend and differentiator. 
- if not isinstance(backend, cirq.Sampler - ) and repetitions is not None and backend is not None: - raise TypeError("provided backend does not inherit cirq.Sampler " - "and repetitions!=None. Please provide a backend " - "that inherits cirq.Sampler or set " - "repetitions=None.") - if not isinstance(backend, cirq.SimulatesFinalState - ) and repetitions is None and backend is not None: - raise TypeError("provided backend does not inherit " - "cirq.SimulatesFinalState and repetitions=None. " - "Please provide a backend that inherits " - "cirq.SimulatesFinalState or choose a positive " - "number of repetitions.") - if self._analytic: - self._executor = expectation.Expectation( - backend=backend, differentiator=differentiator) - else: - self._executor = sampled_expectation.SampledExpectation( - backend=backend, differentiator=differentiator) - - self._append_layer = elementary.AddCircuit() - - # Set additional parameter controls. - self.initializer = tf.keras.initializers.get(initializer) - self.regularizer = tf.keras.regularizers.get(regularizer) - self.constraint = tf.keras.constraints.get(constraint) - - # Weight creation is not placed in a Build function because the number - # of weights is independent of the input shape. 
- self.parameters = self.add_weight('parameters', - shape=[len(self._symbols)], - initializer=self.initializer, - regularizer=self.regularizer, - constraint=self.constraint, - dtype=tf.float32, - trainable=True) - - def build(self, input_shape): - """Keras build function.""" - super().build(input_shape) - - def call(self, inputs): - """Keras call function.""" - circuit_batch_dim = tf.gather(tf.shape(inputs), 0) - tiled_up_model = tf.tile(self._model_circuit, [circuit_batch_dim]) - model_appended = self._append_layer(inputs, append=tiled_up_model) - tiled_up_parameters = tf.tile([self.parameters], [circuit_batch_dim, 1]) - tiled_up_operators = tf.tile(self._operators, [circuit_batch_dim, 1]) - - # this is disabled to make autograph compilation easier. - # pylint: disable=no-else-return - if self._analytic: - return self._executor(model_appended, - symbol_names=self._symbols, - symbol_values=tiled_up_parameters, - operators=tiled_up_operators) - else: - tiled_up_repetitions = tf.tile(self._repetitions, - [circuit_batch_dim, 1]) - return self._executor(model_appended, - symbol_names=self._symbols, - symbol_values=tiled_up_parameters, - operators=tiled_up_operators, - repetitions=tiled_up_repetitions) - # pylint: enable=no-else-return diff --git a/tensorflow_quantum/python/layers/high_level/pqc_test.py b/tensorflow_quantum/python/layers/high_level/pqc_test.py deleted file mode 100644 index 9e6b8778a..000000000 --- a/tensorflow_quantum/python/layers/high_level/pqc_test.py +++ /dev/null @@ -1,197 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Test module for tfq.python.layers.high_level.pqc layer.""" -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import cirq -import sympy - -from tensorflow_quantum.python.layers.high_level import pqc -from tensorflow_quantum.python import util - - -class PQCTest(tf.test.TestCase, parameterized.TestCase): - """Tests for the PQC layer.""" - - def test_pqc_instantiate(self): - """Basic creation test.""" - symbol = sympy.Symbol('alpha') - qubit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol) - pqc.PQC(learnable_flip, cirq.Z(qubit)) - pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=500) - - def test_pqc_model_circuit_error(self): - """Test that invalid circuits error properly.""" - qubit = cirq.GridQubit(0, 0) - no_symbols = cirq.Circuit(cirq.X(qubit)) - - with self.assertRaisesRegex( - TypeError, - expected_regex="model_circuit must be a cirq.Circuit"): - pqc.PQC('junk', cirq.Z(qubit)) - - with self.assertRaisesRegex( - ValueError, - expected_regex="model_circuit has no sympy.Symbols"): - pqc.PQC(no_symbols, cirq.Z(qubit)) - - def test_pqc_operators_error(self): - """Test that invalid operators error properly.""" - symbol = sympy.Symbol('alpha') - qubit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol) - - with self.assertRaisesRegex( - TypeError, expected_regex="cirq.PauliSum or cirq.PauliString"): - pqc.PQC(learnable_flip, 'junk') - - with self.assertRaisesRegex(TypeError, 
expected_regex="Each element"): - pqc.PQC(learnable_flip, [[cirq.Z(qubit)]]) - - with self.assertRaisesRegex(TypeError, expected_regex="Each element"): - pqc.PQC(learnable_flip, [cirq.Z(qubit), 'bad']) - - def test_pqc_repetitions_error(self): - """Test that invalid repetitions error properly.""" - symbol = sympy.Symbol('alpha') - qubit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol) - - with self.assertRaisesRegex(TypeError, - expected_regex="positive integer value"): - pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions='junk') - - with self.assertRaisesRegex(ValueError, - expected_regex="greater than zero."): - pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=-100) - - with self.assertRaisesRegex(ValueError, - expected_regex="greater than zero."): - pqc.PQC(learnable_flip, cirq.Z(qubit), repetitions=0) - - def test_pqc_backend_error(self): - """Test that invalid backends error properly.""" - symbol = sympy.Symbol('alpha') - qubit = cirq.GridQubit(0, 0) - learnable_flip = cirq.Circuit(cirq.X(qubit)**symbol) - - class MyState(cirq.SimulatesFinalState): - """My state simulator.""" - - def simulate_sweep(self): - """do nothing.""" - return - - class MySample(cirq.Sampler): - """My state simulator.""" - - def run_sweep(self): - """do nothing.""" - return - - with self.assertRaisesRegex(TypeError, expected_regex="cirq.Sampler"): - pqc.PQC(learnable_flip, - cirq.Z(qubit), - backend=MyState, - repetitions=500) - - with self.assertRaisesRegex(TypeError, - expected_regex="cirq.SimulatesFinalState"): - pqc.PQC(learnable_flip, - cirq.Z(qubit), - backend=MySample, - repetitions=None) - - def test_pqc_initializer(self): - """Test action of initializer.""" - (a, b, c) = sympy.symbols("a b c") - qubit = cirq.GridQubit(0, 0) - three_parameters = cirq.Circuit( - [cirq.X(qubit)**a, - cirq.Y(qubit)**b, - cirq.Z(qubit)**c]) - mpqc_zeros = pqc.PQC(three_parameters, - cirq.Z(qubit), - initializer='zeros') - mpqc_ones = pqc.PQC(three_parameters, 
cirq.Z(qubit), initializer='ones') - self.assertAllEqual([[0, 0, 0]], mpqc_zeros.get_weights()) - self.assertAllEqual([[1, 1, 1]], mpqc_ones.get_weights()) - - def test_pqc_regularizer(self): - """Test attachment of regularizer to layer.""" - (a, b, c) = sympy.symbols("a b c") - qubit = cirq.GridQubit(0, 0) - three_parameters = cirq.Circuit( - [cirq.X(qubit)**a, - cirq.Y(qubit)**b, - cirq.Z(qubit)**c]) - mpqc = pqc.PQC(three_parameters, cirq.Z(qubit)) - mpqc_r = pqc.PQC(three_parameters, cirq.Z(qubit), regularizer='l2') - self.assertEqual(0, len(mpqc.losses)) - self.assertEqual(1, len(mpqc_r.losses)) - - def test_pqc_constraint(self): - """Test attachment of constraint to layer.""" - my_constraint = tf.keras.constraints.NonNeg() - (a, b, c) = sympy.symbols("a b c") - qubit = cirq.GridQubit(0, 0) - three_parameters = cirq.Circuit( - [cirq.X(qubit)**a, - cirq.Y(qubit)**b, - cirq.Z(qubit)**c]) - mpqc = pqc.PQC(three_parameters, - cirq.Z(qubit), - constraint=my_constraint) - self.assertEqual(my_constraint, mpqc.parameters.constraint) - - @parameterized.parameters( - list( - util.kwargs_cartesian_product(backend=[None, cirq.Simulator()], - repetitions=[None, 5000]))) - def test_pqc_simple_learn(self, backend, repetitions): - """Test a simple learning scenario using analytic and sample expectation - on many backends.""" - qubit = cirq.GridQubit(0, 0) - circuit = cirq.Circuit(cirq.X(qubit)**sympy.Symbol('bit')) - - quantum_datum = tf.keras.Input(shape=(), dtype=tf.dtypes.string) - mpqc = pqc.PQC(circuit, - cirq.Z(qubit), - backend=backend, - repetitions=repetitions, - initializer=tf.keras.initializers.Constant(value=0.5)) - outputs = mpqc(quantum_datum) - model = tf.keras.Model(inputs=quantum_datum, outputs=outputs) - - model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.03), - loss=tf.keras.losses.mean_squared_error) - - data_circuits = util.convert_to_tensor( - [cirq.Circuit(cirq.X(qubit)), - cirq.Circuit()]) - print(data_circuits) - data_out = 
np.array([[1], [-1]], dtype=np.float32) - - # Model should learn to flip the qubit - self.assertNear(mpqc.get_weights()[0][0], 0.5, 1e-1) - history = model.fit(x=data_circuits, y=data_out, epochs=40) - self.assertAllClose(history.history['loss'][-1], 0, atol=1e-1) - self.assertNear(mpqc.get_weights()[0][0], 1, 1e-1) - - -if __name__ == "__main__": - tf.test.main() diff --git a/tensorflow_quantum/python/operators/__init__.py b/tensorflow_quantum/python/operators/__init__.py deleted file mode 100644 index bdbbd7a51..000000000 --- a/tensorflow_quantum/python/operators/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== diff --git a/tensorflow_quantum/python/optimizers/__init__.py b/tensorflow_quantum/python/optimizers/__init__.py deleted file mode 100644 index bf5b48863..000000000 --- a/tensorflow_quantum/python/optimizers/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== \ No newline at end of file diff --git a/tensorflow_quantum/python/util.py b/tensorflow_quantum/python/util.py deleted file mode 100644 index 1cc8473f3..000000000 --- a/tensorflow_quantum/python/util.py +++ /dev/null @@ -1,518 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""A collection of helper functions that are useful several places in tfq.""" -import random -import itertools - -import numpy as np -import sympy -import tensorflow as tf -import cirq - -from tensorflow_quantum.core.proto import pauli_sum_pb2 -from tensorflow_quantum.core.serialize import serializer - - -def get_supported_gates(): - """A helper to get the gates supported by tfq.""" - supported_gates = serializer.SERIALIZER.supported_gate_types() - gate_arity_mapping_dict = dict() - for gate in supported_gates: - if gate is cirq.IdentityGate: - g_num_qubits = 1 - g = gate(num_qubits=1) - elif gate is cirq.FSimGate: - g_num_qubits = 2 - g = gate(theta=0.123, phi=0.456) - elif gate in serializer.PHASED_EIGEN_GATES_DICT: - g = gate(phase_exponent=0.123) - g_num_qubits = g.num_qubits() - else: - g = gate() - g_num_qubits = gate().num_qubits() - gate_arity_mapping_dict[g] = g_num_qubits - return gate_arity_mapping_dict - - -def random_symbol_circuit(qubits, - symbols, - n_moments=15, - p=0.9, - include_scalars=True): - """Generate a random circuit including some parameterized gates.""" - supported_gates = get_supported_gates() - circuit = cirq.testing.random_circuit(qubits, n_moments, p, supported_gates) - - for i in range(len(circuit)): - if np.random.random() < p: - op = random.choice(list(supported_gates.keys())) - n_qubits = supported_gates[op] - locs = tuple(random.sample(qubits, n_qubits)) - if isinstance(op, cirq.IdentityGate): - circuit[:i] += op.on(*locs) - else: - circuit[:i] += ( - op**((np.random.random() if include_scalars else 1.0) * - sympy.Symbol(np.random.choice(symbols)))).on(*locs) - return circuit - - -def random_circuit_resolver_batch(qubits, batch_size, n_moments=15, p=0.9): - """Generate a batch of random circuits and symbolless resolvers.""" - return_circuits = [] - return_resolvers = [] - for _ in range(batch_size): - return_circuits.append( - 
cirq.testing.random_circuit(qubits, n_moments, p, - get_supported_gates())) - return_resolvers.append(cirq.ParamResolver({})) - - return return_circuits, return_resolvers - - -def random_symbol_circuit_resolver_batch(qubits, - symbols, - batch_size, - n_moments=15, - p=0.9, - include_scalars=True): - """Generate a batch of random circuits and resolvers.""" - return_circuits = [] - return_resolvers = [] - for _ in range(batch_size): - return_circuits.append( - random_symbol_circuit(qubits, symbols, n_moments, p, - include_scalars)) - - return_resolvers.append( - cirq.ParamResolver( - {symbol: np.random.random() for symbol in symbols})) - - return return_circuits, return_resolvers - - -def random_pauli_sums(qubits, max_sum_length, n_sums): - """Generate a list of random cirq pauli sums of length |n_sums|.""" - sums = [] - paulis = [cirq.I, cirq.X, cirq.Y, cirq.Z] - for _ in range(n_sums): - this_sum_length = np.random.randint(1, max_sum_length + 1) - terms = [] - for _ in range(this_sum_length): - term_length = np.random.randint(1, len(qubits) + 1) - this_term_qubits = random.sample(qubits, term_length) - this_term_paulis = \ - [random.sample(paulis,1)[0] for _ in range(term_length)] - terms.append( - cirq.PauliString(dict(zip(this_term_qubits, this_term_paulis)))) - sums.append(cirq.PauliSum.from_pauli_strings(terms)) - return sums - - -def convert_to_tensor(items_to_convert): - """Convert lists of tfq supported primitives to tensor representations. - - Recursively convert a nested lists of `cirq.PauliSum` or `cirq.Circuit` - objects to a `tf.Tensor` representation. Note that cirq serialization only - supports `cirq.GridQubit`s so we also require that input circuits and - pauli sums are defined only on `cirq.GridQubit`s. - - - >>> my_qubits = cirq.GridQubit.rect(1, 2) - >>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), - ... cirq.Circuit(cirq.Z(my_qubits[0])) - ... 
] - >>> tensor_input = tfq.convert_to_tensor(my_circuits) - >>> # Now tensor_input can be used as model input etc. - >>> same_circuits = tfq.from_tensor(tensor_input) - >>> # same_circuits now holds cirq.Circuit objects once more. - >>> same_circuits - [cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), - ]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - ])] - - Args: - items_to_convert: Python `list` or nested `list` of `cirq.Circuit` - or `cirq.Paulisum` objects. Should be rectangular, or this function - will error. - - Returns: - `tf.Tensor` that represents the input items. - """ - - # We use recursion here because np.ndenumerate tries to loop over - # `cirq.Circuit`s and `cirq.PauliSum`s (they are iterable). - # This code is safe for nested lists of depth less than the recursion limit, - # which is deeper than any practical use the author can think of. - def recur(items_to_convert, curr_type=None): - tensored_items = [] - for item in items_to_convert: - if isinstance(item, (list, np.ndarray, tuple)): - tensored_items.append(recur(item, curr_type)) - elif isinstance(item, (cirq.PauliSum, cirq.PauliString)) and\ - not curr_type == cirq.Circuit: - curr_type = cirq.PauliSum - tensored_items.append( - serializer.serialize_paulisum(item).SerializeToString()) - elif isinstance(item, cirq.Circuit) and\ - not curr_type == cirq.PauliSum: - curr_type = cirq.Circuit - tensored_items.append( - serializer.serialize_circuit(item).SerializeToString()) - else: - raise TypeError("Incompatible item passed into " - " convert_to_tensor. Tensor detected type: {}." 
- " got: {}".format(curr_type, type(item))) - return tensored_items - - # This will catch impossible dimensions - return tf.convert_to_tensor(recur(items_to_convert)) - - -def _parse_single(item): - try: - if b'tfq_gate_set' in item: - # Return a circuit parsing - obj = cirq.google.api.v2.program_pb2.Program() - obj.ParseFromString(item) - out = serializer.deserialize_circuit(obj) - return out - - # Return a PauliSum parsing. - obj = pauli_sum_pb2.PauliSum() - obj.ParseFromString(item) - out = serializer.deserialize_paulisum(obj) - return out - except Exception: - raise TypeError('Error decoding item: ' + str(item)) - - -def from_tensor(tensor_to_convert): - """Convert a tensor of tfq primitives back to Python objects. - - Convert a tensor representing `cirq.PauliSum` or `cirq.Circuit` - objects back to Python objects. - - - >>> my_qubits = cirq.GridQubit.rect(1, 2) - >>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), - ... cirq.Circuit(cirq.Z(my_qubits[0])) - ... ] - >>> tensor_input = tfq.convert_to_tensor(my_circuits) - >>> # Now tensor_input can be used as model input etc. - >>> same_circuits = tfq.from_tensor(tensor_input) - >>> # same_circuits now holds cirq.Circuit objects once more. - >>> same_circuits - [cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), - ]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - ])] - - Args: - tensor_to_convert: `tf.Tensor` or `np.ndarray` representation to - convert back into python objects. - - Returns: - Python `list` of items converted to their python representation stored - in a (potentially nested) `list`. 
- """ - if isinstance(tensor_to_convert, tf.Tensor): - tensor_to_convert = tensor_to_convert.numpy() - if not isinstance(tensor_to_convert, (np.ndarray, list, tuple)): - raise TypeError("tensor_to_convert recieved bad " - "type {}".format(type(tensor_to_convert))) - tensor_to_convert = np.array(tensor_to_convert) - python_items = np.empty(tensor_to_convert.shape, dtype=object) - curr_type = None - for index, item in np.ndenumerate(tensor_to_convert): - found_item = _parse_single(item) - got_type = type(found_item) - if (curr_type is not None) and (not got_type == curr_type): - raise TypeError("from_tensor expected to find a tensor containing" - " elements of a single type.") - curr_type = got_type - python_items[index] = found_item - return python_items - - -def kwargs_cartesian_product(**kwargs): - """Compute the cartesian product of inputs yielding Python `dict`s. - - Note that all kwargs must provide `iterable` values. Useful for testing - purposes. - - ```python - a = {'one': [1,2,3], 'two': [4,5]} - result = list(kwargs_cartesian_product(**a)) - - # Result now contains: - # [{'one': 1, 'two': 4}, - # {'one': 1, 'two': 5}, - # {'one': 2, 'two': 4}, - # {'one': 2, 'two': 5}, - # {'one': 3, 'two': 4}, - # {'one': 3, 'two': 5}] - ``` - - Returns: - Python `generator` of the cartesian product of the inputs `kwargs`. - """ - keys = kwargs.keys() - vals = kwargs.values() - for (k, v) in zip(keys, vals): - # Only reliable way to check for __iter__ and __getitem__ - try: - _ = iter(v) - except TypeError: - raise ValueError(f'Value for argument {k} is not iterable.' 
- f' Got {v}.') - - for instance in itertools.product(*vals): - yield dict(zip(keys, instance)) - - -def _symbols_in_op(op): - """Returns the set of symbols in a parameterized gate.""" - if isinstance(op, cirq.EigenGate): - return op.exponent.free_symbols - - if isinstance(op, cirq.FSimGate): - ret = set() - if isinstance(op.theta, sympy.Basic): - ret |= op.theta.free_symbols - if isinstance(op.phi, sympy.Basic): - ret |= op.phi.free_symbols - return ret - - if isinstance(op, cirq.PhasedXPowGate): - ret = set() - if isinstance(op.exponent, sympy.Basic): - ret |= op.exponent.free_symbols - if isinstance(op.phase_exponent, sympy.Basic): - ret |= op.phase_exponent.free_symbols - return ret - - raise ValueError("Attempted to scan for symbols in circuit with unsupported" - " ops inside. Expected op found in tfq.get_supported_gates" - " but found: ".format(str(op))) - - -def get_circuit_symbols(circuit): - """Returns a list of the sympy.Symbols that are present in `circuit`. - - Args: - circuit: A `cirq.Circuit` object. - - Returns: - Python `list` containing the symbols found in the circuit. - """ - all_symbols = set() - for moment in circuit: - for op in moment: - if cirq.is_parameterized(op): - all_symbols |= _symbols_in_op(op.gate) - return [str(x) for x in all_symbols] - - -def _many_clifford_to_many_z(pauli_sum): - """Convert many clifford to many Z. - Returns the gate set required for transforming an arbitrary tensor product - of paulis into a product of all pauli -Z's. - Args: - pauli_sum: `cirq.PauliSum` object to be converted to all z's. - Returns: - gate_list: List of required gates to complete the transformation. 
- conjugate_list: List of gates, but reversed and complex conjugate - applied to each rotation gate - """ - # TODO(jaeyoo): investigate how to apply cirq.PauliString.to_z_basis_ops - gate_list = [] - # Hermitian conjugate - conjugate_list = [] - for qubit, pauli in pauli_sum.items(): - if isinstance(pauli, cirq.ZPowGate): - continue - elif isinstance(pauli, cirq.XPowGate): - gate_list.append(cirq.H(qubit)) - conjugate_list.append(cirq.H(qubit)) - elif isinstance(pauli, cirq.YPowGate): - # It is identical to the conjugate of Phase and Hadamard gate up to - # global phase. This global phase difference is gone with - # multiplication of hermition conjugate later. - gate_list.append(cirq.Rx(np.pi / 2)(qubit)) - conjugate_list.append(cirq.Rx(-np.pi / 2)(qubit)) - return gate_list, conjugate_list[::-1] - - -def _many_z_to_single_z(focal_qubit, pauli_sum): - """Convert many Z's to single Z. - Returns the gate set required for transforming an arbitrary tensor product - of pauli-z's into a product of all identites and a single pauli-Z. - Args: - focal_qubit: central qubit among CNOT gates. - pauli_sum: `cirq.PauliSum` object to be converted to CNOT's and Z. - Returns: - gate_list: List of the required CNOT gates for this conversion. - gate_list_reversed: List of the same CNOT gates, but in reverse. - """ - gate_list = [] - for q in pauli_sum.qubits: - if q != focal_qubit: - gate_list.append(cirq.CNOT(q, focal_qubit)) - return gate_list, gate_list[::-1] - - -def check_commutability(pauli_sum): - """Return False if at least one pair of terms in pauli_sum is not - commutable. - - Args: - pauli_sum: `cirq.PauliSum` object to be checked if all of terms inside - are commutable each other. 
- """ - for term1 in pauli_sum: - for term2 in pauli_sum: - if not term1.commutes_with(term2): - raise ValueError("Given an operator has non-commutable " - "terms, whose exponentiation is not " - "supported yet: {} and {}".format( - term1, term2)) - - -def exp_identity(param, c, zeroth_qubit): - """Return a circuit for exponentiating an identity gate.""" - # TODO(jaeyoo): Reduce the number of gates for this decomposition. - phase_shift = cirq.ZPowGate(exponent=-param * c / np.pi).on(zeroth_qubit) - exp_circuit = cirq.Circuit( - [cirq.X(zeroth_qubit), phase_shift, - cirq.X(zeroth_qubit), phase_shift]) - return exp_circuit - - -def exponential(operators, coefficients=None): - """Return a Cirq circuit with exponential forms of operators. - - Construct an exponential form of given `operators` and `coefficients`. - Operators to be exponentiated are specified in `operators` as - `cirq.PauliSum` or `cirq.PauliString`. Parameters are given by - `coefficients`. - - Note that only operators whose standard representations consist of terms - which all commute can be exponentiated. This allows use of the identity - exp(A+B+...) = exp(A)exp(B)... else there would need to be automatic - handling of Trotterization and convergence, which is not supported yet. - - Args: - operators: Python `list` of `cirq.PauliSum` or `cirq.PauliString` object - to be exponentiated. Here are simple examples. - Let q = cirq.GridQubit(0, 0) - E.g. operator = 0.5 * X(q) -> exp(-i * 0.5 * X(q)) - operator = 0.5 * cirq.PauliString({q: cirq.I}) - -> exp(-i * 0.5)*np.eye(2) - Be careful of the negation and the PauliString of the identity gate. - coefficients: (Optional) Python `list` of Python `str`, `float` or - `sympy.Symbol` object of parameters. Defaults to None, then all - coefficients of `operators` are set to 1.0. - Returns: - A `cirq.Circuit` containing exponential form of given `operators` - and `coefficients`. - """ - # Ingest operators. 
- if not isinstance(operators, (list, tuple, np.ndarray)): - raise TypeError("operators is not a list of operators.") - - if not all( - isinstance(x, (cirq.PauliSum, cirq.PauliString)) - for x in operators): - raise TypeError("Each element in coefficients must be a float or a " - "cirq.PauliSum or cirq.PauliString object.") - - # Ingest coefficients. - if coefficients is None: - coefficients = [1.0 for _ in operators] - - if not isinstance(coefficients, (list, tuple, np.ndarray)): - raise TypeError("coefficients is not a list of coefficients.") - - if not all(isinstance(x, (str, sympy.Symbol, float)) for x in coefficients): - raise TypeError("Each element in coefficients" - " must be a float or a string or sympy.Symbol.") - - if len(coefficients) != len(operators): - raise ValueError("the number of operators should be the same as that " - "of coefficients. Got {} operators and {} coefficients" - "".format(len(operators), len(coefficients))) - - coefficients = [ - sympy.Symbol(s) if isinstance(s, str) else s - for i, s in enumerate(coefficients) - ] - - circuit = cirq.Circuit() - - operators = [ - cirq.PauliSum.from_pauli_strings(ps) if isinstance( - ps, cirq.PauliString) else ps for ps in operators - ] - - qubit_set = {q for psum in operators for q in psum.qubits} - identity_ref_qubit = cirq.GridQubit(0, 0) - if len(qubit_set) > 0: - identity_ref_qubit = sorted(list(qubit_set))[0] - - for param, pauli_sum in zip(coefficients, operators): - if isinstance(pauli_sum, cirq.PauliSum): - check_commutability(pauli_sum) - for op in pauli_sum: - if abs(op.coefficient.imag) > 1e-9: - raise ValueError('exponential only supports real ' - 'coefficients: got ' - '{}'.format(op.coefficient)) - # Create a circuit with exponentiating `op` with param - c = op.coefficient.real - if len(op.gate.pauli_mask) == 0: - # If given gate_op is identity. 
- circuit += exp_identity(param, c, identity_ref_qubit) - continue - - # Where to perform the Rz gate based on difficulty of CNOT's - # TODO(jaeyoo): will write a super duper optimization on this. - # currently going on HIGHEST-indexed qubit. - k = op.qubits[-1] - # Set of gates to convert all X's and Y's -> Z's. - u, u_dagger = _many_clifford_to_many_z(op) - - # Set of gates to convert many Z's into a single Z using CNOTs. - w, w_dagger = _many_z_to_single_z(k, op) - - # cirq.Rz(2*theta) = exp(-i*0.5*(2*theta)*Z) == exp(-i*theta*Z) - # focal point of the CNOT ladder. - exp_circuit = u + w + [cirq.Rz(2 * param * c)(k) - ] + w_dagger + u_dagger - circuit += cirq.Circuit(exp_circuit) - return circuit diff --git a/tensorflow_quantum/python/util_test.py b/tensorflow_quantum/python/util_test.py deleted file mode 100644 index 8331b54e4..000000000 --- a/tensorflow_quantum/python/util_test.py +++ /dev/null @@ -1,362 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for TFQ utilities.""" -import numpy as np -import tensorflow as tf -from absl.testing import parameterized -import sympy - -import cirq -from tensorflow_quantum.core.serialize import serializer -from tensorflow_quantum.python import util - - -def _single_to_tensor(item): - if not isinstance(item, (cirq.PauliSum, cirq.PauliString, cirq.Circuit)): - raise TypeError("Item must be a Circuit or PauliSum. Got {}.".format( - type(item))) - if isinstance(item, (cirq.PauliSum, cirq.PauliString)): - return serializer.serialize_paulisum(item).SerializeToString() - return serializer.serialize_circuit(item).SerializeToString() - - -BITS = list(cirq.GridQubit.rect(1, 10)) - - -def _items_to_tensorize(): - """Objects on which convert_to_tensor convert_from_tensor will be tested.""" - return [{ - 'item': x - } for x in (util.random_pauli_sums(BITS, 5, 5) + [ - cirq.PauliSum.from_pauli_strings([ - cirq.PauliString(), - cirq.PauliString(cirq.Z(cirq.GridQubit(0, 0))) - ]) - ] + [cirq.PauliString(), cirq.PauliString()] + [cirq.Circuit()] + [ - cirq.testing.random_circuit(BITS, 25, 0.9, util.get_supported_gates()) - for _ in range(5) - ])] - - -class UtilFunctionsTest(tf.test.TestCase, parameterized.TestCase): - """Test that utility functions work.""" - - def test_get_supported_gates(self): - """Confirm one of every gate is returned.""" - mapping_1 = util.get_supported_gates() - self.assertEqual(len(mapping_1.keys()), - len(serializer.SERIALIZER.supported_gate_types())) - - @parameterized.parameters(_items_to_tensorize()) - def test_convert_to_tensor(self, item): - """Test that the convert_to_tensor function works correctly by manually - serializing flat and 2-deep nested lists of Circuits and PauliSums.""" - nested = [[item, item]] * 2 - nested_actual = util.convert_to_tensor(nested) - nested_expected = np.array( - [np.array([_single_to_tensor(x) for x in row]) for row in nested]) - 
self.assertAllEqual(nested_actual, nested_expected) - flat = [item, item] - flat_actual = util.convert_to_tensor(flat) - flat_expected = np.array([_single_to_tensor(x) for x in flat]) - self.assertAllEqual(flat_actual, flat_expected) - - def test_convert_to_tensor_errors(self): - """Test that convert_to_tensor fails when it should.""" - with self.assertRaisesRegex(TypeError, expected_regex="Incompatible"): - util.convert_to_tensor("junk") - with self.assertRaisesRegex(TypeError, expected_regex="Incompatible"): - util.convert_to_tensor([1, cirq.Circuit()]) - with self.assertRaisesRegex(ValueError, - expected_regex='non-rectangular'): - util.convert_to_tensor([[cirq.Circuit()], cirq.Circuit()]) - with self.assertRaisesRegex(TypeError, expected_regex="Incompatible"): - util.convert_to_tensor( - [cirq.Circuit(), - cirq.X(BITS[0]) + cirq.Y(BITS[1])]) - - @parameterized.parameters(_items_to_tensorize()) - def test_from_tensor(self, item): - """Check from_tensor assuming convert_to_tensor works.""" - - item_nested_tensorized = util.convert_to_tensor([[item, item], - [item, item]]) - item_flat_tensorized = util.convert_to_tensor([item, item]) - item_nested_cycled = util.convert_to_tensor( - util.from_tensor(item_nested_tensorized)) - - self.assertAllEqual(item_nested_tensorized, item_nested_cycled) - item_flat_cycled = util.convert_to_tensor( - util.from_tensor(item_flat_tensorized)) - self.assertAllEqual(item_flat_tensorized, item_flat_cycled) - - def test_from_tensor_errors(self): - """test that from_tensor fails when it should.""" - with self.assertRaisesRegex(TypeError, - expected_regex='Error decoding item'): - util.from_tensor( - tf.convert_to_tensor([ - 'bad', - serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() - ])) - with self.assertRaisesRegex(TypeError, - expected_regex='Error decoding item'): - util.from_tensor( - tf.convert_to_tensor([ - serializer.serialize_circuit( - cirq.Circuit()).SerializeToString() + b'bad' - ])) - with 
self.assertRaisesRegex(TypeError, expected_regex='single type'): - util.from_tensor( - tf.convert_to_tensor([ - serializer.serialize_circuit( - cirq.Circuit()).SerializeToString(), - serializer.serialize_paulisum( - cirq.X(BITS[0]) + cirq.Y(BITS[1])).SerializeToString() - ])) - with self.assertRaisesRegex(TypeError, - expected_regex='recieved bad type'): - util.from_tensor("junk") - - def test_cartesian_product(self): - """Ensure cartesian_product works. inputs are any iterable you want.""" - result1 = list(util.kwargs_cartesian_product(a=[1, 2], b='hi')) - self.assertEqual(result1, [{ - 'a': 1, - 'b': 'h' - }, { - 'a': 1, - 'b': 'i' - }, { - 'a': 2, - 'b': 'h' - }, { - 'a': 2, - 'b': 'i' - }]) - - result2 = list( - util.kwargs_cartesian_product(**{ - 'one': [1, 2, 3], - 'two': [4, 5] - })) - self.assertEqual(result2, [{ - 'one': 1, - 'two': 4 - }, { - 'one': 1, - 'two': 5 - }, { - 'one': 2, - 'two': 4 - }, { - 'one': 2, - 'two': 5 - }, { - 'one': 3, - 'two': 4 - }, { - 'one': 3, - 'two': 5 - }]) - - with self.assertRaisesRegex(ValueError, expected_regex='not iterable'): - list(util.kwargs_cartesian_product(a=[1, 2], b=-1)) - - def test_get_circuit_symbols(self): - """Test that symbols can be extracted from circuits. - This test will error out if get_supported_gates gets updated with new - gates and the get_circuit function isn't updated. 
- """ - expected_symbols = ['alpha', 'beta', 'gamma', 'omega'] - qubits = cirq.GridQubit.rect(1, 20) - n_moments = 200 - for _ in range(5): - test_circuit = util.random_symbol_circuit(qubits, expected_symbols, - n_moments) - extracted_symbols = util.get_circuit_symbols(test_circuit) - self.assertListEqual(sorted(extracted_symbols), - sorted(expected_symbols)) - - -class ExponentialUtilFunctionsTest(tf.test.TestCase): - """Test that Exponential utility functions work.""" - - def test_exponential_error(self): - """Test exponential failed when it should""" - test_paulistring = cirq.X(cirq.GridQubit(0, 0)) - test_paulisum = cirq.X(cirq.GridQubit(0, 0)) + cirq.Z( - cirq.GridQubit(0, 1)) - - # operators - with self.assertRaisesRegex(TypeError, expected_regex='not a list'): - util.exponential(operators='junk') - with self.assertRaisesRegex(TypeError, expected_regex='PauliString'): - util.exponential(operators=['junk']) - util.exponential(operators=[test_paulistring, 'junk']) - util.exponential( - operators=[test_paulistring, test_paulisum, 'junk']) - - util.exponential(operators=[test_paulistring, test_paulisum]) - - # coefficients - with self.assertRaisesRegex(TypeError, expected_regex='not a list'): - util.exponential(operators=[], coefficients='junk') - - with self.assertRaisesRegex(TypeError, expected_regex='Each element'): - util.exponential(operators=[test_paulistring], coefficients=[None]) - util.exponential(operators=[test_paulistring, test_paulisum], - coefficients=[1.0, None]) - - util.exponential( - operators=[test_paulistring, test_paulisum, test_paulisum], - coefficients=[1.0, 'test', sympy.Symbol('test')]) - - with self.assertRaisesRegex(ValueError, - expected_regex='should be the same as'): - util.exponential(operators=[test_paulistring], coefficients=[]) - - def test_many_clifford_to_many_z(self): - """Confirm correct basis transformations of input PauliSums.""" - q = cirq.GridQubit.rect(1, 4) - test_term = 0.2277 * cirq.Z(q[1]) * cirq.X(q[2]) * 
cirq.Y(q[3]) - test_basis_gates = [cirq.H(q[2]), cirq.Rx(np.pi / 2)(q[3])] - test_conj_gates = [cirq.Rx(-np.pi / 2)(q[3]), cirq.H(q[2])] - - gate_list, conj_gate_list = util._many_clifford_to_many_z(test_term) - self.assertEqual(gate_list, test_basis_gates) - self.assertEqual(conj_gate_list, test_conj_gates) - - def test_many_z_to_single_z(self): - """Test many Z's to a single Z.""" - q = cirq.GridQubit.rect(1, 8) - benchmark_term = 1.321 * cirq.Z(q[0]) * cirq.Z(q[3]) * cirq.Z( - q[5]) * cirq.Z(q[7]) - # Assume the focal qubit is set to q[3]. - benchmark_gates_indices = [(q[7], q[3]), (q[5], q[3]), (q[0], q[3])] - gates, _ = util._many_z_to_single_z(q[3], benchmark_term) - for gate_op in gates: - qubits = gate_op.qubits - gate = gate_op.gate - self.assertIsInstance(gate, cirq.CNotPowGate) - self.assertIn(qubits, benchmark_gates_indices) - benchmark_gates_indices.remove(qubits) - self.assertEqual([], benchmark_gates_indices) - - def test_exponential_simple(self): - """Test exponential for a simple operator.""" - q = cirq.GridQubit(0, 0) - for op in [cirq.X, cirq.Y, cirq.Z]: - theta = np.random.random() - circuit = util.exponential(operators=[theta * op(q)]) - - # TODO(jaeyoo) : remove factor 2 if cirq issue is resolved - # https://github.com/quantumlib/Cirq/issues/2710 - ground_truth_unitary = cirq.unitary(np.exp(-1j * 2 * theta * op(q))) - self.assertAllClose(ground_truth_unitary, cirq.unitary(circuit)) - - def test_exponential_identity(self): - """Test exponential for an identity.""" - theta = np.random.random() - identity = cirq.PauliString({None: cirq.I}) - circuit = util.exponential(operators=[theta * identity]) - - result_gates = [] - for moment in circuit: - for gate_op in moment: - result_gates.append(gate_op.gate) - - # Because it has no qubit in the total circuit, the default is set to - # zeroth qubit. 
- self.assertEqual(circuit.all_qubits(), frozenset({cirq.GridQubit(0, - 0)})) - - self.assertIsInstance(result_gates[0], cirq.XPowGate) - self.assertIsInstance(result_gates[1], cirq.ZPowGate) - self.assertIsInstance(result_gates[2], cirq.XPowGate) - self.assertIsInstance(result_gates[3], cirq.ZPowGate) - - self.assertAllClose( - np.eye(2) * np.exp(-1j * theta), cirq.unitary(circuit)) - - def test_exponential_complex(self): - """Test exponential for complex operators.""" - q = cirq.GridQubit.rect(1, 3) - theta1 = np.random.random() - theta2 = np.random.random() - identity = cirq.PauliString({None: cirq.I}) - op1 = theta1 * cirq.Z(q[1]) * cirq.Z(q[2]) - op2 = theta2 * identity - circuit = util.exponential(operators=[op1, op2]) - - result_gates = [] - for moment in circuit: - for gate_op in moment: - result_gates.append(gate_op) - - self.assertIsInstance(result_gates[0].gate, cirq.CNotPowGate) - self.assertIsInstance(result_gates[1].gate, cirq.ZPowGate) - self.assertIsInstance(result_gates[2].gate, cirq.CNotPowGate) - self.assertIsInstance(result_gates[3].gate, cirq.XPowGate) - self.assertIsInstance(result_gates[4].gate, cirq.ZPowGate) - self.assertIsInstance(result_gates[5].gate, cirq.XPowGate) - self.assertIsInstance(result_gates[6].gate, cirq.ZPowGate) - - # The exponentiation of identity should not be on q[0], but on q[1]. 
- for i in range(3, 7): - self.assertEqual(result_gates[i].qubits, (q[1],)) - - # TODO(jaeyoo) : remove factor 2 if cirq issue is resolved - # https://github.com/quantumlib/Cirq/issues/2710 - ground_truth_unitary = cirq.unitary(np.exp(-1j * 2 * op1)) - ground_truth_unitary *= cirq.unitary(np.exp(-1j * op2)) - result_unitary = cirq.unitary(circuit) - global_phase = ground_truth_unitary[0][0] / result_unitary[0][0] - result_unitary *= global_phase - - self.assertAllClose(ground_truth_unitary, result_unitary) - - def test_exponential_commutablility(self): - """Test exponential for non-commutable operator.""" - q = cirq.GridQubit(0, 0) - theta1 = np.random.random() - theta2 = np.random.random() - with self.assertRaisesRegex(ValueError, - expected_regex="non-commutable"): - util.exponential( - operators=[theta1 * cirq.X(q) + theta2 * cirq.Z(q)]) - - def test_serializability(self): - """Test exponential with serializer.""" - q = cirq.GridQubit.rect(1, 2) - theta = np.random.random() - identity = cirq.PauliString({None: cirq.I}) - op1 = theta * cirq.Z(q[0]) * cirq.Z(q[1]) - op2 = theta * identity - circuit = util.exponential(operators=[op1, op2]) - util.convert_to_tensor([circuit]) - - def test_real_coefficients(self): - """Test exponential with complex coefficient.""" - q = cirq.GridQubit.rect(1, 2) - theta = np.random.random() - op = 1j * theta * cirq.Z(q[0]) * cirq.Z(q[1]) - with self.assertRaisesRegex(ValueError, expected_regex="supports real"): - util.exponential(operators=[op]) - - -if __name__ == "__main__": - tf.test.main() diff --git a/third_party/BUILD b/third_party/BUILD deleted file mode 100644 index e69de29bb..000000000 diff --git a/third_party/tf/BUILD b/third_party/tf/BUILD deleted file mode 100644 index e69de29bb..000000000 diff --git a/third_party/tf/BUILD.tpl b/third_party/tf/BUILD.tpl deleted file mode 100644 index 64cc5f3a0..000000000 --- a/third_party/tf/BUILD.tpl +++ /dev/null @@ -1,26 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - 
-cc_library( - name = "tf_header_lib", - hdrs = [":tf_header_include"], - includes = ["include"], - visibility = ["//visibility:public"], -) - -cc_library( - name = "libtensorflow_framework", - srcs = [":libtensorflow_framework.so"], - #data = ["lib/libtensorflow_framework.so"], - visibility = ["//visibility:public"], -) - -load("@com_google_protobuf//:protobuf.bzl", "py_proto_library") - -py_library( - name = "test_log_pb2", - srcs = ["test_log_pb2.py"], -) - -%{TF_HEADER_GENRULE} -%{TF_SHARED_LIBRARY_GENRULE} -%{TF_PROTO_GENRULE} diff --git a/third_party/tf/auditwheel b/third_party/tf/auditwheel deleted file mode 100644 index 5cb83e2bb..000000000 --- a/third_party/tf/auditwheel +++ /dev/null @@ -1,9 +0,0 @@ -TF_SHARED_LIBRARY_NAME=$(grep -r TF_SHARED_LIBRARY_NAME .bazelrc | awk -F= '{print$2}') - -POLICY_JSON=$(find / -name policy.json) - -sed -i "s/libresolv.so.2\"/libresolv.so.2\", $TF_SHARED_LIBRARY_NAME/g" $POLICY_JSON - -cat $POLICY_JSON - -auditwheel $@ diff --git a/third_party/tf/tf_configure.bzl b/third_party/tf/tf_configure.bzl deleted file mode 100644 index e29129534..000000000 --- a/third_party/tf/tf_configure.bzl +++ /dev/null @@ -1,213 +0,0 @@ -"""Setup TensorFlow as external dependency""" - -_TF_HEADER_DIR = "TF_HEADER_DIR" -_TF_SHARED_LIBRARY_DIR = "TF_SHARED_LIBRARY_DIR" -_TF_SHARED_LIBRARY_NAME = "TF_SHARED_LIBRARY_NAME" - -def _tpl(repository_ctx, tpl, substitutions = {}, out = None): - if not out: - out = tpl - repository_ctx.template( - out, - Label("//third_party/tf:%s.tpl" % tpl), - substitutions, - ) - -def _fail(msg): - """Output failure message when auto configuration fails.""" - red = "\033[0;31m" - no_color = "\033[0m" - fail("%sPython Configuration Error:%s %s\n" % (red, no_color, msg)) - -def _is_windows(repository_ctx): - """Returns true if the host operating system is windows.""" - os_name = repository_ctx.os.name.lower() - if os_name.find("windows") != -1: - return True - return False - -def _execute( - repository_ctx, - cmdline, - 
error_msg = None, - error_details = None, - empty_stdout_fine = False): - """Executes an arbitrary shell command. - Helper for executes an arbitrary shell command. - Args: - repository_ctx: the repository_ctx object. - cmdline: list of strings, the command to execute. - error_msg: string, a summary of the error if the command fails. - error_details: string, details about the error or steps to fix it. - empty_stdout_fine: bool, if True, an empty stdout result is fine, otherwise - it's an error. - Returns: - The result of repository_ctx.execute(cmdline). - """ - result = repository_ctx.execute(cmdline) - if result.stderr or not (empty_stdout_fine or result.stdout): - _fail("\n".join([ - error_msg.strip() if error_msg else "Repository command failed", - result.stderr.strip(), - error_details if error_details else "", - ])) - return result - -def _read_dir(repository_ctx, src_dir): - """Returns a string with all files in a directory. - Finds all files inside a directory, traversing subfolders and following - symlinks. The returned string contains the full path of all files - separated by line breaks. - Args: - repository_ctx: the repository_ctx object. - src_dir: directory to find files from. - Returns: - A string of all files inside the given dir. - """ - if _is_windows(repository_ctx): - src_dir = src_dir.replace("/", "\\") - find_result = _execute( - repository_ctx, - ["cmd.exe", "/c", "dir", src_dir, "/b", "/s", "/a-d"], - empty_stdout_fine = True, - ) - - # src_files will be used in genrule.outs where the paths must - # use forward slashes. - result = find_result.stdout.replace("\\", "/") - else: - find_result = _execute( - repository_ctx, - ["find", src_dir, "-follow", "-type", "f"], - empty_stdout_fine = True, - ) - result = find_result.stdout - return result - -def _genrule(genrule_name, command, outs): - """Returns a string with a genrule. - Genrule executes the given command and produces the given outputs. 
- Args: - genrule_name: A unique name for genrule target. - command: The command to run. - outs: A list of files generated by this rule. - Returns: - A genrule target. - """ - return ( - "genrule(\n" + - ' name = "' + - genrule_name + '",\n' + - " outs = [\n" + - outs + - "\n ],\n" + - ' cmd = """\n' + - command + - '\n """,\n' + - ")\n" - ) - -def _norm_path(path): - """Returns a path with '/' and remove the trailing slash.""" - path = path.replace("\\", "/") - if path[-1] == "/": - path = path[:-1] - return path - -def _symlink_genrule_for_dir( - repository_ctx, - src_dir, - dest_dir, - genrule_name, - src_files = [], - dest_files = []): - """Returns a genrule to symlink(or copy if on Windows) a set of files. - If src_dir is passed, files will be read from the given directory; otherwise - we assume files are in src_files and dest_files. - Args: - repository_ctx: the repository_ctx object. - src_dir: source directory. - dest_dir: directory to create symlink in. - genrule_name: genrule name. - src_files: list of source files instead of src_dir. - dest_files: list of corresonding destination files. - Returns: - genrule target that creates the symlinks. - """ - if src_dir != None: - src_dir = _norm_path(src_dir) - dest_dir = _norm_path(dest_dir) - if _is_windows(repository_ctx): - # TODO(jaeyoo) : Remove the following hard-coded lines - src_dir = "C:/Python36/lib/site-packages/tensorflow_core/include" # hard-coded - files = "\n".join(sorted(_read_dir(repository_ctx, src_dir).splitlines())) - - # Create a list with the src_dir stripped to use for outputs. - dest_files = files.replace(src_dir, "").splitlines() - src_files = files.splitlines() - command = [] - outs = [] - for i in range(len(dest_files)): - if dest_files[i] != "": - # If we have only one file to link we do not want to use the dest_dir, as - # $(@D) will include the full path to the file. 
- dest = "$(@D)/" + dest_dir + dest_files[i] if len(dest_files) != 1 else "$(@D)/" + dest_files[i] - - # Copy the headers to create a sandboxable setup. - cmd = "cp -f" - command.append(cmd + ' "%s" "%s"' % (src_files[i], dest)) - outs.append(' "' + dest_dir + dest_files[i] + '",') - genrule = _genrule( - genrule_name, - " && ".join(command), - "\n".join(outs), - ) - return genrule - -def _tf_pip_impl(repository_ctx): - tf_header_dir = repository_ctx.os.environ[_TF_HEADER_DIR] - tf_header_rule = _symlink_genrule_for_dir( - repository_ctx, - tf_header_dir, - "include", - "tf_header_include", - ) - - tf_shared_library_dir = repository_ctx.os.environ[_TF_SHARED_LIBRARY_DIR] - tf_shared_library_name = repository_ctx.os.environ[_TF_SHARED_LIBRARY_NAME] - tf_shared_library_path = "%s/%s" % (tf_shared_library_dir, tf_shared_library_name) - - tf_shared_library_rule = _symlink_genrule_for_dir( - repository_ctx, - None, - "", - "libtensorflow_framework.so", - [tf_shared_library_path], - ["libtensorflow_framework.so"], - ) - - tf_test_log_proto_path = "%s/core/util/test_log_pb2.py" % tf_shared_library_dir - - tf_proto_rule = _symlink_genrule_for_dir( - repository_ctx, - None, - "", - "test_log_pb2.py", - [tf_test_log_proto_path], - ["test_log_pb2.py"], - ) - - _tpl(repository_ctx, "BUILD", { - "%{TF_HEADER_GENRULE}": tf_header_rule, - "%{TF_SHARED_LIBRARY_GENRULE}": tf_shared_library_rule, - "%{TF_PROTO_GENRULE}": tf_proto_rule, - }) - -tf_configure = repository_rule( - implementation = _tf_pip_impl, - environ = [ - _TF_HEADER_DIR, - _TF_SHARED_LIBRARY_DIR, - _TF_SHARED_LIBRARY_NAME, - ], -) diff --git a/vqt_qmhl/vqt_qmhl.ipynb b/vqt_qmhl/vqt_qmhl.ipynb new file mode 100644 index 000000000..bc2ec66a2 --- /dev/null +++ b/vqt_qmhl/vqt_qmhl.ipynb @@ -0,0 +1,1489 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "name": "TFQ_Example_VQT.ipynb", + "provenance": [], + "collapsed_sections": [], + "toc_visible": true + }, + "kernelspec": { + "name": 
"python3", + "display_name": "Python 3" + } + }, + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "xeNLEVTAzqUY", + "colab_type": "text" + }, + "source": [ + "##### Copyright 2020 The TensorFlow Quantum Authors." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "jlv9gRg5zmNn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", + "# you may not use this file except in compliance with the License.\n", + "# You may obtain a copy of the License at\n", + "#\n", + "# https://www.apache.org/licenses/LICENSE-2.0\n", + "#\n", + "# Unless required by applicable law or agreed to in writing, software\n", + "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", + "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", + "# See the License for the specific language governing permissions and\n", + "# limitations under the License." + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "45-jw_FfvFo4", + "colab_type": "text" + }, + "source": [ + "# VQT in TFQ" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-BRvsd9_DiCx", + "colab_type": "text" + }, + "source": [ + "Author : Antonio J. 
Martinez\n", + "\n", + "Contributors : Guillaume Verdon\n", + "\n", + "Created : 2020-Feb-06\n", + "\n", + "Last updated : 2020-Mar-06" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "YMDX5gWDExOT", + "colab_type": "text" + }, + "source": [ + "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/tensorflow/quantum/blob/research/vqt_qmhl/vqt_qmhl.ipynb)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NOcn7GMuvItv", + "colab_type": "text" + }, + "source": [ + "In this notebook, you will explore the combination of quantum computing and classical energy-based models with TensorFlow Quantum. The system under study is the [2D Heisenberg model](https://en.wikipedia.org/wiki/Heisenberg_model_(quantum)). You will apply the Variational Quantum Thermalizer (VQT) to produce approximate thermal states of this model. VQT was first proposed in the paper [here.](https://arxiv.org/abs/1910.02071)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ADO-msNe4nFL", + "colab_type": "text" + }, + "source": [ + "## Install and import dependencies" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "JuXxC5fbaGAS", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install --upgrade tensorflow==2.1.0" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "fyrqkto1aHQV", + "colab_type": "code", + "colab": {} + }, + "source": [ + "!pip install tensorflow-quantum" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "6tjpeUc-_jGr", + "colab_type": "code", + "colab": {} + }, + "source": [ + "%%capture\n", + "import cirq\n", + "import itertools\n", + "import numpy as np\n", + "import random\n", + "from scipy import linalg\n", + "import seaborn\n", + "import sympy\n", + "import tensorflow as tf\n", + "import tensorflow_probability as tfp\n", + "import 
tensorflow_quantum as tfq\n", + "\n", + "# visualization tools\n", + "%matplotlib inline\n", + "import matplotlib.pyplot as plt\n", + "from cirq.contrib.svg import SVGCircuit" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "36wDb-D64-Bf", + "colab_type": "text" + }, + "source": [ + "## 2D Heisenberg model" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Kj1k345gV6vC", + "colab_type": "text" + }, + "source": [ + "### Hamiltonian definition" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "P7mzjpQr__nH", + "colab_type": "text" + }, + "source": [ + "This Hamiltonian is supported on a rectangular lattice of qubits:\n", + "$$\\hat{H}_{\\text{heis}} = \\sum_{\\langle ij\\rangle_h} J_{h} \\hat{S}_i \\cdot \\hat{S}_j + \\sum_{\\langle ij\\rangle_v} J_{v} \\hat{S}_i \\cdot \\hat{S}_j,$$\n", + "where $h$ ($v$) denote horizontal (vertical) bonds, while $\\langle \\cdot \\rangle $ represent nearest-neighbor pairings.\n", + " You can build this Hamiltonian using Cirq `PauliString` and `PauliSum` objects:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "oWpQomEnA51w", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_qubit_grid(rows, cols):\n", + " \"\"\"Rectangle of qubits returned as a nested list.\"\"\"\n", + " qubits = []\n", + " for r in range(rows):\n", + " qubits.append([])\n", + " for c in range(cols):\n", + " qubits[-1].append(cirq.GridQubit(r, c))\n", + " return qubits\n", + "\n", + "def get_bond(q0, q1):\n", + " \"\"\"Given two Cirq qubits, return the PauliSum that bonds them.\"\"\"\n", + " return cirq.PauliSum.from_pauli_strings([\n", + " cirq.PauliString(cirq.X(q0), cirq.X(q1)),\n", + " cirq.PauliString(cirq.Y(q0), cirq.Y(q1)),\n", + " cirq.PauliString(cirq.Z(q0), cirq.Z(q1))])\n", + "\n", + "def get_heisenberg_hamiltonian(qubits, jh, jv):\n", + " \"\"\"Returns the 2D Heisenberg Hamiltonian over the given grid of qubits.\"\"\"\n", + " heisenberg 
= cirq.PauliSum()\n", + " # Apply horizontal bonds\n", + " for r in qubits:\n", + " for q0, q1 in zip(r, r[1::]):\n", + " heisenberg += jh * get_bond(q0, q1)\n", + " # Apply vertical bonds\n", + " for r0, r1 in zip(qubits, qubits[1::]):\n", + " for q0, q1 in zip(r0, r1):\n", + " heisenberg += jv * get_bond(q0, q1)\n", + " return heisenberg" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "g07ao_-9dV6v", + "colab_type": "text" + }, + "source": [ + "For visualization and verification purposes, the following function recovers an explicit matrix from a Cirq `PauliSum` given a linear ordering of the qubits which support it:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "KXEoSvsEdgfM", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def pauli_sum_to_matrix(qubits, pauli_sum):\n", + " \"\"\"Unpacks each pauli string in the pauli sum into a matrix and sums them.\"\"\"\n", + " matrix = np.zeros((2**len(qubits), 2**len(qubits)), dtype=np.complex128)\n", + " for pauli_string in pauli_sum:\n", + " coeff = pauli_string.coefficient\n", + " bare_string = pauli_string/coeff\n", + " matrix += coeff*bare_string.dense(qubits)._unitary_()\n", + " return matrix" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "lLR_vKjwWhgP", + "colab_type": "text" + }, + "source": [ + "### Target density matrix" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uUcPcokKcDOt", + "colab_type": "text" + }, + "source": [ + "Here you define the parameters of the system to be learned. The 2D Heisenberg model is defined by the number of rows and columns in the qubit lattice, the bond strengths in the horizontal and vertical directions, and the inverse temperature $\\beta$. 
Here, we use the same parameters as in the associated paper:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "C9L88CvDZCmw", + "colab_type": "code", + "colab": {} + }, + "source": [ + "num_rows = 2\n", + "num_cols = 2\n", + "jh = 1\n", + "jv = 0.6\n", + "beta = 2.6\n", + "\n", + "# Get the grid of qubits.\n", + "all_qubits = get_qubit_grid(num_rows, num_cols)\n", + "all_qubits_flat = [q for r in all_qubits for q in r]" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "wu96Vy78TvER", + "colab_type": "text" + }, + "source": [ + "Given a Hamiltonian $\\hat{H}$ and an inverse temperature $\\beta$, the thermal state $\\rho_T$ is given by\n", + "$$\\rho_T = e^{-\\beta \\hat{H}}.$$\n", + "Since our target system is small, you can compute this matrix exponential directly, using the `PauliSum`-to-matrix converter defined above:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "9fKp0XaNjPSF", + "colab_type": "code", + "colab": {} + }, + "source": [ + "num_H = pauli_sum_to_matrix(\n", + " all_qubits_flat, get_heisenberg_hamiltonian(all_qubits, jh, jv))\n", + "heisenberg_exp = linalg.expm(-beta*num_H)\n", + "exact_thermal_state = np.true_divide(heisenberg_exp, np.trace(heisenberg_exp))\n", + "seaborn.heatmap(abs(exact_thermal_state))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "GcqC3cXOnWBy", + "colab_type": "text" + }, + "source": [ + "Recall that any density matrix $\\rho$ [can be written as](https://en.wikipedia.org/wiki/Density_matrix#Definition)\n", + "$$\\rho = \\sum_i p_i |\\psi_i\\rangle\\langle\\psi_i|,$$\n", + "where $|\\psi_i\\rangle$ is a pure state and $p_i$ is the classical probability of encoutering that state in the mixture. 
Since TFQ is a pure state simulator, we will emulate density matrices by outputting pure states according to their probabilities $p_i$, which by the equation above is equivalent to outputting the full density matrix. We define here a function that converts such a list of pure states into the associated density matrix:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "QMnyxFTBncbG", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def pure_state_list_to_density_matrix(pure_states):\n", + " \"\"\"Return the uniform mixture of the given list of pure states.\"\"\"\n", + " dim = len(pure_states[0].numpy())\n", + " n_s = pure_states.shape[0]\n", + " thermal_state = np.zeros((dim, dim), dtype=np.complex128)\n", + " for i in range(n_s):\n", + " psi = pure_states[i].numpy()\n", + " thermal_state += np.outer(psi, psi)\n", + " return np.true_divide(thermal_state, n_s)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vOCUwIOBav50", + "colab_type": "text" + }, + "source": [ + "Finally, to track the performance of our models, we need a measure of the distance of our estimated density matrix $\\tilde{\\rho}$ from the target density matrix $\\rho_T$. One common metric is the [fidelity](https://en.wikipedia.org/wiki/Fidelity_of_quantum_states), which is defined as\n", + "$$F(\\tilde{\\rho}, \\rho_T) = \\text{tr}\\left[\\sqrt{\\sqrt{\\tilde{\\rho}}\\rho_T\\sqrt{\\tilde{\\rho}}}\\right]^2.$$\n", + "This is tractable to compute because our model system is small. 
Below we define a function that computes this quantity:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "o505UmerbsLm", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def fidelity(dm1, dm2):\n", + " \"\"\"Calculate the fidelity between the two given density matrices.\"\"\"\n", + " dm1_sqrt = linalg.sqrtm(dm1)\n", + " return abs(np.trace(linalg.sqrtm(\n", + " np.matmul(dm1_sqrt, np.matmul(dm2, dm1_sqrt))))) ** 2" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "6dHgm4SiBEB8", + "colab_type": "text" + }, + "source": [ + "## Energy based models" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "y7QXAsERBHEI", + "colab_type": "text" + }, + "source": [ + "Energy based models are a type of machine learning ansatze inspired by physics and exponential families. The advantage of using energy based models for probabilistic modeling is that fair samples can be drawn from the distributions they define without requiring computation of their partition functions.\n", + "\n", + "One specific class of EBM is the Boltzmann machine. The energy of a spin configuration $x \\in \\{-1, 1\\}^n$ in this model is defined as:\n", + "$$E(x) = -\\sum_{i, j}w_{ij} x_i x_j - \\sum_i b_i x_i.$$\n", + "\n", + "This classical model can be easily converted into a quantum mechanical Ising model by replacing each bit with the Pauli $Z$ operator, and considering the usual mapping of the spin to qubit pictures 1 -> $|0\\rangle$ and $-1$ -> $|1\\rangle$.\n", + "\n", + "In the special case where the connection weights $w_{ij}$ are all zero, the Boltzmann machine is reduced to a product of independent Bernoulli distributions over the set of qubits. This \"Bernoulli EBM\" has many simplifying properties, and hence you will explore this EBM first in the examples below. Later in the notebook, you will apply the full Boltzmann EBM to VQT." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "DHSwa_R6WEIs", + "colab_type": "text" + }, + "source": [ + "### Energy functions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "cvoLq1XOsFQu", + "colab_type": "text" + }, + "source": [ + "Here we define functions which compute the energy of a Boltzmann or Bernoulli EBM given the weight, biases, and bitstrings:" + ] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "pYzD_xF0BJpl", + "colab": {} + }, + "source": [ + "def bitstring_to_spin_config(bitstring):\n", + " \"\"\"Implements the mapping from the qubit to the spin picture.\"\"\"\n", + " return [-1 if b == 1 else 1 for b in bitstring]\n", + "\n", + "def spin_config_to_bitstring(spin_config):\n", + " \"\"\"Implements the mapping from the spin to the qubit picture.\"\"\"\n", + " return [0 if s == 1 else 1 for s in spin_config]\n", + "\n", + "def ebm_energy(spin_config, biases, weights=None):\n", + " \"\"\"Given a rank-2 tensor representing the weight matrix and a rank-1 tensor\n", + " representing the biases, calculate the energy of the spin configuration.\"\"\"\n", + " energy = 0\n", + " if weights is not None:\n", + " for w_row, xi in zip(weights.numpy(), spin_config):\n", + " for wij, xj in zip(w_row, spin_config):\n", + " energy -= wij*xi*xj\n", + " for bi, xi in zip(biases.numpy(), spin_config):\n", + " energy -= bi*xi\n", + " return energy\n", + "\n", + "def ebm_energy_avg(spin_config_list, biases, weights=None):\n", + " \"\"\"Average energy over a set of spin configuration samples.\"\"\"\n", + " energy_avg = 0\n", + " for spin_config in spin_config_list:\n", + " energy_avg += ebm_energy(spin_config, biases, weights)\n", + " energy_avg /= len(spin_config_list)\n", + " return energy_avg" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "yCK1ByUXkywN", + "colab_type": "text" + }, + "source": [ + "We also define functions which initialize TF 
Variables for our weights and biases. Initializing all weights and biases near 0 means we begin near the uniform distribution, which can also be thought of as starting with a high temperature prior:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "E8TPAtmvkt6Q", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_initialized_ebm_biases(num_units):\n", + " return tf.Variable(\n", + " tf.random.uniform(minval=-0.1, maxval=0.1, shape=[num_units],\n", + " dtype=tf.float32), dtype=tf.float32)\n", + "def get_initialized_ebm_weights(num_units):\n", + " return tf.Variable(\n", + " tf.random.uniform(minval=-0.1, maxval=0.1,\n", + " shape=[num_units, num_units],dtype=tf.float32), dtype=tf.float32)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "zkDAxG9JWJep", + "colab_type": "text" + }, + "source": [ + "### EBM derivatives" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "is1yfTqVEgas", + "colab_type": "text" + }, + "source": [ + "The derivative of an EBM given a bitstring is easy to compute. In fact, the derivatives are independent of the weights and biases:\n", + "$$\\nabla_{w_{ij}}E(x) = -x_ix_j\\quad \\text{and}\\quad \\nabla_{b_{i}}E(x) = -x_i.$$\n", + "Information about the weights and biases enters by averaging these derivates over samples from the EBM." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OIxgPEKaEbYn", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def ebm_weights_derivative(spin_config):\n", + " w_deriv = np.zeros((len(spin_config), len(spin_config)))\n", + " for i, x_i in enumerate(spin_config):\n", + " for j, x_j in enumerate(spin_config):\n", + " w_deriv[i][j] = -x_i*x_j\n", + " return w_deriv\n", + "\n", + "def ebm_biases_derivative(spin_config):\n", + " b_deriv = np.zeros(len(spin_config))\n", + " for i, x_i in enumerate(spin_config):\n", + " b_deriv[i] = -x_i\n", + " return b_deriv\n", + "\n", + "def ebm_weights_derivative_avg(spin_config_list):\n", + " w_deriv = np.zeros((len(spin_config_list[0]), len(spin_config_list[0])))\n", + " for spin_config in spin_config_list:\n", + " w_deriv += ebm_weights_derivative(spin_config)\n", + " return np.true_divide(w_deriv, len(spin_config_list))\n", + "\n", + "def ebm_biases_derivative_avg(spin_config_list):\n", + " b_deriv = np.zeros(len(spin_config_list[0]))\n", + " for spin_config in spin_config_list: \n", + " b_deriv += ebm_biases_derivative(spin_config)\n", + " return np.true_divide(b_deriv, len(spin_config_list))" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "22C0LS7GeyQh", + "colab_type": "text" + }, + "source": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "### Classical VQT loss gradients" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8kbBeqxkpBHH", + "colab_type": "text" + }, + "source": [ + "As discussed in the paper, the gradient of the VQT loss function can be calculated without computing entropies or partition functions. 
For example, the gradient of the VQT free energy loss with respect to the classical model parameters can be writtent as:\n", + "$$\\partial_{\\theta} \\mathcal{L}_{\\text{fe}} =\\mathbb{E}_{x\\sim p_{\\theta}(x)}[(E_{\\theta}(x)-\\beta H_{\\phi}(x) ) \\nabla_{\\theta}E_{\\theta}(x) ]-(\\mathbb{E}_{x\\sim p_{\\theta}(x)}[E_{\\theta}(x)-\\beta H_{\\phi}(x)]) ( \\mathbb{E}_{y\\sim p_{\\theta}(y)}[\\nabla_{\\theta}E_{\\theta}(y)] ).$$\n", + "Below these gradients are defined for the general Boltzmann EBM. In the VQT gradients, each entry in `bitstring_list` corresponds to the entry with the same index in `energy_losses`, where for each bitstring $x$, we compute the product $\\beta\\langle x|H|x\\rangle$. The list of bitstrings is assumed to be sampled from the EBM.\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "lNYyUNbloNO2", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_vqt_weighted_weights_grad_product(\n", + " energy_losses, spin_config_list, biases, weights):\n", + " \"\"\"Implements the first term in the derivative of the FE loss,\n", + " for the weights of a Boltzmann EBM.\"\"\"\n", + " w_deriv = np.zeros((len(spin_config_list[0]), len(spin_config_list[0])))\n", + " for e_loss, spin_config in zip(energy_losses, spin_config_list):\n", + " w_deriv = w_deriv + (\n", + " ebm_energy(spin_config, biases, weights) - e_loss \n", + " )*ebm_weights_derivative(spin_config)\n", + " return np.true_divide(w_deriv, len(energy_losses))\n", + "\n", + "def get_vqt_weighted_biases_grad_product(\n", + " energy_losses, spin_config_list, biases, weights=None):\n", + " \"\"\"Implements the first term in the derivative of the FE loss,\n", + " for the biases of a Boltzmann EBM.\"\"\"\n", + " b_deriv = np.zeros(len(spin_config_list[0]))\n", + " for e_loss, spin_config in zip(energy_losses, spin_config_list):\n", + " b_deriv = b_deriv + (\n", + " ebm_energy(spin_config, biases, weights) - e_loss\n", + " )*ebm_biases_derivative(spin_config)\n", + " 
return np.true_divide(b_deriv, len(energy_losses))\n", + "\n", + "def get_vqt_factored_weights_grad_product(\n", + " energy_losses, spin_config_list, biases, weights):\n", + " \"\"\"Implements the second term in the derivative of the FE loss,\n", + " for the weights of a Boltzmann EBM.\"\"\"\n", + " energy_losses_avg = tf.reduce_mean(energy_losses)\n", + " classical_energy_avg = ebm_energy_avg(spin_config_list, biases, weights)\n", + " energy_diff_avg = classical_energy_avg - energy_losses_avg\n", + " return energy_diff_avg*ebm_weights_derivative_avg(spin_config_list)\n", + "\n", + "def get_vqt_factored_biases_grad_product(\n", + " energy_losses, spin_config_list, biases, weights=None):\n", + " \"\"\"Implements the second term in the derivative of the FE loss,\n", + " for the biases of a Boltzmann EBM.\"\"\"\n", + " energy_losses_avg = tf.reduce_mean(energy_losses)\n", + " classical_energy_avg = ebm_energy_avg(spin_config_list, biases, weights)\n", + " energy_diff_avg = classical_energy_avg - energy_losses_avg\n", + " return energy_diff_avg*ebm_biases_derivative_avg(spin_config_list)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jbVDJgviYO0x", + "colab_type": "text" + }, + "source": [ + "## Model components" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "G45X3XnzWdI4", + "colab_type": "text" + }, + "source": [ + "### Ansatz unitary" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "kIS8zy34qZWy", + "colab_type": "text" + }, + "source": [ + "The parameterized unitary ansatz you will use consists of alternating layers of general single qubit rotations and nearest-neighbor entangling gates:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "2fGHmkevsUsZ", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_rotation_1q(q, a, b, c):\n", + " \"\"\"General single qubit rotation.\"\"\"\n", + " return cirq.Circuit(cirq.X(q) ** a, cirq.Y(q) ** b, cirq.Z(q) 
** c)\n", + "\n", + "def get_rotation_2q(q0, q1, a):\n", + " \"\"\"Exponent of entangling CNOT gate.\"\"\"\n", + " return cirq.Circuit(cirq.CNotPowGate(exponent=a)(q0, q1))\n", + "\n", + "def get_layer_1q(qubits, layer_num, name):\n", + " \"\"\"Apply single qubit rotations to all the given qubits.\"\"\"\n", + " layer_symbols = []\n", + " circuit = cirq.Circuit()\n", + " for n, q in enumerate(qubits):\n", + " a, b, c = sympy.symbols(\n", + " \"a{2}_{0}_{1} b{2}_{0}_{1} c{2}_{0}_{1}\".format(layer_num, n, name))\n", + " layer_symbols += [a, b, c]\n", + " circuit += get_rotation_1q(q, a, b, c)\n", + " return circuit, layer_symbols\n", + "\n", + "def get_layer_2q(qubits, layer_num, name):\n", + " \"\"\"Apply CNOT gates to all pairs of nearest-neighbor qubits.\"\"\"\n", + " layer_symbols = []\n", + " circuit = cirq.Circuit()\n", + " for n, (q0, q1) in enumerate(zip(qubits[::2], qubits[1::2])):\n", + " a = sympy.symbols(\"a{2}_{0}_{1}\".format(layer_num, n, name))\n", + " layer_symbols += [a]\n", + " circuit += get_rotation_2q(q0, q1, a)\n", + " shifted_qubits = qubits[1::]+[qubits[0]]\n", + " for n, (q0, q1) in enumerate(zip(shifted_qubits[::2], shifted_qubits[1::2])):\n", + " a = sympy.symbols(\"a{2}_{0}_{1}\".format(layer_num, n+1, name))\n", + " layer_symbols += [a]\n", + " circuit += get_rotation_2q(q0, q1, a)\n", + " return circuit, layer_symbols\n", + "\n", + "def get_one_full_layer(qubits, layer_num, name):\n", + " \"\"\"Stack the one- and two-qubit parameterized circuits.\"\"\"\n", + " circuit = cirq.Circuit()\n", + " all_symbols = []\n", + " new_circ, new_symb = get_layer_1q(qubits, layer_num, name)\n", + " circuit += new_circ\n", + " all_symbols += new_symb\n", + " new_circ, new_symb = get_layer_2q(qubits, layer_num + 1, name)\n", + " circuit += new_circ\n", + " all_symbols += new_symb\n", + " return circuit, all_symbols\n", + "\n", + "def get_model_unitary(qubits, num_layers, name=\"\"):\n", + " \"\"\"Build our full parameterized model unitary.\"\"\"\n", + " 
circuit = cirq.Circuit()\n", + " all_symbols = []\n", + " for i in range(num_layers):\n", + " new_circ, new_symb = get_one_full_layer(qubits, 2*i, name)\n", + " circuit += new_circ\n", + " all_symbols += new_symb\n", + " return circuit, all_symbols" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8xCxHubUYVXp", + "colab_type": "text" + }, + "source": [ + "### Bitstring injector" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "NaBARInB6maZ", + "colab_type": "text" + }, + "source": [ + "You also need a way to feed bitstrings into the quantum model. These bitstrings can be prepared by applying an X gate to every qubit that should be excited. The following function returns a parameterized circuit which prepares any given bitstring:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "ly6Rp_KL7f6r", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def get_bitstring_circuit(qubits):\n", + " \"\"\"Returns wall of parameterized X gates and the bits used to turn them on.\"\"\"\n", + " circuit = cirq.Circuit()\n", + " all_symbols = []\n", + " for n, q in enumerate(qubits):\n", + " new_bit = sympy.Symbol(\"bit_{}\".format(n))\n", + " circuit += cirq.X(q) ** new_bit\n", + " all_symbols.append(new_bit)\n", + " return circuit, all_symbols" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "JkaiSF5Ez0Hp", + "colab_type": "text" + }, + "source": [ + "## Factorized latent state" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "PiivYeckVi9s", + "colab_type": "text" + }, + "source": [ + "### Bernoulli EBM" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "AbY_ZkiqSowp", + "colab_type": "text" + }, + "source": [ + "The Bernoulli EBM can be used to parameterize a factorized latent state. 
The probability of sampling a 1 from a unit with bias $b$ is:\n", + "\n", + "$$p = \\frac{e^b}{e^b + e^{-b}}$$\n", + "\n", + "Since the units of a Bernoulli EBM are independent, the probability of a given spin configuration is simply the product of the individual unit probabilities:\n", + "\n", + "$$p(x) = \\prod_i\\frac{e^{x_ib_i}}{e^{b_i} + e^{-b_i}}$$\n", + "\n", + "This distribution is easy to sample from." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "NE8OCbQ0gwQB", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def bernoulli_spin_p1(b):\n", + " return np.exp(b)/(np.exp(b) + np.exp(-b))\n", + "\n", + "def sample_spins_bernoulli(num_samples, biases):\n", + " prob_list = []\n", + " for bias in biases.numpy():\n", + " prob_list.append(bernoulli_spin_p1(bias))\n", + " # The `probs` keyword specifies the probability of a 1 event\n", + " latent_dist = tfp.distributions.Bernoulli(probs=prob_list, dtype=tf.float32)\n", + " bit_samples = latent_dist.sample(num_samples).numpy()\n", + " spin_samples = []\n", + " for sample in bit_samples:\n", + " spin_samples.append([])\n", + " for bit in sample:\n", + " if bit == 0:\n", + " spin_samples[-1].append(-1)\n", + " else:\n", + " spin_samples[-1].append(1)\n", + " return spin_samples" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "gJiE1ULKFMih", + "colab_type": "text" + }, + "source": [ + "The entropy of a single unit with bias $b$ in our Bernoulli EBM is:\n", + "\n", + "$S = \\frac{be^{b} - be^{-b}}{e^{b} + e^{-b}}- \\log[e^{b} + e^{-b}]$\n", + "\n", + "For a factorized latent distribution, the entropy is simply the sum of the entropies of the individual factors." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "03lIN32qFqJT", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def bernoulli_factor_partition(b):\n", + " return np.exp(b) + np.exp(-b)\n", + "\n", + "def bernoulli_partition(biases):\n", + " partition = 1\n", + " for bias in biases.numpy():\n", + " partition *= bernoulli_factor_partition(bias)\n", + " return partition\n", + "\n", + "def bernoulli_factor_entropy(b):\n", + " Z = bernoulli_factor_partition(b)\n", + " return (b*np.exp(b) - b*np.exp(-b))/Z - np.log(Z)\n", + "\n", + "def bernoulli_entropy(biases):\n", + " entropy = 0\n", + " for bias in biases.numpy():\n", + " entropy += bernoulli_factor_entropy(bias)\n", + " return entropy" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HGc8XMBOM76Q", + "colab_type": "text" + }, + "source": [ + "Finally we define a function for converting the classical Bernoulli distribution into an Ising model whose expectation values can be simulated in the TFQ ops:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "MjuUUwRbNJq7", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def bernoulli_ebm_to_ising(qubits, biases, bare=False):\n", + " pauli_s_list = []\n", + " for i, bi in enumerate(biases.numpy()):\n", + " if bare:\n", + " coeff = 1.0\n", + " else:\n", + " coeff = bi\n", + " pauli_s_list.append(cirq.PauliString(coeff, cirq.Z(qubits[i])))\n", + " if bare:\n", + " return pauli_s_list\n", + " return cirq.PauliSum.from_pauli_strings(pauli_s_list)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Pq1arQTGDRy5", + "colab_type": "text" + }, + "source": [ + "### VQT" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "eJoZptT3mt7N", + "colab_type": "text" + }, + "source": [ + "Build and view our unitary model and set up the TFQ Expectation Op inputs:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": 
"F_EvADGJZSZw", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Number of bitstring samples from our classical model to average over\n", + "num_samples = 300\n", + "\n", + "# Number of rotations-plus-entanglement layers to stack.\n", + "# Note that the depth required to reach a given fidelity increases depending on\n", + "# the temperature and Hamiltonian parameters.\n", + "num_layers = 4\n", + "\n", + "# Build the model unitary and visible state circuits\n", + "U, model_symbols = get_model_unitary(all_qubits_flat, num_layers)\n", + "V, bit_symbols = get_bitstring_circuit(all_qubits_flat)\n", + "visible_state = tfq.convert_to_tensor([V + U])\n", + "\n", + "# Make a copy of the visible state for each bitstring we will sample\n", + "tiled_visible_state = tf.tile(visible_state, [num_samples])\n", + "\n", + "# Upconvert symbols to tensors\n", + "vqt_symbol_names = tf.identity(tf.convert_to_tensor(\n", + " [str(s) for s in bit_symbols + model_symbols], dtype=tf.dtypes.string))\n", + "\n", + "# Build and tile the Hamiltonian\n", + "H = get_heisenberg_hamiltonian(all_qubits, jh, jv)\n", + "tiled_H = tf.tile(tfq.convert_to_tensor([[H]]), [num_samples, 1])\n", + "\n", + "# Get the expectation op with a differentiator attached\n", + "expectation = tfq.differentiators.ForwardDifference().generate_differentiable_op(\n", + " analytic_op=tfq.get_expectation_op())\n", + "\n", + "SVGCircuit(U)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "OU0WZ1GPp6RF", + "colab_type": "text" + }, + "source": [ + "We can use gradient descent on the model parameters thanks to TFQ, and our classical model parameters are tractable due to our use of an energy based model. The factorized nature of our latent space also allows us to efficiently obtain a loss function." 
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5w3op8KUp61G", + "colab_type": "code", + "colab": {} + }, + "source": [ + "optimizer = tf.keras.optimizers.Adam(learning_rate=0.03)\n", + "\n", + "# Initialize our model variables\n", + "vqt_model_params = tf.Variable(\n", + " tf.random.uniform(minval=-0.1, maxval=0.1,\n", + " shape=[len(model_symbols)], dtype=tf.float32),\n", + " dtype=tf.float32)\n", + "\n", + "# Keep track of metrics during training\n", + "vqt_loss_history = []\n", + "vqt_fidelity_history = []\n", + "vqt_model_params_history = []\n", + "vqt_bias_history = []\n", + "vqt_density_matrix_history = []\n", + "\n", + "# Initialize our EBM variables\n", + "vqt_biases = get_initialized_ebm_biases(len(all_qubits_flat))\n", + "\n", + "# The innermost training step, where gradients are taken and applied\n", + "def vqt_train_step():\n", + " # Sample from our EBM\n", + " spin_config_list = sample_spins_bernoulli(num_samples, vqt_biases)\n", + " bitstring_list = [spin_config_to_bitstring(s) for s in spin_config_list]\n", + " bitstring_tensor = tf.convert_to_tensor(bitstring_list, dtype=tf.float32)\n", + "\n", + " # Use the samples to find gradient of the loss w.r.t. 
model parameters.\n", + " with tf.GradientTape() as tape:\n", + " tiled_vqt_model_params = tf.tile([vqt_model_params], [num_samples, 1])\n", + " sampled_expectations = expectation(\n", + " tiled_visible_state,\n", + " vqt_symbol_names,\n", + " tf.concat([bitstring_tensor, tiled_vqt_model_params], 1),\n", + " tiled_H)\n", + " energy_losses = beta*sampled_expectations\n", + " energy_losses_avg = tf.reduce_mean(energy_losses)\n", + " vqt_model_gradients = tape.gradient(energy_losses_avg, [vqt_model_params])\n", + "\n", + " # Build the classical model gradients\n", + " weighted_biases_grad = get_vqt_weighted_biases_grad_product(\n", + " energy_losses, spin_config_list, vqt_biases)\n", + " factored_biases_grad = get_vqt_factored_biases_grad_product(\n", + " energy_losses, spin_config_list, vqt_biases)\n", + " biases_grad = tf.subtract(weighted_biases_grad, factored_biases_grad)\n", + "\n", + " # Apply the gradients\n", + " optimizer.apply_gradients(zip([vqt_model_gradients[0], biases_grad],\n", + " [vqt_model_params, vqt_biases]))\n", + "\n", + " # Sample pure states to build the current estimate of the density matrix\n", + " many_states = tfq.layers.State()(\n", + " tiled_visible_state,\n", + " symbol_names=vqt_symbol_names,\n", + " symbol_values=tf.concat([bitstring_tensor, tiled_vqt_model_params], 1)\n", + " )\n", + " vqt_density_matrix_history.append(pure_state_list_to_density_matrix(many_states))\n", + "\n", + " # Record the history\n", + " vqt_loss_history.append((energy_losses_avg - bernoulli_entropy(vqt_biases)).numpy())\n", + " vqt_fidelity_history.append(\n", + " fidelity(vqt_density_matrix_history[-1], exact_thermal_state))\n", + " vqt_model_params_history.append(vqt_model_params.numpy())\n", + " vqt_bias_history.append(vqt_biases.numpy())\n", + " \n", + " print(\"Current loss:\")\n", + " print(vqt_loss_history[-1])\n", + " print(\"Current fidelity to optimal state:\")\n", + " print(vqt_fidelity_history[-1])\n", + " print(\"Current estimated density 
matrix:\")\n", + " plt.figure()\n", + " seaborn.heatmap(abs(vqt_density_matrix_history[-1]))\n", + " plt.show()" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "3n3hXfHGcVRh", + "colab_type": "text" + }, + "source": [ + "With setup complete, we can now optimize our Heisenberg VQT." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "OqFfhQtotacR", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def vqt_train(epochs):\n", + " for epoch in range(epochs):\n", + " vqt_train_step()\n", + " print ('Epoch {} finished'.format(epoch + 1))\n", + "\n", + "vqt_train(100)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Yigvs92XtcLQ", + "colab_type": "text" + }, + "source": [ + "We plot our metrics and visualize the motion of the parameters during training:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "C2dopegQtmQA", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(vqt_loss_history)\n", + "plt.xlabel('Epoch #')\n", + "plt.ylabel('Loss [free energy]')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "aQuPV9l6Lh7I", + "colab_type": "code", + "colab": {} + }, + "source": [ + "plt.plot(vqt_fidelity_history)\n", + "plt.xlabel('Epoch #')\n", + "plt.ylabel('Fidelity with exact state')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "vI1lbaENVZXq", + "colab_type": "text" + }, + "source": [ + "## Classically correlated latent state" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4ojAo5UwVQWK", + "colab_type": "text" + }, + "source": [ + "### Boltzmann machine EBM" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "WrYekcBOOToj", + "colab_type": "text" + }, + "source": [ + "The Bernoulli distribution is only able to inject entropy into our density matrix. 
To encode classical correlations, we need to move beyond a factorized latent state. This can be accomplished by allowing the weights of our Boltzmann machine to be non-zero.\n", + "\n", + "Now that there are correlations, sampling from the model becomes non-trivial. The probability of bitstring $x$ is:\n", + "\n", + "$P(x) = \\frac{\\exp(-E(x))}{\\sum_{y\\in\\{-1, 1\\}^n} \\exp(-E(y))}$\n", + "\n", + "In general this function is intractable to compute directly; however, we can still obtain samples from the distribution efficiently. Markov chain Monte Carlo (MCMC) is one family of procedures for this efficient sampling. Here, we use the simplest example of MCMC, the [Metropolis-Hastings](https://en.wikipedia.org/wiki/Metropolis%E2%80%93Hastings_algorithm) algorithm:\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "-cFS7yyxR6dY", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def make_proposal(y):\n", + " \"\"\"Flip spins in y to generate a new sample.\"\"\"\n", + " coin = tfp.distributions.Bernoulli(probs=[0.75]*len(y))\n", + " samples = coin.sample(1).numpy()[0]\n", + " x = []\n", + " for s_i, y_i in zip(samples, y):\n", + " if s_i:\n", + " x.append(y_i)\n", + " else:\n", + " if y_i == 1:\n", + " x.append(-1)\n", + " else:\n", + " x.append(1)\n", + " return x\n", + "\n", + "def sample_boltzmann(burn_in, num_samples, skip, initial_state, biases, weights):\n", + " \"\"\"Walk towards and sample from regions of high probability.\"\"\"\n", + " current_state = initial_state\n", + " all_samples = []\n", + " for i in range(burn_in + skip*num_samples):\n", + " proposal = make_proposal(current_state)\n", + " proposal_energy = ebm_energy(proposal, biases, weights)\n", + " current_energy = ebm_energy(current_state, biases, weights)\n", + " acceptance = min(np.exp(-proposal_energy)/np.exp(-current_energy), 1)\n", + " threshold = random.random()\n", + " if threshold <= acceptance:\n", + " current_state = proposal\n", + " if i >= burn_in:\n", + " if (i 
- burn_in)%skip == 0:\n", + " all_samples.append(current_state)\n", + " return all_samples" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "mTfa35s2PLbd", + "colab_type": "text" + }, + "source": [ + "Since there are now correlations between the bits, the partition function and entropy can no longer be computed in a scalable way. However, for the small system size considered here, these quantities can be illustrative." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "1tguzlInPH6r", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def boltzmann_partition(biases, weights):\n", + " partition_value = 0\n", + " for spin_config in itertools.product([-1, 1], repeat=biases.shape[0]):\n", + " partition_value += np.exp(-ebm_energy(spin_config, biases, weights))\n", + " return partition_value\n", + "\n", + "def boltzmann_entropy(biases, weights):\n", + " Z = boltzmann_partition(biases, weights)\n", + " Z_log = np.log(Z)\n", + " unnormalized = 0\n", + " for spin_config in itertools.product([-1, 1], repeat=biases.shape[0]):\n", + " this_energy = ebm_energy(spin_config, biases, weights)\n", + " unnormalized += np.exp(-this_energy)*(-this_energy - Z_log)\n", + " return -unnormalized/Z" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "05Ns_rcIOAvR", + "colab_type": "text" + }, + "source": [ + "Finally we define a function for converting the classical Boltzmann machine into an Ising model whose expectation values can be simulated in the TFQ ops:" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "bmsidwSwVT27", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def boltzmann_ebm_to_ising(qubits, biases, weights, bare=False):\n", + " pauli_s_list = []\n", + " for i, w_row in enumerate(weights.numpy()):\n", + " for j, wij in enumerate(w_row):\n", + " init_list = [cirq.Z(q) for qi, q in enumerate(qubits) if qi == i or qi == j]\n", 
+ " if bare:\n", + " coeff = 1.0\n", + " else:\n", + " coeff = wij\n", + " pauli_s_list.append(cirq.PauliString(coeff, init_list))\n", + " for i, bi in enumerate(biases.numpy()):\n", + " if bare:\n", + " coeff = 1.0\n", + " else:\n", + " coeff = bi\n", + " pauli_s_list.append(cirq.PauliString(coeff, cirq.Z(qubits[i])))\n", + " if bare:\n", + " return pauli_s_list\n", + " return cirq.PauliSum.from_pauli_strings(pauli_s_list)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "pWgYpkp9O3Et", + "colab_type": "text" + }, + "source": [ + "### VQT" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "gr2R5-t0UJvO", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Initialize our model variables\n", + "vqt_model_params = tf.Variable(\n", + " tf.random.uniform(minval=-0.1, maxval=0.1, shape=[len(model_symbols)],\n", + " dtype=tf.float32), dtype=tf.float32)\n", + "\n", + "# Define the learning hyperparameters\n", + "burn_in = 100\n", + "skip = 7\n", + "optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)\n", + "\n", + "# Keep track of metrics during training\n", + "vqt_loss_history = []\n", + "vqt_fidelity_history = []\n", + "vqt_model_params_history = []\n", + "vqt_weights_history = []\n", + "vqt_bias_history = []\n", + "vqt_density_matrix_history = []\n", + "\n", + "# Initialize our EBM variables\n", + "vqt_weights = get_initialized_ebm_weights(len(all_qubits_flat))\n", + "vqt_biases = get_initialized_ebm_biases(len(all_qubits_flat))\n", + "\n", + "# The innermost training step, where gradients are taken and applied\n", + "def vqt_train_step():\n", + " # Sample from our EBM\n", + " spin_config_list = sample_boltzmann(burn_in, num_samples, skip,\n", + " vqt_train_step.initial_state,\n", + " vqt_biases, vqt_weights)\n", + " vqt_train_step.initial_state = spin_config_list[-1]\n", + " bitstring_list = [spin_config_to_bitstring(s) for s in spin_config_list]\n", + " bitstring_tensor = 
tf.convert_to_tensor(bitstring_list, dtype=tf.float32)\n", + "\n", + " # Use the samples to find gradient of the loss w.r.t. model parameters.\n", + " with tf.GradientTape() as tape:\n", + " tiled_vqt_model_params = tf.tile([vqt_model_params], [num_samples, 1])\n", + " sampled_expectations = expectation(\n", + " tiled_visible_state,\n", + " vqt_symbol_names,\n", + " tf.concat([bitstring_tensor, tiled_vqt_model_params], 1),\n", + " tiled_H)\n", + " energy_losses = beta*sampled_expectations\n", + " energy_losses_avg = tf.reduce_mean(energy_losses)\n", + " vqt_model_gradients = tape.gradient(energy_losses_avg, [vqt_model_params])\n", + "\n", + " # Build the classical model gradients\n", + " weighted_biases_grad = get_vqt_weighted_biases_grad_product(\n", + " energy_losses, spin_config_list, vqt_biases, vqt_weights)\n", + " factored_biases_grad = get_vqt_factored_biases_grad_product(\n", + " energy_losses, spin_config_list, vqt_biases, vqt_weights)\n", + " biases_grad = tf.subtract(weighted_biases_grad, factored_biases_grad)\n", + " weighted_weights_grad = get_vqt_weighted_weights_grad_product(\n", + " energy_losses, spin_config_list, vqt_weights, vqt_weights)\n", + " factored_weights_grad = get_vqt_factored_weights_grad_product(\n", + " energy_losses, spin_config_list, vqt_weights, vqt_weights)\n", + " weights_grad = tf.subtract(weighted_weights_grad, factored_weights_grad)\n", + "\n", + " # Apply the gradients\n", + " optimizer.apply_gradients(\n", + " zip([vqt_model_gradients[0], weights_grad, biases_grad],\n", + " [vqt_model_params, vqt_weights, vqt_biases]))\n", + "\n", + " # Sample pure states to build the current estimate of the density matrix\n", + " many_states = tfq.layers.State()(\n", + " tiled_visible_state,\n", + " symbol_names=vqt_symbol_names,\n", + " symbol_values=tf.concat([bitstring_tensor, tiled_vqt_model_params], 1)\n", + " )\n", + " vqt_density_matrix_history.append(pure_state_list_to_density_matrix(many_states))\n", + "\n", + " # Record the 
history\n", + " vqt_loss_history.append(\n", + " (energy_losses_avg - boltzmann_entropy(vqt_biases, vqt_weights)).numpy())\n", + " vqt_fidelity_history.append(\n", + " fidelity(vqt_density_matrix_history[-1], exact_thermal_state))\n", + " vqt_model_params_history.append(vqt_model_params.numpy())\n", + " vqt_weights_history.append(vqt_weights.numpy())\n", + " vqt_bias_history.append(vqt_biases.numpy())\n", + " \n", + " print(\"Current loss:\")\n", + " print(vqt_loss_history[-1])\n", + " print(\"Current fidelity to optimal state:\")\n", + " print(vqt_fidelity_history[-1])\n", + " print(\"Current estimated density matrix:\")\n", + " plt.figure()\n", + " seaborn.heatmap(abs(vqt_density_matrix_history[-1]))\n", + " plt.show()\n", + "\n", + "vqt_train_step.initial_state = [1]*len(bit_symbols)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "2NxcXXKvWA03", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def vqt_train(epochs):\n", + " for epoch in range(epochs):\n", + " vqt_train_step()\n", + " print ('Epoch {} finished'.format(epoch + 1))\n", + "\n", + "vqt_train(100)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "B0N0LhGnZtmZ", + "colab": {} + }, + "source": [ + "plt.plot(vqt_loss_history)\n", + "plt.xlabel('Epoch #')\n", + "plt.ylabel('Loss [free energy]')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "colab_type": "code", + "id": "0zVqoEJPZtmk", + "colab": {} + }, + "source": [ + "plt.plot(vqt_fidelity_history)\n", + "plt.xlabel('Epoch #')\n", + "plt.ylabel('Fidelity with exact state')" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "xUCZgEaa7_q5", + "colab_type": "code", + "colab": {} + }, + "source": [ + "" + ], + "execution_count": 0, + "outputs": [] + } + ] +} \ No newline at end of file diff --git 
a/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl b/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl deleted file mode 100755 index d2af55d32..000000000 Binary files a/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl and /dev/null differ diff --git a/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-manylinux2010_x86_64.whl b/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-manylinux2010_x86_64.whl deleted file mode 100644 index f5291aba8..000000000 Binary files a/wheels/tensorflow_quantum-0.2.0-cp36-cp36m-manylinux2010_x86_64.whl and /dev/null differ diff --git a/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl b/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl deleted file mode 100755 index 32e0d1c70..000000000 Binary files a/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl and /dev/null differ diff --git a/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-manylinux2010_x86_64.whl b/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-manylinux2010_x86_64.whl deleted file mode 100644 index 9eb52d389..000000000 Binary files a/wheels/tensorflow_quantum-0.2.0-cp37-cp37m-manylinux2010_x86_64.whl and /dev/null differ