From d758f26aaa3c70880d4d31a792fc94a581140fc6 Mon Sep 17 00:00:00 2001 From: Antonio Martinez Date: Thu, 20 Feb 2020 19:23:50 -0800 Subject: [PATCH 01/43] Cleared room for examples --- WORKSPACE | 122 - benchmarks/BUILD | 6 - benchmarks/README.md | 49 - benchmarks/__init__.py | 0 benchmarks/scripts/BUILD | 63 - benchmarks/scripts/__init__.py | 0 .../scripts/benchmark_clifford_circuit.py | 135 - benchmarks/scripts/benchmark_op_gradients.py | 204 - .../scripts/benchmark_random_circuit.py | 149 - benchmarks/scripts/benchmark_util.py | 23 - benchmarks/scripts/benchmark_util_test.py | 55 - benchmarks/scripts/differentiators/BUILD | 19 - .../scripts/differentiators/__init__.py | 14 - .../differentiators/convergence_test.py | 193 - benchmarks/scripts/flags.py | 99 - benchmarks/scripts/flags_test.py | 39 - benchmarks/scripts/models/BUILD | 21 - benchmarks/scripts/models/__init__.py | 14 - .../scripts/models/random_clifford_circuit.py | 74 - .../models/random_clifford_circuit_test.py | 98 - benchmarks/scripts/reports/.gitignore | 2 - configure.sh | 123 - docs/_book.yaml | 55 - docs/_index.yaml | 73 - docs/api_docs/python/_toc.yaml | 75 - docs/api_docs/python/index.md | 29 - docs/api_docs/python/tfq.md | 49 - docs/api_docs/python/tfq/_api_cache.json | 854 ---- docs/api_docs/python/tfq/convert_to_tensor.md | 71 - docs/api_docs/python/tfq/datasets.md | 27 - .../tfq/datasets/excited_cluster_states.md | 76 - docs/api_docs/python/tfq/differentiators.md | 37 - .../tfq/differentiators/CentralDifference.md | 188 - .../tfq/differentiators/Differentiator.md | 204 - .../tfq/differentiators/ForwardDifference.md | 188 - .../tfq/differentiators/LinearCombination.md | 191 - .../tfq/differentiators/ParameterShift.md | 254 - .../tfq/differentiators/SGDifferentiator.md | 268 - docs/api_docs/python/tfq/from_tensor.md | 69 - .../api_docs/python/tfq/get_expectation_op.md | 103 - .../python/tfq/get_sampled_expectation_op.md | 112 - docs/api_docs/python/tfq/get_sampling_op.md | 88 - 
docs/api_docs/python/tfq/get_state_op.md | 85 - .../python/tfq/get_supported_gates.md | 29 - docs/api_docs/python/tfq/layers.md | 39 - docs/api_docs/python/tfq/layers/AddCircuit.md | 857 ---- .../python/tfq/layers/CircuitConstruction.md | 894 ---- .../python/tfq/layers/ControlledPQC.md | 954 ---- .../api_docs/python/tfq/layers/Expectation.md | 1035 ---- docs/api_docs/python/tfq/layers/PQC.md | 963 ---- docs/api_docs/python/tfq/layers/Sample.md | 960 ---- .../python/tfq/layers/SampledExpectation.md | 1039 ---- docs/api_docs/python/tfq/layers/State.md | 928 ---- docs/api_docs/python/tfq/padded_to_ragged.md | 42 - docs/design.md | 175 - docs/install.md | 194 - docs/overview.md | 56 - docs/tutorials/barren_plateaus.ipynb | 547 -- docs/tutorials/gradients.ipynb | 894 ---- docs/tutorials/hello_many_worlds.ipynb | 793 --- docs/tutorials/images/barren_1.png | Bin 36369 -> 0 bytes docs/tutorials/images/barren_2.png | Bin 25451 -> 0 bytes docs/tutorials/images/barren_3.png | Bin 104569 -> 0 bytes docs/tutorials/images/nn_control1.png | Bin 172811 -> 0 bytes docs/tutorials/images/nn_control2.png | Bin 105657 -> 0 bytes docs/tutorials/images/qcnn_1.png | Bin 110357 -> 0 bytes docs/tutorials/images/qcnn_2.png | Bin 19694 -> 0 bytes docs/tutorials/images/qcnn_3.png | Bin 163917 -> 0 bytes docs/tutorials/images/qcnn_4.png | Bin 57987 -> 0 bytes docs/tutorials/images/qcnn_5.png | Bin 156149 -> 0 bytes docs/tutorials/images/qcnn_6.png | Bin 240677 -> 0 bytes docs/tutorials/images/sensing_1.png | Bin 60241 -> 0 bytes docs/tutorials/images/sensing_2.png | Bin 104854 -> 0 bytes docs/tutorials/mnist.ipynb | 642 --- docs/tutorials/model.png | Bin 33023 -> 0 bytes docs/tutorials/qcnn.ipynb | 1135 ----- docs/tutorials/sensing.ipynb | 645 --- release/BUILD | 49 - release/MANIFEST.in | 1 - release/README.md | 31 - release/__init__.py | 0 release/build_all_wheels.sh | 71 - release/build_pip_package.sh | 58 - release/open_ubuntu_docker.sh | 21 - release/repair_wheels.sh | 28 - 
release/setup.py | 81 - requirements.txt | 6 - scripts/README.md | 11 - scripts/benchmark_all.sh | 30 - scripts/build_docs.py | 77 - scripts/build_pip_package_test.sh | 24 - scripts/ci_install.sh | 19 - scripts/ci_validate_tutorials.sh | 31 - scripts/format_all.sh | 24 - scripts/format_check.sh | 108 - scripts/format_ipynb.py | 39 - scripts/import_test.py | 62 - scripts/lint_all.sh | 29 - scripts/run_example.sh | 18 - scripts/test_all.sh | 26 - scripts/test_benchmarks.sh | 29 - scripts/test_tutorials.py | 49 - tensorflow_quantum/BUILD | 6 - tensorflow_quantum/__init__.py | 42 - tensorflow_quantum/core/BUILD | 6 - tensorflow_quantum/core/__init__.py | 23 - tensorflow_quantum/core/ops/BUILD | 234 - tensorflow_quantum/core/ops/__init__.py | 23 - tensorflow_quantum/core/ops/batch_util.py | 642 --- .../core/ops/batch_util_test.py | 250 - .../core/ops/circuit_execution_ops.py | 338 -- .../core/ops/circuit_execution_ops_test.py | 517 -- tensorflow_quantum/core/ops/cirq_ops.py | 649 --- tensorflow_quantum/core/ops/cirq_ops_test.py | 428 -- tensorflow_quantum/core/ops/load_module.py | 46 - tensorflow_quantum/core/ops/parse_context.cc | 258 - tensorflow_quantum/core/ops/parse_context.h | 80 - .../core/ops/tfq_circuit_append_op.cc | 94 - .../core/ops/tfq_ps_decompose_op.cc | 310 -- .../core/ops/tfq_ps_symbol_replace_op.cc | 200 - .../core/ops/tfq_ps_util_ops.py | 23 - .../core/ops/tfq_ps_util_ops_test.py | 831 --- .../ops/tfq_ps_weights_from_symbols_op.cc | 182 - .../core/ops/tfq_simulate_expectation_op.cc | 180 - .../core/ops/tfq_simulate_ops.py | 152 - .../core/ops/tfq_simulate_ops_test.py | 479 -- .../core/ops/tfq_simulate_state_op.cc | 153 - .../core/ops/tfq_simulate_utils.cc | 29 - .../core/ops/tfq_simulate_utils.h | 29 - .../core/ops/tfq_utility_ops.py | 40 - .../core/ops/tfq_utility_ops_test.py | 130 - tensorflow_quantum/core/proto/BUILD | 34 - tensorflow_quantum/core/proto/__init__.py | 14 - tensorflow_quantum/core/proto/pauli_sum.proto | 21 - 
tensorflow_quantum/core/qsim/BUILD | 125 - tensorflow_quantum/core/qsim/fuser_basic.cc | 129 - tensorflow_quantum/core/qsim/fuser_basic.h | 71 - .../core/qsim/fuser_basic_test.cc | 332 -- tensorflow_quantum/core/qsim/mux.cc | 49 - tensorflow_quantum/core/qsim/mux.h | 32 - tensorflow_quantum/core/qsim/mux_test.cc | 35 - tensorflow_quantum/core/qsim/state_space.cc | 114 - tensorflow_quantum/core/qsim/state_space.h | 116 - .../core/qsim/state_space_avx.cc | 615 --- .../core/qsim/state_space_avx.h | 86 - .../core/qsim/state_space_slow.cc | 175 - .../core/qsim/state_space_slow.h | 74 - .../core/qsim/state_space_sse.cc | 4471 ----------------- .../core/qsim/state_space_sse.h | 87 - tensorflow_quantum/core/qsim/util.cc | 37 - tensorflow_quantum/core/qsim/util.h | 33 - tensorflow_quantum/core/serialize/BUILD | 23 - tensorflow_quantum/core/serialize/__init__.py | 19 - .../core/serialize/serializer.py | 483 -- .../core/serialize/serializer_test.py | 649 --- tensorflow_quantum/core/src/BUILD | 126 - tensorflow_quantum/core/src/circuit.cc | 45 - tensorflow_quantum/core/src/circuit.h | 39 - tensorflow_quantum/core/src/circuit_parser.cc | 162 - tensorflow_quantum/core/src/circuit_parser.h | 41 - .../core/src/circuit_parser_test.cc | 242 - tensorflow_quantum/core/src/circuit_test.cc | 77 - tensorflow_quantum/core/src/gates_def.cc | 531 -- tensorflow_quantum/core/src/gates_def.h | 236 - tensorflow_quantum/core/src/gates_def_test.cc | 694 --- tensorflow_quantum/core/src/matrix.h | 234 - tensorflow_quantum/core/src/matrix_test.cc | 299 -- .../core/src/program_resolution.cc | 125 - .../core/src/program_resolution.h | 54 - .../core/src/program_resolution_test.cc | 307 -- tensorflow_quantum/datasets/BUILD | 20 - tensorflow_quantum/datasets/__init__.py | 17 - tensorflow_quantum/datasets/cluster_state.py | 93 - .../datasets/cluster_state_test.py | 48 - tensorflow_quantum/python/BUILD | 24 - tensorflow_quantum/python/__init__.py | 20 - .../python/differentiators/BUILD | 133 - 
.../python/differentiators/__init__.py | 30 - .../python/differentiators/differentiator.py | 257 - .../differentiators/differentiator_test.py | 91 - .../python/differentiators/gradient_test.py | 331 -- .../differentiators/linear_combination.py | 516 -- .../linear_combination_test.py | 196 - .../python/differentiators/parameter_shift.py | 341 -- .../differentiators/parameter_shift_test.py | 84 - .../differentiators/parameter_shift_util.py | 100 - .../parameter_shift_util_test.py | 105 - .../stochastic_differentiator.py | 456 -- .../stochastic_differentiator_test.py | 131 - .../stochastic_differentiator_util.py | 442 -- .../stochastic_differentiator_util_test.py | 489 -- tensorflow_quantum/python/layers/BUILD | 6 - tensorflow_quantum/python/layers/__init__.py | 30 - .../python/layers/circuit_construction/BUILD | 25 - .../layers/circuit_construction/__init__.py | 19 - .../layers/circuit_construction/elementary.py | 140 - .../circuit_construction/elementary_test.py | 120 - .../python/layers/circuit_executors/BUILD | 90 - .../layers/circuit_executors/__init__.py | 22 - .../layers/circuit_executors/expectation.py | 357 -- .../circuit_executors/expectation_test.py | 374 -- .../python/layers/circuit_executors/sample.py | 261 - .../layers/circuit_executors/sample_test.py | 214 - .../circuit_executors/sampled_expectation.py | 394 -- .../sampled_expectation_test.py | 437 -- .../python/layers/circuit_executors/state.py | 216 - .../layers/circuit_executors/state_test.py | 186 - .../python/layers/high_level/BUILD | 48 - .../python/layers/high_level/__init__.py | 20 - .../layers/high_level/controlled_pqc.py | 255 - .../layers/high_level/controlled_pqc_test.py | 161 - .../python/layers/high_level/pqc.py | 287 -- .../python/layers/high_level/pqc_test.py | 197 - .../python/operators/__init__.py | 14 - .../python/optimizers/__init__.py | 14 - tensorflow_quantum/python/util.py | 518 -- tensorflow_quantum/python/util_test.py | 362 -- third_party/BUILD | 0 third_party/tf/BUILD | 0 
third_party/tf/BUILD.tpl | 26 - third_party/tf/auditwheel | 9 - third_party/tf/tf_configure.bzl | 213 - ..._quantum-0.2.0-cp36-cp36m-linux_x86_64.whl | Bin 2887047 -> 0 bytes ...-0.2.0-cp36-cp36m-manylinux2010_x86_64.whl | Bin 2887069 -> 0 bytes ..._quantum-0.2.0-cp37-cp37m-linux_x86_64.whl | Bin 2887047 -> 0 bytes ...-0.2.0-cp37-cp37m-manylinux2010_x86_64.whl | Bin 2887068 -> 0 bytes 226 files changed, 44690 deletions(-) delete mode 100644 WORKSPACE delete mode 100644 benchmarks/BUILD delete mode 100644 benchmarks/README.md delete mode 100644 benchmarks/__init__.py delete mode 100644 benchmarks/scripts/BUILD delete mode 100644 benchmarks/scripts/__init__.py delete mode 100644 benchmarks/scripts/benchmark_clifford_circuit.py delete mode 100644 benchmarks/scripts/benchmark_op_gradients.py delete mode 100644 benchmarks/scripts/benchmark_random_circuit.py delete mode 100644 benchmarks/scripts/benchmark_util.py delete mode 100644 benchmarks/scripts/benchmark_util_test.py delete mode 100644 benchmarks/scripts/differentiators/BUILD delete mode 100644 benchmarks/scripts/differentiators/__init__.py delete mode 100644 benchmarks/scripts/differentiators/convergence_test.py delete mode 100644 benchmarks/scripts/flags.py delete mode 100644 benchmarks/scripts/flags_test.py delete mode 100644 benchmarks/scripts/models/BUILD delete mode 100644 benchmarks/scripts/models/__init__.py delete mode 100644 benchmarks/scripts/models/random_clifford_circuit.py delete mode 100644 benchmarks/scripts/models/random_clifford_circuit_test.py delete mode 100644 benchmarks/scripts/reports/.gitignore delete mode 100755 configure.sh delete mode 100644 docs/_book.yaml delete mode 100644 docs/_index.yaml delete mode 100644 docs/api_docs/python/_toc.yaml delete mode 100644 docs/api_docs/python/index.md delete mode 100644 docs/api_docs/python/tfq.md delete mode 100644 docs/api_docs/python/tfq/_api_cache.json delete mode 100644 docs/api_docs/python/tfq/convert_to_tensor.md delete mode 100644 
docs/api_docs/python/tfq/datasets.md delete mode 100644 docs/api_docs/python/tfq/datasets/excited_cluster_states.md delete mode 100644 docs/api_docs/python/tfq/differentiators.md delete mode 100644 docs/api_docs/python/tfq/differentiators/CentralDifference.md delete mode 100644 docs/api_docs/python/tfq/differentiators/Differentiator.md delete mode 100644 docs/api_docs/python/tfq/differentiators/ForwardDifference.md delete mode 100644 docs/api_docs/python/tfq/differentiators/LinearCombination.md delete mode 100644 docs/api_docs/python/tfq/differentiators/ParameterShift.md delete mode 100644 docs/api_docs/python/tfq/differentiators/SGDifferentiator.md delete mode 100644 docs/api_docs/python/tfq/from_tensor.md delete mode 100644 docs/api_docs/python/tfq/get_expectation_op.md delete mode 100644 docs/api_docs/python/tfq/get_sampled_expectation_op.md delete mode 100644 docs/api_docs/python/tfq/get_sampling_op.md delete mode 100644 docs/api_docs/python/tfq/get_state_op.md delete mode 100644 docs/api_docs/python/tfq/get_supported_gates.md delete mode 100644 docs/api_docs/python/tfq/layers.md delete mode 100644 docs/api_docs/python/tfq/layers/AddCircuit.md delete mode 100644 docs/api_docs/python/tfq/layers/CircuitConstruction.md delete mode 100644 docs/api_docs/python/tfq/layers/ControlledPQC.md delete mode 100644 docs/api_docs/python/tfq/layers/Expectation.md delete mode 100644 docs/api_docs/python/tfq/layers/PQC.md delete mode 100644 docs/api_docs/python/tfq/layers/Sample.md delete mode 100644 docs/api_docs/python/tfq/layers/SampledExpectation.md delete mode 100644 docs/api_docs/python/tfq/layers/State.md delete mode 100644 docs/api_docs/python/tfq/padded_to_ragged.md delete mode 100644 docs/design.md delete mode 100644 docs/install.md delete mode 100644 docs/overview.md delete mode 100644 docs/tutorials/barren_plateaus.ipynb delete mode 100644 docs/tutorials/gradients.ipynb delete mode 100644 docs/tutorials/hello_many_worlds.ipynb delete mode 100644 
docs/tutorials/images/barren_1.png delete mode 100644 docs/tutorials/images/barren_2.png delete mode 100644 docs/tutorials/images/barren_3.png delete mode 100644 docs/tutorials/images/nn_control1.png delete mode 100644 docs/tutorials/images/nn_control2.png delete mode 100644 docs/tutorials/images/qcnn_1.png delete mode 100644 docs/tutorials/images/qcnn_2.png delete mode 100644 docs/tutorials/images/qcnn_3.png delete mode 100644 docs/tutorials/images/qcnn_4.png delete mode 100644 docs/tutorials/images/qcnn_5.png delete mode 100644 docs/tutorials/images/qcnn_6.png delete mode 100644 docs/tutorials/images/sensing_1.png delete mode 100644 docs/tutorials/images/sensing_2.png delete mode 100644 docs/tutorials/mnist.ipynb delete mode 100644 docs/tutorials/model.png delete mode 100644 docs/tutorials/qcnn.ipynb delete mode 100644 docs/tutorials/sensing.ipynb delete mode 100644 release/BUILD delete mode 100644 release/MANIFEST.in delete mode 100644 release/README.md delete mode 100644 release/__init__.py delete mode 100755 release/build_all_wheels.sh delete mode 100755 release/build_pip_package.sh delete mode 100755 release/open_ubuntu_docker.sh delete mode 100755 release/repair_wheels.sh delete mode 100644 release/setup.py delete mode 100644 requirements.txt delete mode 100644 scripts/README.md delete mode 100644 scripts/benchmark_all.sh delete mode 100644 scripts/build_docs.py delete mode 100755 scripts/build_pip_package_test.sh delete mode 100755 scripts/ci_install.sh delete mode 100755 scripts/ci_validate_tutorials.sh delete mode 100755 scripts/format_all.sh delete mode 100755 scripts/format_check.sh delete mode 100644 scripts/format_ipynb.py delete mode 100644 scripts/import_test.py delete mode 100755 scripts/lint_all.sh delete mode 100755 scripts/run_example.sh delete mode 100755 scripts/test_all.sh delete mode 100644 scripts/test_benchmarks.sh delete mode 100644 scripts/test_tutorials.py delete mode 100644 tensorflow_quantum/BUILD delete mode 100644 
tensorflow_quantum/__init__.py delete mode 100644 tensorflow_quantum/core/BUILD delete mode 100644 tensorflow_quantum/core/__init__.py delete mode 100644 tensorflow_quantum/core/ops/BUILD delete mode 100644 tensorflow_quantum/core/ops/__init__.py delete mode 100644 tensorflow_quantum/core/ops/batch_util.py delete mode 100644 tensorflow_quantum/core/ops/batch_util_test.py delete mode 100644 tensorflow_quantum/core/ops/circuit_execution_ops.py delete mode 100644 tensorflow_quantum/core/ops/circuit_execution_ops_test.py delete mode 100644 tensorflow_quantum/core/ops/cirq_ops.py delete mode 100644 tensorflow_quantum/core/ops/cirq_ops_test.py delete mode 100644 tensorflow_quantum/core/ops/load_module.py delete mode 100644 tensorflow_quantum/core/ops/parse_context.cc delete mode 100644 tensorflow_quantum/core/ops/parse_context.h delete mode 100644 tensorflow_quantum/core/ops/tfq_circuit_append_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_ps_decompose_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_ps_symbol_replace_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_ps_util_ops.py delete mode 100644 tensorflow_quantum/core/ops/tfq_ps_util_ops_test.py delete mode 100644 tensorflow_quantum/core/ops/tfq_ps_weights_from_symbols_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_expectation_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_ops.py delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_ops_test.py delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_state_op.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_utils.cc delete mode 100644 tensorflow_quantum/core/ops/tfq_simulate_utils.h delete mode 100644 tensorflow_quantum/core/ops/tfq_utility_ops.py delete mode 100644 tensorflow_quantum/core/ops/tfq_utility_ops_test.py delete mode 100644 tensorflow_quantum/core/proto/BUILD delete mode 100644 tensorflow_quantum/core/proto/__init__.py delete mode 100644 
tensorflow_quantum/core/proto/pauli_sum.proto delete mode 100644 tensorflow_quantum/core/qsim/BUILD delete mode 100644 tensorflow_quantum/core/qsim/fuser_basic.cc delete mode 100644 tensorflow_quantum/core/qsim/fuser_basic.h delete mode 100644 tensorflow_quantum/core/qsim/fuser_basic_test.cc delete mode 100644 tensorflow_quantum/core/qsim/mux.cc delete mode 100644 tensorflow_quantum/core/qsim/mux.h delete mode 100644 tensorflow_quantum/core/qsim/mux_test.cc delete mode 100644 tensorflow_quantum/core/qsim/state_space.cc delete mode 100644 tensorflow_quantum/core/qsim/state_space.h delete mode 100644 tensorflow_quantum/core/qsim/state_space_avx.cc delete mode 100644 tensorflow_quantum/core/qsim/state_space_avx.h delete mode 100644 tensorflow_quantum/core/qsim/state_space_slow.cc delete mode 100644 tensorflow_quantum/core/qsim/state_space_slow.h delete mode 100644 tensorflow_quantum/core/qsim/state_space_sse.cc delete mode 100644 tensorflow_quantum/core/qsim/state_space_sse.h delete mode 100644 tensorflow_quantum/core/qsim/util.cc delete mode 100644 tensorflow_quantum/core/qsim/util.h delete mode 100644 tensorflow_quantum/core/serialize/BUILD delete mode 100644 tensorflow_quantum/core/serialize/__init__.py delete mode 100644 tensorflow_quantum/core/serialize/serializer.py delete mode 100644 tensorflow_quantum/core/serialize/serializer_test.py delete mode 100644 tensorflow_quantum/core/src/BUILD delete mode 100644 tensorflow_quantum/core/src/circuit.cc delete mode 100644 tensorflow_quantum/core/src/circuit.h delete mode 100644 tensorflow_quantum/core/src/circuit_parser.cc delete mode 100644 tensorflow_quantum/core/src/circuit_parser.h delete mode 100644 tensorflow_quantum/core/src/circuit_parser_test.cc delete mode 100644 tensorflow_quantum/core/src/circuit_test.cc delete mode 100644 tensorflow_quantum/core/src/gates_def.cc delete mode 100644 tensorflow_quantum/core/src/gates_def.h delete mode 100644 tensorflow_quantum/core/src/gates_def_test.cc delete mode 100644 
tensorflow_quantum/core/src/matrix.h delete mode 100644 tensorflow_quantum/core/src/matrix_test.cc delete mode 100644 tensorflow_quantum/core/src/program_resolution.cc delete mode 100644 tensorflow_quantum/core/src/program_resolution.h delete mode 100644 tensorflow_quantum/core/src/program_resolution_test.cc delete mode 100644 tensorflow_quantum/datasets/BUILD delete mode 100644 tensorflow_quantum/datasets/__init__.py delete mode 100644 tensorflow_quantum/datasets/cluster_state.py delete mode 100644 tensorflow_quantum/datasets/cluster_state_test.py delete mode 100644 tensorflow_quantum/python/BUILD delete mode 100644 tensorflow_quantum/python/__init__.py delete mode 100644 tensorflow_quantum/python/differentiators/BUILD delete mode 100644 tensorflow_quantum/python/differentiators/__init__.py delete mode 100644 tensorflow_quantum/python/differentiators/differentiator.py delete mode 100644 tensorflow_quantum/python/differentiators/differentiator_test.py delete mode 100644 tensorflow_quantum/python/differentiators/gradient_test.py delete mode 100644 tensorflow_quantum/python/differentiators/linear_combination.py delete mode 100644 tensorflow_quantum/python/differentiators/linear_combination_test.py delete mode 100644 tensorflow_quantum/python/differentiators/parameter_shift.py delete mode 100644 tensorflow_quantum/python/differentiators/parameter_shift_test.py delete mode 100644 tensorflow_quantum/python/differentiators/parameter_shift_util.py delete mode 100644 tensorflow_quantum/python/differentiators/parameter_shift_util_test.py delete mode 100644 tensorflow_quantum/python/differentiators/stochastic_differentiator.py delete mode 100644 tensorflow_quantum/python/differentiators/stochastic_differentiator_test.py delete mode 100644 tensorflow_quantum/python/differentiators/stochastic_differentiator_util.py delete mode 100644 tensorflow_quantum/python/differentiators/stochastic_differentiator_util_test.py delete mode 100644 tensorflow_quantum/python/layers/BUILD delete 
mode 100644 tensorflow_quantum/python/layers/__init__.py delete mode 100644 tensorflow_quantum/python/layers/circuit_construction/BUILD delete mode 100644 tensorflow_quantum/python/layers/circuit_construction/__init__.py delete mode 100644 tensorflow_quantum/python/layers/circuit_construction/elementary.py delete mode 100644 tensorflow_quantum/python/layers/circuit_construction/elementary_test.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/BUILD delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/__init__.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/expectation.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/expectation_test.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/sample.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/sample_test.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/sampled_expectation.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/sampled_expectation_test.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/state.py delete mode 100644 tensorflow_quantum/python/layers/circuit_executors/state_test.py delete mode 100644 tensorflow_quantum/python/layers/high_level/BUILD delete mode 100644 tensorflow_quantum/python/layers/high_level/__init__.py delete mode 100644 tensorflow_quantum/python/layers/high_level/controlled_pqc.py delete mode 100644 tensorflow_quantum/python/layers/high_level/controlled_pqc_test.py delete mode 100644 tensorflow_quantum/python/layers/high_level/pqc.py delete mode 100644 tensorflow_quantum/python/layers/high_level/pqc_test.py delete mode 100644 tensorflow_quantum/python/operators/__init__.py delete mode 100644 tensorflow_quantum/python/optimizers/__init__.py delete mode 100644 tensorflow_quantum/python/util.py delete mode 100644 tensorflow_quantum/python/util_test.py delete mode 100644 third_party/BUILD delete 
mode 100644 third_party/tf/BUILD delete mode 100644 third_party/tf/BUILD.tpl delete mode 100644 third_party/tf/auditwheel delete mode 100644 third_party/tf/tf_configure.bzl delete mode 100755 wheels/tensorflow_quantum-0.2.0-cp36-cp36m-linux_x86_64.whl delete mode 100644 wheels/tensorflow_quantum-0.2.0-cp36-cp36m-manylinux2010_x86_64.whl delete mode 100755 wheels/tensorflow_quantum-0.2.0-cp37-cp37m-linux_x86_64.whl delete mode 100644 wheels/tensorflow_quantum-0.2.0-cp37-cp37m-manylinux2010_x86_64.whl diff --git a/WORKSPACE b/WORKSPACE deleted file mode 100644 index 7bb7c2655..000000000 --- a/WORKSPACE +++ /dev/null @@ -1,122 +0,0 @@ -# This file includes external dependencies that are required to compile the -# TensorFlow op. Maybe of them are specific versions used by the TensorFlow -# binary used. These are extracted from TF v2.0.0, but are also compatible -# with v1.14.0. - -load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") - -http_archive( - name = "com_google_absl", - sha256 = "acd93f6baaedc4414ebd08b33bebca7c7a46888916101d8c0b8083573526d070", - strip_prefix = "abseil-cpp-43ef2148c0936ebf7cb4be6b19927a9d9d145b8f", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz", - "https://github.com/abseil/abseil-cpp/archive/43ef2148c0936ebf7cb4be6b19927a9d9d145b8f.tar.gz", - ], -) - -http_archive( - name = "com_google_googletest", - sha256 = "ff7a82736e158c077e76188232eac77913a15dac0b22508c390ab3f88e6d6d86", - strip_prefix = "googletest-b6cd405286ed8635ece71c72f118e659f4ade3fb", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", - "https://github.com/google/googletest/archive/b6cd405286ed8635ece71c72f118e659f4ade3fb.zip", - ], -) - -http_archive( - name = "com_google_protobuf", - sha256 = "b9e92f9af8819bbbc514e2902aec860415b70209f31dfc8c4fa72515a5df9d59", 
- strip_prefix = "protobuf-310ba5ee72661c081129eb878c1bbcec936b20f0", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz", - "https://github.com/protocolbuffers/protobuf/archive/310ba5ee72661c081129eb878c1bbcec936b20f0.tar.gz", - ], -) - -# Use this zlib rule that depends on github since it is more reliable than zlib.net. -http_archive( - name = "zlib", - build_file = "@com_google_protobuf//:third_party/zlib.BUILD", - sha256 = "629380c90a77b964d896ed37163f5c3a34f6e6d897311f1df2a7016355c45eff", - strip_prefix = "zlib-1.2.11", - urls = ["https://github.com/madler/zlib/archive/v1.2.11.tar.gz"], -) - -load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps") - -protobuf_deps() - -# com_google_protobuf depends on @bazel_skylib -http_archive( - name = "bazel_skylib", - sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d", - strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b", - urls = ["https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz"], -) - -http_archive( - name = "cirq", - sha256 = "e882a0bfbf47c75c69d70de354049d64bbec2ef0d114def7da36cf4867e7b57f", - strip_prefix = "Cirq-0.7.0", - urls = ["https://github.com/quantumlib/Cirq/archive/v0.7.0.zip"], -) - -# Added for crosstool in tensorflow. 
-http_archive( - name = "io_bazel_rules_closure", - sha256 = "5b00383d08dd71f28503736db0500b6fb4dda47489ff5fc6bed42557c07c6ba9", - strip_prefix = "rules_closure-308b05b2419edb5c8ee0471b67a40403df940149", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", - "https://github.com/bazelbuild/rules_closure/archive/308b05b2419edb5c8ee0471b67a40403df940149.tar.gz", # 2019-06-13 - ], -) - -http_archive( - name = "org_tensorflow", - sha256 = "e82f3b94d863e223881678406faa5071b895e1ff928ba18578d2adbbc6b42a4c", - strip_prefix = "tensorflow-2.1.0", - urls = [ - "https://github.com/tensorflow/tensorflow/archive/v2.1.0.zip", - ], -) - -load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") - -tf_workspace(tf_repo_name = "@org_tensorflow") - -load("//third_party/tf:tf_configure.bzl", "tf_configure") - -tf_configure(name = "local_config_tf") - -http_archive( - name = "eigen", - # TODO(pmassey): Probably move this content in a third_party/eigen.BUILD file - build_file_content = """ -cc_library( - name = "eigen3", - textual_hdrs = glob(["Eigen/**", "unsupported/**"]), - visibility = ["//visibility:public"], -) - """, - sha256 = "7e7a57e33c59280a17a66e521396cd8b1a55d0676c9f807078522fda52114b5c", - strip_prefix = "eigen-eigen-8071cda5714d", - urls = [ - "https://storage.googleapis.com/mirror.tensorflow.org/bitbucket.org/eigen/eigen/get/8071cda5714d.tar.gz", - "https://bitbucket.org/eigen/eigen/get/8071cda5714d.tar.gz", - ], -) - -http_archive( - name = "six_archive", - build_file = "@com_google_protobuf//:six.BUILD", - sha256 = "105f8d68616f8248e24bf0e9372ef04d3cc10104f1980f54d57b2ce73a5ad56a", - url = "https://pypi.python.org/packages/source/s/six/six-1.10.0.tar.gz#md5=34eed507548117b2ab523ab14b2f8b55", -) - -bind( - name = "six", - actual = "@six_archive//:six", -) diff --git a/benchmarks/BUILD b/benchmarks/BUILD deleted file mode 100644 index fd4e75c2e..000000000 
--- a/benchmarks/BUILD +++ /dev/null @@ -1,6 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) diff --git a/benchmarks/README.md b/benchmarks/README.md deleted file mode 100644 index b71bd1b04..000000000 --- a/benchmarks/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# Tensorflow Quantum Benchmarks - -## Testing instructions -Benchmarks are currently tested separately from the main repository. To run -a benchmark testcase, simply _run_ the benchmark file like with any other unit test: -``` -bazel run benchmarks/scripts: -``` - -## Instructions to run -A benchmark can be run from the command line or a bash script by setting model -parameters via flags, separated from Bazel flags by a `--` delimiter. To run a benchmark with a set of specific parameters, use -the following command template: -``` -bazel run benchmarks/scripts: -- --benchmarks= -``` -Some notes on benchmark configuration: - - "all" is a valid option for the `benchmarks` flag, and will result in all benchmarks methods associated with that file to run. - - If a benchmark method runs twice with identical configurations, the most recent run will overwrite previous reports. 
- - For information on valid parameter flags and their descriptions see `flags.py` - - -### Sample benchmark experiments - -For example, to benchmark a dense depth-10 Clifford circuit over 5 qubits call: -``` -bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" \ - --cxxopt="-msse3" --cxxopt="-msse4" \ - benchmarks/scripts:benchmark_clifford_circuit -- \ - --n_moments 5 --n_qubits 4 \ - --benchmarks=benchmark_clifford_circuit_eager -``` -This will produce a proto benchmark report under `benchmarks/reports` corresponding to the chosen parameters: -``` -benchmarks/scripts/reports/CliffordBenchmarks.benchmark_clifford_circuit_4_5_1 -``` - - -To benchmark the parameter shift differentiation method on a random depth-10 4-qubit circuit with 10 parameters call, where the circuit will be differentiated -over 50 trials, each time over a batch of 10 circuits. -``` -bazel run -c opt --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" --cxxopt="-msse2" \ - --cxxopt="-msse3" --cxxopt="-msse4" \ - benchmarks/scripts:benchmark_op_gradients -- \ - --n_moments 10 --n_qubits 4 --n_symbols 10 \ - --n_runs 50 --batch_size 10 \ - --benchmarks=benchmark_parameter_shift -``` - diff --git a/benchmarks/__init__.py b/benchmarks/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/benchmarks/scripts/BUILD b/benchmarks/scripts/BUILD deleted file mode 100644 index 1f93be0d6..000000000 --- a/benchmarks/scripts/BUILD +++ /dev/null @@ -1,63 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_test( - name = "benchmark_clifford_circuit", - srcs = ["benchmark_clifford_circuit.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_random_circuit", - srcs = ["benchmark_random_circuit.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/serialize:serializer", - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_op_gradients", - srcs = ["benchmark_op_gradients.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:batch_util", - "//tensorflow_quantum/core/ops:cirq_ops", - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/core/ops:tfq_utility_ops_py", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:linear_combination", - "//tensorflow_quantum/python/differentiators:parameter_shift", - "//tensorflow_quantum/python/differentiators:stochastic_differentiator", - "@local_config_tf//:test_log_pb2", - ], -) - -py_library( - name = "benchmark_util", - srcs = ["benchmark_util.py"], - deps = [ - "@local_config_tf//:test_log_pb2", - ], -) - -py_test( - name = "benchmark_util_test", - srcs = ["benchmark_util_test.py"], - python_version = "PY3", - deps = [ - ":benchmark_util", - "@local_config_tf//:test_log_pb2", - ], -) diff --git a/benchmarks/scripts/__init__.py b/benchmarks/scripts/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/benchmarks/scripts/benchmark_clifford_circuit.py b/benchmarks/scripts/benchmark_clifford_circuit.py deleted file mode 100644 index 643eff790..000000000 --- a/benchmarks/scripts/benchmark_clifford_circuit.py +++ /dev/null @@ -1,135 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Benchmark simulators against classically simulatable circuits.""" -import os -import time - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -from tensorflow_quantum.core.serialize.serializer import serialize_circuit -from models.random_clifford_circuit import random_clifford_circuit -import flags -import benchmark_util - -SEED = 48510234 -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_qubits=3, n_moments=5, op_density=0.99) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_qubits=4, n_moments=5, op_density=0.99) -ALL_PARAMS = [TEST_PARAMS_1, TEST_PARAMS_2] - - -class CliffordBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the Clifford benchmarking class.""" - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testBenchmarkCliffordCircuitEager(self, params): - """Test that Op constructs and runs correctly.""" - proto_file_path = os.path.join( - SRC, "reports/", - "CliffordBenchmarks.benchmark_clifford_circuit_{}_{}_{}".format( - params.n_qubits, params.n_moments, params.batch_size)) - self.addCleanup(os.remove, proto_file_path) - - bench = 
CliffordBenchmarks(params=params) - bench.benchmark_clifford_circuit_eager() - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual( - res.name, - "CliffordBenchmarks.benchmark_clifford_circuit_{}_{}_{}".format( - params.n_qubits, params.n_moments, params.batch_size)) - self.assertEqual( - res.extras.get("n_qubits").double_value, params.n_qubits) - self.assertEqual( - res.extras.get("n_moments").double_value, params.n_moments) - self.assertEqual( - res.extras.get("op_density").double_value, params.op_density) - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - -class CliffordBenchmarks(tf.test.Benchmark): - """Benchmark simulators against Clifford circuits. - - Flags: - --n_qubits --n_moments --op_density --batch_size --n_runs --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(CliffordBenchmarks, self).__init__() - # Allow input params for testing purposes. - self.params = params if params else flags.FLAGS - - def _simulate_circuit(self, circuit, params): - # TODO: implement backend switch - return tfq_simulate_ops.tfq_simulate_state( - [str(serialize_circuit(circuit))] * params.batch_size, ["None"], - [[0]] * params.batch_size) - - def benchmark_clifford_circuit_eager(self): - """tf.test.Benchmark does not provide eager benchmarks methods.""" - - qubits = cirq.GridQubit.rect(1, self.params.n_qubits) - circuit = random_clifford_circuit( - qubits, - self.params.n_moments, - self.params.op_density, - random_state=np.random.RandomState(SEED)) - - for _ in range(self.params.n_burn): - _ = self._simulate_circuit(circuit, self.params) - - deltas = [None] * self.params.n_runs - for i in range(self.params.n_runs): - start = time.perf_counter() - _ = self._simulate_circuit(circuit, self.params) - deltas[i] = time.perf_counter() - start - - extras = { - 'n_qubits': self.params.n_qubits, - 'n_moments': self.params.n_moments, - 'op_density': self.params.op_density, - 
'batch_size': self.params.batch_size, - "min_time": min(deltas), - } - name = "benchmark_clifford_circuit_{}_{}_{}".format( - self.params.n_qubits, self.params.n_moments, self.params.batch_size) - - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - benchmark_values = { - "iters": self.params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_op_gradients.py b/benchmarks/scripts/benchmark_op_gradients.py deleted file mode 100644 index 88f37d620..000000000 --- a/benchmarks/scripts/benchmark_op_gradients.py +++ /dev/null @@ -1,204 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Benchmark differentiator methods.""" -import os -import time -import string - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -import benchmark_util -import flags - -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import (linear_combination, - parameter_shift, - stochastic_differentiator - ) - -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_symbols=4, - n_qubits=3, - n_moments=5, - op_density=0.9) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_symbols=3, - n_qubits=4, - n_moments=5, - op_density=0.6) - - -class GradientBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the Gradient benchmarking class.""" - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'diff': [ - linear_combination.ForwardDifference(), - linear_combination.CentralDifference(), - parameter_shift.ParameterShift(), - stochastic_differentiator.SGDifferentiator(), - ], - 'params': [TEST_PARAMS_1, TEST_PARAMS_2] - }))) - def testBenchmarkGradient(self, diff, params): - """Test that op constructs and runs correctly.""" - - bench_name = "GradientBenchmarks.{}_{}_{}_{}_{}".format( - diff.__class__.__name__, params.n_qubits, params.n_moments, - params.batch_size, params.n_symbols) - proto_file_path = os.path.join(SRC, "reports/", "{}".format(bench_name)) - self.addCleanup(os.remove, proto_file_path) - - bench = GradientBenchmarks(params=params) - bench.setup() - bench._benchmark_tfq_differentiator(diff, params) - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual(res.name, bench_name) - self.assertEqual( - res.extras.get("n_qubits").double_value, params.n_qubits) - self.assertEqual( - 
res.extras.get("n_moments").double_value, params.n_moments) - self.assertEqual( - res.extras.get("op_density").double_value, params.op_density) - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - -class GradientBenchmarks(tf.test.Benchmark): - """Benchmarks for circuit differentiation. - - Flags: - --n_qubits --n_moments --op_density --n_runs --n_symbols --batch_size - --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(GradientBenchmarks, self).__init__() - self.params = params if params else flags.FLAGS - self.setup() - - def setup(self): - """Persistent variational circuit, parameters, and observables.""" - qubits = cirq.GridQubit.rect(1, self.params.n_qubits) - - # Generate arbitrary symbol set without name clashes. - symbol_names = set() - while len(symbol_names) < self.params.n_symbols: - symbol_names.add(''.join( - np.random.choice(list(string.ascii_uppercase), - size=4, - replace=True))) - symbol_names = list(symbol_names) - - circuit_batch, resolver_batch = util.random_symbol_circuit_resolver_batch( - qubits=qubits, - symbols=symbol_names, - batch_size=self.params.batch_size, - n_moments=self.params.n_moments, - p=self.params.op_density) - psums = util.random_pauli_sums(qubits, 1, self.params.batch_size) - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - self.symbol_names = symbol_names - self.symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - self.programs = util.convert_to_tensor(circuit_batch) - self.psums = util.convert_to_tensor([psums]) - - def _benchmark_tfq_differentiator(self, differentiator, params): - """Common pipeline for benchmarking and reporting.""" - # for parametrization over a single differentiator instance - differentiator.refresh() - op = differentiator.generate_differentiable_op( - analytic_op=tfq_simulate_ops.tfq_simulate_expectation) - - for 
_ in range(params.n_burn): - op(self.programs, self.symbol_names, self.symbol_values_tensor, - self.psums) - - deltas = [None] * params.n_runs - for i in range(params.n_runs): - start = time.perf_counter() - with tf.GradientTape() as g: - g.watch(self.symbol_values_tensor) - expectations = op(self.programs, self.symbol_names, - self.symbol_values_tensor, self.psums) - g.gradient(expectations, self.symbol_values_tensor) - deltas[i] = time.perf_counter() - start - - # Name benchmark logs by differentiator classname. - name = "{}_{}_{}_{}_{}".format(differentiator.__class__.__name__, - params.n_qubits, params.n_moments, - params.batch_size, params.n_symbols) - - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - extras = { - 'n_qubits': params.n_qubits, - 'n_moments': params.n_moments, - 'op_density': params.op_density, - 'n_symbols': params.n_symbols, - 'batch_size': params.batch_size, - "min_time": min(deltas), - } - - benchmark_values = { - "iters": params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - def benchmark_finite_difference_forward(self): - """Benchmark the forward difference gradient method.""" - diff = linear_combination.ForwardDifference() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_finite_difference_central(self): - """Benchmark the central difference gradient method.""" - diff = linear_combination.CentralDifference() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_parameter_shift(self): - """Benchmark the parameter shift gradient method.""" - diff = parameter_shift.ParameterShift() - self._benchmark_tfq_differentiator(diff, self.params) - - def benchmark_stochastic_differentiator(self): - """Benchmark the default stochastic differentiator.""" - diff = 
stochastic_differentiator.SGDifferentiator() - self._benchmark_tfq_differentiator(diff, self.params) - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_random_circuit.py b/benchmarks/scripts/benchmark_random_circuit.py deleted file mode 100644 index 51d4ccfbf..000000000 --- a/benchmarks/scripts/benchmark_random_circuit.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Benchmark simulators against classically intractable 'supremacy' circuits.""" -import os -import time - -from absl.testing import parameterized -import cirq -import tensorflow as tf -import numpy as np - -from tensorflow_quantum.core.ops import tfq_simulate_ops -from tensorflow_quantum.core.serialize.serializer import serialize_circuit -import flags -import benchmark_util - -SEED = 63536323 -SRC = os.path.dirname(os.path.realpath(__file__)) -os.environ['TEST_REPORT_FILE_PREFIX'] = os.path.join(SRC, 'reports/') -TEST_PARAMS_1 = flags.TEST_FLAGS(n_rows=3, n_cols=5, n_moments=5) -TEST_PARAMS_2 = flags.TEST_FLAGS(n_rows=4, n_cols=4, n_moments=20) - - -def make_random_circuit(n_rows, n_cols, depth): - """Generate a random unparameterized circuit of fixed depth.""" - return cirq.experiments.generate_boixo_2018_supremacy_circuits_v2_grid( - n_rows=n_rows, - n_cols=n_cols, - cz_depth=depth - 2, # Account for beginning/ending Hadamard layers - seed=SEED) - - -class RandomCircuitBenchmarksTest(tf.test.TestCase, parameterized.TestCase): - """Test the random circuit benchmarking class.""" - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testBenchmarkRandomCircuit(self, params): - """Test that Op constructs and runs correctly.""" - proto_file_path = os.path.join( - SRC, "reports/", - "RandomCircuitBenchmarks.benchmark_random_circuit_{}_{}_{}".format( - params.n_rows, params.n_cols, params.n_moments)) - self.addCleanup(os.remove, proto_file_path) - - bench = RandomCircuitBenchmarks(params=params) - bench.benchmark_random_circuit() - - res = benchmark_util.read_benchmark_entry(proto_file_path) - self.assertEqual( - res.name, - "RandomCircuitBenchmarks.benchmark_random_circuit_{}_{}_{}".format( - params.n_rows, params.n_cols, params.n_moments)) - self.assertEqual(res.extras.get("n_rows").double_value, params.n_rows) - 
self.assertEqual(res.extras.get("n_cols").double_value, params.n_cols) - self.assertEqual( - res.extras.get("n_moments").double_value, params.n_moments) - - assert hasattr(res, 'iters') - assert hasattr(res, 'wall_time') - - @parameterized.named_parameters( - ("params_1", TEST_PARAMS_1), - ("params_2", TEST_PARAMS_2), - ) - def testRandomCircuitParams(self, params): - """Ensure that the random circuits are structured as advertised.""" - circuit = make_random_circuit(params.n_rows, params.n_cols, - params.n_moments) - self.assertEqual(len(circuit), params.n_moments) - self.assertEqual(len(circuit.all_qubits()), - params.n_rows * params.n_cols) - - -class RandomCircuitBenchmarks(tf.test.Benchmark): - """Benchmark simulators against random 'supremacy' circuits. - - Flags: - --n_rows --n_cols --n_moments --batch_size --n_runs --n_burn - """ - - def __init__(self, params=None): - """Pull in command line flags or use provided flags.""" - super(RandomCircuitBenchmarks, self).__init__() - # Allow input params for testing purposes. 
- self.params = params if params else flags.FLAGS - - def _simulate_circuit(self, circuit, params): - # TODO: implement backend switch - return tfq_simulate_ops.tfq_simulate_state( - [str(serialize_circuit(circuit))] * params.batch_size, ["None"], - [[0]] * params.batch_size) - - def benchmark_random_circuit(self): - """Benchmark simulator performance on a classically intractable circuit.""" - - circuit = make_random_circuit(self.params.n_rows, self.params.n_cols, - self.params.n_moments) - for _ in range(self.params.n_burn): - _ = self._simulate_circuit(circuit, self.params) - - deltas = [None] * self.params.n_runs - for i in range(self.params.n_runs): - start = time.perf_counter() - _ = self._simulate_circuit(circuit, self.params) - deltas[i] = time.perf_counter() - start - - extras = { - 'n_rows': self.params.n_rows, - 'n_cols': self.params.n_cols, - 'n_qubits': len(circuit.all_qubits()), - 'n_moments': self.params.n_moments, - 'batch_size': self.params.batch_size, - "min_time": min(deltas), - } - - name = "benchmark_random_circuit_{}_{}_{}".format( - self.params.n_rows, self.params.n_cols, self.params.n_moments) - full_path = os.path.join(os.environ['TEST_REPORT_FILE_PREFIX'], - "{}.{}".format(self.__class__.__name__, name)) - if os.path.exists(full_path): - os.remove(full_path) - - benchmark_values = { - "iters": self.params.n_runs, - "wall_time": np.median(deltas), - "extras": extras, - "name": name, - } - self.report_benchmark(**benchmark_values) - return benchmark_values - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/benchmark_util.py b/benchmarks/scripts/benchmark_util.py deleted file mode 100644 index 5b4bea2e5..000000000 --- a/benchmarks/scripts/benchmark_util.py +++ /dev/null @@ -1,23 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Utility functions for benchmark tools.""" -import tensorflow as tf -import test_log_pb2 - - -def read_benchmark_entry(f): - s = tf.io.gfile.GFile(f, "rb").read() - entries = test_log_pb2.BenchmarkEntries.FromString(s) - return entries.entry[0] diff --git a/benchmarks/scripts/benchmark_util_test.py b/benchmarks/scripts/benchmark_util_test.py deleted file mode 100644 index bece69897..000000000 --- a/benchmarks/scripts/benchmark_util_test.py +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for utilities related to reading/running benchmarks.""" -import os -import tempfile - -import tensorflow as tf - -import test_log_pb2 -import benchmark_util - - -def _make_dummy_benchmark_report(): - """Make a serialized benchmark report.""" - entries = test_log_pb2.BenchmarkEntries() - entry = entries.entry.add() - entry.name = "dummy_report" - entry.iters = 1234 - entry.wall_time = 5678 - return entries.SerializeToString() - - -class ReadBenchmarkEntryTest(tf.test.TestCase): - """Test reading serialized benchmark results.""" - - def test_read_benchmark_entry(self): - """Test reading test_log protobuf contents.""" - - # Do temp file setup and queue teardown. - with tempfile.NamedTemporaryFile(prefix='ReadBenchmarkEntryTest', - dir=self.get_temp_dir(), - delete=False) as temp: - temp.write(_make_dummy_benchmark_report()) - self.addCleanup(lambda: os.remove(temp.name)) - - res = benchmark_util.read_benchmark_entry(temp.name) - self.assertEqual(res.name, "dummy_report") - self.assertEqual(res.iters, 1234) - self.assertEqual(res.wall_time, 5678) - - -if __name__ == '__main__': - tf.test.main() diff --git a/benchmarks/scripts/differentiators/BUILD b/benchmarks/scripts/differentiators/BUILD deleted file mode 100644 index 61241b346..000000000 --- a/benchmarks/scripts/differentiators/BUILD +++ /dev/null @@ -1,19 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. 
-exports_files(["__init__.py"]) - -py_test( - name = "convergence_test", - srcs = ["convergence_test.py"], - python_version = "PY3", - deps = [ - "//tensorflow_quantum/core/ops:batch_util", - "//tensorflow_quantum/core/ops:cirq_ops", - "//tensorflow_quantum/core/ops:tfq_simulate_ops_py", - "//tensorflow_quantum/python:util", - "//tensorflow_quantum/python/differentiators:stochastic_differentiator", - ], -) diff --git a/benchmarks/scripts/differentiators/__init__.py b/benchmarks/scripts/differentiators/__init__.py deleted file mode 100644 index bf5b48863..000000000 --- a/benchmarks/scripts/differentiators/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== \ No newline at end of file diff --git a/benchmarks/scripts/differentiators/convergence_test.py b/benchmarks/scripts/differentiators/convergence_test.py deleted file mode 100644 index 42499d294..000000000 --- a/benchmarks/scripts/differentiators/convergence_test.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Testing for SGDifferentiator convergence & calculation consistency in TFQ.""" -import copy -import time - -import numpy as np -import tensorflow as tf -from absl.testing import parameterized - -import cirq -from tensorflow_quantum.python import util -from tensorflow_quantum.python.differentiators import stochastic_differentiator -from tensorflow_quantum.core.ops import tfq_simulate_ops, batch_util - -# DISCLAIMER: Environment : Intel(R) Xeon(R) W-2135 CPU @ 3.70GHz, 12 cores. -# The overall tests take around 1 hours. -DIFFS_NUM_RUNS = [ - # The tests without sampling cost Hamiltonian take 1.5 hours. 
- # Case 1 : ParameterShift ~ 0.04 sec/shot - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=False, - stochastic_cost=False), 1), - # Case 2 : coordinate ~ 42 sec (0.04 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=False, - stochastic_cost=False), 1100), - # Case 3 : generator ~ 350 sec (0.023 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=True, - stochastic_cost=False), 15000), - # Case 4 : coordinate + generator ~ 400 sec ~ (0.020 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=False), 20000), - # The tests with sampling cost Hamiltonian takes around 3 hours - # Case 5 : cost ~ 35 sec (0.15 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=False, - stochastic_cost=True), 250), - # Case 6 : cost + coordinate ~ 160 sec (0.15 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=False, - stochastic_cost=True), 1200), - # Case 7 : cost + generator ~ 320 sec (0.13 sec/shot) - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=False, - stochastic_generator=True, - stochastic_cost=True), 2500), - # Case 8 : All ~ 2400 sec ~ 40 m (0.12 sec/shot) - # Increase error margin due to numerical stability of summing up gradients - (stochastic_differentiator.SGDifferentiator(stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=True), 20000), -] - - -# TODO(jaeyoo): aggregate identical _cirq_simple_finite_difference functions -# in different tests into one python file and import it. 
-def _cirq_simple_finite_difference(circuit_batch, - resolvers, - symbol_names, - op_batch, - grid_spacing=0.0001): - simulator = cirq.sim.Simulator() - - init_vals = batch_util.batch_calculate_expectation(circuit_batch, resolvers, - op_batch, simulator) - grad_circuits = [] - grad_resolvers = [] - grad_pauli_sums = [] - for this_program, this_pauli_sums, this_resolver in \ - zip(circuit_batch, op_batch, resolvers): - for symbol in symbol_names: - perturbed_resolver = copy.deepcopy(this_resolver) - perturbed_resolver.param_dict[symbol] += grid_spacing - grad_circuits.append(this_program) - grad_pauli_sums.append(this_pauli_sums) - grad_resolvers.append(perturbed_resolver) - - # shape: [n_programs * len(symbol_names), n_pauli_sums] - results = np.array( - batch_util.batch_calculate_expectation(circuits=grad_circuits, - param_resolvers=grad_resolvers, - ops=grad_pauli_sums, - simulator=simulator)) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - gradient_generator = results.transpose().reshape( - (len(op_batch[0]), len(circuit_batch), len(symbol_names))) - - # shape: [n_pauli_sums, n_programs, len(symbol_names)] - forward_pass_vals = np.transpose( - np.vstack([np.expand_dims(init_vals, axis=0)] * len(symbol_names)), - (2, 1, 0)) - - return np.sum(1 / grid_spacing * (gradient_generator - forward_pass_vals), - axis=0) - - -class StochasticGradientConvergenceTest(tf.test.TestCase, - parameterized.TestCase): - - @parameterized.parameters( - list( - util.kwargs_cartesian_product( - **{ - 'differentiator_num_runs': DIFFS_NUM_RUNS, - 'n_qubits': [3], - 'n_programs': [3], - 'n_ops': [3], - 'symbol_names': [['a', 'b']], - 'eps': [0.1] - }))) - def test_gradients_vs_cirq_finite_difference(self, differentiator_num_runs, - n_qubits, n_programs, n_ops, - symbol_names, eps): - """Convergence tests on SGDifferentiator variants.""" - - # TODO(trevormccrt): remove this once I build the user-facing op - # interface - differentiator, num_runs = differentiator_num_runs - 
differentiator.refresh() - op = differentiator.generate_differentiable_op( - analytic_op=tfq_simulate_ops.tfq_simulate_expectation) - - qubits = cirq.GridQubit.rect(1, n_qubits) - circuit_batch, resolver_batch = \ - util.random_symbol_circuit_resolver_batch( - cirq.GridQubit.rect(1, n_qubits), symbol_names, n_programs) - - psums = [ - util.random_pauli_sums(qubits, 1, n_ops) for _ in circuit_batch - ] - - symbol_values_array = np.array( - [[resolver[symbol] - for symbol in symbol_names] - for resolver in resolver_batch], - dtype=np.float32) - - # calculate tfq gradient - symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) - programs = util.convert_to_tensor(circuit_batch) - ops = util.convert_to_tensor(psums) - - def _get_gradient(): - with tf.GradientTape() as g: - g.watch(symbol_values_tensor) - expectations = op(programs, symbol_names, symbol_values_tensor, - ops) - return tf.cast(g.gradient(expectations, symbol_values_tensor), - dtype=tf.float64) - - # warm-up & initialize tfq_grads. 
- grads_sum = _get_gradient() - tfq_grads = grads_sum - - # calculate gradients in cirq using a very simple forward differencing - # scheme - cirq_grads = _cirq_simple_finite_difference(circuit_batch, - resolver_batch, - symbol_names, psums) - cnt = 1 - # Since self.assertAllClose() has more strict atol than that of - # np.allclose(), it is required to set smaller value to np.allclose() - total_time = 0 - while cnt < num_runs and (not np.allclose( - tfq_grads, cirq_grads, atol=eps * 0.9)): - cnt = cnt + 1 - s = time.time() - grads_sum = grads_sum + _get_gradient() - total_time += time.time() - s - tfq_grads = grads_sum / cnt - - self.assertAllClose(cirq_grads, tfq_grads, atol=eps) - print('Passed: count {}, total_time {} ({}sec/shot)'.format( - cnt, total_time, total_time / cnt)) - - -if __name__ == '__main__': - tf.test.main() diff --git a/benchmarks/scripts/flags.py b/benchmarks/scripts/flags.py deleted file mode 100644 index eaf7e78e2..000000000 --- a/benchmarks/scripts/flags.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Command line flags shared between benchmarks.""" -from collections import namedtuple -from absl import flags as absl_flags - -FLAGS = absl_flags.FLAGS - -absl_flags.DEFINE_integer('n_qubits', - None, - 'Number of qubits in the benchmark circuit.', - lower_bound=2, - upper_bound=16) - -absl_flags.DEFINE_integer('n_moments', - None, - 'Depth of benchmark circuit.', - lower_bound=1) - -absl_flags.DEFINE_float( - 'op_density', - 0.99, - 'Density of operators in benchmark circuit, or the probability that a ' - 'given qubit in each moment is acted on by an operation.', - lower_bound=0, - upper_bound=0.99) # For compatibility with util lib - -absl_flags.DEFINE_integer('n_symbols', - 1, 'Number of symbols to parametrize a circuit by. ' - 'Use this to tune optimization convergence times.', - lower_bound=1) - -absl_flags.DEFINE_integer( - 'n_rows', - None, - 'Number of qubit rows in random circuit to benchmark.', - lower_bound=2) - -absl_flags.DEFINE_integer( - 'n_cols', - None, - 'Number of qubit columns in random circuit to benchmark.', - lower_bound=2) - -absl_flags.DEFINE_integer('batch_size', - 1, - 'The number of circuits to simulate in parallel.', - lower_bound=1) - -absl_flags.DEFINE_integer( - 'n_iters', - 1, "Number of rounds to run each benchmark, corresponding to" - "number of iterations in a training context. ", - lower_bound=1) - -# Benchmark metadata. -absl_flags.DEFINE_string('backend', None, - 'Which backend simulator to benchmark.') - -absl_flags.DEFINE_integer( - 'n_runs', - 1, - 'Number of times to run the model for its specified number of iterations ' - 'during benchmarking. For example, if a model is specified to be trained ' - 'for 50 iterations, `n_runs=10` would reset this model after training a ' - 'total of 10 times, resulting in a time overhead of 500 total iterations.', - lower_bound=1) - -absl_flags.DEFINE_integer('n_burn', - 0, - 'Number of burner runs. 
See `n_runs`.', - lower_bound=0) - - -def TEST_FLAGS(**kwargs): - """Create a set of test flags by kwarg assignment. - - This constructs a named tuple that mimics the interface of absl.flags. - Any command line flags defined with defaults will be present in the output - with their default value unless overwritten. - - Returns: - namedtuple containing valid flag names. - """ - base_flags = FLAGS.flag_values_dict() - updated = dict(base_flags, **kwargs) - return namedtuple('params', updated.keys())(**updated) diff --git a/benchmarks/scripts/flags_test.py b/benchmarks/scripts/flags_test.py deleted file mode 100644 index 6383809c6..000000000 --- a/benchmarks/scripts/flags_test.py +++ /dev/null @@ -1,39 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Tests for benchmark command line flags.""" - -import tensorflow as tf -from benchmarks.scripts import flags - - -class FlagsTest(tf.test.TestCase): - """Test the flag and test-flag interface.""" - - def test_test_flags_defaults(self): - """Test default values in TEST_FLAGS conform to flag defaults.""" - params = flags.TEST_FLAGS() - assert params.n_runs == 1 - assert params.n_burn == 0 - assert params.n_iters == 1 - - def test_test_flags(self): - """Test that kwargs convert to attributes.""" - params = flags.TEST_FLAGS(garbage="garbage value", other_garbage=123) - assert params.garbage == "garbage value" - assert params.other_garbate == 123 - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/models/BUILD b/benchmarks/scripts/models/BUILD deleted file mode 100644 index e5bd16e60..000000000 --- a/benchmarks/scripts/models/BUILD +++ /dev/null @@ -1,21 +0,0 @@ -package(default_visibility = ["//visibility:public"]) - -licenses(["notice"]) - -# Export for the PIP package. -exports_files(["__init__.py"]) - -py_binary( - name = "random_clifford_circuit", - srcs = ["random_clifford_circuit.py"], - python_version = "PY3", -) - -py_test( - name = "random_clifford_circuit_test", - srcs = ["random_clifford_circuit_test.py"], - python_version = "PY3", - deps = [ - ":random_clifford_circuit", - ], -) diff --git a/benchmarks/scripts/models/__init__.py b/benchmarks/scripts/models/__init__.py deleted file mode 100644 index bf5b48863..000000000 --- a/benchmarks/scripts/models/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== \ No newline at end of file diff --git a/benchmarks/scripts/models/random_clifford_circuit.py b/benchmarks/scripts/models/random_clifford_circuit.py deleted file mode 100644 index a08a667b5..000000000 --- a/benchmarks/scripts/models/random_clifford_circuit.py +++ /dev/null @@ -1,74 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from typing import Iterable - -import numpy as np -import cirq - - -def random_clifford_circuit(qubits, n_moments, op_density, random_state=None): - """Generate a dense circuit using elements of C2. - - Each layer will consist of a random number of one- or two-qubit Clifford - gates acting on a random subset of qubits. - Args: - qubits: The sequence of GridQubits that the circuit should act on. 
- Because the qubits on which an operation acts are chosen randomly, - not all given qubits may be acted upon. - n_moments: The number of moments in the generated circuit. - op_density: the expected fraction of qubits acted on in each - moment in half-open interval [0, 1]. - random_state: Optional random state or random state seed. - - Returns: - Clifford circuit with randomly chosen and assigned gates. - """ - if random_state and not isinstance(random_state, - (np.random.RandomState, int)): - raise TypeError("Random state input must be a numpy RandomState or an " - "integer seed to a random state.") - - if not isinstance(qubits, Iterable) or not all( - isinstance(q, cirq.GridQubit) for q in qubits): - raise TypeError("Must provide an iterable of GridQubits.") - - n_qubits = len(qubits) - if n_qubits < 2: - raise ValueError("Must provide at least 2 qubits to circuit generator.") - - rng = np.random - if isinstance(random_state, np.random.RandomState): - rng = random_state - elif isinstance(random_state, int): - rng = np.random.RandomState(random_state) - - cliffords_1q = (cirq.X, cirq.Y, cirq.Z, cirq.H) - cliffords_2q = (cirq.CZ, cirq.CNOT, cirq.SWAP) - moments = [] - for _ in range(n_moments): - moment_ops = [] - n_layer_qubits = rng.binomial(n_qubits, op_density) - layer_qubits = list( - rng.choice(qubits, size=n_layer_qubits, replace=False)) - while any(layer_qubits): - sampler = cliffords_1q - if len(layer_qubits) > 1: - sampler += cliffords_2q - gate = rng.choice(sampler) - gate_qubits = [layer_qubits.pop() for _ in range(gate.num_qubits())] - moment_ops.append(gate(*gate_qubits)) - moments += moment_ops - return cirq.Circuit(*moments) diff --git a/benchmarks/scripts/models/random_clifford_circuit_test.py b/benchmarks/scripts/models/random_clifford_circuit_test.py deleted file mode 100644 index c6d968ea0..000000000 --- a/benchmarks/scripts/models/random_clifford_circuit_test.py +++ /dev/null @@ -1,98 +0,0 @@ -# Copyright 2020 The TensorFlow Quantum Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -from absl.testing import parameterized -import cirq -import numpy as np -import tensorflow as tf - -from random_clifford_circuit import random_clifford_circuit - - -class RandomCliffordCircuitTest(parameterized.TestCase, tf.test.TestCase): - """Test the Random Clifford Circuit model.""" - - def test_random_clifford_circuit_inputs(self): - """Test for input validation.""" - qubits = cirq.GridQubit.rect(3, 2) - n_moments = 10 - op_density = 0.9 - with self.assertRaisesRegex(TypeError, 'RandomState'): - random_clifford_circuit(qubits, - n_moments, - op_density, - random_state="string") - with self.assertRaisesRegex(TypeError, 'RandomState'): - random_clifford_circuit(qubits, - n_moments, - op_density, - random_state=[1, 2, 3]) - - with self.assertRaisesRegex(TypeError, 'iterable'): - random_clifford_circuit(cirq.GridQubit(0, 0), - n_moments, - op_density, - random_state=None) - with self.assertRaisesRegex(TypeError, 'iterable'): - random_clifford_circuit(cirq.LineQubit(0), - n_moments, - op_density, - random_state=None) - - with self.assertRaisesRegex(ValueError, '2 qubits'): - random_clifford_circuit([cirq.GridQubit(0, 0)], - n_moments, - op_density, - random_state=None) - - def test_reproducible_circuit(self): - """Test that circuits are reproducible via random state seeding.""" - qubits = cirq.GridQubit.rect(4, 2) - 
n_moments = 13 - op_density = 0.8 - rng = np.random.RandomState(4902796) - - c1 = cirq.Circuit(*random_clifford_circuit( - qubits, n_moments, op_density, random_state=rng)) - - rng = np.random.RandomState(4902796) - c2 = cirq.Circuit(*random_clifford_circuit( - qubits, n_moments, op_density, random_state=rng)) - self.assertEqual(c1, c2) - - def test_only_cliffords(self): - """Test that the circuit contains only Cliffords.""" - qubits = cirq.GridQubit.rect(4, 2) - n_moments = 10 - op_density = 0.9 - circuit = cirq.Circuit( - *random_clifford_circuit(qubits, n_moments, op_density)) - cliffords = set( - [cirq.X, cirq.Y, cirq.Z, cirq.H, cirq.CZ, cirq.CNOT, cirq.SWAP]) - non_id_gates = [op.gate for op in circuit.all_operations()] - self.assertTrue(set(non_id_gates).issubset(cliffords)) - - @parameterized.parameters([5, 7, 11, 20]) - def test_random_clifford_circuit_depth(self, n_moments): - """Test that the circuit has the number of moments requested.""" - qubits = cirq.GridQubit.rect(3, 2) - op_density = 0.9 - circuit = cirq.Circuit( - *random_clifford_circuit(qubits, n_moments, op_density)) - self.assertEqual(len(circuit), n_moments) - - -if __name__ == "__main__": - tf.test.main() diff --git a/benchmarks/scripts/reports/.gitignore b/benchmarks/scripts/reports/.gitignore deleted file mode 100644 index 005717ead..000000000 --- a/benchmarks/scripts/reports/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -* -!.gitignore diff --git a/configure.sh b/configure.sh deleted file mode 100755 index 798da6021..000000000 --- a/configure.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/bin/bash -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -function write_to_bazelrc() { - echo "$1" >> .bazelrc -} - -function write_action_env_to_bazelrc() { - write_to_bazelrc "build --action_env $1=\"$2\"" -} - -# Function to write the SHARED_LIBRARY_DIR as a linkopt. This is required to -# get bazel tests that require the libtensorflow shared object to compile. This -# function is necessary because ${SHARED_LIBRARY_DIR} is space delimited and -# using bash arguments is a hacky way to split it. -function write_linkopt_dir_to_bazelrc() { - write_to_bazelrc "build --linkopt -Wl,-rpath,$1" >> .bazelrc -} - -# Remove .bazelrc if it already exist -[ -e .bazelrc ] && rm .bazelrc - -# Check if we are building GPU or CPU ops, default CPU -while [[ "$TF_NEED_CUDA" == "" ]]; do - read -p "Do you want to build ops again TensorFlow CPU pip package?"\ -" Y or enter for CPU (tensorflow), N for GPU (tensorflow-gpu). [Y/n] " INPUT - case $INPUT in - [Yy]* ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;; - [Nn]* ) echo "Build with GPU pip package."; TF_NEED_CUDA=1;; - "" ) echo "Build with CPU pip package."; TF_NEED_CUDA=0;; - * ) echo "Invalid selection: " $INPUT;; - esac -done - - - -# CPU -if [[ "$TF_NEED_CUDA" == "0" ]]; then - - # Check if it's installed - if [[ $(python3 -m pip show tensorflow) == *tensorflow* ]] || [[ $(python3 -m pip show tf-nightly) == *tf-nightly* ]] ; then - echo 'Using installed tensorflow' - else - # Uninstall GPU version if it is installed. 
- if [[ $(python3 -m pip show tensorflow-gpu) == *tensorflow-gpu* ]]; then - echo 'Already have gpu version of tensorflow installed. Uninstalling......\n' - python3 -m pip uninstall tensorflow-gpu - elif [[ $(python3 -m pip show tf-nightly-gpu) == *tf-nightly-gpu* ]]; then - echo 'Already have gpu version of tensorflow installed. Uninstalling......\n' - python3 -m pip uninstall tf-nightly-gpu - fi - # Install CPU version - echo 'Installing tensorflow......\n' - python3 -m pip install tensorflow - fi - -else - - # Check if it's installed - if [[ $(python3 -m pip show tensorflow-gpu) == *tensorflow-gpu* ]] || [[ $(python3 -m pip show tf-nightly-gpu) == *tf-nightly-gpu* ]]; then - echo 'Using installed tensorflow-gpu' - else - # Uninstall CPU version if it is installed. - if [[ $(python3 -m pip show tensorflow) == *tensorflow* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - python3 -m pip uninstall tensorflow - elif [[ $(python3 -m pip show tf-nightly) == *tf-nightly* ]]; then - echo 'Already have tensorflow non-gpu installed. Uninstalling......\n' - python3 -m pip uninstall tf-nightly - fi - # Install CPU version - echo 'Installing tensorflow-gpu .....\n' - python3 -m pip install tensorflow-gpu - fi -fi - - -TF_CFLAGS=( $(python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_compile_flags()))') ) -TF_LFLAGS="$(python3 -c 'import tensorflow as tf; print(" ".join(tf.sysconfig.get_link_flags()))')" - -write_to_bazelrc "build:cuda --define=using_cuda=true --define=using_cuda_nvcc=true" -write_to_bazelrc "build:cuda --crosstool_top=@local_config_cuda//crosstool:toolchain" -write_to_bazelrc "build --spawn_strategy=standalone" -write_to_bazelrc "build --strategy=Genrule=standalone" -write_to_bazelrc "build -c opt" - - -write_action_env_to_bazelrc "TF_HEADER_DIR" ${TF_CFLAGS:2} -SHARED_LIBRARY_DIR=${TF_LFLAGS:2} -SHARED_LIBRARY_NAME=$(echo $TF_LFLAGS | rev | cut -d":" -f1 | rev) -if ! 
[[ $TF_LFLAGS =~ .*:.* ]]; then - if [[ "$(uname)" == "Darwin" ]]; then - SHARED_LIBRARY_NAME="libtensorflow_framework.dylib" - else - SHARED_LIBRARY_NAME="libtensorflow_framework.so" - fi -fi -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_DIR" ${SHARED_LIBRARY_DIR} -write_action_env_to_bazelrc "TF_SHARED_LIBRARY_NAME" ${SHARED_LIBRARY_NAME} -write_action_env_to_bazelrc "TF_NEED_CUDA" ${TF_NEED_CUDA} -write_linkopt_dir_to_bazelrc ${SHARED_LIBRARY_DIR} - -# TODO(yifeif): do not hardcode path -if [[ "$TF_NEED_CUDA" == "1" ]]; then - write_action_env_to_bazelrc "CUDNN_INSTALL_PATH" "/usr/lib/x86_64-linux-gnu" - write_action_env_to_bazelrc "TF_CUDA_VERSION" "10.0" - write_action_env_to_bazelrc "TF_CUDNN_VERSION" "7" - write_action_env_to_bazelrc "CUDA_TOOLKIT_PATH" "/usr/local/cuda" - write_to_bazelrc "build --config=cuda" - write_to_bazelrc "test --config=cuda" -fi diff --git a/docs/_book.yaml b/docs/_book.yaml deleted file mode 100644 index eefd661fe..000000000 --- a/docs/_book.yaml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -upper_tabs: -# Tabs left of dropdown menu -- include: /_upper_tabs_left.yaml -- include: /api_docs/_upper_tabs_api.yaml -# Dropdown menu -- name: Resources - path: /resources - is_default: true - menu: - - include: /resources/_menu_toc.yaml - lower_tabs: - # Subsite tabs - other: - - name: Guide & Tutorials - contents: - - title: "Overview" - path: /quantum/overview - - title: "Install" - path: /quantum/install - - title: "Design and concepts" - path: /quantum/design - - heading: Tutorials - - title: "Hello, many worlds" - path: /quantum/tutorials/hello_many_worlds - - title: "MNIST classification" - path: /quantum/tutorials/mnist - - title: "Calculate gradients" - path: /quantum/tutorials/gradients - - title: "Barren plateaus" - path: /quantum/tutorials/barren_plateaus - - title: "Quantum CNN" - path: /quantum/tutorials/qcnn - - title: "Quantum sensing" - path: /quantum/tutorials/sensing - - - name: API - skip_translation: true - contents: - - include: /quantum/api_docs/python/_toc.yaml - -- include: /_upper_tabs_right.yaml diff --git a/docs/_index.yaml b/docs/_index.yaml deleted file mode 100644 index cc9788d89..000000000 --- a/docs/_index.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -book_path: /quantum/_book.yaml -project_path: /quantum/_project.yaml -description: -landing_page: - custom_css_path: /site-assets/css/style.css - rows: - - heading: "TensorFlow Quantum is a library for hybrid quantum-classical machine learning." - items: - - classname: devsite-landing-row-50 - description: > -

TensorFlow Quantum (TFQ) is a Python framework for hybrid - quantum-classical machine learning. As an application framework, TFQ - allows quantum algorithm researchers and ML application researchers to - leverage Google’s quantum computing frameworks, all from within - TensorFlow.

-

TensorFlow Quantum focuses on modeling quantum data. It provides - tools to interleave quantum algorithms and logic designed in - Cirq - with TensorFlow. A basic understanding of quantum computing is required - to effectively use TensorFlow Quantum.

-

To get started, read the overview and - design and concepts guide, then run the - tutorials.

- - code_block: | -
-        # A hybrid quantum-classical model.
-        model = tf.keras.Sequential([
-            # Quantum circuit data comes in inside of tensors.
-            tf.keras.Input(shape=(), dtype=tf.dtypes.string),
-
-            # Parametrized Quantum Circuit (PQC) provides output
-            # data from the input circuits run on a quantum computer.
-            tfq.layers.PQC(my_circuit, [cirq.Z(q1), cirq.X(q0)]),
-
-            # Output data from quantum computer passed through model.
-            tf.keras.layers.Dense(50)
-        ])
-        
- - - classname: devsite-landing-row-cards - items: - - heading: "Quantum supremacy using a programmable superconducting processor" - image_path: /resources/images/tf-logo-card-16x9.png - path: https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html - buttons: - - label: "Read on the Google AI blog" - path: https://ai.googleblog.com/2019/10/quantum-supremacy-using-programmable.html - - heading: "Programming a quantum computer
with Cirq" - youtube_id: 16ZfkPRVf2w - buttons: - - label: Watch the video - path: https://www.youtube.com/watch?v=16ZfkPRVf2w - - heading: "TensorFlow Quantum on GitHub" - image_path: /resources/images/github-card-16x9.png - path: https://github.com/tensorflow/quantum - buttons: - - label: "View on GitHub" - path: https://github.com/tensorflow/quantum diff --git a/docs/api_docs/python/_toc.yaml b/docs/api_docs/python/_toc.yaml deleted file mode 100644 index a801aa6fd..000000000 --- a/docs/api_docs/python/_toc.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright 2020 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -toc: -- title: tfq - section: - - title: Overview - path: /quantum/api_docs/python/tfq - - title: convert_to_tensor - path: /quantum/api_docs/python/tfq/convert_to_tensor - - title: from_tensor - path: /quantum/api_docs/python/tfq/from_tensor - - title: get_expectation_op - path: /quantum/api_docs/python/tfq/get_expectation_op - - title: get_sampled_expectation_op - path: /quantum/api_docs/python/tfq/get_sampled_expectation_op - - title: get_sampling_op - path: /quantum/api_docs/python/tfq/get_sampling_op - - title: get_state_op - path: /quantum/api_docs/python/tfq/get_state_op - - title: get_supported_gates - path: /quantum/api_docs/python/tfq/get_supported_gates - - title: padded_to_ragged - path: /quantum/api_docs/python/tfq/padded_to_ragged -- title: tfq.datasets - section: - - title: Overview - path: /quantum/api_docs/python/tfq/datasets - - title: excited_cluster_states - path: /quantum/api_docs/python/tfq/datasets/excited_cluster_states -- title: tfq.differentiators - section: - - title: Overview - path: /quantum/api_docs/python/tfq/differentiators - - title: CentralDifference - path: /quantum/api_docs/python/tfq/differentiators/CentralDifference - - title: Differentiator - path: /quantum/api_docs/python/tfq/differentiators/Differentiator - - title: ForwardDifference - path: /quantum/api_docs/python/tfq/differentiators/ForwardDifference - - title: LinearCombination - path: /quantum/api_docs/python/tfq/differentiators/LinearCombination - - title: ParameterShift - path: /quantum/api_docs/python/tfq/differentiators/ParameterShift - - title: SGDifferentiator - path: /quantum/api_docs/python/tfq/differentiators/SGDifferentiator -- title: tfq.layers - section: - - title: Overview - path: /quantum/api_docs/python/tfq/layers - - title: AddCircuit - path: /quantum/api_docs/python/tfq/layers/AddCircuit - - title: ControlledPQC - path: /quantum/api_docs/python/tfq/layers/ControlledPQC - 
- title: Expectation - path: /quantum/api_docs/python/tfq/layers/Expectation - - title: PQC - path: /quantum/api_docs/python/tfq/layers/PQC - - title: Sample - path: /quantum/api_docs/python/tfq/layers/Sample - - title: SampledExpectation - path: /quantum/api_docs/python/tfq/layers/SampledExpectation - - title: State - path: /quantum/api_docs/python/tfq/layers/State diff --git a/docs/api_docs/python/index.md b/docs/api_docs/python/index.md deleted file mode 100644 index 2673ec069..000000000 --- a/docs/api_docs/python/index.md +++ /dev/null @@ -1,29 +0,0 @@ -# All symbols in TensorFlow Quantum - -## Primary symbols -* tfq -* tfq.convert_to_tensor -* tfq.datasets -* tfq.datasets.excited_cluster_states -* tfq.differentiators -* tfq.differentiators.CentralDifference -* tfq.differentiators.Differentiator -* tfq.differentiators.ForwardDifference -* tfq.differentiators.LinearCombination -* tfq.differentiators.ParameterShift -* tfq.differentiators.SGDifferentiator -* tfq.from_tensor -* tfq.get_expectation_op -* tfq.get_sampled_expectation_op -* tfq.get_sampling_op -* tfq.get_state_op -* tfq.get_supported_gates -* tfq.layers -* tfq.layers.AddCircuit -* tfq.layers.ControlledPQC -* tfq.layers.Expectation -* tfq.layers.PQC -* tfq.layers.Sample -* tfq.layers.SampledExpectation -* tfq.layers.State -* tfq.padded_to_ragged \ No newline at end of file diff --git a/docs/api_docs/python/tfq.md b/docs/api_docs/python/tfq.md deleted file mode 100644 index 6f45eed9c..000000000 --- a/docs/api_docs/python/tfq.md +++ /dev/null @@ -1,49 +0,0 @@ -
- - -
- -# Module: tfq - - - - -
- - - View source on GitHub - -
- - - -Module functions for tensorflow_quantum.* - - - -## Modules - -[`datasets`](./tfq/datasets.md) module: Interesting quantum datasets. - -[`differentiators`](./tfq/differentiators.md) module: Module functions for tfq.differentiators.* - -[`layers`](./tfq/layers.md) module: Module definitions for tensorflow_quantum.python.layers.* - -## Functions - -[`convert_to_tensor(...)`](./tfq/convert_to_tensor.md): Convert lists of tfq supported primitives to tensor representations. - -[`from_tensor(...)`](./tfq/from_tensor.md): Convert a tensor of tfq primitives back to Python objects. - -[`get_expectation_op(...)`](./tfq/get_expectation_op.md): Get a Tensorflow op that will calculate batches of expectation values. - -[`get_sampled_expectation_op(...)`](./tfq/get_sampled_expectation_op.md): Get a TensorFlow op that will calculate sampled expectation values. - -[`get_sampling_op(...)`](./tfq/get_sampling_op.md): Get a Tensorflow op that produces samples from given quantum circuits. - -[`get_state_op(...)`](./tfq/get_state_op.md): Get a tensorflow op that produces states from given quantum circuits. - -[`get_supported_gates(...)`](./tfq/get_supported_gates.md): A helper to get the gates supported by tfq. - -[`padded_to_ragged(...)`](./tfq/padded_to_ragged.md): Utility `tf.function` that converts a padded tensor to ragged. 
- diff --git a/docs/api_docs/python/tfq/_api_cache.json b/docs/api_docs/python/tfq/_api_cache.json deleted file mode 100644 index 9693b9daa..000000000 --- a/docs/api_docs/python/tfq/_api_cache.json +++ /dev/null @@ -1,854 +0,0 @@ -{ - "duplicate_of": { - "tfq.differentiators.CentralDifference.differentiate_analytic": "tfq.differentiators.LinearCombination.differentiate_analytic", - "tfq.differentiators.CentralDifference.differentiate_sampled": "tfq.differentiators.LinearCombination.differentiate_sampled", - "tfq.differentiators.CentralDifference.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.CentralDifference.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.Differentiator.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.Differentiator.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.Differentiator.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.Differentiator.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.Differentiator.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.Differentiator.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.Differentiator.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ForwardDifference.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.ForwardDifference.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.ForwardDifference.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.ForwardDifference.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.ForwardDifference.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.ForwardDifference.__ne__": "tfq.differentiators.CentralDifference.__ne__", - 
"tfq.differentiators.ForwardDifference.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ForwardDifference.differentiate_analytic": "tfq.differentiators.LinearCombination.differentiate_analytic", - "tfq.differentiators.ForwardDifference.differentiate_sampled": "tfq.differentiators.LinearCombination.differentiate_sampled", - "tfq.differentiators.ForwardDifference.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.ForwardDifference.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.LinearCombination.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.LinearCombination.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.LinearCombination.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.LinearCombination.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.LinearCombination.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.LinearCombination.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.LinearCombination.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.LinearCombination.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.LinearCombination.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.ParameterShift.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.ParameterShift.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.ParameterShift.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.ParameterShift.__init__": "tfq.differentiators.Differentiator.__init__", - "tfq.differentiators.ParameterShift.__le__": "tfq.differentiators.CentralDifference.__le__", - 
"tfq.differentiators.ParameterShift.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.ParameterShift.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.ParameterShift.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.ParameterShift.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.ParameterShift.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.differentiators.SGDifferentiator.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.differentiators.SGDifferentiator.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.differentiators.SGDifferentiator.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.differentiators.SGDifferentiator.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.differentiators.SGDifferentiator.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.differentiators.SGDifferentiator.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.differentiators.SGDifferentiator.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.differentiators.SGDifferentiator.generate_differentiable_op": "tfq.differentiators.Differentiator.generate_differentiable_op", - "tfq.differentiators.SGDifferentiator.refresh": "tfq.differentiators.Differentiator.refresh", - "tfq.layers.AddCircuit.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.AddCircuit.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.AddCircuit.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.AddCircuit.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.AddCircuit.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.AddCircuit.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.AddCircuit.__new__": "tfq.differentiators.CentralDifference.__new__", - 
"tfq.layers.ControlledPQC.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.ControlledPQC.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.ControlledPQC.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.ControlledPQC.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.ControlledPQC.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.ControlledPQC.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.ControlledPQC.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.ControlledPQC.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.ControlledPQC.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.ControlledPQC.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.ControlledPQC.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.ControlledPQC.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.ControlledPQC.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.ControlledPQC.build": "tfq.layers.AddCircuit.build", - "tfq.layers.ControlledPQC.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.ControlledPQC.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.ControlledPQC.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.ControlledPQC.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.ControlledPQC.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.ControlledPQC.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.ControlledPQC.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.ControlledPQC.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.ControlledPQC.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.ControlledPQC.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - 
"tfq.layers.ControlledPQC.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.ControlledPQC.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.ControlledPQC.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.ControlledPQC.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.ControlledPQC.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.ControlledPQC.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.ControlledPQC.input": "tfq.layers.AddCircuit.input", - "tfq.layers.ControlledPQC.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.ControlledPQC.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.ControlledPQC.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.ControlledPQC.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.ControlledPQC.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.ControlledPQC.name": "tfq.layers.AddCircuit.name", - "tfq.layers.ControlledPQC.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.ControlledPQC.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.ControlledPQC.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.ControlledPQC.output": "tfq.layers.AddCircuit.output", - "tfq.layers.ControlledPQC.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.ControlledPQC.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.ControlledPQC.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.ControlledPQC.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.ControlledPQC.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.ControlledPQC.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.ControlledPQC.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.ControlledPQC.updates": 
"tfq.layers.AddCircuit.updates", - "tfq.layers.ControlledPQC.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.ControlledPQC.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.Expectation.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.Expectation.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.Expectation.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.Expectation.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.Expectation.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.Expectation.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.Expectation.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.Expectation.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.Expectation.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.Expectation.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.Expectation.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.Expectation.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.Expectation.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.Expectation.build": "tfq.layers.AddCircuit.build", - "tfq.layers.Expectation.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.Expectation.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.Expectation.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.Expectation.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.Expectation.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.Expectation.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.Expectation.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.Expectation.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.Expectation.get_input_mask_at": 
"tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.Expectation.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.Expectation.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.Expectation.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.Expectation.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.Expectation.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.Expectation.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.Expectation.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.Expectation.input": "tfq.layers.AddCircuit.input", - "tfq.layers.Expectation.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.Expectation.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.Expectation.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.Expectation.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.Expectation.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.Expectation.name": "tfq.layers.AddCircuit.name", - "tfq.layers.Expectation.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.Expectation.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.Expectation.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.Expectation.output": "tfq.layers.AddCircuit.output", - "tfq.layers.Expectation.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.Expectation.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.Expectation.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.Expectation.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.Expectation.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.Expectation.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.Expectation.trainable_weights": 
"tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.Expectation.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.Expectation.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.Expectation.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.PQC.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.PQC.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.PQC.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.PQC.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.PQC.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.PQC.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.PQC.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.PQC.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.PQC.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.PQC.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.PQC.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.PQC.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.PQC.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.PQC.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.PQC.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.PQC.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.PQC.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.PQC.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.PQC.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.PQC.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.PQC.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.PQC.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.PQC.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.PQC.get_losses_for": 
"tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.PQC.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.PQC.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.PQC.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.PQC.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.PQC.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.PQC.input": "tfq.layers.AddCircuit.input", - "tfq.layers.PQC.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.PQC.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.PQC.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.PQC.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.PQC.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.PQC.name": "tfq.layers.AddCircuit.name", - "tfq.layers.PQC.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.PQC.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.PQC.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.PQC.output": "tfq.layers.AddCircuit.output", - "tfq.layers.PQC.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.PQC.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.PQC.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.PQC.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.PQC.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.PQC.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.PQC.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.PQC.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.PQC.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.PQC.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.Sample.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.Sample.__eq__": 
"tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.Sample.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.Sample.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.Sample.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.Sample.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.Sample.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.Sample.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.Sample.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.Sample.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.Sample.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.Sample.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.Sample.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.Sample.build": "tfq.layers.AddCircuit.build", - "tfq.layers.Sample.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.Sample.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.Sample.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.Sample.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.Sample.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.Sample.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.Sample.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.Sample.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.Sample.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.Sample.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.Sample.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.Sample.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.Sample.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - 
"tfq.layers.Sample.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.Sample.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.Sample.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.Sample.input": "tfq.layers.AddCircuit.input", - "tfq.layers.Sample.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.Sample.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.Sample.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.Sample.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.Sample.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.Sample.name": "tfq.layers.AddCircuit.name", - "tfq.layers.Sample.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.Sample.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.Sample.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.Sample.output": "tfq.layers.AddCircuit.output", - "tfq.layers.Sample.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.Sample.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.Sample.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.Sample.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.Sample.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.Sample.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.Sample.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.Sample.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.Sample.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.Sample.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.SampledExpectation.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.SampledExpectation.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.SampledExpectation.__ge__": "tfq.differentiators.CentralDifference.__ge__", - 
"tfq.layers.SampledExpectation.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.SampledExpectation.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.SampledExpectation.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.SampledExpectation.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.SampledExpectation.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.SampledExpectation.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.SampledExpectation.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.SampledExpectation.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.SampledExpectation.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.SampledExpectation.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.SampledExpectation.build": "tfq.layers.AddCircuit.build", - "tfq.layers.SampledExpectation.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.SampledExpectation.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.SampledExpectation.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.SampledExpectation.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.SampledExpectation.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.SampledExpectation.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.SampledExpectation.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.SampledExpectation.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.SampledExpectation.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.SampledExpectation.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - "tfq.layers.SampledExpectation.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.SampledExpectation.get_output_at": 
"tfq.layers.AddCircuit.get_output_at", - "tfq.layers.SampledExpectation.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.SampledExpectation.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.SampledExpectation.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.SampledExpectation.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.SampledExpectation.input": "tfq.layers.AddCircuit.input", - "tfq.layers.SampledExpectation.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.SampledExpectation.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.SampledExpectation.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.SampledExpectation.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.SampledExpectation.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.SampledExpectation.name": "tfq.layers.AddCircuit.name", - "tfq.layers.SampledExpectation.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.SampledExpectation.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.SampledExpectation.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.SampledExpectation.output": "tfq.layers.AddCircuit.output", - "tfq.layers.SampledExpectation.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.SampledExpectation.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.SampledExpectation.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.SampledExpectation.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.SampledExpectation.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.SampledExpectation.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.SampledExpectation.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.SampledExpectation.updates": 
"tfq.layers.AddCircuit.updates", - "tfq.layers.SampledExpectation.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.SampledExpectation.weights": "tfq.layers.AddCircuit.weights", - "tfq.layers.State.__call__": "tfq.layers.AddCircuit.__call__", - "tfq.layers.State.__eq__": "tfq.differentiators.CentralDifference.__eq__", - "tfq.layers.State.__ge__": "tfq.differentiators.CentralDifference.__ge__", - "tfq.layers.State.__gt__": "tfq.differentiators.CentralDifference.__gt__", - "tfq.layers.State.__le__": "tfq.differentiators.CentralDifference.__le__", - "tfq.layers.State.__lt__": "tfq.differentiators.CentralDifference.__lt__", - "tfq.layers.State.__ne__": "tfq.differentiators.CentralDifference.__ne__", - "tfq.layers.State.__new__": "tfq.differentiators.CentralDifference.__new__", - "tfq.layers.State.activity_regularizer": "tfq.layers.AddCircuit.activity_regularizer", - "tfq.layers.State.add_loss": "tfq.layers.AddCircuit.add_loss", - "tfq.layers.State.add_metric": "tfq.layers.AddCircuit.add_metric", - "tfq.layers.State.add_update": "tfq.layers.AddCircuit.add_update", - "tfq.layers.State.add_weight": "tfq.layers.AddCircuit.add_weight", - "tfq.layers.State.build": "tfq.layers.AddCircuit.build", - "tfq.layers.State.compute_mask": "tfq.layers.AddCircuit.compute_mask", - "tfq.layers.State.compute_output_shape": "tfq.layers.AddCircuit.compute_output_shape", - "tfq.layers.State.compute_output_signature": "tfq.layers.AddCircuit.compute_output_signature", - "tfq.layers.State.count_params": "tfq.layers.AddCircuit.count_params", - "tfq.layers.State.dtype": "tfq.layers.AddCircuit.dtype", - "tfq.layers.State.dynamic": "tfq.layers.AddCircuit.dynamic", - "tfq.layers.State.get_config": "tfq.layers.AddCircuit.get_config", - "tfq.layers.State.get_input_at": "tfq.layers.AddCircuit.get_input_at", - "tfq.layers.State.get_input_mask_at": "tfq.layers.AddCircuit.get_input_mask_at", - "tfq.layers.State.get_input_shape_at": "tfq.layers.AddCircuit.get_input_shape_at", - 
"tfq.layers.State.get_losses_for": "tfq.layers.AddCircuit.get_losses_for", - "tfq.layers.State.get_output_at": "tfq.layers.AddCircuit.get_output_at", - "tfq.layers.State.get_output_mask_at": "tfq.layers.AddCircuit.get_output_mask_at", - "tfq.layers.State.get_output_shape_at": "tfq.layers.AddCircuit.get_output_shape_at", - "tfq.layers.State.get_updates_for": "tfq.layers.AddCircuit.get_updates_for", - "tfq.layers.State.get_weights": "tfq.layers.AddCircuit.get_weights", - "tfq.layers.State.input": "tfq.layers.AddCircuit.input", - "tfq.layers.State.input_mask": "tfq.layers.AddCircuit.input_mask", - "tfq.layers.State.input_shape": "tfq.layers.AddCircuit.input_shape", - "tfq.layers.State.input_spec": "tfq.layers.AddCircuit.input_spec", - "tfq.layers.State.losses": "tfq.layers.AddCircuit.losses", - "tfq.layers.State.metrics": "tfq.layers.AddCircuit.metrics", - "tfq.layers.State.name": "tfq.layers.AddCircuit.name", - "tfq.layers.State.name_scope": "tfq.layers.AddCircuit.name_scope", - "tfq.layers.State.non_trainable_variables": "tfq.layers.AddCircuit.non_trainable_variables", - "tfq.layers.State.non_trainable_weights": "tfq.layers.AddCircuit.non_trainable_weights", - "tfq.layers.State.output": "tfq.layers.AddCircuit.output", - "tfq.layers.State.output_mask": "tfq.layers.AddCircuit.output_mask", - "tfq.layers.State.output_shape": "tfq.layers.AddCircuit.output_shape", - "tfq.layers.State.set_weights": "tfq.layers.AddCircuit.set_weights", - "tfq.layers.State.submodules": "tfq.layers.AddCircuit.submodules", - "tfq.layers.State.trainable": "tfq.layers.AddCircuit.trainable", - "tfq.layers.State.trainable_variables": "tfq.layers.AddCircuit.trainable_variables", - "tfq.layers.State.trainable_weights": "tfq.layers.AddCircuit.trainable_weights", - "tfq.layers.State.updates": "tfq.layers.AddCircuit.updates", - "tfq.layers.State.variables": "tfq.layers.AddCircuit.variables", - "tfq.layers.State.weights": "tfq.layers.AddCircuit.weights" - }, - "is_fragment": { - "tfq": false, - 
"tfq.convert_to_tensor": false, - "tfq.datasets": false, - "tfq.datasets.excited_cluster_states": false, - "tfq.differentiators": false, - "tfq.differentiators.CentralDifference": false, - "tfq.differentiators.CentralDifference.__eq__": true, - "tfq.differentiators.CentralDifference.__ge__": true, - "tfq.differentiators.CentralDifference.__gt__": true, - "tfq.differentiators.CentralDifference.__init__": true, - "tfq.differentiators.CentralDifference.__le__": true, - "tfq.differentiators.CentralDifference.__lt__": true, - "tfq.differentiators.CentralDifference.__ne__": true, - "tfq.differentiators.CentralDifference.__new__": true, - "tfq.differentiators.CentralDifference.differentiate_analytic": true, - "tfq.differentiators.CentralDifference.differentiate_sampled": true, - "tfq.differentiators.CentralDifference.generate_differentiable_op": true, - "tfq.differentiators.CentralDifference.refresh": true, - "tfq.differentiators.Differentiator": false, - "tfq.differentiators.Differentiator.__eq__": true, - "tfq.differentiators.Differentiator.__ge__": true, - "tfq.differentiators.Differentiator.__gt__": true, - "tfq.differentiators.Differentiator.__init__": true, - "tfq.differentiators.Differentiator.__le__": true, - "tfq.differentiators.Differentiator.__lt__": true, - "tfq.differentiators.Differentiator.__ne__": true, - "tfq.differentiators.Differentiator.__new__": true, - "tfq.differentiators.Differentiator.differentiate_analytic": true, - "tfq.differentiators.Differentiator.differentiate_sampled": true, - "tfq.differentiators.Differentiator.generate_differentiable_op": true, - "tfq.differentiators.Differentiator.refresh": true, - "tfq.differentiators.ForwardDifference": false, - "tfq.differentiators.ForwardDifference.__eq__": true, - "tfq.differentiators.ForwardDifference.__ge__": true, - "tfq.differentiators.ForwardDifference.__gt__": true, - "tfq.differentiators.ForwardDifference.__init__": true, - "tfq.differentiators.ForwardDifference.__le__": true, - 
"tfq.differentiators.ForwardDifference.__lt__": true, - "tfq.differentiators.ForwardDifference.__ne__": true, - "tfq.differentiators.ForwardDifference.__new__": true, - "tfq.differentiators.ForwardDifference.differentiate_analytic": true, - "tfq.differentiators.ForwardDifference.differentiate_sampled": true, - "tfq.differentiators.ForwardDifference.generate_differentiable_op": true, - "tfq.differentiators.ForwardDifference.refresh": true, - "tfq.differentiators.LinearCombination": false, - "tfq.differentiators.LinearCombination.__eq__": true, - "tfq.differentiators.LinearCombination.__ge__": true, - "tfq.differentiators.LinearCombination.__gt__": true, - "tfq.differentiators.LinearCombination.__init__": true, - "tfq.differentiators.LinearCombination.__le__": true, - "tfq.differentiators.LinearCombination.__lt__": true, - "tfq.differentiators.LinearCombination.__ne__": true, - "tfq.differentiators.LinearCombination.__new__": true, - "tfq.differentiators.LinearCombination.differentiate_analytic": true, - "tfq.differentiators.LinearCombination.differentiate_sampled": true, - "tfq.differentiators.LinearCombination.generate_differentiable_op": true, - "tfq.differentiators.LinearCombination.refresh": true, - "tfq.differentiators.ParameterShift": false, - "tfq.differentiators.ParameterShift.__eq__": true, - "tfq.differentiators.ParameterShift.__ge__": true, - "tfq.differentiators.ParameterShift.__gt__": true, - "tfq.differentiators.ParameterShift.__init__": true, - "tfq.differentiators.ParameterShift.__le__": true, - "tfq.differentiators.ParameterShift.__lt__": true, - "tfq.differentiators.ParameterShift.__ne__": true, - "tfq.differentiators.ParameterShift.__new__": true, - "tfq.differentiators.ParameterShift.differentiate_analytic": true, - "tfq.differentiators.ParameterShift.differentiate_sampled": true, - "tfq.differentiators.ParameterShift.generate_differentiable_op": true, - "tfq.differentiators.ParameterShift.refresh": true, - "tfq.differentiators.SGDifferentiator": 
false, - "tfq.differentiators.SGDifferentiator.__eq__": true, - "tfq.differentiators.SGDifferentiator.__ge__": true, - "tfq.differentiators.SGDifferentiator.__gt__": true, - "tfq.differentiators.SGDifferentiator.__init__": true, - "tfq.differentiators.SGDifferentiator.__le__": true, - "tfq.differentiators.SGDifferentiator.__lt__": true, - "tfq.differentiators.SGDifferentiator.__ne__": true, - "tfq.differentiators.SGDifferentiator.__new__": true, - "tfq.differentiators.SGDifferentiator.differentiate_analytic": true, - "tfq.differentiators.SGDifferentiator.differentiate_sampled": true, - "tfq.differentiators.SGDifferentiator.generate_differentiable_op": true, - "tfq.differentiators.SGDifferentiator.refresh": true, - "tfq.from_tensor": false, - "tfq.get_expectation_op": false, - "tfq.get_sampled_expectation_op": false, - "tfq.get_sampling_op": false, - "tfq.get_state_op": false, - "tfq.get_supported_gates": false, - "tfq.layers": false, - "tfq.layers.AddCircuit": false, - "tfq.layers.AddCircuit.__call__": true, - "tfq.layers.AddCircuit.__eq__": true, - "tfq.layers.AddCircuit.__ge__": true, - "tfq.layers.AddCircuit.__gt__": true, - "tfq.layers.AddCircuit.__init__": true, - "tfq.layers.AddCircuit.__le__": true, - "tfq.layers.AddCircuit.__lt__": true, - "tfq.layers.AddCircuit.__ne__": true, - "tfq.layers.AddCircuit.__new__": true, - "tfq.layers.AddCircuit.activity_regularizer": true, - "tfq.layers.AddCircuit.add_loss": true, - "tfq.layers.AddCircuit.add_metric": true, - "tfq.layers.AddCircuit.add_update": true, - "tfq.layers.AddCircuit.add_weight": true, - "tfq.layers.AddCircuit.build": true, - "tfq.layers.AddCircuit.call": true, - "tfq.layers.AddCircuit.compute_mask": true, - "tfq.layers.AddCircuit.compute_output_shape": true, - "tfq.layers.AddCircuit.compute_output_signature": true, - "tfq.layers.AddCircuit.count_params": true, - "tfq.layers.AddCircuit.dtype": true, - "tfq.layers.AddCircuit.dynamic": true, - "tfq.layers.AddCircuit.from_config": true, - 
"tfq.layers.AddCircuit.get_config": true, - "tfq.layers.AddCircuit.get_input_at": true, - "tfq.layers.AddCircuit.get_input_mask_at": true, - "tfq.layers.AddCircuit.get_input_shape_at": true, - "tfq.layers.AddCircuit.get_losses_for": true, - "tfq.layers.AddCircuit.get_output_at": true, - "tfq.layers.AddCircuit.get_output_mask_at": true, - "tfq.layers.AddCircuit.get_output_shape_at": true, - "tfq.layers.AddCircuit.get_updates_for": true, - "tfq.layers.AddCircuit.get_weights": true, - "tfq.layers.AddCircuit.input": true, - "tfq.layers.AddCircuit.input_mask": true, - "tfq.layers.AddCircuit.input_shape": true, - "tfq.layers.AddCircuit.input_spec": true, - "tfq.layers.AddCircuit.losses": true, - "tfq.layers.AddCircuit.metrics": true, - "tfq.layers.AddCircuit.name": true, - "tfq.layers.AddCircuit.name_scope": true, - "tfq.layers.AddCircuit.non_trainable_variables": true, - "tfq.layers.AddCircuit.non_trainable_weights": true, - "tfq.layers.AddCircuit.output": true, - "tfq.layers.AddCircuit.output_mask": true, - "tfq.layers.AddCircuit.output_shape": true, - "tfq.layers.AddCircuit.set_weights": true, - "tfq.layers.AddCircuit.submodules": true, - "tfq.layers.AddCircuit.trainable": true, - "tfq.layers.AddCircuit.trainable_variables": true, - "tfq.layers.AddCircuit.trainable_weights": true, - "tfq.layers.AddCircuit.updates": true, - "tfq.layers.AddCircuit.variables": true, - "tfq.layers.AddCircuit.weights": true, - "tfq.layers.AddCircuit.with_name_scope": true, - "tfq.layers.ControlledPQC": false, - "tfq.layers.ControlledPQC.__call__": true, - "tfq.layers.ControlledPQC.__eq__": true, - "tfq.layers.ControlledPQC.__ge__": true, - "tfq.layers.ControlledPQC.__gt__": true, - "tfq.layers.ControlledPQC.__init__": true, - "tfq.layers.ControlledPQC.__le__": true, - "tfq.layers.ControlledPQC.__lt__": true, - "tfq.layers.ControlledPQC.__ne__": true, - "tfq.layers.ControlledPQC.__new__": true, - "tfq.layers.ControlledPQC.activity_regularizer": true, - "tfq.layers.ControlledPQC.add_loss": 
true, - "tfq.layers.ControlledPQC.add_metric": true, - "tfq.layers.ControlledPQC.add_update": true, - "tfq.layers.ControlledPQC.add_weight": true, - "tfq.layers.ControlledPQC.build": true, - "tfq.layers.ControlledPQC.call": true, - "tfq.layers.ControlledPQC.compute_mask": true, - "tfq.layers.ControlledPQC.compute_output_shape": true, - "tfq.layers.ControlledPQC.compute_output_signature": true, - "tfq.layers.ControlledPQC.count_params": true, - "tfq.layers.ControlledPQC.dtype": true, - "tfq.layers.ControlledPQC.dynamic": true, - "tfq.layers.ControlledPQC.from_config": true, - "tfq.layers.ControlledPQC.get_config": true, - "tfq.layers.ControlledPQC.get_input_at": true, - "tfq.layers.ControlledPQC.get_input_mask_at": true, - "tfq.layers.ControlledPQC.get_input_shape_at": true, - "tfq.layers.ControlledPQC.get_losses_for": true, - "tfq.layers.ControlledPQC.get_output_at": true, - "tfq.layers.ControlledPQC.get_output_mask_at": true, - "tfq.layers.ControlledPQC.get_output_shape_at": true, - "tfq.layers.ControlledPQC.get_updates_for": true, - "tfq.layers.ControlledPQC.get_weights": true, - "tfq.layers.ControlledPQC.input": true, - "tfq.layers.ControlledPQC.input_mask": true, - "tfq.layers.ControlledPQC.input_shape": true, - "tfq.layers.ControlledPQC.input_spec": true, - "tfq.layers.ControlledPQC.losses": true, - "tfq.layers.ControlledPQC.metrics": true, - "tfq.layers.ControlledPQC.name": true, - "tfq.layers.ControlledPQC.name_scope": true, - "tfq.layers.ControlledPQC.non_trainable_variables": true, - "tfq.layers.ControlledPQC.non_trainable_weights": true, - "tfq.layers.ControlledPQC.output": true, - "tfq.layers.ControlledPQC.output_mask": true, - "tfq.layers.ControlledPQC.output_shape": true, - "tfq.layers.ControlledPQC.set_weights": true, - "tfq.layers.ControlledPQC.submodules": true, - "tfq.layers.ControlledPQC.trainable": true, - "tfq.layers.ControlledPQC.trainable_variables": true, - "tfq.layers.ControlledPQC.trainable_weights": true, - 
"tfq.layers.ControlledPQC.updates": true, - "tfq.layers.ControlledPQC.variables": true, - "tfq.layers.ControlledPQC.weights": true, - "tfq.layers.ControlledPQC.with_name_scope": true, - "tfq.layers.Expectation": false, - "tfq.layers.Expectation.__call__": true, - "tfq.layers.Expectation.__eq__": true, - "tfq.layers.Expectation.__ge__": true, - "tfq.layers.Expectation.__gt__": true, - "tfq.layers.Expectation.__init__": true, - "tfq.layers.Expectation.__le__": true, - "tfq.layers.Expectation.__lt__": true, - "tfq.layers.Expectation.__ne__": true, - "tfq.layers.Expectation.__new__": true, - "tfq.layers.Expectation.activity_regularizer": true, - "tfq.layers.Expectation.add_loss": true, - "tfq.layers.Expectation.add_metric": true, - "tfq.layers.Expectation.add_update": true, - "tfq.layers.Expectation.add_weight": true, - "tfq.layers.Expectation.build": true, - "tfq.layers.Expectation.call": true, - "tfq.layers.Expectation.compute_mask": true, - "tfq.layers.Expectation.compute_output_shape": true, - "tfq.layers.Expectation.compute_output_signature": true, - "tfq.layers.Expectation.count_params": true, - "tfq.layers.Expectation.dtype": true, - "tfq.layers.Expectation.dynamic": true, - "tfq.layers.Expectation.from_config": true, - "tfq.layers.Expectation.get_config": true, - "tfq.layers.Expectation.get_input_at": true, - "tfq.layers.Expectation.get_input_mask_at": true, - "tfq.layers.Expectation.get_input_shape_at": true, - "tfq.layers.Expectation.get_losses_for": true, - "tfq.layers.Expectation.get_output_at": true, - "tfq.layers.Expectation.get_output_mask_at": true, - "tfq.layers.Expectation.get_output_shape_at": true, - "tfq.layers.Expectation.get_updates_for": true, - "tfq.layers.Expectation.get_weights": true, - "tfq.layers.Expectation.input": true, - "tfq.layers.Expectation.input_mask": true, - "tfq.layers.Expectation.input_shape": true, - "tfq.layers.Expectation.input_spec": true, - "tfq.layers.Expectation.losses": true, - "tfq.layers.Expectation.metrics": true, - 
"tfq.layers.Expectation.name": true, - "tfq.layers.Expectation.name_scope": true, - "tfq.layers.Expectation.non_trainable_variables": true, - "tfq.layers.Expectation.non_trainable_weights": true, - "tfq.layers.Expectation.output": true, - "tfq.layers.Expectation.output_mask": true, - "tfq.layers.Expectation.output_shape": true, - "tfq.layers.Expectation.set_weights": true, - "tfq.layers.Expectation.submodules": true, - "tfq.layers.Expectation.trainable": true, - "tfq.layers.Expectation.trainable_variables": true, - "tfq.layers.Expectation.trainable_weights": true, - "tfq.layers.Expectation.updates": true, - "tfq.layers.Expectation.variables": true, - "tfq.layers.Expectation.weights": true, - "tfq.layers.Expectation.with_name_scope": true, - "tfq.layers.PQC": false, - "tfq.layers.PQC.__call__": true, - "tfq.layers.PQC.__eq__": true, - "tfq.layers.PQC.__ge__": true, - "tfq.layers.PQC.__gt__": true, - "tfq.layers.PQC.__init__": true, - "tfq.layers.PQC.__le__": true, - "tfq.layers.PQC.__lt__": true, - "tfq.layers.PQC.__ne__": true, - "tfq.layers.PQC.__new__": true, - "tfq.layers.PQC.activity_regularizer": true, - "tfq.layers.PQC.add_loss": true, - "tfq.layers.PQC.add_metric": true, - "tfq.layers.PQC.add_update": true, - "tfq.layers.PQC.add_weight": true, - "tfq.layers.PQC.build": true, - "tfq.layers.PQC.call": true, - "tfq.layers.PQC.compute_mask": true, - "tfq.layers.PQC.compute_output_shape": true, - "tfq.layers.PQC.compute_output_signature": true, - "tfq.layers.PQC.count_params": true, - "tfq.layers.PQC.dtype": true, - "tfq.layers.PQC.dynamic": true, - "tfq.layers.PQC.from_config": true, - "tfq.layers.PQC.get_config": true, - "tfq.layers.PQC.get_input_at": true, - "tfq.layers.PQC.get_input_mask_at": true, - "tfq.layers.PQC.get_input_shape_at": true, - "tfq.layers.PQC.get_losses_for": true, - "tfq.layers.PQC.get_output_at": true, - "tfq.layers.PQC.get_output_mask_at": true, - "tfq.layers.PQC.get_output_shape_at": true, - "tfq.layers.PQC.get_updates_for": true, - 
"tfq.layers.PQC.get_weights": true, - "tfq.layers.PQC.input": true, - "tfq.layers.PQC.input_mask": true, - "tfq.layers.PQC.input_shape": true, - "tfq.layers.PQC.input_spec": true, - "tfq.layers.PQC.losses": true, - "tfq.layers.PQC.metrics": true, - "tfq.layers.PQC.name": true, - "tfq.layers.PQC.name_scope": true, - "tfq.layers.PQC.non_trainable_variables": true, - "tfq.layers.PQC.non_trainable_weights": true, - "tfq.layers.PQC.output": true, - "tfq.layers.PQC.output_mask": true, - "tfq.layers.PQC.output_shape": true, - "tfq.layers.PQC.set_weights": true, - "tfq.layers.PQC.submodules": true, - "tfq.layers.PQC.trainable": true, - "tfq.layers.PQC.trainable_variables": true, - "tfq.layers.PQC.trainable_weights": true, - "tfq.layers.PQC.updates": true, - "tfq.layers.PQC.variables": true, - "tfq.layers.PQC.weights": true, - "tfq.layers.PQC.with_name_scope": true, - "tfq.layers.Sample": false, - "tfq.layers.Sample.__call__": true, - "tfq.layers.Sample.__eq__": true, - "tfq.layers.Sample.__ge__": true, - "tfq.layers.Sample.__gt__": true, - "tfq.layers.Sample.__init__": true, - "tfq.layers.Sample.__le__": true, - "tfq.layers.Sample.__lt__": true, - "tfq.layers.Sample.__ne__": true, - "tfq.layers.Sample.__new__": true, - "tfq.layers.Sample.activity_regularizer": true, - "tfq.layers.Sample.add_loss": true, - "tfq.layers.Sample.add_metric": true, - "tfq.layers.Sample.add_update": true, - "tfq.layers.Sample.add_weight": true, - "tfq.layers.Sample.build": true, - "tfq.layers.Sample.call": true, - "tfq.layers.Sample.compute_mask": true, - "tfq.layers.Sample.compute_output_shape": true, - "tfq.layers.Sample.compute_output_signature": true, - "tfq.layers.Sample.count_params": true, - "tfq.layers.Sample.dtype": true, - "tfq.layers.Sample.dynamic": true, - "tfq.layers.Sample.from_config": true, - "tfq.layers.Sample.get_config": true, - "tfq.layers.Sample.get_input_at": true, - "tfq.layers.Sample.get_input_mask_at": true, - "tfq.layers.Sample.get_input_shape_at": true, - 
"tfq.layers.Sample.get_losses_for": true, - "tfq.layers.Sample.get_output_at": true, - "tfq.layers.Sample.get_output_mask_at": true, - "tfq.layers.Sample.get_output_shape_at": true, - "tfq.layers.Sample.get_updates_for": true, - "tfq.layers.Sample.get_weights": true, - "tfq.layers.Sample.input": true, - "tfq.layers.Sample.input_mask": true, - "tfq.layers.Sample.input_shape": true, - "tfq.layers.Sample.input_spec": true, - "tfq.layers.Sample.losses": true, - "tfq.layers.Sample.metrics": true, - "tfq.layers.Sample.name": true, - "tfq.layers.Sample.name_scope": true, - "tfq.layers.Sample.non_trainable_variables": true, - "tfq.layers.Sample.non_trainable_weights": true, - "tfq.layers.Sample.output": true, - "tfq.layers.Sample.output_mask": true, - "tfq.layers.Sample.output_shape": true, - "tfq.layers.Sample.set_weights": true, - "tfq.layers.Sample.submodules": true, - "tfq.layers.Sample.trainable": true, - "tfq.layers.Sample.trainable_variables": true, - "tfq.layers.Sample.trainable_weights": true, - "tfq.layers.Sample.updates": true, - "tfq.layers.Sample.variables": true, - "tfq.layers.Sample.weights": true, - "tfq.layers.Sample.with_name_scope": true, - "tfq.layers.SampledExpectation": false, - "tfq.layers.SampledExpectation.__call__": true, - "tfq.layers.SampledExpectation.__eq__": true, - "tfq.layers.SampledExpectation.__ge__": true, - "tfq.layers.SampledExpectation.__gt__": true, - "tfq.layers.SampledExpectation.__init__": true, - "tfq.layers.SampledExpectation.__le__": true, - "tfq.layers.SampledExpectation.__lt__": true, - "tfq.layers.SampledExpectation.__ne__": true, - "tfq.layers.SampledExpectation.__new__": true, - "tfq.layers.SampledExpectation.activity_regularizer": true, - "tfq.layers.SampledExpectation.add_loss": true, - "tfq.layers.SampledExpectation.add_metric": true, - "tfq.layers.SampledExpectation.add_update": true, - "tfq.layers.SampledExpectation.add_weight": true, - "tfq.layers.SampledExpectation.build": true, - 
"tfq.layers.SampledExpectation.call": true, - "tfq.layers.SampledExpectation.compute_mask": true, - "tfq.layers.SampledExpectation.compute_output_shape": true, - "tfq.layers.SampledExpectation.compute_output_signature": true, - "tfq.layers.SampledExpectation.count_params": true, - "tfq.layers.SampledExpectation.dtype": true, - "tfq.layers.SampledExpectation.dynamic": true, - "tfq.layers.SampledExpectation.from_config": true, - "tfq.layers.SampledExpectation.get_config": true, - "tfq.layers.SampledExpectation.get_input_at": true, - "tfq.layers.SampledExpectation.get_input_mask_at": true, - "tfq.layers.SampledExpectation.get_input_shape_at": true, - "tfq.layers.SampledExpectation.get_losses_for": true, - "tfq.layers.SampledExpectation.get_output_at": true, - "tfq.layers.SampledExpectation.get_output_mask_at": true, - "tfq.layers.SampledExpectation.get_output_shape_at": true, - "tfq.layers.SampledExpectation.get_updates_for": true, - "tfq.layers.SampledExpectation.get_weights": true, - "tfq.layers.SampledExpectation.input": true, - "tfq.layers.SampledExpectation.input_mask": true, - "tfq.layers.SampledExpectation.input_shape": true, - "tfq.layers.SampledExpectation.input_spec": true, - "tfq.layers.SampledExpectation.losses": true, - "tfq.layers.SampledExpectation.metrics": true, - "tfq.layers.SampledExpectation.name": true, - "tfq.layers.SampledExpectation.name_scope": true, - "tfq.layers.SampledExpectation.non_trainable_variables": true, - "tfq.layers.SampledExpectation.non_trainable_weights": true, - "tfq.layers.SampledExpectation.output": true, - "tfq.layers.SampledExpectation.output_mask": true, - "tfq.layers.SampledExpectation.output_shape": true, - "tfq.layers.SampledExpectation.set_weights": true, - "tfq.layers.SampledExpectation.submodules": true, - "tfq.layers.SampledExpectation.trainable": true, - "tfq.layers.SampledExpectation.trainable_variables": true, - "tfq.layers.SampledExpectation.trainable_weights": true, - "tfq.layers.SampledExpectation.updates": 
true, - "tfq.layers.SampledExpectation.variables": true, - "tfq.layers.SampledExpectation.weights": true, - "tfq.layers.SampledExpectation.with_name_scope": true, - "tfq.layers.State": false, - "tfq.layers.State.__call__": true, - "tfq.layers.State.__eq__": true, - "tfq.layers.State.__ge__": true, - "tfq.layers.State.__gt__": true, - "tfq.layers.State.__init__": true, - "tfq.layers.State.__le__": true, - "tfq.layers.State.__lt__": true, - "tfq.layers.State.__ne__": true, - "tfq.layers.State.__new__": true, - "tfq.layers.State.activity_regularizer": true, - "tfq.layers.State.add_loss": true, - "tfq.layers.State.add_metric": true, - "tfq.layers.State.add_update": true, - "tfq.layers.State.add_weight": true, - "tfq.layers.State.build": true, - "tfq.layers.State.call": true, - "tfq.layers.State.compute_mask": true, - "tfq.layers.State.compute_output_shape": true, - "tfq.layers.State.compute_output_signature": true, - "tfq.layers.State.count_params": true, - "tfq.layers.State.dtype": true, - "tfq.layers.State.dynamic": true, - "tfq.layers.State.from_config": true, - "tfq.layers.State.get_config": true, - "tfq.layers.State.get_input_at": true, - "tfq.layers.State.get_input_mask_at": true, - "tfq.layers.State.get_input_shape_at": true, - "tfq.layers.State.get_losses_for": true, - "tfq.layers.State.get_output_at": true, - "tfq.layers.State.get_output_mask_at": true, - "tfq.layers.State.get_output_shape_at": true, - "tfq.layers.State.get_updates_for": true, - "tfq.layers.State.get_weights": true, - "tfq.layers.State.input": true, - "tfq.layers.State.input_mask": true, - "tfq.layers.State.input_shape": true, - "tfq.layers.State.input_spec": true, - "tfq.layers.State.losses": true, - "tfq.layers.State.metrics": true, - "tfq.layers.State.name": true, - "tfq.layers.State.name_scope": true, - "tfq.layers.State.non_trainable_variables": true, - "tfq.layers.State.non_trainable_weights": true, - "tfq.layers.State.output": true, - "tfq.layers.State.output_mask": true, - 
"tfq.layers.State.output_shape": true, - "tfq.layers.State.set_weights": true, - "tfq.layers.State.submodules": true, - "tfq.layers.State.trainable": true, - "tfq.layers.State.trainable_variables": true, - "tfq.layers.State.trainable_weights": true, - "tfq.layers.State.updates": true, - "tfq.layers.State.variables": true, - "tfq.layers.State.weights": true, - "tfq.layers.State.with_name_scope": true, - "tfq.padded_to_ragged": false - }, - "py_module_names": [ - "tfq" - ] -} diff --git a/docs/api_docs/python/tfq/convert_to_tensor.md b/docs/api_docs/python/tfq/convert_to_tensor.md deleted file mode 100644 index c3d7e1dee..000000000 --- a/docs/api_docs/python/tfq/convert_to_tensor.md +++ /dev/null @@ -1,71 +0,0 @@ -
- - -
- -# tfq.convert_to_tensor - - - - - -
- - - View source on GitHub - -
- - - -Convert lists of tfq supported primitives to tensor representations. - -``` python -tfq.convert_to_tensor(items_to_convert) -``` - - - - - -Recursively convert a nested lists of `cirq.PauliSum` or `cirq.Circuit` -objects to a `tf.Tensor` representation. Note that cirq serialization only -supports `cirq.GridQubit`s so we also require that input circuits and -pauli sums are defined only on `cirq.GridQubit`s. - - -``` - ->>> my_qubits = cirq.GridQubit.rect(1, 2) ->>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), -... cirq.Circuit(cirq.Z(my_qubits[0])) -... ] ->>> tensor_input = tfq.convert_to_tensor(my_circuits) ->>> # Now tensor_input can be used as model input etc. ->>> same_circuits = tfq.from_tensor(tensor_input) ->>> # same_circuits now holds cirq.Circuit objects once more. ->>> same_circuits -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -#### Args: - - -* `items_to_convert`: Python `list` or nested `list` of `cirq.Circuit` - or `cirq.Paulisum` objects. Should be rectangular, or this function - will error. - - -#### Returns: - -`tf.Tensor` that represents the input items. diff --git a/docs/api_docs/python/tfq/datasets.md b/docs/api_docs/python/tfq/datasets.md deleted file mode 100644 index 752f7d9c7..000000000 --- a/docs/api_docs/python/tfq/datasets.md +++ /dev/null @@ -1,27 +0,0 @@ -
- - -
- -# Module: tfq.datasets - - - - -
- - - View source on GitHub - -
- - - -Interesting quantum datasets. - - - -## Functions - -[`excited_cluster_states(...)`](../tfq/datasets/excited_cluster_states.md): Return a tuple of potentially excited cluster states and their labels. - diff --git a/docs/api_docs/python/tfq/datasets/excited_cluster_states.md b/docs/api_docs/python/tfq/datasets/excited_cluster_states.md deleted file mode 100644 index 5e433a62e..000000000 --- a/docs/api_docs/python/tfq/datasets/excited_cluster_states.md +++ /dev/null @@ -1,76 +0,0 @@ -
- - -
- -# tfq.datasets.excited_cluster_states - - - - - -
- - - View source on GitHub - -
- - - -Return a tuple of potentially excited cluster states and their labels. - -``` python -tfq.datasets.excited_cluster_states(qubits) -``` - - - - - -For every qubit in `qubits` this method will create a cluster state circuit -on `qubits`, apply a `cirq.X` on that qubit along with a label of 1 and add -it to the return dataset. Finally a cluster state circuit on `qubits` that -doesn't contain any `cirq.X` gates with a label of -1 will be added to the -returned dataset. - - -``` - ->>> circuits, labels = tfq.datasets.excited_cluster_states( -... cirq.GridQubit.rect(1, 3) -... ) ->>> print(circuits[0]) -(0, 0): ───H───@───────@───X─── - │ │ -(0, 1): ───H───@───@───┼─────── - │ │ -(0, 2): ───H───────@───@─────── ->>> labels[0] -1 ->>> print(circuits[-1]) -(0, 0): ───H───@───────@─── - │ │ -(0, 1): ───H───@───@───┼─── - │ │ -(0, 2): ───H───────@───@─── ->>> labels[-1] --1 - -``` - - -Circuits that feature a `cirq.X` gate on one of the qubits are labeled 1, -while the circuit that doesn't feature a `cirq.X` anywhere has the label -1. - - -#### Args: - - -* `qubits`: Python `list` of `cirq.GridQubit`s on which the excited cluster - state dataset will be created. - - -#### Returns: - -A `tuple` of `cirq.Circuit`s and Python `int` labels. diff --git a/docs/api_docs/python/tfq/differentiators.md b/docs/api_docs/python/tfq/differentiators.md deleted file mode 100644 index c78bd64e5..000000000 --- a/docs/api_docs/python/tfq/differentiators.md +++ /dev/null @@ -1,37 +0,0 @@ -
- - -
- -# Module: tfq.differentiators - - - - -
- - - View source on GitHub - -
- - - -Module functions for tfq.differentiators.* - - - -## Classes - -[`class CentralDifference`](../tfq/differentiators/CentralDifference.md): Differentiates a circuit using Central Differencing. - -[`class Differentiator`](../tfq/differentiators/Differentiator.md): Interface that defines how to specify gradients for a quantum circuit. - -[`class ForwardDifference`](../tfq/differentiators/ForwardDifference.md): Differentiate a circuit using forward differencing. - -[`class LinearCombination`](../tfq/differentiators/LinearCombination.md): Differentiate a circuit with respect to its inputs by - -[`class ParameterShift`](../tfq/differentiators/ParameterShift.md): Calculate the general version of parameter-shift rule based gradients. - -[`class SGDifferentiator`](../tfq/differentiators/SGDifferentiator.md): Stochastic generator based differentiator class. - diff --git a/docs/api_docs/python/tfq/differentiators/CentralDifference.md b/docs/api_docs/python/tfq/differentiators/CentralDifference.md deleted file mode 100644 index 74676100c..000000000 --- a/docs/api_docs/python/tfq/differentiators/CentralDifference.md +++ /dev/null @@ -1,188 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.CentralDifference - - - - - -
- - - View source on GitHub - -
- - - -## Class `CentralDifference` - -Differentiates a circuit using Central Differencing. - -Inherits From: [`LinearCombination`](../../tfq/differentiators/LinearCombination.md) - - - -Central differencing computes a derivative at point x using an equal -number of points before and after x. A closed form for -the coefficients of this derivative for an arbitrary positive error order -is used here, which is described in the following article: -https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.CentralDifference(2, 0.01) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.1837807]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - error_order=2, - grid_spacing=0.001 -) -``` - -Instantiate a CentralDifference. - -Create a CentralDifference differentaitor, passing along an error order -and grid spacing to be used to contstruct differentiator coeffecients. - -#### Args: - - -* `error_order`: A positive, even `int` specifying the error order - of this differentiator. This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. -* `grid_spacing`: A positive `float` specifying how large of a - grid to use in calculating this finite difference. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/Differentiator.md b/docs/api_docs/python/tfq/differentiators/Differentiator.md deleted file mode 100644 index a71f59008..000000000 --- a/docs/api_docs/python/tfq/differentiators/Differentiator.md +++ /dev/null @@ -1,204 +0,0 @@ -
- - - - - - -
- -# tfq.differentiators.Differentiator - - - - - -
- - - View source on GitHub - -
- - - -## Class `Differentiator` - -Interface that defines how to specify gradients for a quantum circuit. - - - - - -This abstract class allows for the creation of gradient calculation -procedures for (expectation values from) quantum circuits, with -respect to a set of input parameter values. This allows one -to backpropagate through a quantum circuit. - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Specify how to differentiate a circuit with analytical expectation. - -This is called at graph runtime by TensorFlow. `differentiate_analytic` -should calculate the gradient of a batch of circuits and return it -formatted as indicated below. See -tfq.differentiators.ForwardDifference for an example. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -A `tf.Tensor` with the same shape as `symbol_values` representing -the gradient backpropageted to the `symbol_values` input of the op -you are differentiating through. - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Specify how to differentiate a circuit with sampled expectation. - -This is called at graph runtime by TensorFlow. `differentiate_sampled` -should calculate the gradient of a batch of circuits and return it -formatted as indicated below. See -tfq.differentiators.ForwardDifference for an example. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -A `tf.Tensor` with the same shape as `symbol_values` representing -the gradient backpropageted to the `symbol_values` input of the op -you are differentiating through. - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/ForwardDifference.md b/docs/api_docs/python/tfq/differentiators/ForwardDifference.md deleted file mode 100644 index babc3da99..000000000 --- a/docs/api_docs/python/tfq/differentiators/ForwardDifference.md +++ /dev/null @@ -1,188 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.ForwardDifference - - - - - -
- - - View source on GitHub - -
- - - -## Class `ForwardDifference` - -Differentiate a circuit using forward differencing. - -Inherits From: [`LinearCombination`](../../tfq/differentiators/LinearCombination.md) - - - -Forward differencing computes a derivative at a point x using only -points larger than x (in this way, it is 'one sided'). A closed form for -the coefficients of this derivative for an arbitrary positive error order -is used here, which is described in the following article: -https://www.sciencedirect.com/science/article/pii/S0377042799000886. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.ForwardDifference(2, 0.01) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # Gradient would be: -50 * f(x + 0.02) + 200 * f(x + 0.01) - 150 * f(x) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.184372]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - error_order=1, - grid_spacing=0.001 -) -``` - -Instantiate a ForwardDifference. - -Create a ForwardDifference differentiator, passing along an error order -and grid spacing to be used to contstruct differentiator coeffecients. - -#### Args: - - -* `error_order`: A positive `int` specifying the error order of this - differentiator. This corresponds to the smallest power - of `grid_spacing` remaining in the series that was truncated - to generate this finite differencing expression. -* `grid_spacing`: A positive `float` specifying how large of a - grid to use in calculating this finite difference. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/LinearCombination.md b/docs/api_docs/python/tfq/differentiators/LinearCombination.md deleted file mode 100644 index 9e7dfead0..000000000 --- a/docs/api_docs/python/tfq/differentiators/LinearCombination.md +++ /dev/null @@ -1,191 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.LinearCombination - - - - - -
- - - View source on GitHub - -
- - - -## Class `LinearCombination` - -Differentiate a circuit with respect to its inputs by - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - -linearly combining values obtained by evaluating the op using parameter -values perturbed about their forward-pass values. - - -``` - ->>> my_op = tfq.get_expectation_op() ->>> weights = [5, 6, 7] ->>> perturbations = [0, 0.5, 0.25] ->>> linear_differentiator = tfq.differentiators.LinearCombination( -... weights, perturbations -... ) ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums -... ) ->>> # Gradient would be: 5 * f(x+0) + 6 * f(x+0.5) + 7 * f(x+0.25) ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> # Note: this gradient visn't correct in value, but showcases ->>> # the principle of how gradients can be defined in a very flexible ->>> # fashion. ->>> grads -tf.Tensor([[5.089467]], shape=(1, 1), dtype=float32) - -``` - -

__init__

- -View source - -``` python -__init__( - weights, - perturbations -) -``` - -Instantiate this differentiator. - -Create a LinearComobinationDifferentiator. Pass in weights and -perturbations as described below. - -#### Args: - - -* `weights`: Python `list` of real numbers representing linear - combination coeffecients for each perturbed function - evaluation. -* `perturbations`: Python `list` of real numbers representing - perturbation values. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - - - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - - - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/ParameterShift.md b/docs/api_docs/python/tfq/differentiators/ParameterShift.md deleted file mode 100644 index eb94800c3..000000000 --- a/docs/api_docs/python/tfq/differentiators/ParameterShift.md +++ /dev/null @@ -1,254 +0,0 @@ -
- - - - - - -
- -# tfq.differentiators.ParameterShift - - - - - -
- - - View source on GitHub - -
- - - -## Class `ParameterShift` - -Calculate the general version of parameter-shift rule based gradients. - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - - -This ParameterShift is the gradient estimator of the following paper: - -[arXiv:1905.13311](https://arxiv.org/abs/1905.13311), Gavin E. Crooks. - -This ParameterShift is used for any programs with parameterized gates. -It internally decompose any programs into array of gates with at most -two distinct eigenvalues by using `cirq.decompose`. - -``` ->>> non_diff_op = tfq.get_expectation_op() ->>> linear_differentiator = tfq.differentiators.ParameterShift() ->>> # Get an expectation op, with this differentiator attached. ->>> op = linear_differentiator.generate_differentiable_op( -... analytic_op=non_diff_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # This value is now computed via the ParameterShift rule. ->>> # https://arxiv.org/abs/1905.13311 ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> grads -tf.Tensor([[-1.1839752]], shape=(1, 1), dtype=float32) -``` - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Calculate the gradient. - -The gradient calculations follows the following steps: - -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. -3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - -**CAUTION** -Analytic gradient measurements based on this ParameterShift generally -run at least K(=2) times SLOW than the original circuit. -On top of it, since all parameters of gates are shifted individually, -the time complexity is linear in the number of parameterized gates L. -So, you will see O(KL) slower time & space complexity than the original -forward pass measurements. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. 
-* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -Backward gradient values for each program & each pauli sum. It has -the shape of [batch_size, n_symbols]. - - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Calculate the gradient. - -The gradient calculations follows the following steps: - -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Use formula (31) from paper inside of TensorFlow to calculate - gradients from all the decomposed circuits. -3. Sum up terms and reshape for the total gradient that is compatible - with TensorFlow. - -**CAUTION** -Analytic gradient measurements based on this ParameterShift generally -run at least K(=2) times SLOW than the original circuit. -On top of it, since all parameters of gates are shifted individually, -the time complexity is linear in the number of parameterized gates L. -So, you will see O(KL) slower time & space complexity than the original -forward pass measurements. - -#### Args: - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` of positiver integers indicating the number - of samples used per term to calculate the expectation value - in the forward pass. 
-* `forward_pass_vals`: `tf.Tensor` of real numbers with shape - [batch_size, n_ops] containing the output of the forward pass - through the op you are differentiating. -* `grad`: `tf.Tensor` of real numbers with shape [batch_size, n_ops] - representing the gradient backpropagated to the output of the - op you are differentiating through. - - -#### Returns: - -Backward gradient values for each program & each pauli sum. It has -the shape of [batch_size, n_symbols]. - - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md b/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md deleted file mode 100644 index 46be1970a..000000000 --- a/docs/api_docs/python/tfq/differentiators/SGDifferentiator.md +++ /dev/null @@ -1,268 +0,0 @@ -
- - - - - - - -
- -# tfq.differentiators.SGDifferentiator - - - - - -
- - - View source on GitHub - -
- - - -## Class `SGDifferentiator` - -Stochastic generator based differentiator class. - -Inherits From: [`Differentiator`](../../tfq/differentiators/Differentiator.md) - - -SGDifferentiator allows you to get the sampled gradient value from three -different stochastic processes: -- parameter coordinate sampling - Choose one of the symbols of the given programs and perform coordinate - descent optimization. - e.g. if a program has parameters ['a','b','c'], choose 'a' w.r.t given - probability and get the partial derivative of the direction 'a' only -- parameter-shift rule generators sampling - e.g. Given symbols, there could be many operators sharing the same - symbol, X**'a', Y**'a', Z**'a'. Choose Y**'a' w.r.t given - probability and get the partial derivative of the generator. -- cost Hamiltonian sampling - e.g. if there are cost Hamiltonians such as ['Z1',Z2',Z3'], then choose - 'Z2' w.r.t given probability and get the partial derivative of the - Hamiltonian observable only. -and the expectation value of the sampled gradient value converges into -the true ground truth gradient value. -This Stochastic Generator Differentiator is the modified gradient estimator -of the following two papers: -- [arXiv:1901.05374](https://arxiv.org/abs/1901.05374), Harrow et al. -- [arXiv:1910.01155](https://arxiv.org/abs/1910.01155), Sweke et al. - -``` ->>> # Get an expectation op. ->>> my_op = tfq.get_expectation_op() ->>> # Attach a differentiator. ->>> my_dif = tfq.differentiators.SGDifferentiator() ->>> op = my_dif.generate_differentiable_op( -... analytic_op=my_op -... ) ->>> qubit = cirq.GridQubit(0, 0) ->>> circuit = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.X(qubit) ** sympy.Symbol('alpha')) -... ]) ->>> psums = tfq.convert_to_tensor([[cirq.Z(qubit)]]) ->>> symbol_values_array = np.array([[0.123]], dtype=np.float32) ->>> # Calculate tfq gradient. ->>> symbol_values_tensor = tf.convert_to_tensor(symbol_values_array) ->>> with tf.GradientTape() as g: -... 
g.watch(symbol_values_tensor) -... expectations = op(circuit, ['alpha'], symbol_values_tensor, psums) ->>> # This value is now computed via the stochastic processes described in: ->>> # https://arxiv.org/abs/1901.05374 ->>> # https://arxiv.org/abs/1910.01155 ->>> grads = g.gradient(expectations, symbol_values_tensor) ->>> # the result is non-deterministic in general, but in this special case, ->>> # it has only one result. ->>> grads - -``` - -

__init__

- -View source - -``` python -__init__( - stochastic_coordinate=True, - stochastic_generator=True, - stochastic_cost=True, - uniform_sampling=False -) -``` - -Instantiate this differentiator. -Create a SGDifferentiator. -Args: - stochastic_coordinate: Python `bool` to determine if - sampling on coordinate is performed or not. Default to True. - stochastic_generator: Python `bool` to determine if - sampling on generator is performed or not. Default to True. - stochastic_cost: Python `bool` to determine if sampling on - cost Hamiltonian is performed or not. Default to True. - uniform_sampling: Python `bool` to determine the - probabilistic distributions on the sampling targets. - Default to False. - - - -## Methods - -

differentiate_analytic

- -View source - -``` python -differentiate_analytic( - programs, - symbol_names, - symbol_values, - pauli_sums, - forward_pass_vals, - grad -) -``` - -Compute the sampled gradient with cascaded stochastic processes. -The gradient calculations follows the following steps: -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums -3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. -Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. - forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] -Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - -

differentiate_sampled

- -View source - -``` python -differentiate_sampled( - programs, - symbol_names, - symbol_values, - pauli_sums, - num_samples, - forward_pass_vals, - grad -) -``` - -Compute the sampled gradient with cascaded stochastic processes. -The gradient calculations follows the following steps: -1. Compute the decomposition of the incoming circuits so that we have - their generator information (done using cirq in a tf.py_function) -2. Construct probability distributions & perform stochastic processes - to select parameter-shift terms. - - Stochastic generator : sampling on parameter-shifted gates. - - Stochastic coordinate : sampling on symbols. - - Stochastic cost : sampling on pauli sums -3. Sum up terms and reshape for the total gradient that is compatible - with tensorflow differentiation. -Args: - programs: `tf.Tensor` of strings with shape [n_programs] containing - the string representations of the circuits to be executed. - symbol_names: `tf.Tensor` of strings with shape [n_symbols], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. - symbol_values: `tf.Tensor` of real numbers with shape - [n_programs, n_symbols] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - num_samples: `tf.Tensor` of positive integers representing the - number of samples per term in each term of pauli_sums used - during the forward pass. - pauli_sums : `tf.Tensor` of strings with shape [n_programs, n_ops] - representing output observables for each program. 
- forward_pass_vals : `tf.Tensor` of real numbers for forward pass - values with the shape of [n_programs, n_ops] - grad : `tf.Tensor` of real numbers for backpropagated gradient - values from the upper layer with the shape of - [n_programs, n_ops] -Returns: - A `tf.Tensor` of real numbers for sampled gradients from the above - samplers with the shape of [n_programs, n_symbols] - -

generate_differentiable_op

- -View source - -``` python -generate_differentiable_op() -``` - -Generate a differentiable op by attaching self to an op. - -This function returns a `tf.function` that passes values through to -`forward_op` during the forward pass and this differentiator (`self`) to -backpropagate through the op during the backward pass. If sampled_op -is provided the differentiators `differentiate_sampled` method will -be invoked (which requires sampled_op to be a sample based expectation -op with num_samples input tensor). If analytic_op is provided the -differentiators `differentiate_analytic` method will be invoked (which -requires analytic_op to be an analytic based expectation op that does -NOT have num_samples as an input). If both sampled_op and analytic_op -are provided an exception will be raised. - -***CAUTION*** - -This `generate_differentiable_op()` can be called only ONCE because -of the `one differentiator per op` policy. You need to call `refresh()` -to reuse this differentiator with another op. - -#### Args: - - -* `sampled_op`: A `callable` op that you want to make differentiable - using this differentiator's `differentiate_sampled` method. -* `analytic_op`: A `callable` op that you want to make differentiable - using this differentiators `differentiate_analytic` method. - - -#### Returns: - -A `callable` op that who's gradients are now registered to be -a call to this differentiators `differentiate_*` function. - - -

refresh

- -View source - -``` python -refresh() -``` - -Refresh this differentiator in order to use it with other ops. - - - - diff --git a/docs/api_docs/python/tfq/from_tensor.md b/docs/api_docs/python/tfq/from_tensor.md deleted file mode 100644 index 7cd9eca69..000000000 --- a/docs/api_docs/python/tfq/from_tensor.md +++ /dev/null @@ -1,69 +0,0 @@ -
- - -
- -# tfq.from_tensor - - - - - -
- - - View source on GitHub - -
- - - -Convert a tensor of tfq primitives back to Python objects. - -``` python -tfq.from_tensor(tensor_to_convert) -``` - - - - - -Convert a tensor representing `cirq.PauliSum` or `cirq.Circuit` -objects back to Python objects. - - -``` - ->>> my_qubits = cirq.GridQubit.rect(1, 2) ->>> my_circuits = [cirq.Circuit(cirq.X(my_qubits[0])), -... cirq.Circuit(cirq.Z(my_qubits[0])) -... ] ->>> tensor_input = tfq.convert_to_tensor(my_circuits) ->>> # Now tensor_input can be used as model input etc. ->>> same_circuits = tfq.from_tensor(tensor_input) ->>> # same_circuits now holds cirq.Circuit objects once more. ->>> same_circuits -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -#### Args: - - -* `tensor_to_convert`: `tf.Tensor` or `np.ndarray` representation to - convert back into python objects. - - -#### Returns: - -Python `list` of items converted to their python representation stored - in a (potentially nested) `list`. diff --git a/docs/api_docs/python/tfq/get_expectation_op.md b/docs/api_docs/python/tfq/get_expectation_op.md deleted file mode 100644 index 155344276..000000000 --- a/docs/api_docs/python/tfq/get_expectation_op.md +++ /dev/null @@ -1,103 +0,0 @@ -
- - -
- -# tfq.get_expectation_op - - - - - -
- - - View source on GitHub - -
- - - -Get a Tensorflow op that will calculate batches of expectation values. - -``` python -tfq.get_expectation_op(backend=None) -``` - - - - - -This function produces a non-differentiable TF op that will calculate -batches of expectation values given tensor batches of `cirq.Circuit`s, -parameter values, and `cirq.PauliSum` operators to measure. - - -``` - ->>> # Simulate circuits with C++. ->>> my_op = tfq.get_expectation_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.H(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.123]]) ->>> my_paulis = tfq.convert_to_tensor([[ -... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) -... ]]) ->>> # This op can now be run with: ->>> output = my_op( -... my_circuit_tensor, ['alpha'], my_values, my_paulis) ->>> output -tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - -``` - - -In order to make the op differentiable, a `tfq.differentiator` object is -needed. see tfq.differentiators for more details. Below is a simple -example of how to make my_op from the above code block differentiable: - -``` ->>> diff = tfq.differentiators.ForwardDifference() ->>> my_differentiable_op = diff.generate_differentiable_op( -... analytic_op=my_op -... ) -``` - - -#### Args: - - -* `backend`: Optional python `object` that specifies what backend this op -should use when evaluating circuits. Can be any -`cirq.SimulatesFinalState`. If not provided the default C++ analytical -expectation calculation op is returned. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, pauli_sums)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
-* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. - -* `Returns`: `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_sampled_expectation_op.md b/docs/api_docs/python/tfq/get_sampled_expectation_op.md deleted file mode 100644 index 759bca794..000000000 --- a/docs/api_docs/python/tfq/get_sampled_expectation_op.md +++ /dev/null @@ -1,112 +0,0 @@ -
- - -
- -# tfq.get_sampled_expectation_op - - - - - -
- - - View source on GitHub - -
- - - -Get a TensorFlow op that will calculate sampled expectation values. - -``` python -tfq.get_sampled_expectation_op(backend=None) -``` - - - - - -This function produces a non-differentiable TF op that will calculate -batches of expectation values given tensor batches of `cirq.Circuit`s, -parameter values, and `cirq.PauliSum` operators to measure. -Expectation is estimated by taking num_samples shots per term in the -corresponding PauliSum. - - -``` - ->>> # Simulate circuits with C++. ->>> my_op = tfq.get_sampled_expectation_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.H(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.123]]) ->>> my_paulis = tfq.convert_to_tensor([[ -... 3.5 * cirq.X(qubit) - 2.2 * cirq.Y(qubit) -... ]]) ->>> my_num_samples = np.array([[100]]) ->>> # This op can now be run with: ->>> output = my_op( -... my_circuit_tensor, ['alpha'], my_values, my_paulis, my_num_samples) ->>> output -tf.Tensor([[0.71530885]], shape=(1, 1), dtype=float32) - -``` - - -In order to make the op differentiable, a `tfq.differentiator` object is -needed. see tfq.differentiators for more details. Below is a simple -example of how to make my_op from the above code block differentiable: - - -``` - ->>> diff = tfq.differentiators.ForwardDifference() ->>> my_differentiable_op = diff.generate_differentiable_op( -... analytic_op=my_op -... ) - -``` - -#### Args: - - -* `backend`: Python `object` that specifies what backend this op should use - when evaluating circuits. It only accepts `cirq.Sampler`. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, pauli_sums, num_samples)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. 
-* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `pauli_sums`: `tf.Tensor` of strings with shape [batch_size, n_ops] - containing the string representation of the operators that will - be used on all of the circuits in the expectation calculations. -* `num_samples`: `tf.Tensor` with `n_samples[i][j]` is equal to the - number of samples to draw in each term of `pauli_sums[i][j]` - when estimating the expectation. It can also be tiled up to the - shape of pauli_sums by broadcasting if tf.shape(num_samples)[0] - or tf.shape(num_samples)[1] is 1 and the other dimension is the - same with that of pauli_sums. - -* `Returns`: `tf.Tensor` with shape [batch_size, n_ops] that holds the - expectation value for each circuit with each op applied to it - (after resolving the corresponding parameters in). \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_sampling_op.md b/docs/api_docs/python/tfq/get_sampling_op.md deleted file mode 100644 index 6dd84d19e..000000000 --- a/docs/api_docs/python/tfq/get_sampling_op.md +++ /dev/null @@ -1,88 +0,0 @@ -
- - -
- -# tfq.get_sampling_op - - - - - -
- - - View source on GitHub - -
- - - -Get a Tensorflow op that produces samples from given quantum circuits. - -``` python -tfq.get_sampling_op(backend=None) -``` - - - - - -This function produces a non-differentiable op that will calculate -batches of circuit samples given tensor batches of `cirq.Circuit`s, -parameter values, and a scalar telling the op how many samples to take. - - -``` - ->>> # Simulate circuits with cirq. ->>> my_op = tfq.get_sampling_op(backend=cirq.sim.Simulator()) ->>> # Simulate circuits with C++. ->>> my_second_op = tfq.get_sampling_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor( -... [cirq.Circuit(cirq.X(qubit)**my_symbol)]) ->>> my_values = np.array([[2.0]]) ->>> n_samples = np.array([10]) ->>> # This op can now be run to take samples. ->>> output = my_second_op( -... my_circuit_tensor, ['alpha'], my_values, n_samples) ->>> output - - -``` - - -#### Args: - - -* `backend`: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any `cirq.Sampler`. If - not provided the default C++ sampling op is returned. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values, num_samples)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. -* `num_samples`: `tf.Tensor` with one element indicating the number of - samples to draw. 
- -* `Returns`: `tf.Tensor` with shape - [batch_size, num_samples, n_qubits] that - holds samples (as boolean values) for each circuit. \ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_state_op.md b/docs/api_docs/python/tfq/get_state_op.md deleted file mode 100644 index 885d23c21..000000000 --- a/docs/api_docs/python/tfq/get_state_op.md +++ /dev/null @@ -1,85 +0,0 @@ -
- - -
- -# tfq.get_state_op - - - - - -
- - - View source on GitHub - -
- - - -Get a tensorflow op that produces states from given quantum circuits. - -``` python -tfq.get_state_op(backend=None) -``` - - - - - -This function produces a non-differentiable op that will calculate -batches of state tensors given tensor batches of `cirq.Circuit`s and -parameter values. - - -``` - ->>> # Simulate circuits with cirq. ->>> my_op = tfq.get_state_op(backend=cirq.DensityMatrixSimulator()) ->>> # Simulate circuits with C++. ->>> my_second_op = tfq.get_state_op() ->>> # Prepare some inputs. ->>> qubit = cirq.GridQubit(0, 0) ->>> my_symbol = sympy.Symbol('alpha') ->>> my_circuit_tensor = tfq.convert_to_tensor([ -... cirq.Circuit(cirq.Y(qubit) ** my_symbol) -... ]) ->>> my_values = np.array([[0.5]]) ->>> # This op can now be run to calculate the state. ->>> output = my_second_op(my_circuit_tensor, ['alpha'], my_values) ->>> output - - -``` - - -#### Args: - - -* `backend`: Optional Python `object` that specifies what backend this op - should use when evaluating circuits. Can be any - `cirq.SimulatesFinalState`. If not provided, the default C++ - wavefunction simulator will be used. - - -#### Returns: - -A `callable` with the following signature: - -```op(programs, symbol_names, symbol_values)``` - - -* `programs`: `tf.Tensor` of strings with shape [batch_size] containing - the string representations of the circuits to be executed. -* `symbol_names`: `tf.Tensor` of strings with shape [n_params], which - is used to specify the order in which the values in - `symbol_values` should be placed inside of the circuits in - `programs`. -* `symbol_values`: `tf.Tensor` of real numbers with shape - [batch_size, n_params] specifying parameter values to resolve - into the circuits specified by programs, following the ordering - dictated by `symbol_names`. - -* `Returns`: `tf.Tensor` with shape [batch_size, size of state] that - contains the state information of the circuit. 
\ No newline at end of file diff --git a/docs/api_docs/python/tfq/get_supported_gates.md b/docs/api_docs/python/tfq/get_supported_gates.md deleted file mode 100644 index 7a4409939..000000000 --- a/docs/api_docs/python/tfq/get_supported_gates.md +++ /dev/null @@ -1,29 +0,0 @@ -
- - -
- -# tfq.get_supported_gates - - - - - -
- - - View source on GitHub - -
- - - -A helper to get the gates supported by tfq. - -``` python -tfq.get_supported_gates() -``` - - - - diff --git a/docs/api_docs/python/tfq/layers.md b/docs/api_docs/python/tfq/layers.md deleted file mode 100644 index 1cb0edd2d..000000000 --- a/docs/api_docs/python/tfq/layers.md +++ /dev/null @@ -1,39 +0,0 @@ -
- - -
- -# Module: tfq.layers - - - - -
- - - View source on GitHub - -
- - - -Module definitions for tensorflow_quantum.python.layers.* - - - -## Classes - -[`class AddCircuit`](../tfq/layers/AddCircuit.md): A layer that pre/appends a sequence of gates to the input circuit tensor. - -[`class ControlledPQC`](../tfq/layers/ControlledPQC.md): Controlled Parametrized Quantum Circuit (PQC) Layer. - -[`class Expectation`](../tfq/layers/Expectation.md): A Layer that calculates an expectation value. - -[`class PQC`](../tfq/layers/PQC.md): Parametrized Quantum Circuit (PQC) Layer. - -[`class Sample`](../tfq/layers/Sample.md): A Layer that samples from a quantum circuit. - -[`class SampledExpectation`](../tfq/layers/SampledExpectation.md): A layer that calculates a sampled expectation value. - -[`class State`](../tfq/layers/State.md): A Layer that simulates a quantum state. - diff --git a/docs/api_docs/python/tfq/layers/AddCircuit.md b/docs/api_docs/python/tfq/layers/AddCircuit.md deleted file mode 100644 index 7a2ec3980..000000000 --- a/docs/api_docs/python/tfq/layers/AddCircuit.md +++ /dev/null @@ -1,857 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.AddCircuit - - - - - -
- - - View source on GitHub - -
- - - -## Class `AddCircuit` - -A layer that pre/appends a sequence of gates to the input circuit tensor. - - - - - -This layer allows for an arbitrary `cirq.Circuit` (or list of circuits of -equal length to the input) to be appended or prepended to the list of input -circuits. - - -``` - ->>> qubits = cirq.GridQubit.rect(1,4) ->>> add = tfq.layers.AddCircuit() ->>> output = add( -... [cirq.Circuit(cirq.Y(qubits[0])), cirq.Circuit(cirq.Z(qubits[0])) -... append = cirq.Circuit(cirq.Y(qubits[0]))) -... ])) ->>> # Now we have a layer that would append a single Y gate to any inputs. ->>> tfq.from_tensor(output) -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), -])] - -``` - -

__init__

- -View source - -``` python -__init__(**kwargs) -``` - -Instantiate this layer. - - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/CircuitConstruction.md b/docs/api_docs/python/tfq/layers/CircuitConstruction.md deleted file mode 100644 index 3581801c5..000000000 --- a/docs/api_docs/python/tfq/layers/CircuitConstruction.md +++ /dev/null @@ -1,894 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.CircuitConstruction - - - - - -
- - - View source on GitHub - -
- - - -## Class `CircuitConstruction` - -A class that defines the behavior of circuit constructing layers in TFQ. - - - - - -An abstract class that defines all behaviors of a circuit constructing -layer, namely the ability to modify a tensor representation of a -circuit inside the TF graph by prepending or appending to the input circuit. - - -``` - ->>> class AddXLayer(tfq.layers.CircuitConstruction): -... def __init__(self, qubits): -... super().__init__() -... self.qubits = qubits -... def get_circuit(self): -... x_wall = cirq.Circuit() -... for qubit in qubits: -... x_wall.append(cirq.X(qubit)) -... return x_wall ->>> qubits = cirq.GridQubit.rect(1,4) ->>> model = tf.keras.Sequential([ -... tf.keras.layers.Input(shape=(), dtype=tf.dtypes.string), -... AddXLayer(qubits) -... ]) ->>> output = model(tfq.convert_to_tensor([ -... cirq.Circuit(cirq.Y(qubits[0])), -... cirq.Circuit(cirq.Z(qubits[0])) -... ])) ->>> tfq.from_tensor(output) -[cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Y.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - cirq.X.on(cirq.GridQubit(0, 1)), - cirq.X.on(cirq.GridQubit(0, 2)), - cirq.X.on(cirq.GridQubit(0, 3)), - ]), -]) - cirq.Circuit([ - cirq.Moment(operations=[ - cirq.Z.on(cirq.GridQubit(0, 0)), - ]), - cirq.Moment(operations=[ - cirq.X.on(cirq.GridQubit(0, 0)), - cirq.X.on(cirq.GridQubit(0, 1)), - cirq.X.on(cirq.GridQubit(0, 2)), - cirq.X.on(cirq.GridQubit(0, 3)), - ]), -])] - -``` - -

__init__

- -View source - -``` python -__init__( - prepend=False, - **kwargs -) -``` - -Instantiate a GateLayer object. - -Create a GateLayer, whose main purpose is to append or prepend to -tensors of circuits in the TF graph. - -#### Args: - - -* `prepend`: Python `bool` if set to true, the gates produced by this - layer will be prepended to the input instead of appended. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -View source - -``` python -build(input_shape) -``` - -Keras build function. - - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_circuit

- -View source - -``` python -get_circuit() -``` - -Abstract method that returns a cirq.Circuit - -At runtime, this method will be called, and must produce a -`cirq.Circuit` which will be pre/appended to the input circuit tensor. - -#### Returns: - -`cirq.Circuit` circuit to prepend/append to the input tensor - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/ControlledPQC.md b/docs/api_docs/python/tfq/layers/ControlledPQC.md deleted file mode 100644 index fb5141e64..000000000 --- a/docs/api_docs/python/tfq/layers/ControlledPQC.md +++ /dev/null @@ -1,954 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.ControlledPQC - - - - - -
- - - View source on GitHub - -
- - - -## Class `ControlledPQC` - -Controlled Parametrized Quantum Circuit (PQC) Layer. - - - - - -The `ControlledPQC` layer is very similar to the regular `PQC` layer, but -with one major difference. The `ControlledPQC` layer requires the caller -of the layer to provide the control parameter inputs for `model_circuit`. -You can see how this works through a simple example: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC(model, cirq.Z(bit)) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> res = outputs([quantum_data, model_params]) ->>> res -tf.Tensor( -[[-1.4901161e-08] - [-7.0710683e-01]], shape=(2, 1), dtype=float32) - -``` - - -Just like with the `PQC` it is *very important* that the quantum datapoint -circuits do not contain any `sympy.Symbols` themselves (This can be -supported with advanced usage of the tfq.layers.Expectation layer). Just -like `PQC` it is possible to specify multiple readout operations and -switch to sample based expectation calculation: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC( -... model, -... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], -... repetitions=5000) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> res = outputs([quantum_data, model_params]) ->>> res -tf.Tensor( -[[-0.0028 1. 
-0.0028] - [-0.6956 -0.498 -0.498 ]], shape=(2, 3), dtype=float32) - -``` - - -A value for `backend` can also be supplied in the layer constructor -arguments to indicate which supported backend you would like to use. -A value for `differentiator` can also be supplied in the constructor -to indicate the differentiation scheme this `ControlledPQC` layer -should use. Here's how you would take the gradients of the -above example using a `cirq.Simulator` backend (which is slower -than `backend=None` which uses C++): - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> model = cirq.Circuit( -... cirq.X(bit) ** sympy.Symbol('alpha'), -... cirq.Z(bit) ** sympy.Symbol('beta') -... ) ->>> outputs = tfq.layers.ControlledPQC( -... model, -... [cirq.Z(bit), cirq.X(bit), cirq.Y(bit)], -... repetitions=5000, -... backend=cirq.Simulator(), -... differentiator=tfq.differentiators.ParameterShift()) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(bit)) -... ]) ->>> model_params = tf.convert_to_tensor([[0.5, 0.5], [0.25, 0.75]]) ->>> with tf.GradientTape() as g: -... g.watch(model_params) -... res = outputs([quantum_data, model_params]) ->>> grads = g.gradient(res, model_params) ->>> grads -tf.Tensor( -[[-3.1415927 3.1415927 ] - [-0.9211149 0.02764606]], shape=(2, 2), dtype=float32)] - -``` - - -Lastly, like all layers in TensorFlow the `ControlledPQC` layer can be -called on any `tf.Tensor` as long as it is the right shape. This means -you could replace `model_params` in the above example with the outputs -from a `tf.keras.Dense` layer or replace `quantum_data` with values fed -in from a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - model_circuit, - operators, - **kwargs -) -``` - -Instantiate this layer. - -Create a layer that will output expectation values of the given -operators when fed quantum data to it's input layer. This layer will -take two input tensors, one representing a quantum data source (these -circuits must not contain any symbols) and the other representing -control parameters for the model circuit that gets appended to the -datapoints. - -model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. -operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. -repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. -backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit `cirq.SimulatesFinalState` - if `sampled_based` is True or it must inherit `cirq.Sampler` if - `sample_based` is False. -differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/Expectation.md b/docs/api_docs/python/tfq/layers/Expectation.md deleted file mode 100644 index cc38dc21f..000000000 --- a/docs/api_docs/python/tfq/layers/Expectation.md +++ /dev/null @@ -1,1035 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.Expectation - - - - - -
- - - View source on GitHub - -
- - - -## Class `Expectation` - -A Layer that calculates an expectation value. - - - - - -Given an input circuit and set of parameter values, prepare a quantum state -and output expectation values taken on that state with respect to some -observables to the tensorflow graph. - - -First define a simple helper function for generating a parametrized -quantum circuit that we will use throughout: - -``` ->>> def _gen_single_bit_rotation_problem(bit, symbols): -... """Generate a toy problem on 1 qubit.""" -... starting_state = [0.123, 0.456, 0.789] -... circuit = cirq.Circuit( -... cirq.Rx(starting_state[0])(bit), -... cirq.Ry(starting_state[1])(bit), -... cirq.Rz(starting_state[2])(bit), -... cirq.Rz(symbols[2])(bit), -... cirq.Ry(symbols[1])(bit), -... cirq.Rx(symbols[0])(bit) -... ) -... return circuit -``` - - -In quantum machine learning there are two very common use cases that -align with keras layer constructs. The first is where the circuits -represent the input data points (see the note at the bottom about -using compiled models): - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> circuit_list = [ -... _gen_single_bit_rotation_problem(bit, symbols), -... cirq.Circuit( -... cirq.Z(bit) ** symbols[0], -... cirq.X(bit) ** symbols[1], -... cirq.Z(bit) ** symbols[2] -... ), -... cirq.Circuit( -... cirq.X(bit) ** symbols[0], -... cirq.Z(bit) ** symbols[1], -... cirq.X(bit) ** symbols[2] -... ) -... ] ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... circuit_list, symbol_names=symbols, operators = ops) ->>> # Here output[i][j] corresponds to the expectation of all the ops ->>> # in ops w.r.t circuits[i] where keras managed variables are ->>> # placed in the symbols 'x', 'y', 'z'. 
->>> tf.shape(output) -tf.Tensor([3 2], shape=(2,), dtype=int32) - -``` - - -Here, different `cirq.Circuit` instances sharing the common symbols 'x', -'y' and 'z' are used as input. Keras uses the `symbol_names` -argument to map Keras managed variables to these circuits constructed -with `sympy.Symbol`s. Note that you used a Python `list` containing your -circuits, you could also specify a `tf.keras.Input` layer or any -tensorlike object to specify the circuits you would like fed to the layer -at runtime. - - -Another common use case is where there is a fixed circuit and the -expectation operators vary: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... fixed_circuit, -... symbol_names=symbols, -... operators=ops, -... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) ->>> # Here output[i][j] corresponds to ->>> # the expectation of operators[i][j] w.r.t the circuit where ->>> # variable values are managed by keras and store numbers in ->>> # the symbols 'x', 'y', 'z'. ->>> tf.shape(output) -tf.Tensor([1 2], shape=(2,), dtype=int32) - -``` - - -Note that in the above examples you used a `cirq.Circuit` object and a list -of `cirq.PauliSum` objects as inputs to your layer. To allow for varying -inputs your could change the line in the above code to: -`expectation_layer(circuit_inputs, symbol_names=symbols, operators=ops)` -with `circuit_inputs` is `tf.keras.Input(shape=(), dtype=tf.dtypes.string)` -to allow you to pass in different circuits in a compiled model. Lastly -you also supplied a `tf.keras.initializer` to the `initializer` argument. -This argument is optional in the case that the layer itself will be managing -the symbols of the circuit and not have them fed in from somewhere else in -the model. 
- - -There are also some more complex use cases. Notably these use cases all -make use of the `symbol_values` parameter that causes the -`Expectation` layer to stop managing the `sympy.Symbol`s in the quantum -circuits for the user and instead require them to supply input -values themselves. Lets look at the case where there -is a single fixed circuit, some fixed operators and symbols that must be -common to all circuits: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [cirq.Z(bit), cirq.X(bit)] ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> values = [[1,1,1], [2,2,2], [3,3,3]] ->>> expectation_layer = tfq.layers.Expectation() ->>> output = expectation_layer( -... circuit, -... symbol_names=symbols, -... symbol_values=values, -... operators=ops) ->>> # output[i][j] = The expectation of operators[j] with ->>> # values[i] placed into the symbols of the circuit ->>> # with the order specified by symbol_names. ->>> # so output[1][2] = The expectation of your circuit with parameter ->>> # values [2,2,2] w.r.t Pauli X. ->>> output -tf.Tensor( -[[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - -``` - - -Here is a simple model that uses this particular input signature of -tfq.layers.Expectation, that learns to undo the random rotation -of the qubit: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> control_input = tf.keras.Input(shape=(1,)) ->>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) ->>> d1 = tf.keras.layers.Dense(10)(control_input) ->>> d2 = tf.keras.layers.Dense(3)(d1) ->>> expectation = tfq.layers.Expectation()( -... circuit_inputs, # See note below! -... symbol_names=symbols, -... symbol_values=d2, -... 
operators=cirq.Z(bit)) ->>> data_in = np.array([[1], [0]], dtype=np.float32) ->>> data_out = np.array([[1], [-1]], dtype=np.float32) ->>> model = tf.keras.Model( -... inputs=[circuit_inputs, control_input], outputs=expectation) ->>> model.compile( -... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), -... loss=tf.keras.losses.mean_squared_error) ->>> history = model.fit( -... x=[tfq.convert_to_tensor([circuit] * 2), data_in], -... y=data_out, -... epochs=100) - -``` - - -For an example featuring this layer, please check out `Quantum sensing` -in our dev website http://www.tensorflow.org/quantum/tutorials. - -Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed -Python `list` objects. In addition to this they can also be fed `tf.Tensor` -inputs, meaning that you can input all of these things from other Tensor -objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). - -Note: When specifying a new layer for a *compiled* `tf.keras.Model` using -something like `tfq.layers.Expectation()(cirq.Circuit(...), ...)` please -be sure to instead use `tfq.layers.Expectation()(circuit_input_tensor, ...)` -where `circuit_input_tensor` is filled with -`tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This -is because compiled keras models require layer `call` inputs to be -traceable back to a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - backend=None, - differentiator=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output expectation values gained from -simulating a quantum circuit. - -#### Args: - - -* `backend`: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. -* `differentiator`: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit tfq.differentiators.Differentiator and - implements `differentiate_analytic` method. Defaults to None, - which uses `linear_combination.ForwardDifference()`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/PQC.md b/docs/api_docs/python/tfq/layers/PQC.md deleted file mode 100644 index a145a050c..000000000 --- a/docs/api_docs/python/tfq/layers/PQC.md +++ /dev/null @@ -1,963 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.PQC - - - - - -
- - - View source on GitHub - -
- - - -## Class `PQC` - -Parametrized Quantum Circuit (PQC) Layer. - - - - - -This layer is for training parameterized quantum models. -Given a parameterized circuit, this layer initializes the parameters -and manages them in a Keras native way. - -We start by defining a simple quantum circuit on one qubit. -This circuit parameterizes an arbitrary rotation on the Bloch sphere in -terms of the three angles a, b, and c: - - -``` - ->>> q = cirq.GridQubit(0, 0) ->>> (a, b, c) = sympy.symbols("a b c") ->>> circuit = cirq.Circuit( -... cirq.Rz(a)(q), -... cirq.Rx(b)(q), -... cirq.Rz(c)(q), -... cirq.Rx(-b)(q), -... cirq.Rz(-a)(q) -... ) - -``` - - -In order to extract information from our circuit, we must apply measurement -operators. For now we choose to make a Z measurement. In order to observe -an output, we must also feed our model quantum data (NOTE: quantum data -means quantum circuits with no free parameters). Though the output values -will depend on the default random initialization of the angles in our model, -one will be the negative of the other since `cirq.X(q)` causes a bit flip: - - -``` - ->>> outputs = tfq.layers.PQC(circuit, cirq.Z(q)) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... ]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -We can also choose to measure the three pauli matrices, sufficient to -fully characterize the operation of our model, or choose to simulate -sampled expectation values by specifying a number of measurement shots -(repetitions) to average over. Notice that using only 200 repetitions -introduces variation between the two rows of data, due to the -probabilistic nature of measurement. - - -``` - ->>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] ->>> outputs = tfq.layers.PQC(circuit, measurement, repetitions=200) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... 
]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -A value for `backend` can also be supplied in the layer constructor -arguments to indicate which supported backend you would like to use. -A value for `differentiator` can also be supplied in the constructor -to indicate the differentiation scheme this `PQC` layer should use. -Here's how you would take the gradients of the above example using a -`cirq.Simulator` backend (which is slower than the default -`backend=None` which uses C++): - - -``` - ->>> q = cirq.GridQubit(0, 0) ->>> (a, b, c) = sympy.symbols("a b c") ->>> circuit = cirq.Circuit( -... cirq.Rz(a)(q), -... cirq.Rx(b)(q), -... cirq.Rz(c)(q), -... cirq.Rx(-b)(q), -... cirq.Rz(-a)(q) -... ) ->>> measurement = [cirq.X(q), cirq.Y(q), cirq.Z(q)] ->>> outputs = tfq.layers.PQC( -... circuit, -... measurement, -... repetitions=5000, -... backend=cirq.Simulator(), -... differentiator=tfq.differentiators.ParameterShift()) ->>> quantum_data = tfq.convert_to_tensor([ -... cirq.Circuit(), -... cirq.Circuit(cirq.X(q)) -... ]) ->>> res = outputs(quantum_data) ->>> res - - -``` - - -Lastly, like all layers in TensorFlow the `PQC` layer can be called on any -`tf.Tensor` as long as it is the right shape. This means you could replace -replace `quantum_data` with values fed in from a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - model_circuit, - operators, - **kwargs -) -``` - -Instantiate this layer. - -Create a layer that will output expectation values of the given -operators when fed quantum data to it's input layer. This layer will -accept one input tensor representing a quantum data source (these -circuits must not contain any symbols) and append the model_circuit to -them, execute them and then finally output the expectation values. - - -model_circuit: `cirq.Circuit` containing `sympy.Symbols` that will be - used as the model which will be fed quantum data inputs. -operators: `cirq.PauliSum` or Python `list` of `cirq.PauliSum` objects - used as observables at the end of the model circuit. -repetitions: Optional Python `int` indicating how many samples to use - when estimating expectation values. If `None` analytic expectation - calculation is used. -backend: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead. - If a cirq object is given it must inherit either - `cirq.SimulatesFinalState` if analytic expectations are desired or - `cirq.Sampler` if sampled expectations are desired. -differentiator: Optional `tfq.differentiator` object to specify how - gradients of `model_circuit` should be calculated. -initializer: Optional `tf.keras.initializer` object to specify how the - symbols in `model_circuit` should be initialized when creating - the managed variables. -regularizer: Optional `tf.keras.regularizer` object applied to the - managed variables parameterizing `model_circuit`. -constraint: Optional `tf.keras.constraint` object applied to the - managed variables parameterizing `model_circuit`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -View source - -``` python -build(input_shape) -``` - -Keras build function. - - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/Sample.md b/docs/api_docs/python/tfq/layers/Sample.md deleted file mode 100644 index 815332a5c..000000000 --- a/docs/api_docs/python/tfq/layers/Sample.md +++ /dev/null @@ -1,960 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.Sample - - - - - -
- - - View source on GitHub - -
- - - -## Class `Sample` - -A Layer that samples from a quantum circuit. - - - - - -Given an input circuit and set of parameter values, output samples -taken from the end of the circuit. - -First lets define a simple circuit to sample from: - -``` ->>> def get_circuit(): -... q0 = cirq.GridQubit(0, 0) -... q1 = cirq.GridQubit(1, 0) -... circuit = cirq.Circuit( -... cirq.X(q0), -... cirq.CNOT(q1) -... ) -... -... return circuit -``` - -#### When printed: - - - -``` ->>> get_circuit() -(0, 0): ───X───@─── - │ -(1, 0): ───────X─── -``` - -Using tfq.layers.Sample, it's possible to sample outputs from a given -circuit. The circuit above will put both qubits in the |1> state. - -To retrieve samples of the output state: - -``` ->>> sample_layer = tfq.layers.Sample() ->>> output = sample_layer(get_circuit(), repetitions=4) ->>> output - -``` - -Notice above that there were no parameters passed as input into the -layer, because the circuit wasn't parameterized. If instead the circuit -had parameters, e.g. - -``` ->>> def get_parameterized_circuit(symbols): -... q0 = cirq.GridQubit(0, 0) -... q1 = cirq.GridQubit(1, 0) -... circuit = cirq.Circuit( -... cirq.X(q0) ** symbols[0], -... cirq.CNOT(q1) -... ) -... -... return circuit -``` - -Then it becomes necessary to provide a value for the symbol using -`symbol_names` and `symbol_values`. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() ->>> output = sample_layer(get_parameterized_circuit(), -... symbol_names=symbols, symbol_values=[[0.5]], repetitions=4) ->>> tf.shape(output.to_tensor()) -tf.Tensor([1 4 2], shape=(3,), dtype=int32) -``` - -Note that using multiple sets of parameters returns multiple -independent samples on the same circuit. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() ->>> params = tf.convert_to_tensor([[0.5], [0.4]], -... dtype=tf.dtypes.float32) ->>> output = sample_layer(get_parameterized_circuit(), -... 
symbol_names=symbols, symbol_values=params, repetitions=4) ->>> tf.shape(output.to_tensor()) -tf.Tensor([2 4 2], shape=(3,), dtype=int32) -``` - -The sample layer can also be used without explicitly passing in a -circuit, but instead using the layer with a batch of circuits. This layer -will then sample the circuits provided in the batch with multiple sets of -parameters, at the same time. Note that the parameters will not be -crossed along all circuits, the circuit at index i will be run with the -parameters at index i. - -``` ->>> symbols = sympy.symbols(['x']) ->>> sample_layer = tfq.layers.Sample() -``` - -With the sample layer defined, just define both the circuit and -parameter inputs. - -``` ->>> q0 = cirq.GridQubit(0, 0) ->>> q1 = cirq.GridQubit(1, 0) ->>> circuits = tfq.convert_to_tensor([ -... cirq.Circuit( -... cirq.X(q0) ** s[0], -... cirq.CNOT(q0, q1), -... ), -... cirq.Circuit( -... cirq.Y(q0) ** s[0], -... cirq.CNOT(q0, q1), -... ) -... ]) ->>> params = tf.convert_to_tensor([[0.5], [0.4]], -... dtype=tf.dtypes.float32) -``` - -The layer can be used as usual: - -``` ->>> output = sample_layer(circuits, -... symbol_names=symbols, symbol_values = params, repetitions=4) ->>> tf.shape(output.to_tensor()) - tf.Tensor([2 4 2], shape=(3,), dtype=int32) -``` - -

__init__

- -View source - -``` python -__init__( - backend=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output bitstring samples taken from either a -simulated quantum state or a real quantum computer - -#### Args: - - -* `backend`: Optional Backend to use to simulate this state. Defaults - to the native Tensorflow simulator (None), however users may - also specify a preconfigured cirq execution object to use - instead, which must inherit `cirq.SimulatesSamples` or a - `cirq.Sampler`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/SampledExpectation.md b/docs/api_docs/python/tfq/layers/SampledExpectation.md deleted file mode 100644 index e712f12be..000000000 --- a/docs/api_docs/python/tfq/layers/SampledExpectation.md +++ /dev/null @@ -1,1039 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.SampledExpectation - - - - - -
- - - View source on GitHub - -
- - - -## Class `SampledExpectation` - -A layer that calculates a sampled expectation value. - - - - - -Given an input circuit and set of parameter values, output expectation -values of observables computed using measurement results sampled from -the input circuit. - - -First define a simple helper function for generating a parametrized -quantum circuit that we will use throughout: - -``` ->>> def _gen_single_bit_rotation_problem(bit, symbols): -... """Generate a toy problem on 1 qubit.""" -... starting_state = [0.123, 0.456, 0.789] -... circuit = cirq.Circuit( -... cirq.Rx(starting_state[0])(bit), -... cirq.Ry(starting_state[1])(bit), -... cirq.Rz(starting_state[2])(bit), -... cirq.Rz(symbols[2])(bit), -... cirq.Ry(symbols[1])(bit), -... cirq.Rx(symbols[0])(bit) -... ) -... return circuit -``` - - -In quantum machine learning there are two very common use cases that -align with keras layer constructs. The first is where the circuits -represent the input data points: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x y z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> num_samples = [100, 200] ->>> circuit_list = [ -... _gen_single_bit_rotation_problem(bit, symbols), -... cirq.Circuit( -... cirq.Z(bit) ** symbols[0], -... cirq.X(bit) ** symbols[1], -... cirq.Z(bit) ** symbols[2] -... ), -... cirq.Circuit( -... cirq.X(bit) ** symbols[0], -... cirq.Z(bit) ** symbols[1], -... cirq.X(bit) ** symbols[2] -... ) -... ] ->>> sampled_expectation_layer = tfq.layers.SampledExpectation() ->>> output = sampled_expectation_layer( -... circuit_list, -... symbol_names=symbols, -... operators=ops, -... repetitions=num_samples) ->>> # Here output[i][j] corresponds to the sampled expectation ->>> # of all the ops in ops w.r.t circuits[i] where Keras managed ->>> # variables are placed in the symbols 'x', 'y', 'z'. 
->>> tf.shape(output) -tf.Tensor([3 2], shape=(2,), dtype=int32) - -``` - - -Here, different `cirq.Circuit` instances sharing the common symbols 'x', -'y' and 'z' are used as input. Keras uses the `symbol_names` -argument to map Keras managed variables to these circuits constructed -with `sympy.Symbol`s. The shape of `num_samples` is equal to that of `ops`. - - -The second most common use case is where there is a fixed circuit and -the expectation operators vary: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> ops = [-1.0 * cirq.Z(bit), cirq.X(bit) + 2.0 * cirq.Z(bit)] ->>> fixed_circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> expectation_layer = tfq.layers.SampledExpectation() ->>> output = expectation_layer( -... fixed_circuit, -... symbol_names=symbols, -... operators=ops, -... repetitions=5000, -... initializer=tf.keras.initializers.RandomUniform(0, 2 * np.pi)) ->>> # Here output[i][j] corresponds to ->>> # the sampled expectation of operators[i][j] using 5000 samples w.r.t ->>> # the circuit where variable values are managed by keras and store ->>> # numbers in the symbols 'x', 'y', 'z'. ->>> tf.shape(output) -tf.Tensor([1 2], shape=(2,), dtype=int32) - -``` - - -Here different `cirq.PauliSum` or `cirq.PauliString` instances can be -used as input to calculate the expectation on the fixed circuit that -the layer was initially constructed with. - - -There are also some more complex use cases that provide greater flexibility. -Notably these configurations all make use of the `symbol_values` parameter -that causes the `SampledExpectation` layer to stop managing the -`sympy.Symbol`s in the quantum circuits and instead requires the user to -supply inputs themselves. 
Lets look at the case where there -is a single fixed circuit, some fixed operators and symbols that must be -common to all circuits: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x y z') ->>> ops = [cirq.Z(bit), cirq.X(bit)] ->>> num_samples = [100, 200] ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> values = [[1,1,1], [2,2,2], [3,3,3]] ->>> sampled_expectation_layer = tfq.layers.SampledExpectation() ->>> output = sampled_expectation_layer( -... circuit, -... symbol_names=symbols, -... symbol_values=values, -... operators=ops, -... repetitions=num_samples) ->>> # output[i][j] = The sampled expectation of ops[j] with ->>> # values_tensor[i] placed into the symbols of the circuit ->>> # with the order specified by feed_in_params. ->>> # so output[1][2] = The sampled expectation of a circuit with parameter ->>> # values [2,2,2] w.r.t Pauli X, estimated using 200 samples per term. ->>> output # Non-deterministic result. It can vary every time. -tf.Tensor( -[[0.52, 0.72], - [0.34, 1. ], - [0.78, 0.48]], shape=(3, 2), dtype=float32) - -``` - - -Tip: you can compare the above result with that of `Expectation`: -tf.Tensor( -[[0.63005245 0.76338404] - [0.25707167 0.9632684 ] - [0.79086655 0.5441111 ]], shape=(3, 2), dtype=float32) - - -Here is a simple model that uses this particular input signature of -tfq.layers.SampledExpectation, that learns to undo the random rotation -of the qubit: - - -``` - ->>> bit = cirq.GridQubit(0, 0) ->>> symbols = sympy.symbols('x, y, z') ->>> circuit = _gen_single_bit_rotation_problem(bit, symbols) ->>> control_input = tf.keras.Input(shape=(1,)) ->>> circuit_inputs = tf.keras.Input(shape=(), dtype=tf.dtypes.string) ->>> d1 = tf.keras.layers.Dense(10)(control_input) ->>> d2 = tf.keras.layers.Dense(3)(d1) ->>> expectation = tfq.layers.SampledExpectation()( -... circuit_inputs, # See note below! -... symbol_names=symbols, -... symbol_values=d2, -... operators=cirq.Z(bit), -... 
repetitions=5000) ->>> data_in = np.array([[1], [0]], dtype=np.float32) ->>> data_out = np.array([[1], [-1]], dtype=np.float32) ->>> model = tf.keras.Model( -... inputs=[circuit_inputs, control_input], outputs=expectation) ->>> model.compile( -... optimizer=tf.keras.optimizers.Adam(learning_rate=0.01), -... loss=tf.keras.losses.mean_squared_error) ->>> history = model.fit( -... x=[tfq.convert_to_tensor([circuit] * 2), data_in], -... y=data_out, -... epochs=100) - -``` - - -For an example featuring this layer, please check out `Taking gradients` -in our dev website http://www.tensorflow.org/quantum/tutorials. - -Lastly `symbol_values`, `operators` and circuit `inputs` can all be fed -Python `list` objects. In addition to this they can also be fed `tf.Tensor` -inputs, meaning that you can input all of these things from other Tensor -objects (like `tf.keras.Dense` layer outputs or `tf.keras.Input`s etc). - -Note: When specifying a new layer for a *compiled* `tf.keras.Model` using -something like `tfq.layers.SampledExpectation()(cirq.Circuit(...), ...)` -please be sure to instead use -`tfq.layers.SampledExpectation()(circuit_input_tensor, ...)` where -`circuit_input_tensor` is filled with -`tfq.conver_to_tensor([cirq.Circuit(..)] * batch_size)` at runtime. This -is because compiled keras models require layer `call` inputs to be -traceable back to a `tf.keras.Input`. - -

__init__

- -View source - -``` python -__init__( - backend=None, - differentiator=None, - **kwargs -) -``` - -Instantiate this Layer. - -Create a layer that will output expectation values gained from -simulating a quantum circuit. - -#### Args: - - -* `backend`: Optional Backend to use to simulate states. Defaults to - the native TensorFlow simulator (None), however users may also - specify a preconfigured cirq simulation object to use instead, - which must inherit `cirq.SimulatesFinalState`. -* `differentiator`: Optional Differentiator to use to calculate analytic - derivative values of given operators_to_measure and circuit, - which must inherit tfq.differentiators.Differentiator. - Defaults to None, which uses `parameter_shift.ParameterShift()`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and it's submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support. - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An input shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - number of the dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/layers/State.md b/docs/api_docs/python/tfq/layers/State.md deleted file mode 100644 index 408a96a61..000000000 --- a/docs/api_docs/python/tfq/layers/State.md +++ /dev/null @@ -1,928 +0,0 @@ -
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- -# tfq.layers.State - - - - - -
- - - View source on GitHub - -
- - - -## Class `State` - -A Layer that simulates a quantum state. - - - - - -Given an input circuit and set of parameter values, Simulate a quantum state -and output it to the Tensorflow graph. - - -A more common application is for determining the set of states produced -by a parametrized circuit where the values of the parameters vary. Suppose -we want to generate a family of states with varying degrees of entanglement -ranging from separable to maximally entangled. We first define a -parametrized circuit that can accomplish this - -``` ->>> q0, q1 = cirq.GridQubit.rect(1, 2) ->>> alpha = sympy.Symbol('alpha') # degree of entanglement between q0, q1 ->>> parametrized_bell_circuit = cirq.Circuit( -... cirq.H(q0), cirq.CNOT(q0, q1) ** alpha) -``` - -Now pass all of the alpha values desired to tfq.layers.State to compute -a tensor of states corresponding to these preparation angles. - -``` ->>> state_layer = tfq.layers.State() ->>> alphas = tf.reshape(tf.range(0, 1.1, delta=0.5), (3, 1)) # FIXME: #805 ->>> state_layer(parametrized_bell_circuit, -... symbol_names=[alpha], symbol_values=alphas) #FIXME: cl/285479498 - -``` - - -This use case can be simplified to compute the wavefunction produced by a -fixed circuit where the values of the parameters vary. For example, this -layer produces a Bell state. - -``` ->>> q0, q1 = cirq.GridQubit.rect(1, 2) ->>> bell_circuit = cirq.Circuit(cirq.H(q0), cirq.CNOT(q0, q1)) ->>> state_layer = tfq.layers.State() ->>> state_layer(bell_circuit) #FIXME: cl/285479498 - -``` - -Not specifying `symbol_names` or `symbol_values` indicates that the -circuit(s) does not contain any `sympy.Symbols` inside of it and tfq won't -look for any symbols to resolve. - - -tfq.layers.State also allows for a more complicated input signature -wherein a different (possibly parametrized) circuit is used to prepare -a state for each batch of input parameters. 
This might be useful when -the State layer is being used to generate entirely different families -of states. Suppose we want to generate a stream of states that are -either computational basis states or 'diagonal' basis states (as in the -BB84 QKD protocol). The circuits to prepare these states are: - -``` ->>> q0 = cirq.GridQubit(0, 0) ->>> bitval = sympy.Symbol('bitval') ->>> computational_circuit = cirq.Circuit(cirq.X(q0) ** bitval) ->>> diagonal_circuit = cirq.Circuit(cirq.X(q0) ** bitval, cirq.H(q0)) -``` - -Now a stream of random classical bit values can be encoded into one of -these bases by preparing a state layer and passing in the bit values -accompanied by their preparation circuits - -``` ->>> qkd_layer = tfq.layers.State() ->>> bits = [[1], [1], [0], [0]] ->>> states_to_send = [computational_circuit, -... diagonal_circuit, -... diagonal_circuit, -... computational_circuit] ->>> qkd_states = qkd_layer( -... states_to_send, symbol_names=[bitval], symbol_values=bits) ->>> # The third state was a '0' prepared in the diagonal basis: ->>> qkd_states #FIXME: cl/285479498 - -``` - -

__init__

- -View source - -``` python -__init__( - backend=None, - **kwargs -) -``` - -Instantiate a State Layer. - -Create a layer that will simulate a quantum state and output it into -the TensorFlow graph given a correct set of inputs. - -#### Args: - - -* `backend`: Optional Backend to use to simulate this state. Defaults - to the native TensorFlow Quantum state vector simulator, - however users may also specify a preconfigured cirq execution - object to use instead, which must inherit - `cirq.SimulatesFinalState`. Note that C++ Density Matrix - simulation is not yet supported so to do Density Matrix - simulation please use `cirq.DensityMatrixSimulator`. - - - -## Properties - -

activity_regularizer

- -Optional regularizer function for the output of this layer. - - -

dtype

- - - - -

dynamic

- - - - -

input

- -Retrieves the input tensor(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input tensor or list of input tensors. - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. -* `AttributeError`: If no inbound nodes are found. - -

input_mask

- -Retrieves the input mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Input mask tensor (potentially None) or list of input -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

input_shape

- -Retrieves the input shape(s) of a layer. - -Only applicable if the layer has exactly one input, -i.e. if it is connected to one incoming layer, or if all inputs -have the same shape. - -#### Returns: - -Input shape, as an integer shape tuple -(or list of shape tuples, one tuple per input tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined input_shape. -* `RuntimeError`: if called in Eager mode. - -

input_spec

- - - - -

losses

- -Losses which are associated with this `Layer`. - -Variable regularization tensors are created when this property is accessed, -so it is eager safe: accessing `losses` under a `tf.GradientTape` will -propagate gradients back to the corresponding variables. - -#### Returns: - -A list of tensors. - - -

metrics

- - - - -

name

- -Returns the name of this module as passed or determined in the ctor. - -NOTE: This is not the same as the `self.name_scope.name` which includes -parent module names. - -

name_scope

- -Returns a `tf.name_scope` instance for this class. - - -

non_trainable_variables

- - - - -

non_trainable_weights

- - - - -

output

- -Retrieves the output tensor(s) of a layer. - -Only applicable if the layer has exactly one output, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output tensor or list of output tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to more than one incoming - layers. -* `RuntimeError`: if called in Eager mode. - -

output_mask

- -Retrieves the output mask tensor(s) of a layer. - -Only applicable if the layer has exactly one inbound node, -i.e. if it is connected to one incoming layer. - -#### Returns: - -Output mask tensor (potentially None) or list of output -mask tensors. - - - -#### Raises: - - -* `AttributeError`: if the layer is connected to -more than one incoming layers. - -

output_shape

- -Retrieves the output shape(s) of a layer. - -Only applicable if the layer has one output, -or if all outputs have the same shape. - -#### Returns: - -Output shape, as an integer shape tuple -(or list of shape tuples, one tuple per output tensor). - - - -#### Raises: - - -* `AttributeError`: if the layer has no defined output shape. -* `RuntimeError`: if called in Eager mode. - -

submodules

- -Sequence of all sub-modules. - -Submodules are modules which are properties of this module, or found as -properties of modules which are properties of this module (and so on). - -``` -a = tf.Module() -b = tf.Module() -c = tf.Module() -a.b = b -b.c = c -assert list(a.submodules) == [b, c] -assert list(b.submodules) == [c] -assert list(c.submodules) == [] -``` - -#### Returns: - -A sequence of all submodules. - - -

trainable

- - - - -

trainable_variables

- -Sequence of variables owned by this module and its submodules. - -Note: this method uses reflection to find variables on the current instance -and submodules. For performance reasons you may wish to cache the result -of calling this method if you don't expect the return value to change. - -#### Returns: - -A sequence of variables for the current module (sorted by attribute -name) followed by variables from all submodules recursively (breadth -first). - -

trainable_weights

- - - - -

updates

- - - - -

variables

- -Returns the list of all layer variables/weights. - -Alias of `self.weights`. - -#### Returns: - -A list of variables. - - -

weights

- -Returns the list of all layer variables/weights. - - -#### Returns: - -A list of variables. - - - - -## Methods - -

__call__

- -``` python -__call__( - inputs, - *args, - **kwargs -) -``` - -Wraps `call`, applying pre- and post-processing steps. - - -#### Arguments: - - -* `inputs`: input tensor(s). -* `*args`: additional positional arguments to be passed to `self.call`. -* `**kwargs`: additional keyword arguments to be passed to `self.call`. - - -#### Returns: - -Output tensor(s). - - - -#### Note: - -- The following optional keyword arguments are reserved for specific uses: - * `training`: Boolean scalar tensor of Python boolean indicating - whether the `call` is meant for training or inference. - * `mask`: Boolean input mask. -- If the layer's `call` method takes a `mask` argument (as some Keras - layers do), its default value will be set to the mask generated - for `inputs` by the previous layer (if `input` did come from - a layer that generated a corresponding mask, i.e. if it came from - a Keras layer with masking support). - - - -#### Raises: - - -* `ValueError`: if the layer's `call` method returns None (an invalid value). - -

build

- -``` python -build(input_shape) -``` - -Creates the variables of the layer (optional, for subclass implementers). - -This is a method that implementers of subclasses of `Layer` or `Model` -can override if they need a state-creation step in-between -layer instantiation and layer call. - -This is typically used to create the weights of `Layer` subclasses. - -#### Arguments: - - -* `input_shape`: Instance of `TensorShape`, or list of instances of - `TensorShape` if the layer expects a list of inputs - (one instance per input). - -

compute_mask

- -``` python -compute_mask( - inputs, - mask=None -) -``` - -Computes an output mask tensor. - - -#### Arguments: - - -* `inputs`: Tensor or list of tensors. -* `mask`: Tensor or list of tensors. - - -#### Returns: - -None or a tensor (or list of tensors, - one per output tensor of the layer). - - -

compute_output_shape

- -``` python -compute_output_shape(input_shape) -``` - -Computes the output shape of the layer. - -If the layer has not been built, this method will call `build` on the -layer. This assumes that the layer will later be used with inputs that -match the input shape provided here. - -#### Arguments: - - -* `input_shape`: Shape tuple (tuple of integers) - or list of shape tuples (one per output tensor of the layer). - Shape tuples can include None for free dimensions, - instead of an integer. - - -#### Returns: - -An output shape tuple. - - -

count_params

- -``` python -count_params() -``` - -Count the total number of scalars composing the weights. - - -#### Returns: - -An integer count. - - - -#### Raises: - - -* `ValueError`: if the layer isn't yet built - (in which case its weights aren't yet defined). - -

from_config

- -``` python -@classmethod -from_config( - cls, - config -) -``` - -Creates a layer from its config. - -This method is the reverse of `get_config`, -capable of instantiating the same layer from the config -dictionary. It does not handle layer connectivity -(handled by Network), nor weights (handled by `set_weights`). - -#### Arguments: - - -* `config`: A Python dictionary, typically the - output of get_config. - - -#### Returns: - -A layer instance. - - -

get_config

- -``` python -get_config() -``` - -Returns the config of the layer. - -A layer config is a Python dictionary (serializable) -containing the configuration of a layer. -The same layer can be reinstantiated later -(without its trained weights) from this configuration. - -The config of a layer does not include connectivity -information, nor the layer class name. These are handled -by `Network` (one layer of abstraction above). - -#### Returns: - -Python dictionary. - - -

get_input_at

- -``` python -get_input_at(node_index) -``` - -Retrieves the input tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_input_mask_at

- -``` python -get_input_mask_at(node_index) -``` - -Retrieves the input mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple inputs). - - -

get_input_shape_at

- -``` python -get_input_shape_at(node_index) -``` - -Retrieves the input shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple inputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_losses_for

- -``` python -get_losses_for(inputs) -``` - -Retrieves losses relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of loss tensors of the layer that depend on `inputs`. - - -

get_output_at

- -``` python -get_output_at(node_index) -``` - -Retrieves the output tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A tensor (or list of tensors if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_output_mask_at

- -``` python -get_output_mask_at(node_index) -``` - -Retrieves the output mask tensor(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A mask tensor -(or list of tensors if the layer has multiple outputs). - - -

get_output_shape_at

- -``` python -get_output_shape_at(node_index) -``` - -Retrieves the output shape(s) of a layer at a given node. - - -#### Arguments: - - -* `node_index`: Integer, index of the node - from which to retrieve the attribute. - E.g. `node_index=0` will correspond to the - first time the layer was called. - - -#### Returns: - -A shape tuple -(or list of shape tuples if the layer has multiple outputs). - - - -#### Raises: - - -* `RuntimeError`: If called in Eager mode. - -

get_updates_for

- -``` python -get_updates_for(inputs) -``` - -Retrieves updates relevant to a specific set of inputs. - - -#### Arguments: - - -* `inputs`: Input tensor or list/tuple of input tensors. - - -#### Returns: - -List of update ops of the layer that depend on `inputs`. - - -

get_weights

- -``` python -get_weights() -``` - -Returns the current weights of the layer. - - -#### Returns: - -Weights values as a list of numpy arrays. - - -

set_weights

- -``` python -set_weights(weights) -``` - -Sets the weights of the layer, from Numpy arrays. - - -#### Arguments: - - -* `weights`: a list of Numpy arrays. The number - of arrays and their shape must match - the number of dimensions of the weights - of the layer (i.e. it should match the - output of `get_weights`). - - -#### Raises: - - -* `ValueError`: If the provided weights list does not match the - layer's specifications. - -

with_name_scope

- -``` python -@classmethod -with_name_scope( - cls, - method -) -``` - -Decorator to automatically enter the module name scope. - -``` -class MyModule(tf.Module): - @tf.Module.with_name_scope - def __call__(self, x): - if not hasattr(self, 'w'): - self.w = tf.Variable(tf.random.normal([x.shape[1], 64])) - return tf.matmul(x, self.w) -``` - -Using the above module would produce `tf.Variable`s and `tf.Tensor`s whose -names included the module name: - -``` -mod = MyModule() -mod(tf.ones([8, 32])) -# ==> -mod.w -# ==> -``` - -#### Args: - - -* `method`: The method to wrap. - - -#### Returns: - -The original method wrapped such that it enters the module's name scope. - - - - diff --git a/docs/api_docs/python/tfq/padded_to_ragged.md b/docs/api_docs/python/tfq/padded_to_ragged.md deleted file mode 100644 index 67d6c763c..000000000 --- a/docs/api_docs/python/tfq/padded_to_ragged.md +++ /dev/null @@ -1,42 +0,0 @@ -
- - -
- -# tfq.padded_to_ragged - - - - - -
- - - View source on GitHub - -
- - - -Utility `tf.function` that converts a padded tensor to ragged. - -``` python -tfq.padded_to_ragged(masked_state) -``` - - - - - -Convert a state `tf.Tensor` padded with the value -2 to a `tf.RaggedTensor` -using efficient boolean masking. - -#### Args: - - -* `masked_state`: `tf.State` tensor with -2 padding. - -#### Returns: - - -* `state_ragged`: State tensor without padding as a `tf.RaggedTensor`. \ No newline at end of file diff --git a/docs/design.md b/docs/design.md deleted file mode 100644 index 1e0dfd918..000000000 --- a/docs/design.md +++ /dev/null @@ -1,175 +0,0 @@ -# TensorFlow Quantum design and concepts - -In October 2019, -Google announced -they achieved -quantum supremacy. -Using 53 *noisy* qubits, this demonstration was a critical first step to unlock -the full potential of quantum computing and marks the beginning of the -Noisy Intermediate-Scale Quantum (NISQ) -computing era. In the coming years, quantum devices with tens-to-hundreds of -noisy qubits are expected to become a reality. So what is possible with these -devices? - -There are many ideas for leveraging NISQ quantum computing including -optimization, quantum simulation, cryptography, and machine learning. -TensorFlow Quantum (TFQ) is designed to help researchers experiment -with these ideas. Researchers create and run *quantum circuits*. It integrates -with TensorFlow, an established machine learning framework used for research and -production. TensorFlow Quantum provides flexible and performant tools and -constructs used by quantum machine learning researchers. TensorFlow Quantum -hopes to bridge the quantum and classical machine learning communities—and -enrich both with new perspectives and ideas. - -## NISQ quantum machine learning - -During the NISQ-era, quantum algorithms with known speedups over classical -algorithms—like -Shor's factoring algorithm or -Grover's search algorithm—are -not yet possible at a meaningful scale. 
- -A goal of TensorFlow Quantum is to help discover algorithms for the -NISQ-era, with particular interest in: - -1. *Use classical machine learning to enhance NISQ algorithms.* The hope is that - techniques from classical machine learning can enhance our understanding of - quantum computing. For example, - this paper - shows a recurrent neural network (RNN) used to discover that optimization of - the control parameters for algorithms like the QAOA and VQE are more efficient - than simple off the shelf optimizers. And - this paper - uses reinforcement learning to help mitigate errors and produce higher - quality quantum gates. -2. *Model quantum data with quantum circuits.* Classically modeling quantum data - is possible if you have an exact description of the datasource—but sometimes - this isn’t possible. To solve this problem, you can try modeling on the - quantum computer itself and measure/observe the important statistics. - This paper - shows a quantum circuit designed with a structure analogous to a - convolutional neural network (CNN) to detect different topological phases of - matter. The quantum computer holds the data and the model. The classical - processor sees only measurement samples from the model output and never the - data itself. In - this paper - the authors learn to compress information about quantum many-body systems - using a DMERA model. - -Other areas of interest in quantum machine learning include: - -1. Modeling purely classical data on quantum computers. -2. Quantum-inspired classical algorithms. TFQ does not contain any purely - classical algorithms that are quantum-inspired. - -While these last two areas did not inform the design of TensorFlow Quantum, -you can still use TFQ for research here. For example, in -this paper -the authors use a quantum computer to solve some purely classical data problems— -which could be implemented in TFQ. 
- - -## Software components - -TensorFlow Quantum is designed for the problems of NISQ-era quantum machine -learning. Integration with [TensorFlow](https://www.tensorflow.org/overview) and -[Keras](https://www.tensorflow.org/guide/keras/overview) is seamless and -performant. And the `tfq.datasets` module allows researchers to experiment and -converse about new and interesting quantum datasets. - -### Primitives - -TensorFlow Quantum implements the components needed to integrate TensorFlow with -quantum computing hardware. To that end, TFQ introduces two datatype primitives: - -- *Quantum circuit*: This represents - Cirq-defined - quantum circuits (`cirq.Circuit`) within TensorFlow. Create batches of - circuits of varying size, similar to batches of different real-valued - datapoints. -- *Pauli sum*: Represent linear combinations of tensor products of Pauli - operators defined in Cirq (`cirq.PauliSum`). Like circuits, create batches of - operators of varying size. - -With these primitives, TFQ can build the functionality to merge quantum -computing with TensorFlow. - -### Fundamental ops - -Using the quantum circuit primitives within a `tf.Tensor`, TensorFlow Quantum -implements ops that process these circuits and produce meaningful outputs. - -The TensorFlow ops are written in optimized C++. These ops sample from -circuits, calculate expectation values, and output the state produced by the -given circuits. Writing ops that are flexible and performant has some -challenges: - -1. Circuits are not the same size. For simulated circuits, you are unable to - create static operations (like `tf.matmul` or `tf.add`) and then substitute - different numbers for circuits of different sizes. These ops must allow for - dynamic sizes that the statically sized TensorFlow compute graph doesn't - allow. -2. Quantum data can induce a different circuit structure altogether. This is - another reason to support dynamic sizes in the TFQ ops. 
Quantum data can - represent a structural change to the underlying quantum state that is - represented by modifications to the original circuit. As new datapoints are - swapped in and out at runtime, the TensorFlow compute graph can not be - modified after it is built, so support for these varying structures is - required. -3. `cirq.Circuits` are similar to compute graphs in that they are a series of - operations—and some might contain symbols/placeholders. It is important to - make this as compatible with TensorFlow as possible. - -For performance reasons, Eigen (the C++ library used in many TensorFlow ops) is -not well suited for quantum circuit simulation. Instead, the circuit simulators -used in the quantum supremacy experiment were used as verifiers and extended for -the foundation of TFQ ops (all written with AVX2 and SSE instructions). Ops with -identical functional signatures were created that use a physical quantum -computer. Switching between a simulated and physical quantum computer is as easy -as changing a single line of code. These ops are located in the -`circuit_execution_ops.py` in `tensorflow_quantum/core/ops/`. - -### Layers - -TensorFlow Quantum layers expose sampling, expectation, and state calculation to -developers using the `tf.keras.layers.Layer` interface. It's convenient to -create a circuit layer for classical control parameters or for readout -operations. Additionally, you can create a layer with a high degree of -complexity supporting batch circuit, batch control parameter value, and perform -batch readout operations. See `tfq.layers.Sample` for an example. - -### Differentiators - -Unlike many TensorFlow operations, observables in quantum circuits do not have -formulas for gradients that are relatively easy to calculate. This is because a -classical computer can only read samples from the circuits that are run on a -quantum computer. 
- -To solve this problem, the `tfq.differentiators` module provides several -standard differentiation techniques. Users can also define their own method to -compute gradients—in both the “real world” setting of sample-based expectation -calculation, and the analytic exact world. Methods like finite difference are -often the fastest (wall clock time) in an analytic/exact environment. While -slower (wall clock time), more practical methods like -parameter shift or -stochastic methods -are often more effective. A `tfq.differentiators.Differentiator` is instantiated -and attached to an existing op with `generate_differentiable_op`, or passed to -the constructor of `tfq.layers.Expectation` or `tfq.layers.SampledExpectation`. -To implement a custom differentiator, inherit from the -`tfq.differentiators.Differentiator` class. To define a gradient operation for -sampling or state vector calculation, use `tf.custom_gradient`. - -### Datasets - -As the field of quantum computing grows, more and more quantum data and model -combinations will arise, making structured comparison more difficult. The -`tfq.datasets` module is used as the data source for quantum machine learning -tasks. It ensures structured comparisons for the model and performance. - -It is hoped that with large community contributions, the `tfq.datasets` module -will grow to enable research that is more transparent and reproducible. -Carefully curated problems in: quantum control, fermionic simulation, -classification near phase transitions, quantum sensing, etc are all great -candidates for addition to `tfq.datasets`. To propose a new dataset open -a GitHub issue. 
diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index 6bf6eded6..000000000 --- a/docs/install.md +++ /dev/null @@ -1,194 +0,0 @@ -# Install TensorFlow Quantum - -There are a few ways to set up your environment to use TensorFlow Quantum (TFQ): - -* The easiest way to learn and use TFQ requires no installation—run the - [TensorFlow Quantum tutorials](./tutorials/hello_many_worlds.ipynb) directly - in your browser using - [Google Colab](https://colab.research.google.com/github/tensorflow/quantum/blob/master/docs/tutorials/hello_many_worlds.ipynb). -* To use TensorFlow Quantum on a local machine, install the TFQ package using - Python's pip package manager. -* Or build TensorFlow Quantum from source. - -## Pip package - -### Requirements - -* pip 19.0 or later (requires `manylinux2010` support) -* [TensorFlow 2.1](https://www.tensorflow.org/install/pip) -* [Cirq 0.6](https://cirq.readthedocs.io/en/stable/install.html) - -See the [TensorFlow install guide](https://www.tensorflow.org/install/pip) to -set up your Python development environment and an (optional) virtual environment. - -Upgrade `pip` and install TensorFlow and Cirq (these are not included as -dependencies): - - -
-  pip3 install --upgrade pip
-  pip3 install tensorflow==2.1.0
-  pip3 install cirq==0.7.0
-
- - -### Install the package - -Install the latest stable release of TensorFlow Quantum: - - -
-  pip3 install -U tensorflow-quantum
-
- - -Success: TensorFlow Quantum is now installed. - - -## Build from source - -The following steps are tested for Ubuntu-like systems. - -### 1. Set up a Python 3 development environment - - -
-  sudo apt update
-  sudo apt-get install pkg-config zip g++ zlib1g-dev unzip python3
-  sudo apt install python3 python3-dev python3-venv python3-pip
-  python3 -m pip install --upgrade pip
-
- - -### 2. Create a virtual environment - - -
-  python3 -m venv tfq_env
-  source tfq_env/bin/activate
-
- - -### 3. Install Bazel - -See the TensorFlow -[build from source](https://www.tensorflow.org/install/source#install_bazel) -guide to install the Bazel -build system. - -To ensure compatibility with TensorFlow, `bazel` version 0.26.1 or lower is -required. To remove any existing version of Bazel: - - -
-  sudo apt-get remove bazel
-
- - -Then install Bazel version 0.26.0: - - -
-  wget https://github.com/bazelbuild/bazel/releases/download/0.26.0/bazel_0.26.0-linux-x86_64.deb
-  sudo dpkg -i bazel_0.26.0-linux-x86_64.deb
-
- - - -### 4. Build TensorFlow from source - -Read the TensorFlow [build from source](https://www.tensorflow.org/install/source) -guide for details. TensorFlow Quantum is compatible with TensorFlow version 2.1. - -Download the -TensorFlow source code: - - -
-  git clone https://github.com/tensorflow/tensorflow.git
-  cd tensorflow
-  git checkout v2.1.0
-
- -Install the TensorFlow dependencies: - - -
-  python3 -m pip install -U pip six numpy wheel setuptools mock 'future>=0.17.1'
-  python3 -m pip install -U keras_applications --no-deps
-  python3 -m pip install -U keras_preprocessing --no-deps
-
- - -Configure the TensorFlow build. The default Python location and Python library -paths should point inside the virtual environment. The default options are -recommended: - - -
-  ./configure
-
- - -Verify that your Bazel version is correct: - - -
-  bazel version
-
- - -Build the TensorFlow package: - - -
-  bazel build -c opt --cxxopt="-O3" --cxxopt="-march=native" --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" //tensorflow/tools/pip_package:build_pip_package
-
- - -Note: It may take over an hour to build the package. - -After the build is complete, install the package: - - -
-  ./bazel-bin/tensorflow/tools/pip_package/build_pip_package /tmp/tensorflow_pkg
-  pip install /tmp/tensorflow_pkg/name_of_generated_wheel.whl
-
- - -### 5. Download TensorFlow Quantum - -Download the TensorFlow Quantum source code and install the requirements: - - -
-  cd ..
-  git clone https://github.com/tensorflow/quantum.git
-  cd quantum
-  python3 -m pip install -r requirements.txt
-
- - -Verify your Bazel version (since it can auto-update): - - -
-  bazel version
-
- - -### 6. Build the TensorFlow Quantum pip package - -Build the TensorFlow Quantum pip package and install: - - -
-  ./configure.sh
-  bazel build -c opt --cxxopt="-O3" --cxxopt="-march=native" --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0" release:build_pip_package
-  bazel-bin/release/build_pip_package /tmp/tfquantum/
-  python3 -m pip install /tmp/tfquantum/name_of_generated_wheel.whl
-
- - -Success: TensorFlow Quantum is now installed. diff --git a/docs/overview.md b/docs/overview.md deleted file mode 100644 index 66a67fb59..000000000 --- a/docs/overview.md +++ /dev/null @@ -1,56 +0,0 @@ -# TensorFlow Quantum - -TensorFlow Quantum (TFQ) is a Python framework for hybrid quantum-classical -machine learning. As an application framework, TFQ allows quantum algorithm -researchers and ML application researchers to leverage Google’s quantum -computing frameworks, all from within TensorFlow. - -TensorFlow Quantum focuses on modeling quantum data. It provides tools to -interleave quantum algorithms and logic designed in -Cirq with -TensorFlow. A basic understanding of quantum computing is required to -effectively use TensorFlow Quantum. - -After Google's -quantum supremacy -milestone, the -Google Quantum AI team -is focused on developing and implementing new algorithms to run on a quantum -computer—that have -real world applications. - -To get started with TensorFlow Quantum, see the [install guide](install.md) and -read through some of the runnable -[notebook tutorials](./tutorials/hello_many_worlds.ipynb). - -## Design - -TensorFlow Quantum implements the components needed to smoothly integrate -TensorFlow with quantum computing hardware. To that end, TensorFlow Quantum -introduces two datatype primitives: - -- *Quantum circuit*: This represents Cirq-defined quantum circuits within - TensorFlow. Create batches of circuits of varying size, similar to batches of - different real-valued datapoints. -- *Pauli sum*: Represent linear combinations of tensor products of Pauli - operators defined in Cirq. Like circuits, create batches of operators of - varying size. - -Using these primitives to represent quantum circuits, TensorFlow Quantum -provides the following operations to developers: - -- Sample from output distributions of batches of circuits. -- Calculate the expectation value of batches of Pauli sums on batches of - circuits. 
TFQ implements backpropagation-compatible gradient calculation. -- Simulate batches of circuits and states. While inspecting all quantum state - amplitudes directly throughout a quantum circuit is inefficient at scale in - the real world, state simulation can help researchers understand how a quantum - circuit maps states to a near exact level of precision. - -For more details about TFQ design choices and implementation, read the -[Design and concepts](design.md) guide. - -## Report issues - -Report bugs or feature requests using the -TensorFlow Quantum issue tracker. diff --git a/docs/tutorials/barren_plateaus.ipynb b/docs/tutorials/barren_plateaus.ipynb deleted file mode 100644 index 72f24a3be..000000000 --- a/docs/tutorials/barren_plateaus.ipynb +++ /dev/null @@ -1,547 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "barren_plateaus.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Barren plateaus" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "DyEcfFapraq6", - "colab_type": "text" - }, - "source": [ - "In this example you will explore the result of McClean, 2019 that says not just any quantum neural network structure will do well when it comes to learning. In particular you will see that a certain large family of random quantum circuits do not serve as good quantum neural networks, because they have gradients that vanish almost everywhere. In this example you won't be training any models for a specific learning problem, but instead focusing on the simpler problem of understanding the behaviors of gradients." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "zB_Xw0Y9rVNi", - "colab_type": "text" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "1PaclXeSrrMW", - "colab_type": "text" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit\n", - "\n", - "np.random.seed(1234)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. Summary\n", - "\n", - "The following images are from McClean, 2019.\n", - "\n", - "Random quantum circuits with many blocks that look like this ($R_{P}(\\theta)$ is a random Pauli rotation):
\n", - "\n", - "\n", - "Where the $f(x)$ is the expectation value w.r.t. $Z_{a}Z_{b}$ for any qubits $a$ and $b$:
\n", - "\n", - "\n", - "Has the problem that $f'(x)$ has a mean very close to 0 and does not vary much. You will see this below:" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "y31qSRCczI-L" - }, - "source": [ - "## 2. Generating random circuits\n", - "\n", - "The construction from the paper is straightforward to follow. The following implements a simple function that generates a random quantum circuit—sometimes referred to as a *quantum neural network* (QNN)—with the given depth on a set of qubits:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Nh9vrgPBks7O", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def generate_random_qnn(qubits, symbol, depth):\n", - " \"\"\"Generate random QNN's with the same structure from McClean et al.\"\"\"\n", - " circuit = cirq.Circuit()\n", - " for qubit in qubits:\n", - " circuit += cirq.Ry(np.pi / 4.0)(qubit)\n", - "\n", - " for d in range(depth):\n", - " # Add a series of single qubit rotations.\n", - " for i, qubit in enumerate(qubits):\n", - " random_n = np.random.uniform()\n", - " random_rot = np.random.uniform(\n", - " ) * 2.0 * np.pi if i != 0 or d != 0 else symbol\n", - " if random_n > 2. / 3.:\n", - " # Add a Z.\n", - " circuit += cirq.Rz(random_rot)(qubit)\n", - " elif random_n > 1. / 3.:\n", - " # Add a Y.\n", - " circuit += cirq.Ry(random_rot)(qubit)\n", - " else:\n", - " # Add a X.\n", - " circuit += cirq.Rx(random_rot)(qubit)\n", - "\n", - " # Add CZ ladder.\n", - " for src, dest in zip(qubits, qubits[1:]):\n", - " circuit += cirq.CZ(src, dest)\n", - "\n", - " return circuit\n", - "\n", - "\n", - "generate_random_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "gUuQfOyrj_Hu", - "colab_type": "text" - }, - "source": [ - "The authors investigate the gradient of a single parameter $\\theta_{1,1}$. 
Let's follow along by placing a `sympy.Symbol` in the circuit where $\\theta_{1,1}$ would be. Since the authors do not analyze the statistics for any other symbols in the circuit, let's replace them with random values now instead of later." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lAVDRQ87k3md", - "colab_type": "text" - }, - "source": [ - "## 3. Running the circuits\n", - "\n", - "Generate a few of these circuits along with an observable to test the claim that the gradients don't vary much. First, generate a batch of random circuits. Choose a random *ZZ* observable and batch calculate the gradients and variance using TensorFlow Quantum." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qoDDaHgwj_Hz", - "colab_type": "text" - }, - "source": [ - "### 3.1 Batch variance computation\n", - "\n", - "Let's write a helper function that computes the variance of the gradient of a given observable over a batch of circuits:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "OkdndnBKk8B8", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def process_batch(circuits, symbol, op):\n", - " \"\"\"Compute the variance of a batch of expectations w.r.t. op on each circuit that \n", - " contains `symbol`. 
Note that this method sets up a new compute graph every time it is\n", - " called so it isn't as performant as possible.\"\"\"\n", - "\n", - " # Setup a simple layer to batch compute the expectation gradients.\n", - " expectation = tfq.layers.Expectation()\n", - " \n", - " # Prep the inputs as tensors\n", - " circuit_tensor = tfq.convert_to_tensor(circuits)\n", - " values_tensor = tf.convert_to_tensor(\n", - " np.random.uniform(0, 2 * np.pi, (n_circuits, 1)).astype(np.float32))\n", - "\n", - " # Use TensorFlow GradientTape to track gradients.\n", - " with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " forward = expectation(\n", - " circuit_tensor,\n", - " operators=op,\n", - " symbol_names=[symbol],\n", - " symbol_values=values_tensor)\n", - "\n", - " # Return variance of gradients across all circuits.\n", - " grads = g.gradient(forward, values_tensor)\n", - " grad_var = tf.math.reduce_std(grads, axis=0)\n", - " return grad_var.numpy()[0]" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "JINYTIjDj_H1", - "colab_type": "text" - }, - "source": [ - "### 3.1 Set up and run\n", - "\n", - "Choose the number of random circuits to generate along with their depth and the amount of qubits they should act on. Then plot the results." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "xAGBcq9Bj_H3", - "colab_type": "code", - "colab": {} - }, - "source": [ - "n_qubits = [2 * i for i in range(2, 7)\n", - " ] # Ranges studied in paper are between 2 and 24.\n", - "depth = 50 # Ranges studied in paper are between 50 and 500.\n", - "n_circuits = 200\n", - "theta_var = []\n", - "\n", - "for n in n_qubits:\n", - " # Generate the random circuits and observable for the given n.\n", - " qubits = cirq.GridQubit.rect(1, n)\n", - " symbol = sympy.Symbol('theta')\n", - " circuits = [\n", - " generate_random_qnn(qubits, symbol, depth) for _ in range(n_circuits)\n", - " ]\n", - " op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])\n", - " theta_var.append(process_batch(circuits, symbol, op))\n", - "\n", - "plt.semilogy(n_qubits, theta_var)\n", - "plt.title('Gradient Variance in QNNs')\n", - "plt.xlabel('n_qubits')\n", - "plt.ylabel('$\\\\partial \\\\theta$ variance')\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "qY2E0CFjxRE9", - "colab_type": "text" - }, - "source": [ - "This plot shows that for quantum machine learning problems, you can't simply guess a random QNN ansatz and hope for the best. Some structure must be present in the model circuit in order for gradients to vary to the point where learning can happen." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "4RE_idhmj_H6", - "colab_type": "text" - }, - "source": [ - "## 4. Heuristics\n", - "\n", - "An interesting heuristic by Grant, 2019 allows one to start very close to random, but not quite. Using the same circuits as McClean et al., the authors propose a different initialization technique for the classical control parameters to avoid barren plateaus. 
The initialization technique starts some layers with totally random control parameters—but, in the layers immediately following, choose parameters such that the initial transformation made by the first few layers is undone. The authors call this an *identity block*.\n", - "\n", - "The advantage of this heuristic is that by changing just a single parameter, all other blocks outside of the current block will remain the identity—and the gradient signal comes through much stronger than before. This allows the user to pick and choose which variables and blocks to modify to get a strong gradient signal. This heuristic does not prevent the user from falling in to a barren plateau during the training phase (and restricts a fully simultaneous update), it just guarantees that you can start outside of a plateau." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Fofv9hgyj_IB", - "colab_type": "text" - }, - "source": [ - "### 4.1 New QNN construction\n", - "\n", - "Now construct a function to generate identity block QNNs. This implementation is slightly different than the one from the paper. For now, look at the behavior of the gradient of a single parameter so it is consistent with McClean et al, so some simplifications can be made.\n", - "\n", - "To generate an identity block and train the model, generally you need $U1(\\theta_{1a}) U1(\\theta_{1b})^{\\dagger}$ and not $U1(\\theta_1) U1(\\theta_1)^{\\dagger}$. Initially $\\theta_{1a}$ and $\\theta_{1b}$ are the same angles but they are learned independently. Otherwise, you will always get the identity even after training. The choice for the number of identity blocks is empirical. The deeper the block, the smaller the variance in the middle of the block. But at the start and end of the block, the variance of the parameter gradients should be large. 
" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "PL7mTHEVj_IC", - "colab_type": "code", - "colab": {} - }, - "source": [ - "def generate_identity_qnn(qubits, symbol, block_depth, total_depth):\n", - " \"\"\"Generate random QNN's with the same structure from Grant et al.\"\"\"\n", - " circuit = cirq.Circuit()\n", - "\n", - " # Generate initial block with symbol.\n", - " prep_and_U = generate_random_qnn(qubits, symbol, block_depth)\n", - " circuit += prep_and_U\n", - "\n", - " # Generate dagger of initial block without symbol.\n", - " U_dagger = (prep_and_U[1:])**-1\n", - " circuit += cirq.resolve_parameters(\n", - " U_dagger, param_resolver={symbol: np.random.uniform() * 2 * np.pi})\n", - "\n", - " for d in range(total_depth - 1):\n", - " # Get a random QNN.\n", - " prep_and_U_circuit = generate_random_qnn(\n", - " qubits,\n", - " np.random.uniform() * 2 * np.pi, block_depth)\n", - "\n", - " # Remove the state-prep component\n", - " U_circuit = prep_and_U_circuit[1:]\n", - "\n", - " # Add U\n", - " circuit += U_circuit\n", - "\n", - " # Add U^dagger\n", - " circuit += U_circuit**-1\n", - "\n", - " return circuit\n", - "\n", - "\n", - "generate_identity_qnn(cirq.GridQubit.rect(1, 3), sympy.Symbol('theta'), 2, 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ifWrl19kj_IG", - "colab_type": "text" - }, - "source": [ - "### 4.2 Comparison\n", - "\n", - "Here you can see that the heuristic does help to keep the variance of the gradient from vanishing as quickly:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "62kmsVAXj_IH", - "colab_type": "code", - "colab": {} - }, - "source": [ - "block_depth = 10\n", - "total_depth = 5\n", - "\n", - "heuristic_theta_var = []\n", - "\n", - "for n in n_qubits:\n", - " # Generate the identity block circuits and observable for the given n.\n", - " qubits = cirq.GridQubit.rect(1, n)\n", - " symbol = sympy.Symbol('theta')\n", - " circuits = [\n", - " 
generate_identity_qnn(qubits, symbol, block_depth, total_depth)\n", - " for _ in range(n_circuits)\n", - " ]\n", - " op = cirq.Z(qubits[0]) * cirq.Z(qubits[1])\n", - " heuristic_theta_var.append(process_batch(circuits, symbol, op))\n", - "\n", - "plt.semilogy(n_qubits, theta_var)\n", - "plt.semilogy(n_qubits, heuristic_theta_var)\n", - "plt.title('Heuristic vs. Random')\n", - "plt.xlabel('n_qubits')\n", - "plt.ylabel('$\\\\partial \\\\theta$ variance')\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "E0XNSoblj_IK", - "colab_type": "text" - }, - "source": [ - "This is a great improvement in getting stronger gradient signals from (near) random QNNs.\n", - "\n", - "### 4.3 Summary\n", - "\n", - "When used in practice, Grant et al. has great success in avoiding barren plateaus in QNN classification and Variational Quantum Eigensolver (VQE) circuits. In the figure below (from the paper), each colored line represents the variance of the gradients of the first three parameters of their model used in the QNN classification task as it was trained over time. As you can see, the initial variances are well above the barren plateau line, but do gradually slope downwards. 
All of the instances run here did avoid barren plateaus.\n", - "\n", - "" - ] - } - ] -} diff --git a/docs/tutorials/gradients.ipynb b/docs/tutorials/gradients.ipynb deleted file mode 100644 index c2a92b1d9..000000000 --- a/docs/tutorials/gradients.ipynb +++ /dev/null @@ -1,894 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "gradients.ipynb", - "provenance": [], - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." 
- ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Calculate gradients" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "This tutorial explores gradient calculation algorithms for the expectation values of quantum circuits.\n", - "\n", - "Calculating the gradient of the expectation value of a certain observable in a quantum circuit is an involved process. Expectation values of observables do not have the luxury of having analytic gradient formulas that are always easy to write down—unlike traditional machine learning transformations such as matrix multiplication or vector addition that have analytic gradient formulas which are easy to write down. As a result, there are different quantum gradient calculation methods that come in handy for different scenarios. This tutorial compares and contrasts two different differentiation schemes." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "pvG0gAJqGYJo", - "colab_type": "text" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OIbP5hklC338", - "colab_type": "text" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "%%capture\n", - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;cd TFQuantum/\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-linux_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "MkTqyoSxGUfB", - "colab_type": "text" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. Preliminary\n", - "\n", - "Let's make the notion of gradient calculation for quantum circuits a little more concrete. 
Suppose you have a parameterized circuit like this one:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "YkPYJ_Ak-GKu", - "colab_type": "code", - "colab": {} - }, - "source": [ - "qubit = cirq.GridQubit(0, 0)\n", - "my_circuit = cirq.Circuit(cirq.Y(qubit)**sympy.Symbol('alpha'))\n", - "SVGCircuit(my_circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "wgQIlCWy-MVr", - "colab_type": "text" - }, - "source": [ - "Along with an observable:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "xurmJdFy-Jae", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pauli_x = cirq.X(qubit)\n", - "pauli_x" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "j3OzKYe5NT_W", - "colab_type": "text" - }, - "source": [ - "Looking at this operator you know that $⟨Y(\\alpha)| X | Y(\\alpha)⟩ = \\sin(\\pi \\alpha)$" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Ps-pd2mndXs7", - "colab": {} - }, - "source": [ - "def my_expectation(op, alpha):\n", - " \"\"\"Compute ⟨Y(alpha)| `op` | Y(alpha)⟩\"\"\"\n", - " params = {'alpha': alpha}\n", - " sim = cirq.Simulator()\n", - " final_state = sim.simulate(my_circuit, params).final_state\n", - " return op.expectation_from_wavefunction(final_state, {qubit: 0}).real\n", - "\n", - "\n", - "my_alpha = 0.3\n", - "print(\"Expectation=\", my_expectation(pauli_x, my_alpha))\n", - "print(\"Sin Formula=\", np.sin(np.pi * my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "zcCX109cJUaz" - }, - "source": [ - " and if you define $f_{1}(\\alpha) = ⟨Y(\\alpha)| X | Y(\\alpha)⟩$ then $f_{1}^{'}(\\alpha) = \\pi \\cos(\\pi \\alpha)$. 
Let's check this:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "VMq7EayNRyQb", - "colab": {} - }, - "source": [ - "def my_grad(obs, alpha, eps=0.01):\n", - " grad = 0\n", - " f_x = my_expectation(obs, alpha)\n", - " f_x_prime = my_expectation(obs, alpha + eps)\n", - " return ((f_x_prime - f_x) / eps).real\n", - "\n", - "\n", - "print('Finite difference:', my_grad(pauli_x, my_alpha))\n", - "print('Cosine formula: ', np.pi * np.cos(np.pi * my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-SUlLpXBeicF" - }, - "source": [ - "## 2. The need for a differentiator\n", - "\n", - "With larger circuits, you won't always be so lucky to have a formula that precisely calculates the gradients of a given quantum circuit. In the event that a simple formula isn't enough to calculate the gradient, the `tfq.differentiators.Differentiator` class allows you to define algorithms for computing the gradients of your circuits. For instance you can recreate the above example in TensorFlow Quantum (TFQ) with:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "Om76ZLu8NT_i", - "colab_type": "code", - "colab": {} - }, - "source": [ - "expectation_calculation = tfq.layers.Expectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[my_alpha]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lx3y2DX9NT_k", - "colab_type": "text" - }, - "source": [ - "However, if you switch to estimating expectation based on sampling (what would happen on a true device) the values can change a little bit. 
This means you now have an imperfect estimate:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "v27rRyAHNT_l", - "colab_type": "code", - "colab": {} - }, - "source": [ - "sampled_expectation_calculation = tfq.layers.SampledExpectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[my_alpha]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Igwa3EnzNT_p", - "colab_type": "text" - }, - "source": [ - "This can quickly compound into a serious accuracy problem when it comes to gradients:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "StljXH38NT_q", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Make input_points = [batch_size, 1] array.\n", - "input_points = np.linspace(0, 5, 200)[:, np.newaxis].astype(np.float32)\n", - "exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=input_points)\n", - "imperfect_outputs = sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=input_points)\n", - "plt.title('Forward Pass Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f(x)$')\n", - "plt.plot(input_points, exact_outputs, label='Analytic')\n", - "plt.plot(input_points, imperfect_outputs, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "id": "dfXObk7KNT_t", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Gradients are a much different story.\n", - "values_tensor = tf.convert_to_tensor(input_points)\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " 
exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " imperfect_outputs = sampled_expectation_calculation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "sampled_finite_diff_gradients = g.gradient(imperfect_outputs, values_tensor)\n", - "\n", - "plt.title('Gradient Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f^{\\'}(x)$')\n", - "plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\n", - "plt.plot(input_points, sampled_finite_diff_gradients, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "Ld34TJvTNT_w", - "colab_type": "text" - }, - "source": [ - "Here you can see that although the finite difference formula is fast to compute the gradients themselves in the analytical case, when it came to the sampling based methods it was far too noisy. More careful techniques must be used to ensure a good gradient can be calculated. 
Next you will look at a much slower technique that wouldn't be as well suited for analytical expectation gradient calculations, but does perform much better in the real-world sample based case:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "JsBxH_RaNT_x", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# A smarter differentiation scheme.\n", - "gradient_safe_sampled_expectation = tfq.layers.SampledExpectation(\n", - " differentiator=tfq.differentiators.ParameterShift())\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " imperfect_outputs = gradient_safe_sampled_expectation(\n", - " my_circuit,\n", - " operators=pauli_x,\n", - " repetitions=500,\n", - " symbol_names=['alpha'],\n", - " symbol_values = values_tensor)\n", - "\n", - "sampled_param_shift_gradients = g.gradient(imperfect_outputs, values_tensor)\n", - "\n", - "plt.title('Gradient Values')\n", - "plt.xlabel('$x$')\n", - "plt.ylabel('$f^{\\'}(x)$')\n", - "plt.plot(input_points, analytic_finite_diff_gradients, label='Analytic')\n", - "plt.plot(input_points, sampled_param_shift_gradients, label='Sampled')\n", - "plt.legend()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "0xlUlh8wNT_z", - "colab_type": "text" - }, - "source": [ - "From the above you can see that certain differentiators are best used for particular research scenarios. In general, the slower sample-based methods that are robust to device noise, etc., are great differentiators when testing or implementing algorithms in a more \"real world\" setting. Faster methods like finite difference are great for analytical calculations and you want higher throughput, but aren't yet concerned with the device viability of your algorithm." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "FaijzZ4MNT_0", - "colab_type": "text" - }, - "source": [ - "## 3. 
Multiple observables\n", - "\n", - "Let's introduce a second observable and see how TensorFlow Quantum supports multiple observables for a single circuit." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "ytgB_DqDNT_3", - "colab_type": "code", - "colab": {} - }, - "source": [ - "pauli_z = cirq.Z(qubit)\n", - "pauli_z" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "r51TZls4NT_6", - "colab_type": "text" - }, - "source": [ - "If this observable is used with the same circuit as before, then you have $f_{2}(\\alpha) = ⟨Y(\\alpha)| Z | Y(\\alpha)⟩ = \\cos(\\pi \\alpha)$ and $f_{2}^{'}(\\alpha) = -\\pi \\sin(\\pi \\alpha)$. Perform a quick check:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "19FKgu0ANT_7", - "colab_type": "code", - "colab": {} - }, - "source": [ - "test_value = 0.\n", - "\n", - "print('Finite difference:', my_grad(pauli_z, test_value))\n", - "print('Sin formula: ', -np.pi * np.sin(np.pi * test_value))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "_33Y5mL0NT_-", - "colab_type": "text" - }, - "source": [ - "It's a match (close enough).\n", - "\n", - "Now if you define $g(\\alpha) = f_{1}(\\alpha) + f_{2}(\\alpha)$ then $g'(\\alpha) = f_{1}^{'}(\\alpha) + f^{'}_{2}(\\alpha)$. Defining more than one observable in TensorFlow Quantum to use along with a circuit is equivalent to adding on more terms to $g$.\n", - "\n", - "This means that the gradient of a particular symbol in a circuit is equal to the sum of the gradients with regards to each observable for that symbol applied to that circuit. This is compatible with TensorFlow gradient taking and backpropagation (where you give the sum of the gradients over all observables as the gradient for a particular symbol)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "3WFJfFEbNT_-", - "colab_type": "code", - "colab": {} - }, - "source": [ - "sum_of_outputs = tfq.layers.Expectation(\n", - " differentiator=tfq.differentiators.ForwardDifference(grid_spacing=0.01))\n", - "\n", - "sum_of_outputs(\n", - " my_circuit,\n", - " operators=[pauli_x, pauli_z],\n", - " symbol_names=['alpha'],\n", - " symbol_values=[[test_value]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-ujQUu3WNUAB", - "colab_type": "text" - }, - "source": [ - "Here you see the first entry is the expectation w.r.t Pauli X, and the second is the expectation w.r.t Pauli Z. Now when you take the gradient:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "jcAQa9l0NUAB", - "colab_type": "code", - "colab": {} - }, - "source": [ - "test_value_tensor = tf.convert_to_tensor([[test_value]])\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(test_value_tensor)\n", - " outputs = sum_of_outputs(my_circuit,\n", - " operators=[pauli_x, pauli_z],\n", - " symbol_names=['alpha'],\n", - " symbol_values=test_value_tensor)\n", - "\n", - "sum_of_gradients = g.gradient(outputs, test_value_tensor)\n", - "\n", - "print(my_grad(pauli_x, test_value) + my_grad(pauli_z, test_value))\n", - "print(sum_of_gradients.numpy())" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "-fZmbYGANUAE", - "colab_type": "text" - }, - "source": [ - "Here you have verified that the sum of the gradients for each observable is indeed the gradient of $\\alpha$. This behavior is supported by all TensorFlow Quantum differentiators and plays a crucial role in the compatibility with the rest of TensorFlow." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "lZsGG7lWNUAF", - "colab_type": "text" - }, - "source": [ - "## 4. 
Advanced usage\n", - "Here you will learn how to define your own custom differentiation routines for quantum circuits.\n", - "All differentiators that exist inside of TensorFlow Quantum subclass `tfq.differentiators.Differentiator`. A differentiator must implement `differentiate_analytic` and `differentiate_sampled`.\n", - "\n", - "The following uses TensorFlow Quantum constructs to implement the closed form solution from the first part of this tutorial." - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "5iY4q6FKNUAG", - "colab_type": "code", - "colab": {} - }, - "source": [ - "class MyDifferentiator(tfq.differentiators.Differentiator):\n", - " \"\"\"A Toy differentiator for .\"\"\"\n", - "\n", - " def __init__(self):\n", - " pass\n", - "\n", - " @tf.function\n", - " def _compute_gradient(self, symbol_values):\n", - " \"\"\"Compute the gradient based on symbol_values.\"\"\"\n", - "\n", - " # f(x) = sin(pi * x)\n", - " # f'(x) = pi * cos(pi * x)\n", - " return tf.cast(tf.cos(symbol_values * np.pi) * np.pi, tf.float32)\n", - "\n", - " @tf.function\n", - " def differentiate_analytic(self, programs, symbol_names, symbol_values,\n", - " pauli_sums, forward_pass_vals, grad):\n", - " \"\"\"Specify how to differentiate a circuit with analytical expectation.\n", - "\n", - " This is called at graph runtime by TensorFlow. `differentiate_analytic`\n", - " should calculate the gradient of a batch of circuits and return it\n", - " formatted as indicated below. 
See\n", - " `tfq.differentiators.ForwardDifference` for an example.\n", - "\n", - " Args:\n", - " programs: `tf.Tensor` of strings with shape [batch_size] containing\n", - " the string representations of the circuits to be executed.\n", - " symbol_names: `tf.Tensor` of strings with shape [n_params], which\n", - " is used to specify the order in which the values in\n", - " `symbol_values` should be placed inside of the circuits in\n", - " `programs`.\n", - " symbol_values: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_params] specifying parameter values to resolve\n", - " into the circuits specified by programs, following the ordering\n", - " dictated by `symbol_names`.\n", - " pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n", - " containing the string representation of the operators that will\n", - " be used on all of the circuits in the expectation calculations.\n", - " forward_pass_vals: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_ops] containing the output of the forward pass\n", - " through the op you are differentiating.\n", - " grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n", - " representing the gradient backpropagated to the output of the\n", - " op you are differentiating through.\n", - "\n", - " Returns:\n", - " A `tf.Tensor` with the same shape as `symbol_values` representing\n", - " the gradient backpropagated to the `symbol_values` input of the op\n", - " you are differentiating through.\n", - " \"\"\"\n", - "\n", - " # Computing gradients just based off of symbol_values.\n", - " return self._compute_gradient(symbol_values) * grad\n", - "\n", - " @tf.function\n", - " def differentiate_sampled(self, programs, symbol_names, symbol_values,\n", - " pauli_sums, num_samples, forward_pass_vals, grad):\n", - " \"\"\"Specify how to differentiate a circuit with sampled expectation.\n", - "\n", - " This is called at graph runtime by TensorFlow. 
`differentiate_sampled`\n", - " should calculate the gradient of a batch of circuits and return it\n", - " formatted as indicated below. See\n", - " `tfq.differentiators.ForwardDifference` for an example.\n", - "\n", - " Args:\n", - " programs: `tf.Tensor` of strings with shape [batch_size] containing\n", - " the string representations of the circuits to be executed.\n", - " symbol_names: `tf.Tensor` of strings with shape [n_params], which\n", - " is used to specify the order in which the values in\n", - " `symbol_values` should be placed inside of the circuits in\n", - " `programs`.\n", - " symbol_values: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_params] specifying parameter values to resolve\n", - " into the circuits specified by programs, following the ordering\n", - " dictated by `symbol_names`.\n", - " pauli_sums: `tf.Tensor` of strings with shape [batch_size, n_ops]\n", - " containing the string representation of the operators that will\n", - " be used on all of the circuits in the expectation calculations.\n", - " num_samples: `tf.Tensor` of positive integers representing the\n", - " number of samples per term in each term of pauli_sums used\n", - " during the forward pass.\n", - " forward_pass_vals: `tf.Tensor` of real numbers with shape\n", - " [batch_size, n_ops] containing the output of the forward pass\n", - " through the op you are differentiating.\n", - " grad: `tf.Tensor` of real numbers with shape [batch_size, n_ops]\n", - " representing the gradient backpropagated to the output of the\n", - " op you are differentiating through.\n", - "\n", - " Returns:\n", - " A `tf.Tensor` with the same shape as `symbol_values` representing\n", - " the gradient backpropagated to the `symbol_values` input of the op\n", - " you are differentiating through.\n", - " \"\"\"\n", - " return self._compute_gradient(symbol_values) * grad" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": 
"bvEgw2m6NUAI", - "colab_type": "text" - }, - "source": [ - "This new differentiator can now be used with existing `tfq.layer` objects:" - ] - }, - { - "cell_type": "code", - "metadata": { - "id": "QrKnkWswNUAJ", - "colab_type": "code", - "colab": {} - }, - "source": [ - "custom_dif = MyDifferentiator()\n", - "custom_grad_expectation = tfq.layers.Expectation(differentiator=custom_dif)\n", - "\n", - "# Now let's get the gradients with finite diff.\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " exact_outputs = expectation_calculation(\n", - " my_circuit,\n", - " operators=[pauli_x],\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "\n", - "analytic_finite_diff_gradients = g.gradient(exact_outputs, values_tensor)\n", - "\n", - "# Now let's get the gradients with custom diff.\n", - "with tf.GradientTape() as g:\n", - " g.watch(values_tensor)\n", - " my_outputs = custom_grad_expectation(\n", - " my_circuit,\n", - " operators=[pauli_x],\n", - " symbol_names=['alpha'],\n", - " symbol_values=values_tensor)\n", - "\n", - "my_gradients = g.gradient(my_outputs, values_tensor)\n", - "\n", - "plt.subplot(1, 2, 1)\n", - "plt.title('Exact Gradient')\n", - "plt.plot(input_points, analytic_finite_diff_gradients.numpy())\n", - "plt.xlabel('x')\n", - "plt.ylabel('f(x)')\n", - "plt.subplot(1, 2, 2)\n", - "plt.title('My Gradient')\n", - "plt.plot(input_points, my_gradients.numpy())\n", - "plt.xlabel('x')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "oXqcJWigNUAL", - "colab_type": "text" - }, - "source": [ - "This new differentiator can now be used to generate differentiable ops.\n", - "\n", - "Key Point: A differentiator that has been previously attached to an op must be refreshed before attaching to a new op, because a differentiator may only be attached to one op at a time." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "id": "F_WHcj3bNUAM", - "colab_type": "code", - "colab": {} - }, - "source": [ - "# Create a noisy sample based expectation op.\n", - "expectation_sampled = tfq.get_sampled_expectation_op(\n", - " cirq.DensityMatrixSimulator(noise=cirq.depolarize(0.01)))\n", - "\n", - "# Make it differentiable with your differentiator:\n", - "# Remember to refresh the differentiator before attaching the new op\n", - "custom_dif.refresh()\n", - "differentiable_op = custom_dif.generate_differentiable_op(\n", - " sampled_op=expectation_sampled)\n", - "\n", - "# Prep op inputs.\n", - "circuit_tensor = tfq.convert_to_tensor([my_circuit])\n", - "op_tensor = tfq.convert_to_tensor([[pauli_x]])\n", - "single_value = tf.convert_to_tensor([[my_alpha]])\n", - "num_samples_tensor = tf.convert_to_tensor([[1000]])\n", - "\n", - "with tf.GradientTape() as g:\n", - " g.watch(single_value)\n", - " forward_output = differentiable_op(circuit_tensor, ['alpha'], single_value,\n", - " op_tensor, num_samples_tensor)\n", - "\n", - "my_gradients = g.gradient(forward_output, single_value)\n", - "\n", - "print('---TFQ---')\n", - "print('Foward: ', forward_output.numpy())\n", - "print('Gradient:', my_gradients.numpy())\n", - "print('---Original---')\n", - "print('Forward: ', my_expectation(pauli_x, my_alpha))\n", - "print('Gradient:', my_grad(pauli_x, my_alpha))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "OGWcpqzDNUAP", - "colab_type": "text" - }, - "source": [ - "Success: Now you can use all the differentiators that TensorFlow Quantum has to offer—and define your own." 
- ] - } - ] -} diff --git a/docs/tutorials/hello_many_worlds.ipynb b/docs/tutorials/hello_many_worlds.ipynb deleted file mode 100644 index 845e736c4..000000000 --- a/docs/tutorials/hello_many_worlds.ipynb +++ /dev/null @@ -1,793 +0,0 @@ -{ - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "name": "hello_many_worlds.ipynb", - "provenance": [], - "private_outputs": true, - "collapsed_sections": [], - "toc_visible": true - }, - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.5rc1" - } - }, - "cells": [ - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "xLOXFOT5Q40E" - }, - "source": [ - "##### Copyright 2020 The TensorFlow Authors." - ] - }, - { - "cell_type": "code", - "metadata": { - "cellView": "form", - "colab_type": "code", - "id": "iiQkM5ZgQ8r2", - "colab": {} - }, - "source": [ - "#@title Licensed under the Apache License, Version 2.0 (the \"License\");\n", - "# you may not use this file except in compliance with the License.\n", - "# You may obtain a copy of the License at\n", - "#\n", - "# https://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# Unless required by applicable law or agreed to in writing, software\n", - "# distributed under the License is distributed on an \"AS IS\" BASIS,\n", - "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n", - "# See the License for the specific language governing permissions and\n", - "# limitations under the License." 
- ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "j6331ZSsQGY3" - }, - "source": [ - "# Hello, many worlds" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "i9Jcnb8bQQyd" - }, - "source": [ - "\n", - " \n", - " \n", - " \n", - " \n", - "
\n", - " View on TensorFlow.org\n", - " \n", - " Run in Google Colab\n", - " \n", - " View source on GitHub\n", - " \n", - " Download notebook\n", - "
" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "6tYn2HaAUgH0" - }, - "source": [ - "This tutorial shows how a classical neural network can learn to correct qubit calibration errors. It introduces Cirq, a Python framework to create, edit, and invoke Noisy Intermediate Scale Quantum (NISQ) circuits, and demonstrates how Cirq interfaces with TensorFlow Quantum." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "sPZoNKvpUaqa" - }, - "source": [ - "## Setup\n", - "\n", - "Download and install the required packages:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "fAVlg7rxkvUw", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade pip\n", - "!pip install cirq==0.7.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "TorxE5tnkvb2", - "colab": {} - }, - "source": [ - "%%capture\n", - "!pip install --upgrade tensorflow==2.1.0" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FxkQA6oblNqI" - }, - "source": [ - "Note: If the following code cell fails, execute the first code cells and then restart the Colab runtime (*Runtime > Restart Runtime*)." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "saFHsRDpkvkH", - "colab": {} - }, - "source": [ - "h = \"2dfcfceb9726fa73c40381c037dc01facd3d061e\"\n", - "!cd ~/\n", - "!rm -r -f TFQuantum/\n", - "!git clone https://{h}:{h}@github.com/quantumlib/TFQuantum.git;\n", - "!pip install --upgrade ./TFQuantum/wheels/tfquantum-0.2.0-cp36-cp36m-manylinux1_x86_64.whl" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "F1L8h1YKUvIO" - }, - "source": [ - "Now import TensorFlow and the module dependencies:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "enZ300Bflq80", - "colab": {} - }, - "source": [ - "import tensorflow as tf\n", - "import tensorflow_quantum as tfq\n", - "\n", - "import cirq\n", - "import sympy\n", - "import numpy as np\n", - "\n", - "# visualization tools\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "from cirq.contrib.svg import SVGCircuit" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "b08Mmbs8lr81" - }, - "source": [ - "## 1. The Basics" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "y31qSRCczI-L" - }, - "source": [ - "### 1.1 Cirq and parameterized quantum circuits\n", - "\n", - "Before exploring TensorFlow Quantum (TFQ), let's look at some Cirq basics. Cirq is a Python library for quantum computing from Google. You use it to define circuits, including static and parameterized gates. 
Cirq uses SymPy symbols to represent free parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "2yQdmhQLCrzQ", - "colab": {} - }, - "source": [ - "a, b = sympy.symbols('a b')" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "itUlpbKmDYNW" - }, - "source": [ - "The following code creates a two-qubit circuit using your parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Ps-pd2mndXs7", - "colab": {} - }, - "source": [ - "# Create two qubits.\n", - "q0, q1 = cirq.GridQubit.rect(1, 2)\n", - "\n", - "# Create a circuit on these qubits using the parameters you created above.\n", - "circuit = cirq.Circuit(\n", - " cirq.rx(a).on(q0),\n", - " cirq.ry(b).on(q1),\n", - " cirq.CNOT(control=q0, target=q1))\n", - "\n", - "SVGCircuit(circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "zcCX109cJUaz" - }, - "source": [ - "To evaluate circuits, you can use the `cirq.Simulator` interface. You replace free parameters in a circuit with specific numbers by passing in a `cirq.ParamResolver` object. The following code calculates the raw state vector output of your parameterized circuit:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "VMq7EayNRyQb", - "colab": {} - }, - "source": [ - "# Calculate a state vector with a=0.5 and b=-0.5.\n", - "resolver = cirq.ParamResolver({a: 0.5, b: -0.5})\n", - "output_state_vector = cirq.Simulator().simulate(circuit, resolver).final_state\n", - "output_state_vector" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "-SUlLpXBeicF" - }, - "source": [ - "State vectors are not directly accessible outside of simulation (notice the complex numbers in the output above). 
To be physically realistic, you must specify a measurement, which converts a state vector into a real number that classical computers can understand. Cirq specifies measurements using combinations of the Pauli operators $\\hat{X}$, $\\hat{Y}$, and $\\hat{Z}$. As illustration, the following code measures $\\hat{Z}_0$ and $\\frac{1}{2}\\hat{Z}_0 + \\hat{X}_1$ on the state vector you just simulated:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "hrSnOCi3ehr_", - "colab": {} - }, - "source": [ - "z0 = cirq.Z(q0)\n", - "z0x1 = 0.5 * z0 + cirq.X(q1)\n", - "\n", - "print(z0.expectation_from_wavefunction(output_state_vector,\n", - " qubit_map={q0: 0, q1: 1}).real)\n", - "print(z0x1.expectation_from_wavefunction(output_state_vector,\n", - " qubit_map={q0: 0, q1: 1}).real)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "bkC-yjIolDNr" - }, - "source": [ - "### 1.2 Quantum circuits as tensors\n", - "\n", - "TensorFlow Quantum (TFQ) provides `tfq.convert_to_tensor`, a function that converts Cirq objects into tensors. This allows you to send Cirq objects to our quantum layers and quantum ops. 
The function can be called on lists or arrays of Cirq Circuits and Cirq Paulis:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "1gLQjA02mIyy", - "colab": {} - }, - "source": [ - "# Rank 1 tensor containing 1 circuit.\n", - "circuit_tensor = tfq.convert_to_tensor([circuit])\n", - "circuit_tensor.shape" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "aX_vEmCKmpQS", - "colab": {} - }, - "source": [ - "# Rank 1 tensor containing 2 Pauli operators.\n", - "pauli_tensor = tfq.convert_to_tensor([z0, z0x1])\n", - "pauli_tensor.shape" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "FI1JLWe6m8JF" - }, - "source": [ - "### 1.3 Batching circuit simulation\n", - "\n", - "TFQ provides methods for computing expectation values, samples, and state vectors. For now, let's focus on *expectation values*.\n", - "\n", - "The highest-level interface for calculating expectation values is the `tfq.layers.Expectation` layer, which is a `tf.keras.Layer`. 
In its simplest form, this layer is equivalent to simulating a parameterized circuit over many `cirq.ParamResolvers`; however, in TFQ the batching follows TensorFlow semantics and circuits are simulated using efficient C++ code.\n", - "\n", - "Create a batch of values to substitute for our `a` and `b` parameters:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "1fsVZhF5lIXp", - "colab": {} - }, - "source": [ - "batch_vals = np.array(np.random.uniform(0, 2*np.pi, (5, 2)), dtype=np.float32)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ip7jlGXIf22u" - }, - "source": [ - "Batching circuit execution over parameter values in Cirq requires a loop:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "RsfF53UCJtr9", - "colab": {} - }, - "source": [ - "cirq_results = []\n", - "cirq_simulator = cirq.Simulator()\n", - "\n", - "for vals in batch_vals:\n", - " resolver = cirq.ParamResolver({a: vals[0], b: vals[1]})\n", - " final_state = cirq_simulator.simulate(circuit, resolver).final_state\n", - " cirq_results.append([\n", - " z0.expectation_from_wavefunction(final_state, {q0: 0, q1: 1}).real\n", - " ])\n", - "\n", - "print('cirq batch results: \\n {}'.format(np.array(cirq_results)))" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "W0JlZEu-f9Ac" - }, - "source": [ - "The same operation is simplified in TFQ:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "kGZVdcZ6y9lC", - "colab": {} - }, - "source": [ - "tfq.layers.Expectation()(\n", - " circuit, symbol_names=[a, b], symbol_values=batch_vals, operators=z0)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wppQ3TJ23mWC" - }, - "source": [ - "## 2. 
Hybrid quantum-classical optimization\n", - "\n", - "Now that you've seen the basics, let's use TensorFlow Quantum to construct a *hybrid quantum-classical neural net*. You will train a classical neural net to control a single qubit. The control will be optimized to correctly prepare the qubit in the `0` or `1` state, overcoming a simulated systematic calibration error. This figure shows the architecture:\n", - "\n", - "\n", - "\n", - "Even without a neural network this is a straightforward problem to solve, but the theme is similar to the real quantum control problems you might solve using TFQ. It demonstrates an end-to-end example of a quantum-classical computation using the `tfq.layers.ControlledPQC` layer inside of a `tf.keras.Model`." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "VjDf-nTM6ZSs" - }, - "source": [ - "### 2.1 Model definition\n", - "\n", - "Define a learnable single bit rotation, as indicated in the figure above. This will correspond to our model control circuit." - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "N-j7SCl-51-q", - "colab": {} - }, - "source": [ - "# Parameters that the classical NN will feed values into.\n", - "control_params = sympy.symbols('theta_1 theta_2 theta_3')\n", - "\n", - "# Create the parameterized circuit.\n", - "qubit = cirq.GridQubit(0, 0)\n", - "model_circuit = cirq.Circuit(\n", - " cirq.rz(control_params[0])(qubit),\n", - " cirq.ry(control_params[1])(qubit),\n", - " cirq.rx(control_params[2])(qubit))\n", - "\n", - "SVGCircuit(model_circuit)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "wfjSbsvb7g9f" - }, - "source": [ - "Now define your model. The network architecture is indicated by the plot of the model below, which is compared to the figure above to verify correctness." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Lod5uhHo7gXH", - "colab": {} - }, - "source": [ - "# This is the simulated miscalibration that the model will learn to correct.\n", - "circuits_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string, name='circuits_input')\n", - "\n", - "# Commands will be either `0` or `1`, specifying the state to set the qubit to.\n", - "commands_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.float32,\n", - " name='commands_input')\n", - "\n", - "# The classical neural network layers.\n", - "d1 = tf.keras.layers.Dense(10)(commands_input)\n", - "d2 = tf.keras.layers.Dense(3)(d1)\n", - "\n", - "# TFQ layer for classically controlled models.\n", - "expectation_layer = tfq.layers.ControlledPQC(model_circuit, z0)\n", - "expectation = expectation_layer([circuits_input, d2])\n", - "\n", - "# The full Keras model is built from our layers.\n", - "model = tf.keras.Model(inputs=[circuits_input, commands_input],\n", - " outputs=expectation)\n", - "tf.keras.utils.plot_model(model, show_shapes=True, dpi=70)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "bbiVHvSYVW4H" - }, - "source": [ - "### 2.2 Data definition\n", - "\n", - "The simple model is given two datapoints. The inputs are the commands for the qubit state; the outputs are the correct measurement value of $\\hat{Z}$ for each command. Below you also define the random miscalibration the model will learn to correct." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "_VYfzHffWo7n", - "colab": {} - }, - "source": [ - "# Input values to the classical NN.\n", - "commands = np.array([[0], [1]], dtype=np.float32)\n", - "\n", - "# Desired Z expectation value at output of quantum circuit.\n", - "expected_outputs = np.array([[1], [-1]], dtype=np.float32)\n", - "\n", - "# Circuits producing the random initial miscalibration.\n", - "# Note that in this example the circuits are the same, meaning that the model\n", - "# assumes the miscalibration is constant, independent of the state requested.\n", - "random_rotations = np.random.uniform(0, 2 * np.pi, 3)\n", - "datapoint_circuits = tfq.convert_to_tensor([cirq.Circuit(\n", - " cirq.rx(random_rotations[0])(qubit),\n", - " cirq.ry(random_rotations[1])(qubit),\n", - " cirq.rz(random_rotations[2])(qubit))\n", - "] * 2)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "gB--UhZZYgVY" - }, - "source": [ - "### 2.3 Training" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "dtPYqbNi8zeZ", - "colab": {} - }, - "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n", - "loss = tf.keras.losses.mean_squared_error\n", - "model.compile(optimizer=optimizer, loss=loss)\n", - "history = model.fit(x=[datapoint_circuits, commands], y=expected_outputs,\n", - " epochs=30, verbose=0)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "azE-qV0OaC1o", - "colab": {} - }, - "source": [ - "plt.plot(history.history['loss'])\n", - "plt.title(\"Learning to Control a Qubit\")\n", - "plt.xlabel(\"Iterations\")\n", - "plt.ylabel(\"Error in Control\")\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "GTd5DGcRmmOK" - }, - "source": [ - 
"From this plot you can see that the neural network has learned to overcome the systematic miscalibration." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "jNrW0NXR-lDC" - }, - "source": [ - "## 3 Learning to prepare eigenstates of different operators\n", - "\n", - "The choice of the $\\pm \\hat{Z}$ eigenstates corresponding to 1 and 0 was arbitrary. You could have just as easily wanted 1 to correspond to the $+ \\hat{Z}$ eigenstate and 0 to correspond to the $-\\hat{X}$ eigenstate. One way to accomplish this is by specifying a different measurement operator for each command, as indicated in the figure below:\n", - "\n", - "\n", - "\n", - "This requires more sophisticated use of tfq.layers.Expectation. Now your input has grown to include three objects: circuit, command, and operator. The output is still the expectation value." - ] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "Ci3WMZ9CjEM1" - }, - "source": [ - "### 3.1 New model definition\n", - "\n", - "Lets take a look at the model to accomplish this task:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "n_aTG4g3-y0F", - "colab": {} - }, - "source": [ - "# Define inputs.\n", - "commands_input = tf.keras.layers.Input(shape=(1), dtype=tf.dtypes.float32, name='commands_input')\n", - "circuits_input = tf.keras.Input(shape=(), dtype=tf.dtypes.string, name='circuits_input')\n", - "operators_input = tf.keras.Input(shape=(1,), dtype=tf.dtypes.string, name='operators_input')\n", - "\n", - "# Define classical NN.\n", - "dense_1 = tf.keras.layers.Dense(10)(commands_input)\n", - "dense_2 = tf.keras.layers.Dense(3)(dense_1)\n", - "\n", - "# Since you aren't using a PQC or ControlledPQC you must append\n", - "# your model circuit onto the datapoint circuit tensor manually.\n", - "full_circuit = tfq.layers.AddCircuit()(circuits_input, append=model_circuit)\n", - "expectation_output = 
tfq.layers.Expectation()(full_circuit,\n", - " symbol_names=control_params,\n", - " symbol_values=dense_2,\n", - " operators=operators_input)\n", - "\n", - "# Contruct your Keras model.\n", - "two_axis_control_model = tf.keras.Model(\n", - " inputs=[circuits_input, commands_input, operators_input], outputs=[expectation_output])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "VQTM6CCiD4gU" - }, - "source": [ - "### 3.2 Adding to datapoints\n", - "\n", - "Now you will include the operators you wish to measure for each datapoint you supply for `model_circuit`:" - ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "4gw_L3JG0_G0", - "colab": {} - }, - "source": [ - "operator_data = tfq.convert_to_tensor([[cirq.X(qubit)], [cirq.Z(qubit)]])" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "ALCKSvwh0_G2", - "colab_type": "text" - }, - "source": [ - "### 3.3 Training\n", - "\n", - "Now that you have your new input outputs pairs you can train once again using keras." 
- ] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "nFuGA73MAA4p", - "colab": {} - }, - "source": [ - "optimizer = tf.keras.optimizers.Adam(learning_rate=0.05)\n", - "loss = tf.keras.losses.mean_squared_error\n", - "\n", - "two_axis_control_model.compile(optimizer=optimizer, loss=loss)\n", - "\n", - "history = two_axis_control_model.fit(\n", - " x=[datapoint_circuits, commands, operator_data],\n", - " y=expected_outputs,\n", - " epochs=30,\n", - " verbose=1)" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "code", - "metadata": { - "colab_type": "code", - "id": "Cf_G-GdturLL", - "colab": {} - }, - "source": [ - "plt.plot(history.history['loss'])\n", - "plt.title(\"Learning to Control a Qubit\")\n", - "plt.xlabel(\"Iterations\")\n", - "plt.ylabel(\"Error in Control\")\n", - "plt.show()" - ], - "execution_count": 0, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "colab_type": "text", - "id": "sdCPDH9NlJBl" - }, - "source": [ - "The loss function has dropped to zero.\n", - "\n", - "If you inspect the parameters of the model, you'll see that the parameters have been recovered to control the qubit correctly with these new measurement operators." - ] - } - ] -} diff --git a/docs/tutorials/images/barren_1.png b/docs/tutorials/images/barren_1.png deleted file mode 100644 index 72b3a8957b4c75d8f2f4c2cdd53e23742dd7ec04..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 36369 zcmagFbx>8``v!VI2|-G_OX(624lNxL(%ndhhelFBk(O={5fJI_?(R{Q`Z3Z}sif$YwYL07b^>J~FL0QH(aL*4RPx>GRCF~sVA_A>GQi5c* zr?L^)msH3;+%9d55bn@AzfH%(2kFF^X8dm>PXniX`@2m7tKPTeO#41B_LOKFyTp0j zO85Fi90{kmE*=|y`ml76iZ3`JrNcN_!(;uqGm&? 
zQKU~n??^_{^bGsvLh`Ok*jodfHj_-YE9)BVPg;mkai~qpDMIfgbhK-JcTJd9OiW0u zcbMwqZa3QA%LTgEN)`GFT|EDJ=2?6fqyI?&5~8VSG(xR`_?@j#(YXt6HXS&=5kH( zi1O$ck58tEQSIr(cW&PvU`zUGt!(~LO@AF+e}4No91U{|{ONo$r31&EBDryVuZ;a* zIF(igoupDE5`?qNd;Oj02mI=!pwCZCxEBs@%bwf~b9;M0F&pcU#{=7|ADA{5OItoH zzlut_W8wQAVa3`2g z4Jp~hKI2fRN};m*^id+NBD=l4e~*lV&@LQ^iSa_@YKZ3dQ-**)>Ab(6U6b)5==v0h z_zek|<0Sfe1uV28%lhUNvFD*BH9em~IQMH^iup)YM>ODv`giXmt_}vd-|S!1L#InF zet4k(Gb|rWETW%Z2`Hbpp*<7(8S&IVJWn(zLgp8lg>cPxy;T}^Us>vd_XIcy;*x~v zyNV0H*XZyp-&m6V!BGw6^|h2d`(gW2LXqRQf;qd^GaH`@(VHLC!-1RDj#xD>=D*pB z>WeH&gl8-oun`bRAQ}4|b%+^8zP4h^jK7Ol>vQX)Fn!Ju+-`(hgs6dJO%WT2YY<%I zUffxIfq#krC$+!x6U;fe}XbggiNC8p$Yx)xA}kWAS6-W9?@_{>A>`PqmHe#X`ku+rGD< zO4t30{S~~*(9Pd1(;XH0Byv>AmEMM)n?7rdH-{)kS*ca2bOgtOW9#h}=a$k|#)x_* z?S~@v8Fn&u>jaSmQFi3Bba8YmD$i8|wngQ#V&6AroGY7|d@y1D z(E7o_#L8r_D{Kfek*hx-R)#Sn<;Q$_eVlTvL&Qp#O_Wsxab!}IKGgx!U<7NdT}&hM zgWl(YrS$JVi($t+^LHBWg5r`j?De0iji{q2YGpV| z!5K65$fB68jS`$`7@k=fBJs z(5lyRnBkqln$a%fE2DKqbF;ARvw0c*w0L1tGWu(B zH7X~@;*W*j))Njw4*gR2QkK%i{2rzKt#$z#D)iS!0~MBHW2Z`-V14xqcMe49%>+h9IQ3mg@9Q}H zf~*3MNv=sAZobNL+l_fF7uE}-3oi`9@HMh=vT%WM0c?RJ0kgWix(U~m3kmldw}Vr% zbH#JRE6%IF%bCl9v%Mqgt1s88Hv&hK7aKP&2u*MeaJBGcETdW-+7npF@SgB42!bdh z&$LKSjoE0RbX7E$7T+|byrk^tUU71rnr|ktO%|0DA@w@Np~krcy!+be&;4akyh<`! 
zGDIvS*cA(5F6U{W2L<5Sbb!7;pGpG5U4M zjP2R}IEROjmoZIrHG8S@TiiR@vu93UlD=eUhH1Xhv@2~bRalqWIJCdEKXa&DlV5dP z_pC)M|5~2Kr_bzde7~s`SMId)!|7CBA*Ouaby`-ezmqC}}t=hYlW47WpYqoofro}b~6$ zwVm{3@7e54Z(c9@Zqx44URCL+#YTkv)^Xi&u`XQg4C5wlJ8q#%=YE5hg{Ee0K#^pX z*{`0)KMsZK3#9HsDzvJL1+aXJ`Rp>g*@o)7-8)gFWQ?*ghRN8m2B9ti*UEw1f_lB$ zhGWe2_Y>*aXn)b_7ug$ZELE(TEN)%nU0heCx*}3(lCrWgWF&hN|C$<}j1|Wlb2m6V z6qL6A&Mb(T(6T2X!6)-R_nf(m#PbmbFn@5=(yTK4aD!LF|H-o38{0yrth%>>&yDqLq4!4fVuRe;^_!Q*t<<*S znsH|5nb6MN9Iugpz3G@|*@5(C6q0dX@kXP`{jp))A^nxbn*X-g=&!5QE&9P6;z2cj zV(-)=53ia{^)3C2`XSGwR=a`5vwpFGkjB+Uoy+>un3mkuLS}uJLBxJRuc&iVuY$|` zmRafpf`d?7Q4_o{sGID-DUPI=b=ypFwx7gX)QBY7kj?2Lx3=9k(r{EyIAK=lO z2~5hh&=8;0=S}XA>K&ke5oy-JL-}G~J$L^J@=;$@RMf&Er`;%>JgD6$M&^Tesjn}5 zioCkMzR-2yZ>1dYNKT?qnN0i3a)8ShJ3MdpDxoNb-PWC>Wdbs?Z{vE`PT5bq`FSDd ziAhQNZfm&bZ}Ri=&#|h1{xrHd-(yecQ*RtVLqkJAKp>m{TLoK*?{&q3gS;g{5AB){gRL(YHphLL>JE1cXpYv@^YSSA-=(0pw#+?KJA_4zFU>56drb?H8> z=f<7qdf|1SKW0+@r%}$CA5CHFzf)Q@njHVEZfx*bpy;tur-{~j4?7|4#?_da$hhbqoY@Mxn?WvvhnTh?LBVB+l^peUS43^z~$yv zElbl%4G+O&`yF2NSoFKg*HJw`u1wmxolzC26_`g@Gmig>oc?8^lo=0IuG^>>%EOa@ z8JV1{qpI58@QTBw-GHsioF?5$Afb_MSG(c3hq1rE|8#(lmv{Gp+2KjR6TJVugyBo> zxMjW?-*S+wbo6PpYkRV5yTcKRbhM*2SpisDxvhZQ26aU_+nIfiN{|_m|9iS@X2w^q zUM1t$rupbT44cNfNB({FktU0lmbS8@f|n#sWFr@o`&#ue-Z48xBd4v^z$?mUoKWzQ z{RjJ1nPgPL){yt95_kB@_f`58SpzVYw6wHzb>))7Wz)5%u|-rERIRLXcifF5xjCx7 zTR28^n~;){4oDEzJp6mA2Xd6^?SN%DcxY@a-Xtk~8Nvs}NcK8nMTx-ko+#Wl(^9?K zKaE2<<7tz;ObJ%yTKQ`49NbJzX|8)e55^ZW3}}hP6IW%_)g^bpK)~thGDmfrL8*f|^mA<$5#q{E$ zh?_oMH0bz;{(Q_-goK1;Mti0tlUt7T^z^${5;`f}-O~kj;ZStQlBl`b(JNk>c5xmO z`nkfN#;e1Hc#$m z5}xnn-i(=Z?m9d027;Upd=oQjJ9%$DLrUpBlwCQ(-^sr@T}p`|GMY24vb-gTrfLEO_AOvMIbVn9HA7WiR6Fw;gLjLY`}( zv$C>gKLkO^)4M#So0Q}sK0cyOb4e5nV`Hm%!XEN`0@)XIXc^FL=g zwKe(WUq&@;xst4`tiFgCIzL`qB0Nta%0w=u%9+{OolaG(baX#7>wf+E_4DWQ>DH(t z+UEJ*tYpYapND94$3N$0|HdlcFWcKC;R1bl_OzueWVbBDdZxK$TCvNroa@b-^0G3D zXi*5;v#B#)`4}1TojFI#^irl&5br0RUS5Lp^QFKEy`E(ZPYn$%$$w71EDI)mH1Hi+ z8v?wcmMPb@k_grwQ{zPbgK}+R=^S=C34Ck6Q?jgfb?}`JsAQXFQBhGAc-ar%2?g|& 
zl~;JfYTax>*=_i;aD=6iKT%V|i!aR2Os!4vRt?n+B!2A9q+PQjOc8$(LqkJKyidMD zk2XDz4Y|yYClpv19yUyyPw@0^Gb(O2FmOrYe-rOI;{xL<@aIp=-xH|?_hXX82Ti)sgjeb zs;ac~vm52i%*^re@wTgpB-u@u;!#Wo4*VoPDo|@E!jQtW@(Xfv9q`(Ul^puM+J6y= z02rBPZ3&ww^sBRfc6N3+^xKe8ao3uCaC=MleqRcvNKkMvsakn%Zf-c4fDG&8>eIoa9Sk@Q*vWgpav z3m#sH)&{;0FiX>c+%L$>#&hMicY%vM1Cn*7#gN41?s9(zBNai^{QW|dmXxE zJ2C%X684lXoSdAPNbENqICEIWt=N^U~^`9YF0Kj`FD|W;*soTPLwsuO>&bpXQ4=zd+zG?AWj`% z5=dSqoJ1C%=lU03;BO|2olB(tXVLq$BD%1BoWq&G6**e3$+qejAMBA&)Y1Q24l9%~ zDEos6>w_VSX|~Vg^-Z4~c<4sK4_QG(!cd7CWsFBeL=lFul1B%kc(dAdw+LBqXkx~G z*+nk-RjPfp$83cSK@Gcvii*nE*jSfI36@KpB-k9E*EpT|YK$rL1Jz{WutT1J5BBNr zH8eEh6A}Vfusb-3AmyPxm|D3*d{i8Se=eO~d1;{JP6b2;dy7a(3ZG{cr zW(-Hi#%??Et|uNJA4f*MAh*?dtOp>Y{~_V}-502ibGyd{-q=JkKqI*#BUP%w1cD3T z)4n$hF@G8w1nX**|0~AxYae}kj>*@JGUSl)lKN*{f2etTP5H|`r@>G>pQi_*xo>A0 z2%ZfwBeKe-a4HP5_C9txVZE&o2e*h#&vuM|9r#a zDSG9lupi1{k6hH>xbY9kfsE#_HiR6r|65;plQq}DTy@kaQ)+BP%84Zv?8m?^;YujMQq~ z&(SR~`VA49gPgRqw42-2)ULenzfg3M%~u2AR8klrCXLEv8ywb*!n0KFu(!P)zB3id z7%wwGju-p^0C|vq{+Z~{9|j&CLL{Vr%eZ}Z^Cig~F96eMQHBKvg^`glqRTIMG=&N; z7}U}h_fGf!{MBx3V#40}N;5ez0c=?~-6NX5?%ie|SWgib7f(n?fI>e9?Bo15xDrb8 zGxEgJPz$j``K=U<{Y}oQ)GFN9|N9uD#8p$1(EVG`TZ%rn@{cfv5>)@9jwj_CGXeFp zQ3(l}D*i6+@afgn)jxhf=IQ?nCq2_6?sp8uAF*3}?gWLUR_{$YiBM5crsnCduCBmV zZvHa|@~@-x<4}-VOkl{y7KGEu?HK2Neog93c<;o;zv!@-mAC>o21U@*Q=kL{*i#L2 zmCn6@V9Cy|MW&oKOk89R5Z{g0LVVLJYVWBpszG8I8Bxj@mJ-4VZg;G8Cn3bp<9QG8 zGJ!ITM3bZg=LYWE)XOgQp?~|>B$OQBZqi}xhv=NPl@lE z2tE^4V_0w|0*dCCeR&-7WE=+ZDaP#FD}{1h!@abBX&+7y+A%UR0`l2S4gyHXkAee8y_FB zM1#@`J zv@1*l=y;DS#J<|`t_?j2&Y0Zk+=a_2D2VM{1HpYNnjq^8mg$%cs{L|R=a}UizF^d+ zEptxd;I8x3VD*n`{{rr=VuT=l*j!0T3B(Ja3-wubb#)mT)U>on3NIiKWaj1r(n-s5 zK9_wHvqz80FkW@Fh9vV$7TEG1n$Tyzr<*G^8zh(}>oO_^iHIE+Ch|=t^G{)+q&WYN zLXu6e^@k}G>Yad)o4plVsG$OepTx^HWs&bU5&T6RI5lhfg)v~B9#Cj7vB%DtxX+mi;|7h-iI=oS87v7l*V)t4Go@{p^}(cc1p!hO3v$Ly z{>yT$`LQt^FCm&ec@X!*TaKqhfa9|f0DYinUf{7pxQBC0ypQ3v=e5&Af?`T9t?pTyx`do<96J71eYPe zE3^1AfBkARU<1^61%?C)?~&qPrbQfNe+1%6!#qkK7XZX}U1I;$MH?yB9g`YmP9n5= 
ziA*IYM@JP^)pS0DM?fbT9Oh%7e{#KO8zm-vBn5G`0Z2hhqN0>zVQ4vz?-fWu7@Uu@ zzk!%V%K!*5*v@#!1_@CDe>!M1%7A*Enfa>B>*@dYd-2ViH^tni>(QqW15T8$jl+Oo z;xovt;ss~^{0To#|Ig6+pXyTuzJ!h*qChgl;tY!l|JD(#f)x&;FWuW^YG>0sNMk>fLKWAWupbRj-^9ek_4TL!z2E@A;i5_!kTL`3x#4bU z?R2qf1M6~f0@dYKLBs*yS7E><KCl@ z4wZINzP^ArrN~PbD(0vl`oTRq z3#2)41wfZwY0KOqI9iC1)X!hLp`TAb)obOGt$C`ctM{%QfBlMhoi0nmXTK^1IPJe6 z^~LTy`&6%%`$4cRO9)a2meGG7vzpBB-e%|`1MDX+j~>PWraQC@z1g#qj>fTJfOxd( zsoRum)oYAy{p3TKoqqy~4o4W^fJOKLYk4dt+<)L9c@$m$x;x^~`8UMpL`X|K1BvqZ2#<@b*3koE#AA5_2pH^Y zcXu~51enMpa3xs)TI`GM^I)$bD#c?qhO;M6Pe*6ql1=dZ>+P)v2}nN>vUb=2-#kyD z5d!U7WLFlw=7sF2c{@<*K}5a}TG_=-21IFjOqN91=X(?3r>T(<5o8Nn+yAz;s|}W~ z*2@yQVF>QtTuK&tXneSM0P3e6P$B;<@#%>F@a+mAnM2uGPQr)?a`Z4I<+EQifNK_hzbiJw2WBQ>X?L z3t-Un^v%s6HkLqa?B4i|K;N3bP6*L{hgxR#rtb~N{{?w@I+eDh=l~nOZE~IF3j;$o ziAc)E8X5%zR0m9sy1KeD-^FG9`gQvk!`r}a3xp(uKXipI^ojrPDB8r*xa_eyePJP- zz_t&#s;rH-trnwMSQFoH!LTJe5=jdZ3Y-Mljx?Nx{&DfVwn;4 zwB3dAkr7{b9x!@w1?oVU3G?~>{^tICH%>`D)jW&5qpe4fckH^*Nm7jJHRVfY>ZIu= zR0Ml4T(1*0z?BT9ahQCbN8=e-w|*DYER0Gi_VNY3T@KW9;)l*pBU@@rWJL;)-H}6~ zVFfUs_rkTxUUf8n^C=_8(vrim_BX`YANT`T)N)mG$Cn`>6CJWV`i`?j^4&EWr0y|; z(@3lzp2L6sUN231(?$&5eEFhtx^O6xE{94wkQ%v(d*3k|TIM0nax<4yFY^=(@Fjob zB{ypccRgqj4>qXzbWLVc|2~d;vV-*_ZiINX!_JEk25x2`n2u*Re@2zp0x-CSFpP~p zJZQPJiMs@eZsnnK*6V?c{LujH-s2lH+0QW%A;>Iyn*HIi*(z$$Bl|caQb(~tvDl=V zy$<~Kxf{#4!>{cFX#avYVD>==rmxM4UjTzH8P_PYIBjjd>3Q{Mbnoj=<~7G3Rrpiy z%Em3<(2zZ@`Bj)u_i|1`BprPi>D!MyyYg3??{g*R>CZvAXV-?ua{La0`H(WqVV(FM zt)>Icw+Gbyffqaugm(13Lz8EgP?2C^g+SL67gq1dC+_KXtvtVgZhXbN;;;@X_BQm>FZsb7hnCb^}{ z_QLbzMrffEtoA;M6E;`Uj%_9uO6zZ8Vq(roiu0cf6-r`Zg)l8Io)3LPuwO6C-g~+ zm+b)7yp;aSm_;n{seilt>c2?zz0@sU#0KPe_rw7bU`rYN1aklJiY?5+^v}{T{2|gz zN@Oa7S&cH@%a`7cznvy;&fxati)jNR6AwI`)_QPn8lOSXJs%ln`c*(~dHI=kW${g8 z{e1v+CrmyYg{Twmw%O372W$vk-0#besfR0X2+ba*oUe}8v@30~%EJAKuMr#Cg&{se z_S{qb4Dz(OA9+=$7;7K~r?NCF6C?2FyV2uTCr=vnMz{2RN)tgoY1!o+8y?0YB(&g4 zdaaM3vqLWCL;?}%vsvKE{h^G|a30|`m(HpydR6>$5RP)%dyCO>`tC!T;%p`jIc zd-;K#89tZ=`&F`dFkUG6#bzi!fI?olAG7QrDGW=*q7$Dvve6yQwORDdsr_Z6I{LMv 
zD;N$^EJmOtB!{DEtTlv~3M)|H1tEHSH?unzsvOeXG-?$)cb`%pebdh`Ha`<#R~f-_ zAh{X_E52wX`?Q^t-4RMGSva@C0dJsOdF?eAC=6C)drmi?>N<)M|fA!0AWE zoyNk~i&z%z0MVz0=Nr8+5^&ViwbUj1Qj;t;eqbBGIC`D|yQ)dy_nflv zMdrg)St$4Y+>0I?e2a$ilUSY2l#Ge02996zo`l{k@H%Y!lnzdeTZdok7Yom{AJRfe8ZfU$fD@0eAgqzwoqNfdE2|oCA^U6#!xtTusiO^Z1yU30qifv(v z-!`;Xd9At{kduWaHy4y*##tf3C-~s-E>Mge7Rk2NzDx*{5s?~`ZQmoye@js|o#&64 zJN{>K2N&fQ#4)+q(au--^O$#W`Mp}z78U$Q<||XKelb&kWrLP7YdPbkF)PfKAE2Nuhp4)Jz-p1AX>chcZ-uQS0iR9gub92Ru1O3f z-DAEySraDdG);yq&fnfku)LqjEFb9>2_p3bDJn#aizvvHYpb>MUFXzdwJR`Vtq3b<`%tjz-T82bBz!~7WnB^k3T{$=UjRWa{1UtwMsK`^rlvSC+owg(!qrCDn+8&w@TcLx4`rM8Z$kb*+ zNzp(1^lGeAg3FNLyh6NG?+|Yi`wvJ~PlIQZD(UhsU~lef(y^)eT$jyP(ASSSEl4;n zOjY%!6O@t6Q8pf=lJA@CK%qkIi6c`ZHEV2;= zxjdh>8x}e&nI3dwaIflPjxMc{ZmD$NTCcg< z-v9*-jI~>Fi{#NinZ0_(5Aes{Z8C=|(Aqg%QtHlOI0HV9-;@9c=Y0hLxAxDr5bszG z1!aQJ;;*|g2asub00&9MYY%8YA`C{J>HJ&>9JO{qa{)vqR+4f+-jiCFymp2r3ZfG9 zv2lJ2gcJiG&^lSTU)&G@Sc%r<=i=N7%hE20g=(OP-APH2e2J{DL1r~7Ib{{C?|WlT4vq|JX=k%JKtO!RpKJ54n?7#qwE#fvCox|W7_lnf3r+HgnJ@HS*xC|8A_rzGnr%+G6UqE#Dv3;; zfRIAE*GZCfUTn?35mR1Sd2xNV6UVAAuMk5nIOg-@sj()qQ+Cm0jMDJ%FgP!T!C;CU zxV;nmq#y9If0{A?WRa{X3;C?BUK$bAZq|#A`J+k3D#7)REbWl-BiUwa7b%is&OT*TFP zj1BY6XaoP;#X4qV3R|pkvNAB33e4L2demvf9{oDDq>OVnH^;5fx7t)# zl5~ZxkZ9BP|q}#w2i< zvFO%tQc?LjP(PBQNt0WE^lCMbF}(nF9MDT`Y14_2(os|Vmpyux%f5671}ns~8Q$Ms zvDO{>TJJ}eC0iE8S=YSXC^o1RM9{)b{;Hh!|X-*Ogh{`%tHBWk8J@${Ab zjV{+SO5nY~hf!bP(Kc$-t*(($1vVna1iiGEcOp?BL$<}s?q8;sfUBtEPJJq@)zbF) ztdjAX>l|7KAVO=o`rn##hsjyiX;xO#s{3a5fAw>w#*nNQ!~X#`m!dkH3fMF^J>f{cAJS=e(UC z6_+pYSoAK8m%VTFYB6)5>?n6UHpPoNy63;+*PN{#A_XL>unugZug3w9=P@ zpBQ%_nb)B4*XIJ*-0gYud}yV&Q(uME;+j5b=`iY-c6We5%@*wiOi0b=E+&Aj-D*$h zY7fZ)7APEZgpsk|Uf6Pbu_kvM6*k^qk5)w4;oUFQleU+d_W!<%3s)=buY54$jDGla zh+9_pR%*rDC8*ov*J#_G_v9u^QZPQN-sQZi1<$yHvc2a*ijl%gx7exj@Rh))!hOpN z%s$2dzwSUHKR0F&yCcxLW1q3zyxy_oDpA;@@{h+5MbCKtsfSYRKdq!?V;_g8?LS@MqoDX~+vv#1GWJMp-c-b? 
zQL%x&Pe;;}9il%JoieUH&`*REW!1}LH@>{IZJ1{JZ*=?gWS@RCuqti<2lpOGopxR~ zO9h^BdvR}vbPIN~OP(qM#9F_D$7fQ@r4Z4v&>nc#_LhX!UzN1c`{DkVbX251(|}H` z%k)gWgYq$$?)mmwST>EJ`YuUUI^?7>0LI5H& z6ZQb5u}tpv>VqAtn_B?>?0mRHn+ZK(Ln3xYCnW@cgSA#Bc^Jy-rpX!hKugQ6Iy&#wDa0p3QVhlO-MkbWMn zcM540Qe_!!e7i@g>8lg0E?6{+sksClTH3O4QQAvB^sWPti;27|dZmD(FRky}@8>hQ zY1x%{!MJHD1=2%Z=JKiL6NMV(cnRlo!5<-;o0|Za8!C=OZg6-dRy!YrR8hzh{PHGw zPa5#n=pbt=mq~u8n+P-c?^i$!P#bZR&o%3cTnEh)SF)v7xEehV4;BQPdDNWV5WAJ^ zPK}BuTc0D?*S$WY3oF%^n=UK~`Arz~3qP1+a$nA?SmR|XE7M&R&vg> z*)lUr=~GoM77|lbnY*C4zcHCOPL5UJ4XTJfVfe<8jbg2o!wK4X)@plx;p<^EeUh7m zwyJ`Yx<=laD!;oK1v8>+%T^qmZTQ2(E0ZcpJ6&!^dtR2!?Y`l~6Mk_m@z7Xh8lf4D z?!f!clH5mzX-00JFm_#}0A+J}W4FZpfq!G%apaeD8TZ%%AY1X5vRX`OSlQsL8-x!~ zSyR~zeLd+4-5TMaX!r<`IA`Bl%|`&9JLp#>Mx|NjdbCK_n0DH-1NH*Zv_8uUJMG0! zO$7LrM-=`U*3iG)R`7N}F9k89lc%*?t9qI?T=yVOq2vd}Z$3?QO(f?7f{K9=bHg8;`CoA$Kiu=rl0zEjQVy{A?1yM zbQz_LJa@fMOV*F?g5-`nvxaI@r^SZ9J(YqB2#-Q7QF8X}PT$(}@h^&dpV~#;-U95x z9>$YS^g>Xfaq$ievlpT|oso7@lJy)g_;|h5x7Nshf}Amx4+VBD3ST7%UD+*e#vm7T zJkdv?d1ryt+tR+X&MGCVpx`p4EKi1EkZ!ZKY4F*HkfiLeg0H=O3`Y{*_0E0{%-T`FHP> z>2N@ehkX|^*XH5XprF{kP|eQn0mm#rn)Jt*rw_*y2r$LW7E`nr=^FK=T_oV3$8DnB zb%z{t)op~=tYl!H86G5&x3;kBXR12eo~T+R(-v&bPT8oGt_6aed3TMw*VtB2sgH8& z5L3_Q5*@z9FHUqrw!4T%>;OyFownClaHfqOtvFuA55`W@zi&Zh9jdUu(Pk%Q64HzM zOvtp9U3ZhQMjwNl*{A)wU7JBm-qD14@Yz1M^W}TL;D{fTSZxW5E+Spu&sb8emuMf2 z&*%+;^~5UP*v^REgmlwf=xcyM^sdZG0~2v6_n*^FVw(MA7Oy6TH9)HLS*MA8X1rxA zoekgP3zxVoztm%1UhSGaB)h>XOH<^iScYaFj5fR;^L%}j@J?@`*rYw|J`N^hPK#HM9%~n&3{;7d3_!?@SPOPD~Ju++sU=~o^#I<|;pzl3C4kTX{QIsN%K_Nx+0y~x^SfjoCj)DADA7SUU#1^c zT>gCR;Z=F?i^jOr)criaHAN-qmcnRMlL9$6ndJ9J*S1G z!#vAdghC!iYx%&k)1MCJOj$BqNIbr9$b3lTv;}1S>C#R~?m--DS?H1Khm5 zrOBoqTSiN_aqaZ8_^9wF6n3rdyW-#Bl4d={g{;#{Z}?y}65gFWHb;i~)SxYrD73@e zv@wSr-6*;pDnZhlKf{ZmhTQYQcJK*Pu?B{9}$qUPU~cPsPYQT4j~;TP5S z44XzwQZhR;^G?okJ(#hFeR4mB)+FFPCau3%zY4NO{YtzOvS2geUlPiYRh5C;M&*B@y8et5sO0V5N?H48 zLCm3cyJ4{gIBf^L-~SrEV`G6LIMmxZBlYsd7Cv|3&AAou+xW}AhS^Wb7xdDVp8(e6 
zwp-ZU+A5y5P9`25IBrEEHZuB|tCI%6ZPdg2^M`ugCB7l3QtUotG~T{`|pE6W4K9bGRJx3TRdOO|P8Q*4{2I zDcRX!)`P7!a=tV41{QgE&Rn1E7f)dTT8coAWIV=)63pLLA7kx6_B;9XHF5H@Pv8nb z3jha`Pds#>d&hP7X}lHywazUF@dG8jLu#um9N?6XidCVRAe_eYBFE_(j^i04H8u(8t-o?2lOtS~-G&nMFlgU7bn7!;aj=;-S)X3S9spT>yYH zx|Z4-%k02al+15yB&`WOqy187@B)=>Fz9~=7bZu51cIplJh)5~6~QtJkpFWtO@3b9 zPe;q`TYvIG$nUJLtjJ&0YmjVfCSy_oF8H;w(r|*(8yp?b1cQdh7@3&V)Pa+OgwgPw zNvwf^k6-B?_^ws?@qI=v(OTWsk8j04^XUUI((u9`AV0jh3xzD{l@c{u(>p}ZrABPr z+IiJG+%O=)@_4QSeWt_HZts(s8v)bC4>|he#tzU}1iluKsf1>wwzYG|Zm>8J0$mG% zzN*#my4asjO-%*odLw_;#Ap>b)A)yVH1!hC{F3w5a(xBM(0u{(=>a4W91S4x3M88g zdVFkbjrfAI_b%a$)x_!MmqD7mkify`jRCEe^#L0%G4>W4)AZ=*>Hm6%1ARTHA#|Qy zP>yKh0IbVHJ)u7t*RhP8+ADSQg%@=8Bcf|)hQ0#F6g2|1e?-!PAi?PYgVh>MRzwQ= z=IJP$Sz5s5jbX|^ak2;06r)Bg*pmMn<$%Sg>q(=T%RL8Xg2BHwH_7cP%hj`~)U*4HJ?0zo&z?St)}Borx^ zJPxzOahlguzWqCp*y~?4p>a@#a+IX#)}o!I1((vR8XoBh?y|{5z0URXs1)*LH_I`f z=;^<|p9YJ?eKP|{@l>v9_X^=RM|@w3if3-jLPCvYWzhn1Uu@vBHP;3=1krk2=p(zL z^qAy7MS#fTD5zZAAD*%1DxLN?S?{lPIiQLQhyCvai>z9?DE;O4(Kb1n6+85OA&=k9 z(?oK|u6i;$Iyz$l2L~32hAEB52r(V;oZc+7Hu*|vp4zl@D z7jAiJ-R{*S;Cd<6(|fIUD*$j^Z5gcv_Pq}S|D1Q#LGAP8Z{uCH^*ElgJs0iXXF2>^ z*_>)m@GA7=I(6P#BVb>1DrN@RaJ4&jo>trW<|b!zAaq(1JZO}B5%Go;LqtmQ=5^M) z&c#M7Af}xQFmOf#O3tEN!Sd>j`wU&`lWI5;0)_I53fskol!}lY#~R?;%gaVutZ7Y^ zlP`F`t~Gn+=4g9n+^FazH=CB8ou0<`o8Cw>?dr%QcWb3ao%yM1ToB6p&ZZc#6Dr)N zUmd33^)-l~7x(^sebg)RNiRt9Wh5<(B%{;Kw}N&AO18v$*WRDni%(_~*QNKgTZgA^ z)p`E@lh$@=W2&cd`2KV-u{ZGfHozY-_|QV`XNxqM(d|51hq`s_O0`6eI)DUpe!k`Z zU2r1(WVG_N9FdFo;`+RR`$f?U5dhdvmG8Y6K3(sugxmRzzLEc`knp_EP7LT5$@6cw zDq)_B+CVi=qzNT1(TX4Zbg_%_JXAtiSy{CA)u{q!t$9l=RQ|ObQ-%9F@%;90GkLUm zj>UTSxbtFY-`Xn7n}-qs92tW+FY)^B#}i1ow;E`Aj>t(6_d-EA3=*j}u|Mit0KHPv)gudBBy&>+^j599lC#&?P zJ(2_&{gj^xkM{%{1$C!`d41PvbEW}m2GXGC`WW0zTC_ty65qst7gylN1@6mReT z%zA~nkZN=iw=x)+`(fg+!p^8#(rnkp*(v_%gZOtGNbb8ohC zH0Eue#EZT(cP-S#22Z~FubQ2jii*u`N1%^LH z(qUr*$@_w^p5#@iF(QS|u#lUR$-=MurTOw*6J;QdoJaE3!GON7q{$QuqIe{pti_Hq_@#Y>m>~D7N#{Y2T1oetPwtD%0x6v%i;D+FleVL1#&->t 
zHM}1IxZ8fAI&rLe&)DG02S1u(pYv|=9{MHRbp@a~0Mfw~+*X>^NuMUC5r~5hH!(M- zqpaL|+fMsfRPgcf9kv;FoBHSa?n^;ymcHkiD)`=@oLs1UHt2i@J>B2}eeYFOP=mWQ z;5@r=S777G5K}UHCv?3%T84%fe4+c-a3ApL^^Z>wgLV}Oz~cNbp<(*Nm7Z)Kd4ESf z?6}@Pm6Fz#ChLUN+`C4%R`WFeqR*MWolEBuA>aWjGf>0$(Wc(GqTgt_HvLv%AnN2{ z{&LDJ_v!!X>OH`z?*IRBO`;HGWS6LjLL9q}P+3Khoq6mWWMoyyJ}Q+>$S5mhud?^d zUfKKD$2d5@=h5f>{;%ury6^kyzBp33J$HU8Z7Jp9hbLWdVDy(+JVh3)x z+_psClIg;i_|l4sQCXg*B);J!K9=BVFqwZYU(>o(r{8z`ySrxzp;%3kD-oA2=ZgC& z?qgf|-KXmuD^rD4bN*YIAMKK%mKGM_r}CiHkDEKCu&{5l^yB&rTUqFWAyexf@y88X zTw&u34OwMBh5l9mYc?1Q&Ej1p#=+TthKgZVW^`hyi`aF~Dk42rulr1d4=%l3@d8;z zQ|o@`w(&u6R9Hj=3dynTS1cfzo^Q%kGR=c9B!9U0*O`1C4hRwq7a9!SP zpo}48N>N`!M+*<@5lXu}S{wFc`Q>)R(vMr=9pus_{P(LMmE>k8CNE+%a~gNbpSRU5 zO=YC519J^y?{Oc}j6PVo@ICZo{riA`0O*JK`0?Dp<#I@V0=5;}pxUML4aCoLSiF6M z@ys6<+#2r8RhSlvEYaI<$c(J3ZiFzNf{rVR@6iblL7c70VRffJsu^{??dqW3)Fx6p zncO2=VMh5lw>R1GqhOZb-vjG>?_x;gx7*~t&*CXw_PG<`{ISY+;#;m6X7{x?+Gao1 z0W7zwD$+Nzy^BPQ<57}ohJM9}rbdtxF@N&u&rV?H(RU-Ixc%lAipr!B+pw=i0(8-5R0BmeXdiSL`mf{-iH)-z`UQ@4UOV z?xvjb44TXYs=1}IRka?j?w^q}r`|U5o16V>)&3Iv#xOfv(^$oa@J`^j@l`bewndSR z;Z8x_X<}gKzWHgvh7RdB_qM|!@%7HbqPuxFI9%=^J1cz}?#+zEi{o$Ln_;3H%k-B4 z&xT8%HVjjC#cd=7iaA|)bcO2;#ql?(5dn<3YT076Z!no9+}`!Z0+C*lx(0I|=0ma2 zYWwXmxY8TxfZpw-q+_dHg}n-nK#oi|mnYvglVmH&8l>CSXRf;rNZV_UI!+;@Qo72a z&LPjl6C$i`nlbeAT9(zvBokbZG$iOq7vl|D7ob)R$A4~9rNa^{I#5{UF(4^8&;1xG ze3%2?DPo55R?i;c@)_CI11V|$3H$LXPo6u)yK1rTl*r~vGE$z{ha`wHj&!|oz{N->=;WM+t!`EMQ@Tf{0h5B)%XIo`il`Wz~& z`qigL#E^a*D%vUNQEk1C*;!0tqMx2|dD(c(uuE(36?=YOLs%{jcaR}p! 
z6&b!!EJ#@+^+t`BOOg8iq~N$Jpl>=bM#IaP36h(Dj>&@OqgeA?@?1<;psoj$ zR-0qeVZUlf>q4pBwQ(D^ATr^+E2O1hMa;*U|ApObMw@O?l@QGZxvNU0}?TAR(4xEuUvyJ5CtXlwgUy@iyNlugPFF|Mnd zW2vLg_o$}2(d;&$nT&GZU`>g;v({K(q_m^UL*2g1*r9?@t$92ooHsb~Tey_?EsNx% zKDK`aaldFCsS5B4qGe|4*=%FT3ffV=sl{7cf4WP5x2nbqq;en`BJ%761y`3Qv#B8V zCV#4)$a0lCWXUn6()=Jcgt9HTkp7#9O~Q`pdCRq8&9`;{Mun1LDBW527sHG zdodgQ;o!xwm#6bkw}=QwDcP$ds*ipqK^jI7VOI3##R!~88wUOtbKyU*{imk9Pto`T zx>hMh?8(LV)JU`AA(7cvV0Ku>4%P?tGcG{sT2S-EAj!@r~50%rp>hfQ_%TOEs6_(RR z#w{jM)YbUBx7`JMKhM0=HkiJ@@l$j4T0KWN?|H?~t$waQ)%ea#dF{thZ7|O6-Cgc1 z0wZRyaN^9=H-*RE?@pY$fAKxyYuemlDXSCmb%>OUx0W@2qy)L*{K$?&ay))ao0CS; zdF3`cH8t`sjf?R|=CjO|#{^fr`%n6v+yJOKYLMh+@WiEZ&KvND-t8AmU%*!7>Ms&6 z9(4`Al-nNDe_k(N%iVu0vvg}K8mihQ4%YmaZ{bTC7dv?MzDoarK;qjKb%9TRAPFGx z;29)sVD~rs-eD}i({^OB>nnbm|ABVojuzt6>o7joV3*8do2&#CVVs_H^K&!j=m_s< zZuDMq12rw48RJLcaCtVgu>U#3qTuNbPi6kolpCF9%7VLo3dp67SJ)piFYh1)SUk&D zR!>Cs_KQlbm`8nc(O>KwW?}w9Z}xsrGp-cz- z8KH7>CyK7}(NW)Q8Lm)h+4~e0CL$v8e6)PnOh}8DuGO?DMI|noUT4Eiknn&j6#jZ< za&p`KSZiePB64}*va8JMc2_vy3qq2Gz>m2W@ChPfH-Tb2ydl&C5DS!G9{`ltpz<3{ zO#AaMb*N(&cUE8PRL?DvY8G1!zkBhjRi7~OjMk4lGfIttXM z%BrL^zo+#(L#pLvxx=vSNWXD2U~_sEaE41<`s0Y8lS@|du2krw#Y-b$GF#0*-zeMf zRQXcj@pv`~=NpkhSg}GXd{;vxs*(pzL<9nzaQC8tSOp{CZO(=!y=mxT)4zT2_GU0$ zE;Llnojccx8n#kjHMHc&>M!`_jSX~vU?J>U$jaf3!6r-A>2XR7-225>JSgA1%;0~LaFwjDmK{YgYCo2lQh@N7EKRS6#gpC+z05?vw+8(?^U9O z7{{1?-6}Sr!e0Qw2pt{8OC|DzcF)R0C79N=cZc|qcfgldx=rCu~GzX-H5rD>E+Dsd*aAABBb4+m6s~XjAhud7xvWN z--EK>9>+MoE`L54tMc{o^75wEdWfnN8(kA?>G!67efzeIu(f#nSX2?CHG%`m7f1L{;C;< z3F=?Fo9r;JTtSXHJJZ?RyFyVf0|l@sVM#0?hJ3GYI$S+E0R5J)1A96ePLswZ+>fCa z=tDW%jy@n`TqH#6eOZl(={Uuq_{qI5rq3#t@A`SN;|^x0xE-Z66-6a1q{aHfcV;t4 zwB}7S@BC(Zwn&yDe4bLP*zxIzhYoH;iDw9o-%aVf1@>~tufy4X^TH464Nm*3&2yAX z_2}qaKic=00;oddWABx~3h&mKA9KHAg9t!SmfoKaX_{|6jgux{@TML(ZRF(D7Qy6f zJkA+^Z!JByFF5tg$HxIPBsV$)(+3_SW)(j`iUpzVbHBivn+ThRNO-WI@JFr{WWj!0 zRkG0}2G1n$vK#!uJRH@I z%L>f&&?NB({I|+HrJ4u;J3^rrKzLoPcD*7PYB{J;4CXC2g7wv9W zWs&@^3}xgKGjV}eDVg)PG&V0F>pwK_UHR6$u+a9MX}4KUUsqamErMQPomDv8G1K0TjUzD 
z7xBt-0`bZTOF6PLE=SKXdC=R_p)-mS-d)>enjHBR!#1V<_sE(&b4ZZe$Lrq6=oONF zsD~@zM-on~-LO$Zzs{ThoTk4rdr>-JqjqLSO;uyfC7QWJ%wvCjoi7Jb@`x3nn_*}j zzOaSOm%G1F!!Mc7k_qM-(tb9co^!2we z-|EC#9o8I5M3mG4YALkS87#!xC&#u0F!mN&_A{4pXFR+nj_PfXP*|34RYrE`FQJe< zh-^`rcXG*Z&jy&BuV=AIZDka}l+7M-vpnjW)0mfLrhX)3xVYn%Ca3HQi+HA^knWP% z;_@;|-c~4$#GB7ibg%gL9sKr+^7$Yp(iIdl=?YRo?F5JMgGoUbCQtvwvm+<*XNvP9 zJ?yu9DCnC_?H@cK$DF#&5Wc?{I=>ynjY0f<{9$^C*XXwsA zx*frC!82CWY*9)c741b2W43O&tRa43d87=VBE9c#3ZWsa7c_5@QtdQe7gkOoa7F=|6pNEt>L%Q&nwDg zMUG1Ys5tjp_fv7g!OvK@Z!=V{&Y=}`OShV0T|#(X5VJWoF|&`4tY|O;OV>%S*B*OM zq{%0#ZMaX5Y8N}IFCv-ARXUVce|uC7mpJk36xwcKb#W+e<>>Ga9|Y{D72mvwCW(I@ zJKm69rMLcKdKcU+!7wPCvtT$$N!Y5GN3m9UY3`WsR>hN&{iKYnHiSni4OM1JU(C2CtlCnp zGqh`kQ0B?qy?dkgix}m(fjSukM#WPVhuvT6uzWpShz>${|G0eigdoN~ZDOJ}xJpLg ziKg=Ee8h9mjrKSjoI# zPW@XxNRcR{9b2D1P-+)%U2kDPC8_gii<0u}-?lKei9&aiCdXby)4%bja$4=Li@3J( zzw|H(;=M_-_2s3D{le<7a9R@HFe=Jqt>l=~tdF^><}XTx(U{n?+7A($dp*IK6oj1; zyeDI1+CzhlLK%Rq!_LF4jLG0ovKLn~zwbi%x%pOaaok3(K_kZv&IEMt^NAbY-}_LZ z;Rp}S(b|z+)Iov}@3{95!ut8@tkxx#lhFuLKWmZfep0Y zsEr~Fb}cR_)wY&ZJneev=JZE!gmB^<*`*M+?|YjSrDM~olivN!omtj1b+ zu*iOfPrtmmVH{G700`^Tj6OY@2<&j|63rT=^*T~6Rw7{RjtIVXxdXXtMP^T10xM;( zlU2`=WUq^2eCE}Ip18tcQ4fO)_?MCKg6dW@i23fsGfXivT8g#nH*>yL*Ytl66B!&8 z@G#KYtNaOXS3xN3jNzL&&ko>TGPq^;Mjwt{--e_@Pk6uk4L=^oSQ(bXU$zQ%w8zk$4PEUJ_v*;u?%&^H(}2Bx z0+~HO)j~;V$RbRg30*ClI6Bw(=hAP!7g)GCbEY=uuTXlO% z90$hL|C?i7by4?vf_&&Qx-GH@+idB;H}ZQZ~MMsFz0Y;oHlp2#_5yG_AL4> zpJUzKB%S2DVI4l8#;w!?qb?Dcae+>;;_BrtdNW0zpPg8x^4yWzNp|7OaSq}&$CGMA z#}E`>K0pWx8<;+evT$C2TK0~LWh<*EYBqLfAkXsuvpl1*v$mh11x^3QIHCuA^q3c= zOpLqQ1A|pohu~ueZYKbtSR(C%9P`4fGY!<0bq;S9?r<7R7T|W5H*UmSSN!+~PJ2}M zc)HNR82>0#Dpx7G`j$ExH(tOWV6auftFTYVI0m{%5%wf30s7%|m+ae(WSSW`wQx6RORSI?P0JPJw2wqla z;te)2Kvg(6Np`MYrVsFo9BFiV&=55z5R`g!Z)~mP6plYtMS6c@AHVf{%M8l;&fXFj zamkP1$i*$STS0427AwKI6UicYi=%2hC9611!D*D>s zY{j4aOrdIby{EJIY+W#UjC0imi;D1b+k1GK8`Jgvs!(H>(uA(32CNFqdd(htvq-X{ zeu^R+PDXdBb8jZX%lqB(b=`%u;Mw6*<3es-&*$<)w#NNu24LA`G$?d)cDgl=c^<5L 
z+J3g8K|-P%x?YS7jj=->y&g4?c=)rAaDPoYmKSqufS(&tY6;lsB%;s?C z)^N|n*)_7RGZ*1@jd^*!tH09W`%hNFE7~kZE+g(GEC2VGMN91?=it6V%(Lt7Y#%t4 zta}-!_uZY~w?AX8j_&zJvl(QyA^*I(=i?Haj>}HBz%(jMf_BD(j$o~JV*h!%B)8J= zyh@Y;_+}WUtr+{6*8Mq^CESEsth8aNTGHtvl7);&&lr=Nbt$k_&j}6mrRDOu9Qg zXMEH2@WUNcgo!^T|2pzE-WE?yPu#r98jRrduCBg(7Rk3)caxq_y1bg8;@A;+UGDQe z)H$5!fLJUgdx7S4E~6R0(b(Gz-xT$GJyTF%T8L*ikxHp{ow%Wsxg-UJ$K;DnC7vF) z$xnWTXB{fg`-3g%$l&tc+sv8}bC|*7Ulz4}K5R@w*|Ryt0c+adPUMc0g9aNg=G?v) zPP-oS#gj^99I>9f4lNi5d#GVwUm^E0{rosl7u>dZAfajSO~WiT3uDTFBr@4$2+``0 zFA;GEYsQ$0*NiK=to@VM-@`Rd*UEmz%Ah7vV4!~ujIg$AiZ5e*s-!!++~-MK){Ajo zh!}AxI-Mmm0S6XW+tCXg=ZK3eq9n5T9AGS8%@m66apJ}gL>CYp;*>bmX{Fmu=a&P4lSI1ewH6E%U7FvN4_ClTcrvkFabq(k+yrP=SA!$!7sTG;B3zj=@F7&eq2AK!vQI{I#Cxv1--q2cgK_XO+P$m(_1nV zdJ*^F0S_kf^(Sp*!vNpl2@%)A0#5<2p{aLw!QHgUf0ZjMo#|&Umt6SkT*u|M>ht^i z+=LA$&nmb1Do>Zh*$9=PZ#krrejz8^3pT;U$J@-EI2+pH^fJrc(c1PM{t7%TXXy!m z5<}K3-D|%S?JbZFATUs*A*Evdkp+O{y#uF;h#Ll#oy#c{7vIA5!flakX!C#-StOa9 z&$2Y;5iN8jSGz%1zH{5hoN|j{9&2HbN@Oz?D3&)g_J?~eEJ9wHG^2Dim#`JHtjI~c zX*0LtiKBAAYHDa-Z7RHB#WLlKy4BE^6dW&$v1YZd#0 z6}m-sNd-=QJiB0ARVx!Ya!_gs&jw$91#_S`O#KAA86RCLUh8$?h`YgJu zm?VxK+Mh@G3b@BK^nG)Y2*@TVw_zLEq{PAJy60%)lWRjwe+N8-p@3T^{%u)zskiw2 z^q0)OF5EIg)y4Hh&tq{4PK`h0bK9(PHHl1B?0Y}+^Vcq7c+Ld?UC7ccdHWE1WrFfXH}>pSKS#QzH`XCS@M?6jH9mVBSFq6)(g3ueWrHYz zc|+rdkrU`vM_G>=MP|aI*{nG?+tHVKi5>0TNHTxw04e<9z$tm_@tfH>;7uhiD;pSP zP28meW{VYfX8J>pCt7qPQa z8ASsf2$9Rro8PV-h)c2>I~j4)qUa?@up^smO5?RFce#>F%9HSr%3mbSm?f+`y-izk zXeX!ga&~@3y6R6Gx7%p9yx1>;yAw^F18^On>)0yj+aQDRg@oj_-vq#|n-#L|aqw$|r3LeTx%=Ij0Y zUBU~m+{JH&_Qu@u+bUviw(D}rvllZ5s`gl6Y{9B4cg>APZTJLf^p2%1eC28Oa+>)7{g9iwcp>X(R3^2uVF0 zQoB7hqrV6N4-9oULU|C(&}NL*bk4%VgO#x=3i{T9yu7*1U0?FpjONE@ZSS2(iP zMFGwUPtQkDJa5(;=X!Yk61HlmC*5&UmJj(J?cht06TrRpN?OsDI``v)WcR&X8+WbR zA1(B{1zD#lwV+T5hVo-WY;a&;$#7!Xa3TwZr6gG6pz~1}w(nordTczeqE{QiiFRmw z0D(C+GryH!W>74yo<;C+R}R(>m$P?t(uk@$$5e6SI(qfN0Bw$;5~E;c5e1#8I^B&;(Mo z_ON`<0r$t*PcGk%sNz))>|>dYfa&82;R1#8$z(p7@`$AuU$E7RC&k*GQQm+j|OQaoOV#E{{E}O*~Y;9 
zkI-5R?X8aZ*_ElVg7Uq!&zu$m(W2{L+aye8Ep9sG0fRwpv!?JRzSC)zVCY5UKL2^S zcg%N~>t+t8$*vgugDM#z^M3E&hO$&)o2}@Yu#!x@&7cXPA~T^-)qc-wt!3SCoJL79 zMmSA2qTFV4S07P=;u;mVG+7;RDi>Vscj0$)%w9U^m4U?>+3x3H#m?C;$n~;ypHY## z9Q2_P@{8+BgGTv<+}_RMqOP54p&`to)p6{~+2T(gULab@=gcYm1r~XcE{u+Jb&PP; zW$A@_uHCiYF9(R=nVH;u_cG7t6}7*Tq% z8vl@)YrECyeLlu>TFCl!1;XHF8=+ZmsG+3ZQh05pMBqgl98St%K?pJVwA|{3oF+wr zDeLG@-t<#jG~qM@?n{!Dc=V_TK4TxKv5SG;RqylN`%MQ1A^5FS$uvx?WFZtwJ`YPhvD6Qh-P zBgGxvosaL3MVycN<*O@dz11vI-0k2P&u(b9xVRk~%No49wRalYjes9pdYRWm3LHhx z>*vT<)5MbXMQekN_{80SB%fqUGy@|ThQ~LHD7~F!aMG6jr0^K>6mVO?#X!ks^+Sc> zr6-XxWPOQ4Sd8is-1)d+4LKJL z;f32f(=NhkCe5E~T`K=%9F#8fJ*#*vq4#a4AEmJ*Ub0PB{mpYk^JUkD*?=mrECaJN zYkT&=A<^WkWkp-usU+2gYP5SwPYHsL#@}4K2$!Dhq2B_eJ-C87Z26yI_xqeQbq%q) zKb{$kUU!E8@ot8#T(sTp;-BpP>YSJR{9=SeAK@<{b}Ftd%hC{HLmHsg%3Pw*YO6WU ziAC^5nz?>srEDPdzK*@Wq|~bX5E6S}fi$ujX4*n-kFpsNOny}Xk=$Py<+KUYTo=e8 zHv38st511U1PF44u8NRu4XW^Z)$l9wTq9KDdAAFN^d28HTrnyWti{^9I|V@qS@z>(g>qHw((pV}Tuj2cYC^J7 zm~_hQ_Ilhw+0tmV6p9cKy=XCj5M#Y*nDaRHhaF5= zkWg5fy4AddQ##&o(2RWYr%@tlsEPh=4}#t&7+Nuli{+o(RGCRDtTGl5)5bPX z`&U!Y@kO3zx$gW@Rew3moBI7-B7kpk(k@oKlyJRRN}wXupi@o4dJ6*78hVjp(;6<` zhUSnDjeDz?A;j|NPc$j6N=#OY6PKRqOv)T3;EYfz&)?sRhBgW~AUAIcj7XIOhj(Lz z=NeystfcE#)xD7;L_gA&#WE0Cen2#5vT*&2sJ-?Z5?_ zgH9{qD0!XvcX<-rlZ|6l9*$9YYcovN!aVywQd7xVuDt>dF6wARX19eCcBsMaDUQmO z^U?2yyq`sT(5#TT@E&&Wy}jX?uzMh8ao4cWakb+U4H(6mpu#_?jN}q4QVvhD<^-AW z&9zVt^sZA8>U%z0lC*WvCzN`T(rFqP_E&!;%H4{8xV@lHxS;alHr!9ZM#=4MHB{JP zCEV52H|z{M*U>pBfd6?f z2UT=rB+De4izxgFlJ%QreP6@R<=y-wHN0~>gGqOzvbpa#?z*nN^xRdBltOIB=2k_1 z=S4~UZkq0emj-r|f{U(SmaY_J6?7>rp5nNf-6pH-K3%yZ?KCUK|6*U?Np%}$ik_3x zcgpDHxHGN2CSTYk;B6z>&aMUhCZn0wm}Zi-FTAr3UH<{rJI>BM0AxR+|1_CwYy6(`SgG5S?H_pQt1YDY`L} zr$`48@q^3BK16Oqb___=e0$u;J8NZW-LTq586)+Roui1CBZ3;5cmVpwO|X6Jsy>l$ zv=M6>-{Ds2R0|DWrzU*O__#yDUdy#SgbWnSeY=%V=u);X@GvAZZsX%SJ$@Q# z`x5ybl+g zd>6*CQ1yH$yHw{bEz71 zZht9yB2;6c^o4AoT9x(Wn+D_3KS^D&l1neoqzVE>-&M7{`SHJJXP>^ly|Pr`X(9#9 zr+_29#HEr#Pu zC%SO=0)8Seb7m-~qJjV*BcV4?i{)P{0P;7uoML{5aRxaSZJkRI!tA^y-W0#O5ShIT 
zO^vbX8@q9rKeg3_d^+y(O~Gk3fe6;F;fta7vA3x7J_Xzq?9^n(-w=C3D2Ax{->bMi zxMw7`&cVTHnDcQal+N4$-!WC!zx#XNRnaA?-?46F$i*MJU4!tLj&>GC#%`Ab>H5M| zJ)yYvT=prNPj>frE4_`ADW+E9Pxdm&W&PYsG~*?2yy~-ixueGLbzho{ttS_;eDteW2i4b<444Ky3n+6E7JsUwuuF& zgd8ig#94DVn&a)2HwO$P2k-xK=evl!NA_MZN(w!$7REPkXc)I3L124BHm1~dW>>D- z3=?nXZ()GUfpJ2?kDhVBSm0p#?*YW~Lpq$mEUQ)KxIceuXKdd%zdwC>Z?Qs<4O*(; zXMFk6;pP^?giIYRT5zxtVB~)hQ_+{S4nleyksT2f9YB$lic;*&BBGFI7Z>m!$~=Tf zj-L%RGV+!6Y#`o5$%1UHF=6+}%%#&fhyg8Lwbun%U&A;2_lwC!c2sBunEmTI;fpn9 zD?l7a#Wu8Tnhjm6;Lowu3f8MIKNnjqk)NoAS|0e400tp?b=ypJr^qL5ZbSr$JZH#5 zsYEHtfd@x*tk_-ESYR9$Lo6jLYpbceod1(Tr>F?V|2>RvC}bEe9vb8#Qx+-dbs=Ll zdbZJDzUEQ*n7T$fUd7~{!5903mjEdXFxd<97DK$B`S0E1>w6dt?n(4C#{ht^;+`8j zj^8yu90QUalXx5Z;x(NiwxX8$^r)p4-p7 zqEc>aO4nb#!b=(Qf7{|EI{M3PtZ$IAWiz&oA5&n&dTyB-otnddDEVQ zA;HWnY;_lEPNYnn1;3AK*w6)3#p0YkL(-k-kj2y2Jz}T2t%!PwG-JyrXx%ybuBc%` z?}FBf=~mxLdlH|nyKYN^v`~qQ#a;0x88kl!?)G`m2Ld$-_K%0hmI}`}DRFwi*qLfe zR$Wu_#_yOIDWj)Ptdmiar}K*?+)~F@e(&4lANUSA-)&7t|L=FJfj{Cl>*pwkw`bxM zX_%X4r2hrTWVvPDkgFg%IqjGUW9Qc(n{*vdYQ=JNtT}Pj4=zSPF6=M`>}Vd!290*B zD5p?RP~_(3Lij?g&G^-zm0!E&<>AU)i3gy~H!1>am+XroVtm*Aus^?*YnBprO%30Qn5paDW&G>CPqqy3Gy}Pvd(TQoytbV2vVzhV41@H~RocVQ;9@@=;TAY>A|cT<&4lpOv2w06DB8-YK#^3ThDbgb>4 zFaK8?oYBTFZ`1v?q*hjKimavS|Fe|an>^rm2yz}nJ8fg@Lbhz=cwq2 zr`FpIeigJ3E}RY-czpb*LBBNiEZL1JhF9{gr_gVi3`f}xz1=iKE91-Xe`+U`H%Vl- zXvAC+`diM&9rP*qmAz5B_DTK86NCSKI1cc|-!d|QU-~c*C(;T&I{9qfjGqJOk6&7s ztKewF(4}+L%aP$!1=fSfkFTxTk`R~bDG-iFw$3u_s1!UIyK=0LVnq-6FGdu2ca8^f zxDO>J5ZT&Y{_bK&bbzI`{d^pD=qV=+YM(vj%Ma%}$Q*fnpE6Qz^?z%Rs6!*~9i~W( zt15Cl`!4xcQb@nz@fcP}&B#mInGl<Koo?SI4f#>jp5(PG`fOeI{h{$M?g{Y z$jT{x9L)oFj-}Z;e0%ay;=wf%!qAp{J`aD%a9O495-?5K*9I{HmpPZ2+7aUMxyexH zKm73%K~r@{r}StY%d?LX0}5v2)&77<2YHy#24jQ+@q)PlR4%B-0L}-yUDi*I9>GnW zTsacj;0#^<*bw>0-ySdYWgIka|Jd=B%ON-#n!p-S zQPG;38X}wg;kQyB5U(w5Z6KSDpt`w5{fli@56SlU$?4Ol3+?fMlZTG;b4`cK=T}QQ z-E@wQF2`xo=AaOp-0&)q>aX_2_s%`(jMs>AgDqgcVceO|v%2Ph@B)MzwH`kF(qU#x z0y(dv&vky0GoAE`2TFh)YNbgMDW?o=LPc6rI53GD#(Ey?L`GhSQJ!g)2FbdgPci4? 
zZdCG+60-{N#0eUz5?u`dU654S)k; z#yOh{y)!d2iBAuW?iejmwgoof?-fp=(sFbZaHz{4eI|5)>QAhtXO5PbpkQulYJGn& zJRb-*4)&oHFj3q|x$e?5=yV_s?OZ+VW50hQLR{~`e?HVU`Ju`FDy%gxjfqMk;_|hy zCZayxjxR`J5S8h2pYqWK0uDVeFz^xYe-^gdhDP-Bi=)oLxo*ID1Sd9|lrb2zP(T?C zWTpx=dF!0-9=`7m@dT%(f!K_JRM4TPcAk}rBxFd45kk~j-ce3(Xz;yuj(i!bbl50f z5g-3MhRKMZqnfDbMKtEsHkvczf11JFbiPM2WSpz%si`YGzNVxCZML9)vsHJAi<=v8 z!snU}D~r|Rh5I?mi#@0QO0`}&-`gZo2AZiB78cN!Im{dq9v4^PvH$YZ10Ua$L}er7 zB{+2eGlg!+-1JcD4+WWKXbUb@29jPx;^TAVvL;FT$=h9qJ202SL;185XAhqNH(2&y zMkw_fY<-5GNB<%Zg_u-6>(x)KH8wtoZ?6c9&kmzLw2CNZZbvWm8@trMPP9Ii;g|as z^dZIdRLA+aE1wT5#egd2_zE&PnVFRS%)h#L^)xlFhLMhmoNztGaac?CegbwCbd|n$ ztGqM-9h&E#9~&}=l6VOkLS@f-P&i4TK59T%M1)Q9<)8gkKZhFb+Dk;qIFjXY1dZq` z3f!eycY&UEjUQpuM5ozX=AW6)k!QR3{@99iU~Wg5_YiZ4Uzs=2w!Cse4|y<%(ulGL z+UFFu$p_)1VYm&(IdJe6F9T^FXw_iuQoiEFD(Ed<{%O&DElB?Ep~95>Mbg`JTbIS& z9*+DN$!)W0G^gDw+s5(J|9+8#gSPH{{Si%x<{X5!y}h9B>RfB^ob%oP-`DmjkP%m= zc9phAggs6H8Sz(801u*;XdSN({F420{HCy%~fY5CAN%OJ{%B;%eL=myEP z{@x%T-qv4w$bTQYH%U@I{(Gjw`K}hPeuYQbgQUvb!|z8+wEF{DTV>6+2xoSVKet#@ zC3`bX^+RJ~be}#&d=5EG~cApum|r=QWskZTf%}z^Xff*{4v5G zwRuS_2IKhoaL*Do==4%EGNw4!;EP=r0#yIlNc6ad{;C4`rj;5*X<-^W@aWO z`M3vPyie51nowu=hC?yGwYaz}DA-HpQwiCxtLMl;HySJ;97UVV?dFCC%{+_q|CSd- zal%0nFoPu@dtFRU?*}@H^Fct}`Rz83FCe!HcIabMwphRJ|7H`&YG2D`je_Cn-sNn6 zHbn02o!|4BsQuOZzdcL+eA&6Vq|EyU>sjLTqlZLJpR8()5pOXS#6RH09v`tQv{n1| z7$nX(0z+v2n;a@iP`2`Z2k$DHMVIi2**U4AO{W6IjT+-Rd104sSiy{Nuy2Erul{d6 z$_=D#PGn5};=#3yAD4N&*N|A|O~wJ;y#EcnZ3H3*x`Cln+Ael4{w=v@0z4~u6?N^RX$H2#^M0o>@N?NF==A5gEBEOnaasgBS&cVnJ ztqi!6qBmoXo7gerSq|^Qv@QmYgM35>r^nM3Api|P#vdiR!?V~6gv4?ogf>?XWfN>*vxT`u6<-s~Lco2;qQSA~B{;IXP zc{W?|MQSq87XxWl{~1zS#Deng11Y1f@Zo<(0zDLV`lO7mE(=N+QXimXx^6LMzZ$MW z8uBwQKYCl2Hboqym1AOJKq_nUgpmvX2(w2<(a*h5jx39GUDWD&Gk^|~6I z{z&}qaK{YoQRgq8bDOw0?(&7vgUDer;vuL=#m5`){%5YR^sS$rX^=!wFa7s{52b{? 
zwn5smzrf$9fvfQL7LA>~gy9wJ|4b%UzV)RBIK^6~j5wLP_#EQpKuPOAL%Qf|F=0w&^-?^#K$??*zgTh1N9sO-PRV( z_>%M8f4lcrd=<5}mKO6{q6vbtF?*HjSnEqrIRNTQ=%f>*_2U06Qu*wE--l-Pv3uha z%c1P4H`p`qhMn>?(O5<(%4xW*#XtS0jM-ZEP>NfRC@>VR#(M3rz>TY3<-)ImrYH5i z_`-iT%Q(VQrik-Fv72lrI41daBlF9GAbbmTqQU30UhI^2{tOMdZOnE+i>H$f^U!~0 z-0jp9hzWb_l5^Z;rv|lL7%?cZF!LO=$MqiEPwN4>;=Ocae0_8CU25OI12%_@(5Fi-{Hm8D*m#p{M)w% zk9GC{x+eO|*UsO6z?}9fes~P-4$3fd8W4a55{KrNmKtXC-|HU#tzN5|X>Nq$4}=vD zw>OAD1>Btits!-s;A2o!{4Law7uy?)xy@*Uyzf})BJj1bChmV>pE-K9r^FE~Cto7~+*q10S zuLY({KF8Wu{SHR{cU&bD+h2-KwJw==ONePn@yPM{lMnX0LD8FE@hyB_{4DcT=3lXJ zN};Q>WK{Ft?qhvxmaUW4EjIYbPlo35r+Z(=-nT~V?fW$RwUR58EYeZFgxIY-5)G<% z4mE(dJPY0{NU;%}8S1*RM?8Ut((in7Iej3N6@K9|NTttI^Tg7Txrl$C(kd7cHZF7N zvh6b7{@%8zydTB!>A`t&A7uQ)YoEg3AN|YagrgB)IE~F({^#Ye@c^gS!A(fD%JR$# zB~q}GZh!(hiK5NivY==8b1v_8i!G4)hP#^dg9w;Yhs3Y zGj(9;^qEn+BAxh$U&tx{+DDgpW0*z9JMMwH0trJd@QpyXly1bO&<19?91T4epVl}f zWn~*{YZmm|^W)B`3GqWvV|vBfDINIm%GU;MzA%L()r~jDj-O-+aRv$}pwS6A-~cBv z$ONdTPMkQgk>f6!_~pwhOo5$X;3bSNRc=2t_kwcyvm3BE`6IQD%J79z_#m}#AiuaF zBk_iW<2c;2wE}@PAa-Zo8}}f|P(vvED+CbQ+S}%pJ`xY1a7cI|`FLII-!%6Da zz7uP}?*ck{G0I_}9GzuuZ4J_1tb31Y^i@HQ3tYDqvXa{#KvDvFg^@L&YX~unBt9{q z;w#ihUoeBbSKW~#uWo>LpY*u;>BXZ(PkC`-@Rr9&ex(vq2;1n~L2t$cv7#IWf8{scI_CXJVK5JZ<6ax8T)`4B^_$1A5Kx*l^eIEvwd4_0?sdqIrR z6p)=GYiMf&t#zR3A(AFq-C!8%e;azN zg$UmyA;DfFif%{a0)gkJYXRgcTc%=>00e-aU&Fl0Dc?ZX7o?=MC{URjnYS?@-AVkL zdWa>%``keelglwnzam!e$5UQ#Bx6UeAA$f$NG_pPVQyaTzX?kbQK zVI3Cy-^Vf!nG``i+CB$iaK0Xt16`l9J3VXXAU@@l8eO1lN*A10T4c2$8fdxpt=HDq d%VNb32*(aMrcYCaHyjT1zO3@yOysjS{||TK=vDv# diff --git a/docs/tutorials/images/barren_2.png b/docs/tutorials/images/barren_2.png deleted file mode 100644 index 2042f894fffd180edd5b349b27437d0a7b94ef3b..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 25451 zcmb@tbx@p7yXQSXf+SdQhu{vuEog8CcZcBa?gR)^@J7+iPq+jDlG zefF*M)>fTA=%S{p=I-wM>Z?EB@6{8oC@+cfj^G^t06>wJ`l1W~z}8};SUxv{q?8yVbzs=ZC`TAef`p|-n09>%K5V|KCNF>EVw#@4UE?12mj#H({rmZ 
z9eD-W?|Q(ey>nmpeX(8HZzbtw+=q1M;Us@@i?B0Ea?1w&uMGG$JJ#0V3!kFe&vpd}s zFZn%gnk=0z2^rY9PnPYUG`EqkCy(ycmyl9-2sv)|B>ommzNdf6xocgk9@H=m$Bbp_ z;Jr&9d=$MBk5ni_oCfPtbUp zYHfMka07FX1em65%b(+oU-TOo?6pU}rc~#^Wl{L^(|k?F^%x&xC`toeq{sqoD|YKK9#h&pmL=RNa}pchujaoXeKlJ>flOItB0D@hy5k(CsKX0N)dv z52Sw2!C?3_uOc;5nCL`bT9oXC%Db27%&djCAWK_Y7%xlL*f48dQd^X6UE0{Rl3@>n zD9hArtZiF)Jt@mC=AM1FHuLz0B8q2rxFCver<}0hHZKN^SWcZc2eWhZv_lYVB{LjTpg8I+v)@Nu_&+SWVL)Mp?9k)pa`rA%) zLZJ>p36wd%KamoXuv`LQPIuXL^rG~$6Q*n+l;W2j6I8tByq?HtgQq@xXT|4!o@WNn zX39?n_YS=XS&Vk}#l-brUd zG2Fac;h}LlCt>twU!u;9oJq}R8$rhXjPg?K7Eu=O2X)j*%P`N%Rv&gF>LmsV#QMDj z+%Dj+o`=SV0|C+pygXDeb+Q?(d2xd}Z|i`Ae5x;6XB5_ml8}Y%b&G@mtV9T{-adeY zk^Ec*tJBWZ|A8;uW#~9731gFeDa(t)k2B}Uuxsp=-%UTQ{kqEY^;G`6xM6dDc_T&% zZ&;j}hhGfgNS&__e<*)dm!e_?la8ZoYHZix`_Fx{y*!#Q!xaVOR4_snUHDrc4XGKLShcZ1ekfHrfB_4DP`y7e794TY!y za4rjbc;W~I2_tERkh)*$CPvD5dfK|yh?m6Pe6ddwB=WpQ@A=TMW%S$I6Jge{TeEeM zxw%&q)H(q0GR^T$aN5De|COHaac8KVJ}Qucbyqk%!mnf#7$-~{*0{Yzr~FaE=FRZN~lO{OAM(+UD+0MNt)jvO2qq~!a!qZ5%sH9hE!MlnZQaoMTED@mK&jP zEALP2OrkZ}pV7)Hsh9;rUr2r!6!)Xb{f>4T<@tc8&nhS*R$KoK#_UH!{T&QBhiG%0 zY0`BF@8w|vXd+HWilGIjrX8^pjyBF^5UC{1blJ@-0 zMgjAl5)0i>g$Z!wHzA*B)YNxC$@AUHYsml2##%52(i#a)3Zu&9Dn{APB;Utmai#a; z0A1?T;Z`9Z^K6J~Ax?7L@ z`|>MWm5JA+x*ZF1yHasUV1w$x+`zjn@+1YVn($$izB9ZMZ`CrJzT7w>YzxJ~ z@8wUCrYj>1 z@nfUaW};e|2K&=MH$SXEs37Ed>IyvWw_dwhM*K((d^IX{`Jm_vJy>sP2y5ETCW_3fw~>CBGZ0*h30NBw@!fpzQosVZ#aQl{KW~4R?q$)Jbo;m#f=JLv8U}ntZBaQa;Gw(|W!{SNrKdB3l64 z+{1A2he#o+H|a}kN)*4tM?`}{Jdx;W-TJrIR<>Dky-Hu)zSITj=!0^wZZ@y*rZ4nR zzWHF5PlNachnqu3CGhCyO6RNt|RXJkiJ~$7z|YyubPeh^aNYl6(a-aL$@o5 zTPif`6p;+4K0=K8kdv3s=!UpI{eZj+v^4yflv{#v2eg8jc?TCj>Nj}u{Du7M@HfAM zaneyi_=ZbSmc3L96sHZv9->k{mL*}7FEZ6(aXt8Moz+y+@Gs0S%#K8341oxn?P^Md zxg`14@Zu#{Lhit?VuZSHNj(knIY1~>pF=u&5~d3L$}um-My3X^l~gDf_u%V?Jv~hQ z{DNi@A8CF1yV5%Fe{cL*BE$nfy#!S&?esM8Ohsx-gL{mA8uTq-E7Ew^^L{tAl??lRw`ifX)GQ@3 zg|Wm-3%9r(sdR~|er05~6}*__NnpVAwRe(9@a`swWaU}J=oxSqVH7*6(3(WpNBWaM z7fSC3HYM51023;u_&2fN3aV%8QaYl@YQm22*MLSf4xgp$gHwW@G8(^Cj<=o*-K;Pc 
z{rG}d`pwy<_w9~FduEn!U!0Z28a^>qE}E;8jp%1`2KPd*JcOE~6YNsi6O3g!_^v69 zELD;t_f{vaDk=PE-YCCXj8bQ@H;wJm3yfVM0!$v!ltD`PPrvwg#v)bnDyip#cuJS| zE%y2HcS7(J)M0)w^oB>Q#y-JVU*BJ(dgupUl=c(n@(8Tvxc)pfPNaz!g&Ij9P4`5dM5Ra=?q4J=uV4gECJ351ST1 z0A*$>Ob?DyibI!P{NBOL?_3+XH&XBwEdUM$dRU3?d*;zo&+42tCwbhX#JAh&B5TS( zRi#35m@jWqHL7(sny%XttUPmHDa= z9bMv4Kk4K3d(+zWt(!KTR-wkrZc`2; zP6v3YhGJG1BBTR44orMGg<&t3l2eR(oW3Od1ribvdPRqMs-zSp&I>z?wg5kHCOhJ%(ygPe5?DBFH4uO^Y+@$W z32z`HY!p9)-zxr0n+Tvx4P1~iGIAJw9~mZsb1;nt_iRV9`wUCemDYOy$+fC+eiKC% zW(zmHvI{T0VO$-JDCmtVD(L+#3f&WD>b-H;m+;i+U*D8|P%$`*pok5#Y>n`v_4KL> z$CGI@6V=4KD!vjU%i%k=E4CH>;`y(g@Fogwl$So&kaV7t> ze}HdxVSbE;D3d}zF}BTD0StTGciRnZ5%2N)C*bTKJUqdU-(hoCcmZ^_vDhME^IT@5hTuaOWo{MIP07)(NU3onKbml#4J!$DXXLR7`GB>DHxG`I zL*Gw}qC;c?q|)I=u=Pi$^aN-X70*XZzoN@2FzeO6IrxUkE}WK|8u)`XHGqnPCYPWw zWHIs`@|S?Z5KTp_$hP$zS#AAFK171FFj+09Ii9%ew}eXXCyP#Cio}%M;<-jwXgXCJ zkNFNfoK6|XuE!LpVbO_W8-e-u+l@M}%C;Od(cSVP zkvuXtr(V%()sY7(dFOgXZPbaRCWyi%^xtEj^)SID1`2-HteB~;MF^*DmB z;8Hq+>VsMZXua}K2rL8atHt~wii2>x@CfLwc&>dT;d)1~FuME6G#4xq4k`GQzYtT` z0xA_(352h0kzzJe#g!63hV`B$)$Yr$f7}LCDDD=1HpqHeiCiu$MAzOBAoncHr)aUE zz>j`YshO~M9isk;!$WJBK8x-YA4wqJdfR9d>D@DC)Y`+tR(>sD#9?*n6ygA1)nJb< zO4&T$_w{$2SS<`G{2095H1ivlg*@8C4`F5z2k6+OYToG<^bTnAlb`v7VFDpP$lZB+ z*LCQP(`esYi7D=*Vx^l|nQJ93N$d&Ecdc3BpWK(s?-S>61EhPjk9;*62kXWaigGZb zS04R%XE0|!Q*gceSQkii`NlAG5`&MOK7@>-Ft^RLf}m=G-WvUT^`~j;Y0r80I6K@e z-3rgipGc`8f7ZZZG(N5h3Rv05<))YA$dYuydE2K4ItkrK8*cXi@D055>akhEM zd?eI@@ncBnCnyfzop}yYrjDL^M>v}JRu(O*kC3uuJmH`Gb6=Q!RsK|$vzoM{?4-}M zX32@lUPn*jSD?!n6vtT9{6hLbsjEa7>QF$BQ;Henl-rF6YdgwwI!xvkh_2yE5@4mq zRtGzPaaw~@bsuQ6RYL97JD&c^~XN+fL@$N7eQOF#SB~0~X%J?5^v% zwauG_UJ+uUx4uDaJ~_V62T63M0U0y54(_ncsgu5<8C$=_fwuT<-kf`AA!(BfvHSL( zNL}E;-JS_Ny{FzdLlKSvSykQ=YZHBRFZ~npYy2G5!-4O_G;f0E!zeNG=8GK-I6`!- z7VK?Za)W{uIOVf};RHcN%F@N~a59u^7qGs1K`7IB1R;57caF`D-@iDYF;D7CH^U`1 zH+SjVe-?!Wi0W%eE|63;8GImHhmEe%v|#`C#mZO?@{IoRu`Sjk?RI6LisVvD4dttV zh1Yj`u`lyZp!U($kA1JHKC{_cg~O7zgd!gKo5@zchtMC%$LG;uaanQa=#X_IU@+>7 
zVqOMLwAyPWh4UHx&GGSz3Z>-Im2d&O9F3Nxo^wM+E%pUWrZ-?dm zX8levr-AL}FXgY}EC~9q1D9y$6Caw8W$Fy8-Mk@K(uvV4{57P>ML;VIB1+tg%EPbK z2xa_2IX>qYaUZ*ZTQxur9qN1Aw1zj>bZ@_wg4xn=Vf#}6HuaM*xAfT*UZ}#f)+wWf?$_9R?PnIzuf_C*w zoo?Q9T@>?unM=Q&t5u%cq-U{OU}5WWYgso!dU14XZBL9Cu=7>@*5uEH$EOB&`Iq+66fTx_TXb|4Hhb5L7iCGQBXA-W7>PeEhpPzO} zL5m;$B)kgyl@+*=mOyBcF)-o18WgZsHN<;o{oX~8v*h+w^R1Q$QODPEuCm`D*ylYD=Pxju~R{wC#D>3F# zqcHVg9!x;q&(5=L^p+&;OCj4a=`jiT;N|5IFB3DXib5S38NHs zkejSCoqhSd)to^KA|~A&2|T)N2ck%>y|8&1@giA8^X~cCq6pQJpZwYJ zns8>?2dM!H@}W>J?RX5c91M(iy&}o|9sSO1c!@INk>4%Cie5H9YNHuHJx$?dgi4}2q~Gr` zxx0j)F2~Y|6Sg9KCLphcvprx=;2KqNjTrnG)7>0-O_8;(seiQ$(89L^)Fc>LL|gd6 z{h|`R$iE?$Y^xj+L{glcH1=-v)^Y#E0O>)X7>`ckWM%h8e7Ul!q@_Y_;1qkI_s`0GGw==JFUY%m ztgw5b3FS~;xsbf8 zNmdJ4-t6$aAH9o_Vo^CnqCAk8mb{F;X}ZE69Mj!?@$t2h(7tOuMpEzijc!cgJi*a@ zb3=pBe9`OxrQ{oCS3yXCuE8WJj|78KBVgpv%$0VK+6wXL1zfeDg8N*nr~14GD7$b zOcNp+Kdp&~qAHU-i8>1X%n5x5`8q*OntLLvuTu$-T zD=QM2h#xCq78;L6}P9Vbk8L%7`6D|uvFMR(=H@=NUvA^P7lLyZ4 zK`&`hGelqGPk5rm9;MM8h*WQJk0@;;)9#~o?Ihs>7S!LnmP^g7{JKyq=jm`)D z^3L8Ys=hf!CZFazsd?K~_ubcSJ^01SF1Wnm(Rq924_G0RGs3y*;0Vjo>UR;?#BPEj zU~}WN$12M}4$c zjcG+l=oLdp$SZ$d$mh~D{MFYPsvDET?shg{8XcGwprWT-@c|?~`2`PQKQ_2`u5P4(?u&_{hD>R373LzZoJ0TelEOn&8y@BpQn_ zSAYsLQ2~xqOqnp&$RdUPDUS1+Ntw33K?Z7FYDX$F!5HFejM6Rxvz*56Z>WF;j9-em zpM8{c+`=bo$flF(kUzTLYEaR8As$I0WwJZ~fD9o0MMTwO<#gR({P@Oq-*fcy zFTTpG0Q$|APiEV@U-|24sUwNXnBqC(sL2opD0`^sN_g-nynoPP8`f~2R*jU~aeIrI zHFs*AkQ;Le$4!vwf7r%@o5CvbTknTc(01wnO2aPvZq-;G?$=N;yWhcz{45rH11|WR zkyQ9>L0I9P)cKP5u@oOh4(Y3}pFgi=r8)uN#E=zl*k3c5GY5Kz0D5B1auK1U7v6dU z0Q4wPPBM=DWNonZ{-e_Od%EeAHiDKKpw@LBD?!n!8zoV|icE(I=+TD5A4;Pf@eq)o zb2$x@o6DTqZ>v(#?~yiOdAH4j(pRD`*{Tiz9AOmOuF90APqkL@XUWsC1oUf7Ic6@~ zNNB+TBF?5cn55$c?9z)`H5t{*7pxlJ1(CfH`5psgko*1!OhE+tSTAw6d8(?rU;*+R zj)RVwul7r>z`4ZN8v&MAdjPGIo_nhev6TmG_i%{T^K37t>k#DJO+;`AbMCDo{mP@|SA!Z*Ff zwT47A&^2T}OuvCZrawc0YvPM0cAQRNO8rSe>_a&lxFN;~8H?ZS7spH`N#p+UltqYdcSb+b=IDj5KVO!NbCEy;qknme@+{1V zs??1tsYmwGCaZxbH9IJxGp%SjVQhYcN`A=k@)|O@UE835l@fA3?){o~v-y021)OUz 
z$5}aj_1U!bxXXE-a~CwJVc`Hiro=v1V~Il7wqS-_G*KUHn^pbG+c`0TkO81?C#p`bSFS_(1OZS(p-_5_X z(C}Y3eP6%(29`aw9)Tpj%yB$Ic-9!lkA3w4Dtn&UPhk}ndB<(aGF9tEmTgrG3-d42u36=a zwQRtItjK5P_gFuBF^boOismdE9e+9!4e|ldK{ul5XfOc&wqSDTTLgq-LK{kXXcN$n z1Z^??zFVnGX!9>!aXc{p|6QjlGALYlsA1a|nsf^n)XPnu)8u4W%(V6%yUrh*Y#+P8 z9!M^)=K1?oO3>ZMk=o4FnPjR(G~`Icg7bckqkTD>HX>1ZY`2a;taz@}W^j2qt?D>? zpYt;E@67qrI|Ke@)&#a{%cO3>kP;1g3PJOP3ro%`zxIPsz@4AdAHyKqsMlQq(B9B{z5F;SNViT0d`tfi9Kq zot*~pXSe=v=Q*9(6suJ?&R0E--fG+G_~trBej%Y|M(alHiGAlKSB-MD#$&GYi}AWB zu?9!WbeX}Gc}x9QJ3?3SK316v4SgGL!ot{WGkNGRIh_5oMx9 z*lx~qFvV;u!*`nKSui0r&JK?TLPs4SyFMjRpoAsfR;7~+I`+a6hsJof_hH)^&ty1d zM$@BmlClillIxNLzNZph63t{f4x^URLSTLFAGMz%-+v&&U zh$Sw9E_J4r;p<#y@RK!DzL;nvnnoU!XuG<=7o6B}QZUI?gvGo&zyXY8AO~4$`}dqk zqTX>4D0<7L*UoWp0_*E*F`=P5B*!<#I7V+qHU%l9V~$PrbY5|!nS@Tm6g7F9hZ9A~ zk%$4bL!Dx(m0lzruj;KTzOlWm<-ICRHQ52L%ya|vE(Ap=HfB@k;Jp#?W2BPC2sI+S zkWxMgrJ!PbNYse} z4=w;-7tEA!__B`GU3jdW96e|d*k%H5rZg9_Tdc|l={TI5)>GPKsZ%G+?)?!&YOeR? zYla|-G~#|Zss)8R-NqA4(ir%J>&pAy$)+GUX?RC<>)3*s;kpVvuI-bPamUO_iT78E zK&p?(Vqw|=Bt*V!J-AxGd= zN1Js#?xS+~2NMAj8fkKcCu;#PH+KAro1fpt=F9VO_sg*#3DPC^BF*GhJ12Iygjoz> zXszq33}-11flg84>E(0R`O^8~P0VVMn>t0d$5wb{_rk{Y=>#`;p5wHt{miN{Vs;ot zdh5;f+aX7rRdt!l%42AL86=lk|B?`S0~#fT9>KErIymFt>Q`S@ym|lJ^w4&f?ZN0H zk6>A^-yygUi*~9)cOn~18?96!sww7R7oO7;9$6Z_!;JF!q05i>I%sA0KKlN5t{rR7 zG5S6i;=nlqA+lrFC+CSeMh5T_H16~gGN2K)5b}Iuu~1@gWiDidx8V-&RArLVc}>IDe6TAirC0bpRo7S23lZ@6UqDl3y- znp|5`!HXGb965=0(HVR=$6-^=pA^WE`>dDRN>mLRSkwpvO+D$PY1nesS)+gCps9!GOAk@5-Oj z=iLWT!}=;0MCUllX)Mv-K<;(DrdB=X74?_md8H-0H_NwquJ>mgt0m;1QN#TAlu(qx z*0K1>nux%ZTva34AvjFpTN)3&)DTBYhE`A=p0V;)gKe~~qTyhhLQ2aWJJZ+nK{ z3a?sQg@^#%J#vOThUAvJE7nq=EZoSGQ zo5KNw)Esx`=U8mNnNH?_<~=1WwQFnoN-8}_M(FK;Bhk0!~{InC!f@Gm#z<82tt3a%w%5HO#F7>!9zCrt-0D%ZcUlx6uYk`WQh8*md=V? 
zDr{_jRR;HKvaaGYtPJ{_cE(Mo^cx-2Imb-kb5hkYPG6_(stEdUMASg+%(iC;)@>s4 z(At$9cd8o-^b~eGy(JRRTe6f|HQ~$RTCKLllPDdU;{Xz*#+F8iK9c{1uv~HE+&54} zo91`Pf;+%ALxpnKzi`?=I_6(E?O(e79pV0qE}~*_{2V?I@=7EG5?Dp{~Z*es|p3j zfAI33uKzjUf5!G-bnztq9Z&(*kYhY$q8KSp%-3`?6>u_T`1xZZ{f$|=8Ty+2f-X}%@jo$rLtcc$`Sq0gCSsWI(`PB$+r2T5pus)#~6X@O%yk;34QW3 zrQQ+}6<``+oapb^{`>g%A8OY1lqv0n@B8f_K*9jx+n`)+f)d=}0OObs_|_NPWDC9} zWC=WbmAhKwjDWsMc*tTAloTVkd1AH)jE&OGxK}a7e<)kE>Z*S|aN$du;Nj<=b6tmY zNchdvF)q2%B#ao>LJ?mKHi%0euzKH>Q$)85u%R-K*!L`t5D0`K*7D6aql308BRmlM zN?nD(HDAn}O+w+mr)7Gm(eMS7?*k&&8*ZdBEz`5*r?k0!OO2q+-q-j@9+CgsLG$5| zQhL#j=5zMx54L8;@`q&0^qB3P(grvsERZF=khZ|5K32xgUnd`p@IsH<9`(5Y#>0E? zX<5Ng43DFom+XVG?P}FtcO>;)2&*GxP%@^BqLx+$f(D*NW6fm?m5tK(RxTp7l1C79mrl*TmZ4;( z7Y#V-j^?IRysxCnQzy{kWrdE~q4aUtf+HedYRPr1&P6Xa--3f+*PTUgiFkW8Op>8QP%=25&5(cZI<)AiKq|CR4q$W)G_(A zh_EA{t?TXOZ)EX8=MEDoS}*!b6YFf-waw$=Q+(%_2NQpSI#P=)9mNYdfX8U5&%-ca z2{f zL2m?S+TMaji5Q0*t?Uf{;bM}U|0gc)7sH(^WL@voR~SS>gF1`P%yM~)`T z^m`|b=P%WK0iVk+R}{P_B6gCtI0*1LJ=O|%SI=aV{6-$UrT=eCexpb{)~?G8bvqoV z{wqxGCc#0#!ge+ytfw(Oe3=y?@5_AXKz{oltn8Qh|HR6xn*YYi=@W5g9Z#Fk6S}3H z%_^dST3tfPL`yyRA6KXM!dKrTryt4i)h*JhAge>!YJulr=I>%w5y!bp(=2Q+?@HH} z8k1?&w_uvKh&OW8xqV!KPikU~+1|RK+y1o0Q$>ox7~BLUhv?XC%}W;*H0CzZMCI$# z79}jsRG#B~W0^2zp8|-ueT%%)4aWV<}O6gIGIv(ez~9Rebx$X zs+Qrrq1il+2eNVx^L%$sve>}iN>zP*5=~gW&Ub!^J2f>$ck{p%1;>V5K!XDB#4bD6 z&{hK`sU#3V=g20pS@pYZ2d|4L=|UM+qA^+9CxMdgMoC}wFM#WI`*rs6pQUtNfrd!^ zpL!8UPks-GEL{T;{I7X_Xhb9&?Svx%QND8k>sNj=$nNZwTbvS8%OI2qXj-5Cy!4gL zL=*Va^LMraraJ*AC&wg%$Fs+fq zd))gpN#1?hywQ83pffrijAe}44x`7s*L+PtIY9%`*)ED1U`1vcD+^D0Hjt|lv^v>H zp-0K7>qsobOq;U98!XCd|BK+W$8|%_JVabA{*Szb?$&Mbuq;hHqe66Mj(w~I0z*`~ z$)Vrcsxpd7ZCC3`2awEDmMLcA3|5@(51Dblt?1R`WOl7$MIZ;sb5w` zE&2#nlcdeMd6OSg0gLBmEE?RRjJL%?<>rus3{O>|*zov-Rcq~gkoWjADsoEV_r8$O zL!-R^1=8cpU%&Fc(MwRf>TCV-_}^o`fOa&@^{7q%s5pUezZPd5DlR;+;!HCOXwxUm zu1BUEiNLq+x}Ru}`}7E^9@e%!`NEx5aKiH~RHiKbxJHJP^{hfCswOE+cng7F0Vcv$ zu5L56Y{*|*bw>|%eH{6+nv`I(_tT2#k`}hQx_k;;%MYO5i@%z8t!7JAhH08I{TSPl 
ztL$g%6$K3?-bKPGs3whUM$lU_MIBn?AYSG=Hm-<@q!DVnha-MPx1Mqr)2j$JJXo55KHyVutLrxYSPUB)Wp_g+=D;`z`awe z&3T)-)}m8uVp`X^#}Gf%;)XgV&TI3U#7I3<);Ca1v)2`0WJxsCfKuHlB)zig7?r|k zK_IRK97_B@HNlOYdt$LE)Xp61AobjwV7`viQ+CRx+Z8IK0BV#c36%J9Wn2pY8a1%b zFWK@fR~oG~D8_#9z%(3umvqqAkEi@MnQ+c#zc~+8HZ z%xcF2wRU(6Pc#1n=R z6d>3AfDd7s0cy(q_ku3!E`c~;Q`1zdC^4TXMme4m3-+d+Q4tZ!`4&8hznXx&L{#8g zx;K9{2Le~dga!OD(iDW?sfi5HMXwn2&YK;WAPSv`F#BKkH_VCbC+q>-f4jB`bBZPh zI3)Uj72>}+9R!mh%FWpvt;SPc_~s}NZ2aBT0ZxAh0r!)&5d9(HWgp_H0>&AtRgQpm zCn{6QLsyVvu&x3=81;r74|(No<=)3m3uhoQ5~`30#zg+xWzRo~=pUVCMF)Z>L=Nk_ z@Tf>U@<^~*&6cAE;nqXV2dIizZSn=%cf4tPWuI|gqJ;(M+;^S-<$bKTHJtXgUJPS( zP%*)y&xliG=~n+)35yPtJZ8!nDqpO!90fHU^p-wPS?}O7jX^6)Y-x-*@AZ~=DaoM0 zT%(5se8vHHUH|* zH#96YX^iM-C1>U2=p;94SMEVtkK4)@%2r%6ZavJj24U6)@jn*#6v;y#+zD|*^Pp1d zj)A4A6J=IXft@CX*O56#0a_ulmn)}^w@Sa+m>?Zq?@IE!j(iys+U`cpAKF-(?YUP@XuH``v)iPQXOJx=XX4Z$ZsZEYCR^ z8xb!P32=|xVL3JR+~8bfG;-R2)FpCNE{FTwQ*@Ph)KN0mp6YHgb)DDsv4yF|znq@veD|l=Cta3<&TIUv0PN zIVL=Y*RaS9Jb4#5PEYbq7x*m;pYDqN&K_QZx$A`<*6zN>dUX3NDKOKXzve>r6^20m zj~mB}%VK*82HWXf-6s>=`Toy4gKdx71~VKJ&%;Ak?Po<5sKx>$=2|#^>y=PjW^9)Y zF^F+&aNCh?EmM^JuiY#|2TD9{vo470C`lZ}GzY%N`J7EmIoaqq*95)!D=>9zhN1ue zRb!Ck(a>Ark*$me33%ParR@&@yPU;*k25ZytDhuze_b6?-I3|H+3Z)fcz0?7)G2ya zx_$32kS2t^OzO%zZ>4$uczvCA`T8t#GkE;4Za=l*(dk3=-0Yvc;U(Suc)CF&1cV{^ z#-XQ~tY+f`5l{$%mM&%^62da<2wYLsXKcG!)Y2)B%`qjFu-*jjWH&B1w-o2ozUy<~ z%W`7n&gMx{ZnD)lMpnM8i{)EQRc=Ydo#SAt`cAp;v`t8c;6sQ~^4XSZgbkz`PZ@t0 z>0Gb9((R=9{3lE((d{_98rh)%N&?0Am&M`E$G1`tlg2>j7uBmkuu~Jfx^yNq>J_%u zx`SS>at__Q*P*NZbErEWI&y|LDwdChfslY58I9AL1nKHo0u5wtPx#MUnKSwZW#bR- zM8$hC7|{GCbLOCx46;n8ly*f3?-;7DziCR4$@D}i)IQ0$k&?yEv-&LA#EKa7q$v|m ziQLCk_oMI^rgWwwkw%!LclN%g{-ll+((M&QwZ z-UX=C&>IA9xHjv92Q6)6Gn?%}V0l$et;w8-4Ic`Ix9azkje>tf%@ixVxlgXx-^}yxbS>p1?@&@ z3ucdv@NgvYIZfHpRI5wa@=K@8uV$zwzgRZb2NgbgTN4&XjaB%a(188d=?CW8&-4v~ ze^@+L7Pl4|Xx`mvaJikqRe;S~ifEzkr1D%dr?h6U>c9Ld4?;SvYKJln5 z8`@_SPnmKeyH15UZdU(96GR5Rn{Zu8T)epL*#rG?&w0G2;)K6`ps5zyW*#VdjpKX0 
zR|E9^!Grk+I*>%ckHVc93zs@sJ5w$7dgM&yvgJ-Xv=1dn1)X}KTfe*(Oz{hVD4Cw` z4;Hy5cSDO5kt)Dme9WQ48v63wz-a&dcI3*Y4_MaHaK*R7#n56Zp=q z$Nek?dko#WpC8(V+WDlc4~h^OD)Sb01(dpA1+NTfru?~Ia~9wyPoi@e11TNnpb;PK z4Zf~_yny&agZQHSRw=%7mnY`0OmDnp#ZR8Ui|zW~l39Z!c1SvY81t1G-_+`CS0^Kd zOFYcZOTYkXn!x9*Jzvc!+(O7wIu&zxBF`69DGBiJ&HtBpUak#MOJ6ptPqXPZ)e7z< z%V}@gcJxMf(mJ>dkc%(u&GFt#Gd~jgbGIGKy*hr#BTo^`97FH8(UsJ7FQJ}5XWyM^ z#gDu-&jD0dq^FX<-v^;y?V^@@k2dE?IKnE{213j=s&rT^L$2c^P%>8(R(UFqyG5kk zB<#$fwl1)Q(n-zNH{PX7H_PyhrJP2FXl=!K=5cnTs?hiu>W- zL`L*E7+2pTFZysHMF@jyJ!5y>I5F{?wqAB;QE1JOw07Cv|5V|Ku)MGc!6FkWV|W1q zah8YczMw*RB~GGTQJ0S_loHv6^8?{-y|rD-YD@mJd?MCqMb{g@^y;4JgCNp zmVCJO{F?@9FI-VisKr@4u;v2twS=}0S?3htNe zE#9Fu!+@j~OX&*+xcR0mQX+c&Q5p1G1&1Tag&eYs()X*u+5M1YorTt2B@iDq4CM^$ zH>tS%ziV|`x@I3ZUH_@oA-U>tlLnPquGnz3?^F+S_d7CXkt`U&J?mB%_rfVJT|e~S zuXaDLqP(p2p$X{<&hUF3Gqcg6kBD&V{cRACm)cwg?e(?Bbr@WCR_`;W`po~|Qs^44W)b#ltt)DW0N zka%K*qq8kGw{$!daaWrp3zU2>S%GsL6G!GjWY9Xrthe7iOo5Y?La~b8Qe**kJ!1m2 zEGU#h>tFJ#NMPkeAp4=yOiSH9Wv62?Y-}RSePem})@L(#a;X~!%6JRI{_`l8$z$hh ziU!e7(8T(BA-Nv=c?lX}DU4@+weKD~xX;+v+&*lpsGCj9+`GZs* z57DRW;2lW>*sqUPK4(nWtlszvMdbTv7FQjE)D!dwc&e*DKPc}CvmsN&Y?C2UCrGCk zaXPBJ+%LBF4eArpuzZwOvrhSbTr|mq3p>Wvo zcXo5d`9i+*@z{}<2&E}|sn!`O!n06<{0PwjN6qEf!~Bm{&O02=bbsTrHeJa^5^a!8 zh9Fu*FOftNK@cQFCk)15qD~ksv1FrXwCF)*L>s-076f5}=$(zx+pOOCJ;UxzH(=f1!9=YHMEPBTL&=HX3qzLqntiwnP&cV|v7{nR!Q>h@uB-XUD< zq}<*{YnBs1IzF3Yr@|FSGQC;3Wt%BPU%oispC@jpExy}E*k$chc-nYp#n!bmSX6Az zAkT>Jf-qE_2RlQUA#^%5Cv!{n9#ge3FtTDl>NXr%b|a=~nyeT^TCb=i`AM2E&9^=4 zoB-1w#n_l;e{Mgq0;6d{#a1uW+c%lp%1wbtg81uQLimq&vTwAS$}Xf!!*ZUiR|qDb z1_~c*w22LN8@(JqQJ|jcV9=e*$9z62_#6;YS4{C@l7)sV3ZCNY9D5T{B6=Ru>cgrX zg2PzPZOx&*6~Ty37a2sP1EX-)l5}_qcJIIjtfw0-S8m=lYR_*VXQn=IlU8^66vn>0+QK6gJ+|rWL4tJV%%|o=V zG*KoxYc#)4bc#Z^&9xG!leqi(n(d@y{42P-7E3h@67zT&oT zV@17dJ7vkkP6pTxGhub51_o4^#zvF?ZLGU35+Q%)d*aw)x93i`sLEE2+;&Z-XoIpL z+9I!af6^1S*S}Pp$~NZEIlOmkVu!=gV~@Vjb8$7Z>1xql+Cs40^zM{q>zp1uM{NKc z7Vo`GeE8L!jam#)>2b1VhPc3`jz`yFw(`^yDABIF(>WyeDwl?PKPaN5>+E!HSLP3y 
zh$^UZgSIR2R;JFZ|mV0~;Bc~Kcpug1-aCWO6kFYSSRNf4}w)#+{41;2u{vZ z$^&e4UZr?vc7fG;lqb8}cmmQD#lFkIn&l;!y^8j12<2z?d+4w?B)inmc0Jd11jkxJ z-on9ccbYY47G_h*8S@w2jjY&&?>!S@vJS*-soc-9{pNN`SXQ{eBsDjWOTg+$LkG2(q zQ!-#RhQ+$a1M&qMuU$;P%!3uEd^heh9ikw^#cCrxXt*-MaxZ3k(O}cU6S-NrCrAo* zZ|sNpB#+oDt9kHme?&@>Et(&uM;f!+_J;4c);Sqm>9WpPo-cslND?udS7Do0yK*-D z5ilJkxm&>#=QRd2H>CV=aCWQrN_!JHuTQ;1qPq!gG7H6M3hq@JWHr~0MND5|O04>% z7%7EMnsV&3rhc|^@&gVo{Qle9H12%Cm!DiX3*MX0I9hW=BL{qJtB{u zdI5%Bw6D(g7xKJH9^Ja(gGB!T_=BV$zh{QvW4`e0(m4f0~>$+54;gGj~r{*IzrI~fq5Kb~z8X{vpCOOkSr zM@*PBPVBTAkA(UC1dj%sb9N7DJyAdjq`X`@`xN!78h{DdxY?ZfJ_4p*R?OXB7_Jc} zzs6MmU(=)eJADWWIK-U(X{mTIOv9TN1i;T_w3M966bo(CxI)`NwhKmHTNO6_6=ZS4 z?*{jxCQAS?0N%Sbo~eR_>0AIq#d`nRYLO<}!Yh84upRa#8FgQ$9C>5xoSktqgbA0x z?If5Eeg-^cYM&;~NWJ?0dypX(F1gF8Xf__{QEEQdvZQt1lFz?Y#3-=vl{ufZX@TgJ~D_!v3^N*tMl_&T6;R5}q~pWB6atsG^TYO=olYNL045vrqpm zI6C^KB;qGsxY^i8FzNe%FE4(+ z{_wBR(W>NoV6T+LR=KfZ8ell7a%?e4gI6t9^k{r=j$XgMACcpj5)XKqp&vH?eDMTf z;BA{g#KG_-3{da#YZ$0YQLEP}02gTfxY3ZC@ia7M!sTqh;?FuX;tKjKYym@|Tn(Em zh$L=|2#AQ>y+WdI?q~AeB?}>u9Q2(TMspRI z1&1IoY0`($@Tp=j-8TI8Owuxj;HJJq%D8G6U5R@5|@kAgJ2X8E%2m(RP+c!Tj z)-odQeJ4`PtRP5J115yN-_o-x(%^mK6$^n4W>5Q_Sv@@>V5Ehf!Z{(Da1AhKH+k;t z+_jV_49@J9MTV8UZo%Wr$Z;8;PbI5E&o)}o7`6k3_4J1sf`x^}%5VcyX|t-|HeU!S zaedQFI90&*>vS1j#ap7(+D*2xcnZkFEFLTv>5=5b*@L^|ODf1~8%oL`{@>~6Py0d1 zkmvDCFa<45UUP=~fY|Fdc$LYtyG=0%F0Kek4NBv4DgcaV-a;SiyiQ-WL{N8KQ>&$! 
zPM1K;8|Z6*lFT@Z-6=qBm2&|W2?$be&dYPrqGSNIw9Gq!@3BjfS`AWPA1BezT3+OQ z>N96tA5$saeYy{T*+=~&hu>)~%`_m`f4xwDJcTm%g4F9NP)~T)_KlZEyG||4t!;`; znMYP;?umi%L_#ab`QM!mBJ~tzhUuc|L(RSFJeLE-hbFmf$CKC6Py+zwfWDS7)in%@ z&zf@7Z5+-u-tFu9nu(gvEXr4DO^mtb+;U^fx?lx`6s%ZJ!oiUh8|HFX*<;S8l{91r$ zg5oo;Y(-XXkC`JdK_rp3C*bEPd#{O4rG5Hswydrw1_~$fNKnRcQbcCaAZ1>`!)ZQlx$y$71`*%k+HY95+P-D5?z}yS)MmmVmkd_K5>?+kH23`P%9@e#*^|2uhc~4m$?p(P<>u(V7RbcgZ_*lwVmhj#A=8YX$+{sUlU- zDe{QR)=`+27xLGE3?u#m7CWf;xzcRf8p~fH;uL!dM)K)+vBez?74ZKNL#OLfj6hoC zh@h{PjUE>u=*=)#u=;O^zwCTNH7l>hs-<+E94Y)-*ysdq-BpEp;xVeV6>YC9L?ho; zA`?&n&ZkiYdgdku>Rt72cWDP=ijrF7k;cNL{q3@GelfVIXA%QrG5}p&Uk#?@l?YX< z5@E`?Q^X8bl?y%*^9iT z;hNW*v8%zy|G2b;Z3q^%;1QG~&89~*QjJw1+eWh+%6l-b>9+~Y%Zq**k zThq*dYc>s3L3htG`ujY1sjdX-3U-jJM+O`X@2Ed&7hg6*q0`T}FoA0FzHy=vm$915ih2b7E+8abuN4! z0c#5@Cb8U$fHf3Fg$)-05QW z)^E9_zi`md$ja$7K!tG)4`huJPJ^G8`ha82Ea(RVCtqF+BDrnMpVi4oGq?LR;?b%K z4;ZuyQFy8w5pGdhW&G97Nq{KqYhwHlhSt~Y=&hR>y=K9+ugylan0yi4<6#@8080rf zeCu-iCI$Q?0WS>hBIuo;{jV!tH9`Zho@?0`DDtg8i|1~))6*tOC@y9PvCv$D!R_w! z7XlvSWL|A>1Cw;)o)Y&wPZU-D)S&J1lA;(zZ4xy1<8vCg1WT^I%-FsbwamG9>qklu zJIP3BKC*o3`YT`^{>~5lHA#BtDzG`f!Mk4aCHH=ylF))jZ&s*DLa-7+KtfZm7XKEZ z2jFKYu;zVr2!J$cJz`9lC=I+jRZq$O)Cc@6NNriJyH>d+FT5n!ty0V1r&b60Y? z%2CS6pePmTPkJuHt_j#%c(i}*%Y#roOoM_UDXK%<6h7Gv7z1GN&TVh}%sIhT36Ie; zfO4aX2B4ll^=mz7fl;Vacjdv1N z;s4K7oUQ?B_n%w^F|0kKM*@jrEu;%DhiFhi7w#ll|Eo?^_hHqO~OX9%6eAC?sLdC#e(nG za(q`R>H%~T*bqQO(a6V7z;wD`$45*UCU8Y&%kiL+wT)_RGwhe=1qKTgd2DsMtO}xA zCV*hkO1Db;T;Btu1m_aHwKzDQt@14_%I%}mpsg6L?_(-l7gCRd;`a1NaaMJ5%@d-G^U(^;uQCA97)XqiM1=SH~Um*CKm*8aM494AY2nW-Fr z#D*Va&$r@VR0}P;d&;m+Fqq1=_yhPhi4x+ni(1_y8|3CFfCzd~25BQ4!WgdMGLpgb zt6Kou41K;M6F1hnA3uLf^76}zllG=crB4hWwC5LJ(ul?(=m(WCql5RTdBOQo^%=cF z%=J^T1&FOs<7M+}0}wkn<>o;^+@1A0>3-V-40PqlZL9B0m;iu5H(n=i=ppkc&!Zr? 
zw6iAHwWHy}v0EY91{mncn(RQ6IQugg3B9wviD7fYyK}>hBh)sJSXn4Qt^0j%m6cVV z|BYp^T)od4c&~y?l-%voL;9HXtSjw;r;}{slTKczf4BlMTSJfK_yO(d{bt*Lcj=b5eo$KwiF&9V9C-X1~G~-IQ%pnt!Lp?FQMUe*TAF-7ClQ!^5?t^*sE6OgGv` zWdYVa7dWIReRLE9%!R~jqm|cr>v)=?4_)`V1l|*dU5W}6^yvCV)_)OE35C8UJ)f~O z-gs5p61KalvZA5CYxNM!(Brw9-3A(dXHF#px?ET50=sSFP#gY$VLyl7VH?YCo%1uS zV5@&~;Z4C>HYj9G(zucW#R(-xMu9OX8vV@}NFkF#fb81lKRuphLp$g?F{JAnqnk!x zHeH*|Ythp4Tw5LS7X-1wcnos&`YCHdFR`@qNEiZzxQsZjIKmg_h-DD+OKN*gAEi-9 zTq6!tML+6WuwP>&@QFOXsa3fL1UCglO|+Fa+Fgu{$y5Y>=)P&y-%+PV>E-?%K?HqW zSmb_l?V1TX-wLYOR_8ORAh)S*A(?ExYzTi6A7I5_u&y-jY=8p6&l0Zjuy4T6#L(?p zZDTw&+%JB`)l4Prchq>#=iafcz}>v!9f)qoZsC6}a> z>lDa}G~q5Ii-a!%0Ei?{-{BEi4ut8@+9rye9Y&MO%Qsnza91T&{Xj2G$&UHB%ROz) z6z`vi8aAH}IfL>ulG*;L`MMkt3G@B2@3-bl$)&?PwxA!J#(e{pJVDDa_Nn((V|^T| z4O`K=GD9vUk)?NPja*ha=Q=qw7GrE*L5m*LvG3a0!pLxXTxBY3cF*A&$R39B)@rI~Z-R?Ub<}({pEScPH*+Zfw2^WP5MR zP``lV{V?HDE}NC8`_{w)Pt~U&IbY*9g zHM)q!WXjqXisgrbF1I43_jYL3lu6mrdCIqGN-DGAZ*PZ*^x zg{r!IniOck&rUFvYAREbpy-pNT;r{IIgvQkaE-tgerajxtD;!&c7%izxeGG#1)Yd2 zeW}q66Z{M)^o`UgqkoQ)r&+J)^<9Anp}xUs~O&0KIJ-EqZ@k-eI$D+Vm@?( zUNSaJqpk8sdaKAAXPh0=857bAs=)J@Uuyn`*vJ-0X|R1L=dwR(wCmV+@VOpXrDZ^l zG&j)lv|lh6VPnY)&&%`$igrWGue{d|?kOuU43d}R;zW&K?|jf+Mz~%Dkp5>YpC5dO z)3fZ!m28?+G9qgNrqb@RNK);3)e$vU=f0ZV6ACf^t3 zt#v1jOLzKsye~?u%R8bGK-xPRB&eo|;#WZABkXU?<1?a!8j08LbLM%1GuHZEY6I@L}Zdv;fv^{N%19D1l`=OP5kydhX zDQ+QiiS%rdx3Z+DsNAm*Yc=VL2>@>Nx_n1|{EZ0}Ctj+L@-l-%igI7x9+HiRmj00J zf2%`-+9}F7N2$GI&Ass(cdcCpU}LlEVK4SUS5rp=HOgBE{F3Omk`|EuNk6}=^`NPF zcZQFc%}P*d!eg~!`Xse=oU}x;EpnQnhZnt7V3M4H5L&&)H9XXGgMT?nEfb3A;8cH zT)OWb4mN6EWgptZLrZ@SPyWNwpCgz3oqs2}Cl~sS8+y(g&ISG`mEv91JNVniFaHlr CVG-^C diff --git a/docs/tutorials/images/barren_3.png b/docs/tutorials/images/barren_3.png deleted file mode 100644 index 1e273ae4e4c7fe3642019b15c778f39fb4593bb3..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 104569 zcma%ibyyVd_x92Y0@4lA-5}i{-Q6YK(k;^6-5t{14N@X3-Q6iIrSItH`}_ahYgw+H 
z*?FGR_qosPMkp&vp&$_;fj}S>8EFYs5C~cu1cK@ZLjyVnHk5ZmV-!i-|dNDYJP zTvM6i#02y{y$fj4^~cT61`on7i-Fu}a`p?80)O^%eTZJBUru`$AGU689&U`Rg?fx$ z_uX0to<~L>h8}L8LUI-#SUtO6JjEw37Y}P|J39=8hwghI&$#cn<-Oy-{p_jfc1+$m zz5DsQ_4M)~=4IA(&a=a3l(0cQ!kc*03GGjrh<}8t#FX=_G70LVFU$@Ed6oxbo{+ zv()|kQZ_`*uAR%uP0iEx;1Z=X5Vs<$l#98DotN{uS+e}u4@PXL@l6n71hVIw3fMP2 z-?YU^V`E;BB}vp`laPZ|cTg`9vy!9tn#wJ2atU&6KStE1eZj#{u}YA&@g|^TI23!G zTPrV6Q5~yEHZ4ij^u|wR)MIP387Nm#n`~CEY;IXOwW`?H%xqrKJ+;4G!bH>TxI6Ja z?T+ggM`$JK@^Ko*0QP%E)z;=6RJWA_s;U86Z(C{7`c59E~E~Ab2--R{h@Wd zs%1Y+?)Ve;owwv7r+xG0USZ+h6OT#|Q5BcIRLCG8CQ8!o)T6`i{E?4n}Tbv zt=4}$+n#BZ;W*4Y+irguv5DW{p)Rg zRL!Le`H}~+LNw@%jWrK%^Xn*>eJ%VWzTd!M#R8VS42@1U!W!I`J7EAlxWmDX^bfqU z1+)d|rY*B1J>w5zWlXzELUt3nWOKc(aXJbsQXg~T@mf4+VmVYUmrQHG=-TAEe$m&< zA8KDtGUYt^gnC{&q-83O_5N5E|gJwNM}hHDbf9rM=b)C90Qs``Y z_msbzs^xIHEm+woTdsqTEQ9ah=UaXpj3))@-s0@@`BE{prTmmQl4qmW%H&ZU<1O5{ zCwbv>UYF3^^Uu%Zvzk{si7AdKK73V+7dAb+-U^$>8CC*EHQ8JGx8j1?Lb_iN6^bH% zvd`=(ds%Ug(v z2g{lgYLEPx44i!+r}$;$|EEhuRFd^L#17qa=Hu}z#@2Ng?KGw+p=XnI9n#Wp?7j5) zUZkmbPkxQdpg>@X?*yD*`&X5Op>@UVSCviTV*Fdk&UPmaAyrkZw`@X-+h{AF($H*R zG*9Mi8F#}5HuK8m5*2IfEU!sT1j%)f*x$XDra#<3jX7$ukV`HWUl zxw$iKf7(#Y&M^g&7SB!+xblmkhSUqOJIh>-)7ep*n$SW^e9R_cmk0wEK@u#UL42ah z7Cu*ge)>bTj~wZSQP*={zLAZ?eM*JD9Pw@XIq~j2=ydgRw@$k zQaxIv;C{QxC%AMG9IkLI>Tt0d5vV{vVGd)G+5H<4>g3UGnX6;-ln&?2y-#J(y=sh8 z66wpCJbG5Y(99BLY{fX#cn(SOefo)lnB!4kGg%tDbG7G&ryFVd4nK)lPh9bT&y)Z1N`W3oMHvS-=i>@omX^qkt#;eFX3%rh{mFi$ieHZ9Du>ogClZluJ zYeknIAqI`D>}0qs1sT=tg{`dCFm}n6O63+P{DjMA>QofH6RCFzh5jX1=Uw4Lua|Vi zCn7sUk)OHYY#J?Lv7#}}okhB|Z}p*GgKZdR_R!pM!#5Ojy{2f3drl0WLp^^&eqUJF zc#B|Mg^4-I{NO*AE#YoguPOLx$psl?tN2VGB8PFVAT{ksD~~s@DAG*m5BpsQLM%f8 zd!KYR_BO=SAvXcCV#PVmtfRA>XQeO$xm3uPlHMAZY+DZ|Z2e&FXn`nubEZmfBjZM5 z$$@y78i6|s{ZSdxZFFB z>4%a5UpeTIf(!XDV@r0C&vBY=+21E-OA#LfceWMJmF2P6;Y}@r!oMM%YhDo-OUFor zFROh$f)y8r)p>7R{02`fbbCxu>8d3aK|&HMnZO&r7Aw!Zko*D@0}Qo;W}6CArD70k zn%PGNV?hv4OSi8(sWnj;Od(RNuH(PrWJULT_43D`tT+C?d5H2*ab2>FLR8oOh~@V+ zp&pMMCWiNz^)jYl%vY)72)TUhQu$ual@;IKadWXMZIl7S%whcs($Dn^yM_A7iOE6` 
ziMLG>vC64%^U-bNuF*X2j96($5n1v!w6v*U99ONK?qa)KT>@eD3cBn13?I)>z62c! z>Sa!CG!Di1AML(f(MDy0u&J13%)Pyi)#;bBd0#u57&fy^E-|H5%Fvo4Rffq)cPG^} zy$2yn^CBnKEyhG92JOYkg9;Nqhh8KcyPQnHH8$sl`WK?pG@h103p>$o)vj<4&*ROX z#cM+M;Gt{7ctTp(Es|%E!GSQ2 zz6FEbMM~IWFlg+eQ*;(=q>inNX5rzH!yADV9rDWCZ%h$SDU!t&!9B|ddsWq3U+1K6 zB>x5ywhNsWwl01F$etUA=+BZ@6L$cL3kvu^SO`9;`o&FcG8t(4|w5UPt59v)- z*{fuY-k?<5@DR=X3s2C2S;V1v*fY2bnkzMw2hP|SCV4a1ulQX;U5JS!!bbphaQiFH zZhlbU*H>WR5f&&DAf?8_?HiwOQv|_fUQHi~F@ah0veq}kMJnZC+$0c#W2{8pZ9r~$ z7zO=FB!;J`JAZi~Rl?s&PbFi=z>+b+ri)i0Uyp-THjrV;i!bSI^z;~|)TLiV-+4Ip z&EQGnye$?W*gYkzMWLYfs1Wy+?Vq{o^IlEf=Y*x8l1CG<>vKRba~?!cPsSXje0w?v9oJx^ZA{7xyPYZ8FhaQc~A zxCbfne1LmA=#5E?_!tG4;0RWTOll{gN5Q0erAUf=7o>dNz8k47^>}PPJAlQVIo6<+B}Ki!NgDe)6A);e6Nq>z2~1Uu8C3YEfr-+b6)xil8` z{u@ec6r$e)nCnaNIf$e1dq5<9t&3Js zN3-IrVo@RzNLJVf-0<3FDul5dpySj)bR^w9oA;$C^3-3`yx)w*xivxh_WcSW5W5NI zVQbqMOhwUVH20+}9WmVi=dp`{ZcIoR)Yw^Oj{mATmsGC1Tz?Ia?;hAbZv6r(u;?wY~r2PsI1=OG7Sf4;%`sd! zCg%oL)4>Ok$Tq@mvpRij%?nR0~5=-_8};Dq#e zAc*y;uQWlS&#ao0U?F)Gr^)=gPL_wbcv(4Nn@GD%XaDt$&f8Ari-qy6@o$xQT6wA7 z0uX(V^fZ>>{p$X59yPR&T-R6+_g)c++ao=Z<{rgz96N0)>uCJo0~P`_tr=Jz+6#&- zd5Q_HspaRe zrB(H^wjh0SP|eZ9;9oKrOz&&Jai%(ul>m6jMO@jSzU>{o1u;cddBK$w4>VE7_TBQ_ zZi?_U)&82|FII8Vgsb83;UVQa@m`HK<7)55nOf|ppV(o2RGq`V{5Y^-F`SE=+Lqk| z-^7<_vv)*WY0ybVD@#=z=|W{SYi=X=E={kSG0%idn|jgle%_JFV^x#NpP=8xAoc*JwYYez0CRzXmX=JdE)bOdAEU`7-N@!(O<#$o>) z<57AFKiY1do^33GWm^?tY!aDOB!xj&$C5~%7sZorud0mUD?dyX6RGnNv^{LY%ojpe z55_wFBof{CL7oxImy9Ax!Y?tl9!?6+RLXEwRX;(_UvL)=j0-zkF{Ix-yxJ@T&fXy% zGF4{D?9QKmI&$JY_({Ix;@CMmEtmDQrp~C1$7)=cu7IE)^sCN_1RIQC0g^m{?0V3e zR}tB5jmy!ipSwGTQ|`W_cxrQ8L2`nhsz(#335{f>l=+JM)9!;A84ip#`P%Yw58=TH z20fMhwYVuuhL`5MWb5Dzc}J)X3=OEAS!>QfH~ZJUnP%aQpG|#FG3>9n7In#!xhUd) zu%`38E z%-xt%1)~lQu;=2R#<%4)zU4|3O($UBRk17Z(E`oQS)si=R1R0J)>oxm5vbcz7GQK>YA zxmaAAazzQYg?cNxO;=O$*wR^wa$n=>Tk;d zl)Rs}oXy~{rrapovv4F33gNHIurwQIC=z+8Y?|#ys18jh!(kRle@F*9&Ni*qk76>0 zD$tY2|D1uhl1(;&>?mc*OsM1(2xsjiRfgTFBsbCXZHS^jSefhOabVsGtUNgZ0T 
zLWow+Ry=1nIKM_-Lys19l!{)~o|yOv*^JA|EA?0l`PP?{8vcy{0jh(|Wv<{pKZ*MD zTQ=w{Bxo2x^=4B_^F`M=__6$=pd-|iJ)IWK#obO`Bx}C?_dF z0gbYd(UwD%C?A_uk5+o{{-*e)3cB#zsXc2YV3sbtDX$vAa@3&%ZlJgP>kfCQxrf=b z#n1C#r&iNXZ^_=3-+ILe>9=SIA0dTDRXALhPmQf!H^L<43ij(YMb`yL>!7b_rnaop z+G9irb@keKwa_G$7Vm0B>t!}MH=~+RF~rNMLfQfU z<*mI5G}|Gqg2W&!2jvPB!u*2Fm()dF*cI^~nCSHb5%TYCJU(P%<($yjRtMQwgHyq$JlC&drOTgqyJ8WPHoT_mT(ZJU}w|()rSvX9^3v3A2%S`Mc$bTqo07 zw04=hW(6$6%hm}|VPL)jch&;Jv$;ZRB21G{x#kG+Wn9FSumqc{jo}K|6$0T5pe`$T(262WSb} z0u#0fx^~B4vXmM|kG2H+K0LZX!MPvW6UKn3AflpCJ8WvSZ-crN9#VE)Lczn?{`;Z?!rLq6uUHN<`BDU~I$t(M0Y0H++rUs@xi3VL=>rM-{ zS=K2{eQvuoQ6+}>Bw4Ffg0@*@eRVbEbkV!te%@)^dRIG{Pt~u!{Q&bRcBIAad*$p2 z;a25(Ld#Gux?Q0!x5HJ<3!}(Mz->l&N!fU1?0Z`+*DYjGmfs<|?(yVAANoX|%b9B0 zKV_>;$fUz`vChhS*iNz|I$X(B4!pLS5LGJ57TU#a&Or*Pt3xFdEIhNEY$qJqI9TZq z8jp%a>uqf<6tip}(A61-#>>F5Sl}-6wTh>%26!d}zlZBdDHF zMmg^S`zysOz^(DWtW;-ZvlkLfo>1hQgSHRaQ-V#zKgGwxzM`sgbSeD7WU>F%e4_pl z?Q}j?j;v$_U)(75YYv&Zd!U}z_X<_sH@wblwC1L0&EVLdAF!bkAXntnnhrV>65VR1 z7$H+vNGJSEk7V*@AKr4?cac?3AB(^F)uy2eFj8yMJuLdY%HfOyH}CPvVfOGs1SMkW zBlf(dw{vHax~;>@R#tAa&~-RF^9t3+HuAa;xQN1V<#&}4n59#5UcbEAgxWHjwG89j zeN@_Xx6iSUkDK9QjwleZq$JStCrd04@fT5ed=+w%H!@|DcEirt=%O!^j=3al9Mjwr zUH2g*2BSl${oiIGyjrn&SQ?r!RHfTw2m~L~~S5hxl?By|4GTXA)Vkp!la^w(oA*Pja@C(Hq z`KPC19I6rm+Q8`Fb}3rgsCxPK-tm}X*>%fg-}sh(W(u3)l^*M4@$y*~m1rNKkVs6C z^5F5Mt2UR_%i(rxj*y_ECAM^IdhXlHbJ+Si4T`Wjy!8rsOXKKYZz` z*f2FwFdtUn586Ru#AZ6CYAyJ>50A(dixk9cEcAcU2h}n1)E6Q2YBOYL**h}{+KCt- zX7%^e%ePlwf)QAc;3KT+~ zCh$FJBO}+64)j8-u#4bFQXTm6nCN$YyX~uhlwG&`H1`k>eRcGjK+;Yb2Bp{F%z;Ml z6G#(u7GzNSYN!u=i&{lPChEgOk_fJ98M@;Nii<^0A+7!Sb8c=7 zDqQtCScZn_!{B4NgT3L$!qxdPzG4v4n$vx0iQTC85vBxgorHy9rcRW|oR<6v-f_u? zBC`vW`L7=%1@Idr^_D7Kl>@E>%0xQ33>p+8$C^crrc%iq)d-LG_+9RHkV-3wu>h)1jHf{Pm}f4;cwMGYShJ|#`nGa+W; z+MOwS5_vXt%;J516i>8_s?A?0m1a&n z9x;ewuQe8Z&LCZw_Q~;ZQ}EBjNayFRv zaoat*8d6!!kw5XH@({LuAA?Ob*0H~;zQa7iq|1iwUdinKszMdA$g;uWb7|NM zS?0n8H_jNhMvE@hS%#M-L#;br&W$BeKbev+CpD>A+YbT%V7dhC0dsn$! 
zEV7Vb1egv~xTpMGMx(fRS;(o&+vq@Mhpm;+FDTpcgAXS^*S2|2%!KpMXG~mt-#X0| zMC4#|zW-duXan5deY4G>F$fcA{`7V3%rv!szjJu-md0ql?W-uyU~k)%(6pC_&z7RasJ^Lr0zT3 z$vrUr{HzlHJ2k?mfKc^aigL{>5mQKp21k~OadQ)F!yc>B;%=Id@a`?0?AesiRJ%i> zYX=vm8`|^Mg*6kgr;^mFCGjX@H@e`N=rmO2e5fL(2m@*>#}y^tSTv_LetX?d27Rqg zRP2vkN4dJq7u~L@a3lKGP0{AzPZFM|x{7H=gOv(7E8LEvk>Skgwe9NRH2J^L#LMvp z-B>dMdqd|`2t;jOPbetayvmBYp4yOTQIrM|$axGrjCti*Or@ft__ktuxmobbFxlpI zW|0VkA@u24YBUGwxtnAtvR&H_cj!V$P#*kgg(02`BUDaW@Ddq{I!R=Xx<9bFs`-gB zzglI<7qHT*KhvDs{3-qQ-b-k>iiyjUVDP>uvF_Z{=oUH>x-#KYyg+Rcvr(LG;Ige0gN@RiA85Zq8%835PH-YD1JT{2g`R6<8P+6vnJN^g!%Hb6)CCI3bVzjjjM{L z6ZOQGsj>G4e3$RyijY=iW;tclPJip`>yJo{G`wuxPx$?~S$j(pNe(6pj`yW1AR`S% z6BCsN-YKJDh=Iwlf`Rv7I5M*TufuP`IBWe^2L{Ay7v=s`Vv2#GoKCA-Nd!Dudjsog zY8=KAsf>ON;2wSU7Xy#EprD|fE>ur+Mk?1XvKV$P*)-1>fq;#I`a#~3fC!XRDt~1t zAf*I0zfMUG4L}pyC{nFZXU{19)W(@1Pn{x52Sy|_E6B*sMh+9_L`^Ho?=xo4p#6Jr z`=@vJwv%*;>anYLL@Y^q#7!9)aS<@t-dC5-b@$Hob9c4B zLXORn+S%K8yP|dI*Mx?`r%inLYedR-h=E(Kd~l+k9>VaG1XRIj&im$hgWFbicCI`` z4y~(pT);DGn7#Gd*3r>2C2!fz!NGwmYto^$v%8yE#K)&|_SkH5qQ}nB5f_&1Vg0}_ z>0Ax zM5t0eK0cO~hgC$TDkJW{O%`t-9;Ti{-8;LRo9Atsbs2F|!_R=#5sDzALILwO@3ST) ziy-^X=Dogz;xq@CmGdiWYcHsqdl6|$FE|9)Y!1L6|iQK9EPrdG; z*KB)QcS>w5Drn)&<-+nZV!*3yYWExj!gi{+^xody-lf*$cxh>AwCC2l8`#34(cm|~ z{#jVZ`fFpD99HAI6#9Wy`_VLIo4Z{oVH7yBV9@Bs8*{hqwK0}J(x#S{gWNn13>1EA zPJ(k>8#T?Fzn9}94YVpSh$Bgx@em*ZK^O1yZpMo9N4N26i#>DNgheBMjAYdCjw^th z4$Nr`&(s!;86{_{-UvtowlyNvKZ^_IPONFcWZdco_lhhny0vFChd7oFtv(lK_+_)p zYinyWGtRsz*^?HHlV1K_#~!0U_`4er%dvtT^-4tdugpNMcQFoCmHAL)!E0z6Hl`DL zr$=oKmo7X+nw5Zg8?bLDCns+fyckPb>{q$C_q6H8sN0v$-5Zy)b8tU$5)cv+vfQJd zHQe93SI&<+I%5T3A>B&fV)fE!G$Sx^)wdd{R(QsQTTx#1FCpBFfQ`J43#?xmoM}v4Y%pHJM$8 zkwD|CI!rG3@5-Fq+^x;c?duNrAQnPrI}7U001dl#T>Ww}ehYIOOH0d^Uya&~XI|S> z8wDjLCH3{6HqvOpwPc3!FyIk2POM-w=sr6xBk$vl)yQ%$mYyMgeVyOtkZK4+!43x~ zk?3_3EC%+PD*Bva6oFJ^K?_|SOC5_LcCCBysswm$%e~WbUC6&PtzPe&OQoYI+7iaZ zS@qs2b}$>rC$9a{z_Ro+9*}U|r`~G>&8z2t9zf-|ySoEkarF*A)la~Rj^5TT~OPr|qn*P5efxH5oc7GSAr^ 
z8kd)1Nfr^ZcnHEF@5q4^B25`Dgp@CyYE-Cm5~SgvhC$I{U3)Nr4Vcs3m)XD75y2A59lGO6uzPT$fX zgvb#Y@uQ}*qH8c?tsu~)^sA0QTJTc$&O7kU)+SR>YH(uAZ^>|zKumgaA?OYdX zZB}x)1{`8go!3*<@H5&GN#wFTEifMKx@OvnGwrvQKs(W*0GP!kYb(lt>+;bqO(Sh$ zeqrI;z!Z>Ct5(bpUVCc>4K+*S*MY*>nb1zoPKlbzXg~pO>*`q$HeR5PQ7m)WuWVJy!+byXDuANlId4^`TC;X>;zuKY zwxd_Z;_0N{kHxjM?AKJCPQ!W&;k3R~6aDJV(-D@OC&iKFkz650U|+A=F?b{ozm*|_ zP8{1aJtzF`DJV`(P9S4bJ$n48KwBO;7aTpv0SIX}nsdpse=5j>;~&sscb94&7OfYK z_KW9ASUR->4G;K@-n24T3b=UU^(}uA{tHoEQV@wa2Cc7JbwR&nR=(zZ zM@KL*(YxyztG~c*vN5JeghEx(i~Kh|4yf4hVHAh}oB}>b^#NYwy1T0Y`|p`V1}&30 zkn&e`3biIB&VQfH!%m?F5mdXI$^PHcYKHk12(@{{MgQ9>DTjwZA(E5epI~W0>58DA z!I=LiXA?WBh@>pB6e3x0AS(-Y`93IG^5Z3`ATPfB$A=o-zrN^4BtZFRL;mH6qAwDml0pBBhWbAf_|o8Ff`Xl*|8JE7Nhpz` z+1dYFWjtCJBw3W;e)P{?zD-T)2qMgB?@ow||EIs&SG9&9r$qSwR*h4o-X1~*IYzg9 zrTMF3h%GsqOPBY>kk_BJH1#epDPB?a$zN+YZQH!08NJ+&8NI$Lwh)nUn@NwGK)h(o}(jJnb z1fhDo-D^ptQR9-{L{nCV*#5! z>g!T{VCzu&cn>m-*h}&;{CJ2$Psg7Q9y!$2pz>kSG zWDHS2>i>d2#0U$=7Bq@N#lHWY7?R^-SV=br zJ#E$We@1rlU}iEyQ1xTu$IH0RaeE8Rni>5?Iyv+|Bd}MC*)Y$LT3*lwh)Z=YL;WxP zLZ(zdg8C*jN((XE4~{ZK`x!ty+{a~C07XC=fQxNTheFfJY@i84cw%d;%Ke`tFJLiq zlMvYgh{E{t@qLC~4_=4RelDT8xO`&fd+uEi2!DPT$rFHiwgSx2vh1%)LB6z6;&u z6#D0tpXO!X>jU4RS-r5tOzYuF|qf5={|9< z3zN^40zGQR4l1QCD<}U~0^xRy_<#669`7m<5oJCnYLZRn-Y+|zd@M8Nki zz`&Be^{$cv{P>v<^Ji<2#w3%|(&pvDX-x3!x8Q#=p;gh(LZOk5nV96-Q#IU}gOXWv zvjr8)ID~({`V$ELUB!25wV^bb_>7nL(=q8;PV~B7FGhemJ=~UWQI=#Jz`i)a>kh56 z$FA-ATD}<*`_6nhf7@fZKvJF@#j;sC@M2MkWwT0&nk>|<;`iQO)t1GT-$GNwr>Rf? 
z0|S-57OX$VPHz&C1@9T4t(@NjT}WtX=+n~^@ESEdKg^5?OXkzi(z4*tI`zbyG&VQ= z_W@A3nyK>YN=6WxoJDLL>_Jav4hxKkk-nkTG)(-&KFGcIA6NCl2}TVo0+@_NqgVGv zYAO!Uw6Cl@wJ!K;4aAeWmU zrpnjs&!kWRz((!ut7~iXo>5VFJ32aQ7W#CwzMrvtI$9Hm6KL#S0G(alR2lTZ;+B2~ zEp^vM{k{eoDap43_Ny(3fztrZgFq7WDwpkA?B2Um)ah2gzPKd0mee%G=dAaLwS<3WJPPeUhv)BdG>&<2C1!1I6bMHgyFf|BY zWzEC=ejT!AXuJDco1UT}CPNGY8(>T+ISb&+UMQ+qs!L2v%uP?`WbTAXy|(j&lsn zKwan#s_sG-747SB0#r*@KHE4Y$iNv7;pU)HSs0|QnjU;N8?|C5P>=-4wqx~;CdZY= z0h`4T23IX*dPpMcZFppG3qUQ|0TYA+Cmw(UtvgH^?Db_113TKe!Rql`I`L#?Wi@(; z>gno6OHyPljvIEMir+`9rj8V|AXfp$o&&&Y~ zFDT*-8Nt<>m9xioR#u~>QLE<`6C0ndI!kW=?qloV0FVK@EAe|@DXEcteG5(ketv#{ z#*l)6uAMuG2}kbpXJNTn9To#MT^V+`6bS<-wRVfvU^&a78nR6;x&5k%izPfy-0FR?il)z#(*?eCY( zJH97(44J2SvU;tTj+qJ8^&C71LI?agc4nbde^3a4sh^AI9=}MoU}A!p1+k*ZjqP72 z&dk(9ApM`>6syhDEJ#)r|4wZ#smG|;D|&nL-g}M3WE*t48?-7~a98c8gw!j8`tpU{ zrW(lIuj5<}Vl6ryVSr8mijj>4ik0hpv&|R%zfc{;CJV5W<6vJT5Ap>xm1Gc?rQcS? zzgqGVcBuH#U0NlgqnS_iFrtl?N|>CYp@9)WnKVYJhVMRk;b-vn^UEt|?q4l6g7;>G z+928T)KEN1$knv7P~p@w2I7qq{9NXe-{hd{Dfx>uE1|mF3om;VDn*}F0M3L0L_^up z+OU^m*^T`jfq|#M_E4-Ayl*{nUNk8D>Z@Mb_wU5#UD9aOvl;5T@|F^y5_9u2svu32 zmbTpg;t40%PR0nBJwdFppH*#bD+jknSIM5QzVutMzdmDPR$aEJ)ARfIi+yteDaOEC zH0wh_~(UnM3cabctzd?=-WRxbO z{-1p$?qsD!tx|UKfX$rr=}(_P&i4J}L4n;X?*%&72~L}-BTeaK-=8=FxzB`slUp-L@7`DjsocoIUBw z%Z9EH`~vEd@t?(*l~?&y<{`zcj$7AR0ra@KF%~zO9T^{+Rm(I3bN^|D6I}Ji-OtaD z3l&HdAY1Kya`Jk9O%#Q}!z$7CM-D*8KnLhoBu8TC`~ilszbdPvEQ%Z3=I zPbM$B{M=jUfH_{e8N! 
zmNC%o*>~H*bkccu{SkQcVI@5RP0S&mbSMPJ765}CI2Z1!De(!}CR>I(P={0ymJNUo?CNC=3Hf)}F4k zpam^m%297d5FjxSGH3$&4U44CuK75b!wy55o2}y@e1-^Ztb`Dhur%5fcRnIPya*r_ zU#I%4eG3%XfgTHFY00W_`PzMsF$x(Qe!rY$K^XslDqDs^t6~mRj5I$3EpzCEA#y2U zwM#DUfOf&l%XShspJD@rl^*%lGGAGXZ#K zMkoMgzBV=;Rv7xe9({0^QGc$Bn6(ZkJvI@n6(|!9w9T2ecgB*%sIj5R+?NRtgX3x# zv?dhWwtNi-)1L#L!;`}bYjGGRQCIds`xBtcj6QZNuG0*_S zYF4^AobBK64SXDli>erc4A##3884f$`n!{NwMjEV7>Whnl9DCh&ljS}d{fH+3NJBk z=~$hittHK1gt{dCA-2u=XfkdcT=ZRC)C zzb61-7&ivO_-?~_vvEo&3+$azfSyl_rDVOQ3hD}$Gnc4Px7bM(g9WMt+~w$UmWN;S z_L-SBm}cJDusS)(*&|suTyLkt@5JWa z3#MZkFSD7EIxF_N&B5znH}`wH%?5nCB(Y2(SpXRv(73p`{tni26bXuSu;`nchL8>6 z^H<%J$6Y`I+rRQ@5}A<9)2$g+cyDPYb%D(H9OQUk_`~?@gwSLJP!(+KXc=c%WX53* zu&=@b>frd(t-?l0paFnb0hn5+&FF#)X_Vo7O| z(1V3Rw&VldR0)SEfgL;Dk2m#Xj;;wZ!#(=Ch0B|V5!AEv__b=X5BKsLeriksKBrn2 zo(EK*P9xri*Was47FYnyTryAtZtYx(p*6G=g7ldeyPlc%?a^u~>3IYg>5RIq=Qsjf zuk8*QLQR+NW#q$>Kpmle%cNb6H9swDQ9fW=r$lRw{c=g- zm^U%6-`;pi0k#0NTY3FuRPTn>`9S>l4o*BU(#*=rV!5(^T(dgg3Q(d>N=-|N#m(Z_ zEla|Dm1O3F+fOk0+(P?3oO}i`0A`Ee*3Z>|_t|j4mI+TTgVQ-B`+;(k=(FwXI1nm! 
z(ZO8l^U&qe^pGI1Q z>vp+W>ghP*?92XYVH@rp(_GGZg zWK@jJ^NrC69!uuoA%-H|=4o~=-Ev2HB#&_A0yknxX*mCG{=!|B($HSiFa3Rac`#P= zUk7V|EY`VhsHZnE8FsFgmnZZ%CG63|)A%)5&HB9t1L)TeH^M!9P}mQzMGfq_^9=Rt z4AN-onPs?;szriI0yo(T;Z$CJdvaU0Z~#|;=NYv^lMFE8)W0Off-L~pn-!Aa@CSr8QB`1^K448?Z2(Q-ce=mla{pv$!v(NGv)LgBnpZC}oe_5n0) zn+*D)7&`Lm=#De#p!e4J`Lz`aK)d?=d!RnnsG+g9KL`U9i4h051NW(O{gKyDH{_Q8 zonAG41r#eEs~VPOdYa1<1_)EovD{1`KE&_0`J1D+{I7(d9)5ZDY#V3-lObfmiF2oE-FdNn&>L5kU z9%vD29ND;BSf?MDI%+i^m5``#BL{$DF-mCesH#e$N1ho1rI+}&p*HAv^}dW+TMN*0eHpU@J52z5#-qwe^< zMJAt5GCCic@vXtVEXrXoQU zMBZjj=&+AP0K!}k(|K*Jv~5yHX%S3Pq)WM8bEQp~6f4q&7pYc_99ya)HCtWS9avWZ z0k{k`U4;)=FIRNb-N-Zwtb=p^@DNqFw7l$f?7#MAgCh2@_t5i1>$%g>NVma#pWVcn zkt7B*U9Pb*oLToa|Np)VU<)NtCN;PH36vAmqqN!JIgKe(@&QC0L&W&h(3go zBV)17UxEe%#dljX)03aMo5*>BcQbzk_}SU+Sl+%LO}4o&2D*vG zO>bP96qs~A{#o2}+?=8iq)$V*aQQ0I8qPhZVx7%y8gOTsVmU}6w7KIg>HT>QdQ(6C z)2QVy`vz_p0gol}flxVT9wH#7Q%Z)pt2aS?!h%;BwZ4K$;SRljS}GFAG_l}!k7r7_ zp}?^n3Nv^C=w!NjRrt3Os`%(4Uu1FGrycBNw!aUl#7GWyxE!{mkKu#>s1cZe2g-a8 z%xo9Az;L2Y0LqpnDa3y=R25PpvVVR`bL_Yg0wox4sTnnFHh$Hwoo%2AVK*!^>X0E< z1Eur1p^4}YUDb!nj6Em>gUa{1WPey*#>!F=fegL?zthSi3|3TNQKvD=j`gG3OgdI7; zbnPAC!^k+2#X-V>gI=%Sg#Fpx$2^~{?aSYZlbK#X;+6Fp15slmYQ;(5Lilyxg&Wd( zn~;_EJ8=>EXzf{_FX{2F_On6DNoqwxK|`i)j_OIZVcZ5cNZ$3`UfOyR`X`5RBZukO zUxjj!oJbWCWyt_!Tuv?&XsM_2B>Jks@c%nkiNrrs*5uBPc4UATqd8r-Bs{hKTK5c6R%ZKUYMA_$?Cs-z(sJ*JTqS1?9qOO@9p}R;;@WXWRh;Jt8ST*7;ar zg$v`YMz8@RI<&uM(9sa?W<5HFpWl1u0(tz@VzHuYMV~mQqgraqMkr*fNJeZ~p*A+7 zI-+{l;&5(%6)|xAKKkF6)FE~?y@(GoEdSNpPf3ORZSisn5+!^Aau%uOxxi;l6C?4> zofKnyL1f`OGh zhe{?oF|}>Hfq{+ENPbTRHg>yB-B6G%d>|ZYlRkX3>Tdm%+=kN(qHhpFxm|+D?c{>= z=HH^+MZBtTs&&qqb(!jW{IFBin{x{I8}ITYN|8TsR;?MktrFJFud^AWwg<9k@xCt7%1Kv^tH>F>B_eY6H)aw%S5k{wOz zI4uwF>C;ji;b>N?xu@F2Uh~%4XAw|!CL?3l+$&mpA$X0*9h*uzm`4xMioOygi%t-$^^2r!%glfKh zE>yQ`^X?uKfCIZbkbE+zBPqoMyPyHH(cf|Z3cLbpeQoW;!N~FU+hfR2xf}m1uU+ma z->vpXtg%8p*7>SA`z7OL^pwZ^)+2x;re@X-AG2=6_*?69~GI z>gyBJqCgr1q?;xlWrAdoNphI1A1*57OA`YFErA#g?t_fQsMl=9G+*QIWj-_ba_OL} zdeJ$#twh!ngQI^t4n>mpZZIG{ptV0iSzq}T|+~C 
zeemV|M`~i zBl;pEBSA6|!ot!LO}whSeAI#i@muu1H8s9^QdJI1apmJ(R)c+;_4fHWOTgdI-^{5a z*g>r0G=Q&VLVHtFw+4A<|`j0CldHZ_o-$HEoF)yX6KO3Tgdx`p#K+ZwQtZ`gr$31l=n*n8q+W+5o!yAC!Fi?3pX#`dH zrlypHf6@`CjG5K!?t8%Gh87!g1LK}|^eyf~rc*Xb9w)VYtODs$6ac{o&zjg4cV9*v zG@#MS_U%wa~L9WnpOk=cef;h8>)Us<=|y)YG#TAx6Z_@;&b z-QpS%Ah7*MlP&J0B1)?SLdxTC*t{)VqXLR$ZMiqB>m)wKf^0OB9=YU7gacPrFQ->d z0ej4@8TNnQNANG3WM`9lOU92JtGHKwsdc$2VO+j3<(#J0PdN9fr;!WiS-t`3LT#Iu z-b`x$s}(Th<&U7*d`VV2mq+R1ed4Fa zn+jxbeT7lq{zEcY8iAnF2ug7vIaFM1xn>JLsPFrhQb8_B_NVjGyFsm@O`d*q2RjI1 zUF__F91#057)hP^dVoXp0{*RD^h|SONdxD~i_;n1uCA`Yb9Z4rf09giRS(z;%7jm6 zL(?OQR@I_PL*m}`@7c0mnJIa^*(N)a{}0P1K!X|*D8QJt8G~L=kWU6>Hfw8otRGq` zuLBzgm;ry#idtv7EV|LmLA{p zOdSZZ?}EM8ctA{Evaf$VWl;Qn*bSmRk5XPjhw1+>h`y0X|NyI!R`c)o1(ik467 zmrtqUEJ5ofC~$G;^}-xw3Dvjd6`^)cq^MwY@0+xW4LLjIgTYm>HM#z(aeTh0lC}EU z)PJh)cYuMSo|pjO@_C|ytoQ3|HmP0#z_RF#E6j7TSK^5fa=+slIe_1PNYN9#}eFMSp1xK94X z4@clPpundWb^4O*sbcmW$5(!f_uM4;ahh*$Re*TN-U!Pd!j zIhw+R31w!kIXi8kXIVW3-Q4^yU4xS|s_MRjl>4IV^~3Yb`EmIh(Us!cMq$UmT+mDZ z`UEWnTa!rkXUNa-qtMx^1;Fp8prm@g%}GKC=CB=n=zb(E$dex2N-S$~jDR><{&8Z4jM*eI=dD`~R0~5kD=gt*y<>n6RZ( zMN4HJ+Jrh0vlRAU{5uLgK0<*}ZoH;WEv6uPw zPqq9!sQft0dHydapa_i#)j1&3#6XnUd7U6y0p~QozF64ss(6KZ!nJx#PJZZLSi6g-!55a~Q?#?J_jkogjca zzi~fu6lf)f8ansjNa+LZHTW<>ClS@`;YpQ$A4j9ex-fx{%ny&wZJ@^FDYVd=S~2-w zRq+1AV|@y}oVNy6X*V~JEHL*j-z^lTIZ44SZ`u_Mngccp6;FIF7B)15uKt8gk!jIE z251yP+#BJEo_Yao_5|>G6#jPy`fulHZd~ry=aX^NxBRYuC&I1jh6pzn26rsn|E?+_ z^jl&+Z@54UACg_=?3OADy!Bi!c-|h}>AQ6-2z)&2ALja!ruO{bGc}cwfIg2CYzsHe zwplAMj~t(!*;`v1PfJ$*Tt0Q<&9HA0c|%WX(q$@~eyC8?oVDgOj?0Xv(efGg1;9yC zH&3?yccr4nx-s0CP$Y5msxmMEjep*ephXd_wd`MuS%8g7$IGBQ@#{17jW%*Xal7%V zcjl3Rub0*r#sCC&lq~pah7l7nfDTXSD7;Ex<>pS%W14U-2CCzfZ%3|B8SwMAIiZGw zB@ey!(P~V=@^6x#7p^e_6&*63+s|QD%cnNHMWA`X#s&mH8u|Cb?ToT>AoMcTN!A-P z{YGN5T+i6T?f?d+tn6?ZdVPEQ+Ns;PujN03HP^*{#xw8w>x<+5X56rP1%bfrk>(vK zpxbLzVS*QMR5IkCEXI47kqJriDADU20yxw)I5)@2#1%{xNmFiJOs-*6%rCYKV#*L1 zF%U89)a%t<+SPx{#KX?<%C~V1Qm5L52z=?f(8%aESA_U%fsAPTHCfvEB3gn}h0X-A 
zNw-|R{EHskukD?KWab=|A;HFWZKec3Q>j6-a`yRe-QUQH8le3-W^}kT9HOj9r|Vk^ z=uKgIN|FTd*itS~~I&fEY?*lJb9km^fR{KVv$eP&AwR zqUZK@+Z?gF?tfp<@pdjx{IqG6W!ZG17^JCsgD8LEXTpRNzrGMKUGHbFkhFVw6 z8n=+gEtX@6{o4A!);4OnL+(%tyl4@2u5G|*SuK>`-mZrl`1<-f+%aOprp16=PaRJJ zPZc+4Rs-5}s^~#mx2UKn_>#c`d{Ydc0+zYAyM1QMu%VVa{sfsGj|j(6I6+Y0>wm7& zWFwcAq%ME~hVq1*jwpdyYrFQlmpe$@i{S5*VkD_QMW~{Xy1)4UglE@Mt9JP^+3d}U zD3~#LjxNm*gO5Z3z^iQc!r%{I(nUBG=L_?yM^ zBnpN5rI%e&BBXVaqH>EJ!!ndVpM@qOpViQ4>tt{@%WtCj@2Jk`bas!B;C#*F*O15e zA7B1%r^tC-p0J)?j^YO;CP@J_yUIZvSc;}#>7niLA3qkb9^I`=5Lec#cG*2DX~q(r zj*;WfnX!NvP>E^58I**51g@`TY2S%fP#>w+4}wQQz}anuqS zOK4O|S)whYKr+sINd$@VbCXj*E(r2BFJol{HJDiv$v$P ztfHdA{r(uF4*LDw^R8~@Uw!tKI_#v?dHNk4BE-FG5rX!~t z;f9jx$$hK4I;_a%Hn+jH+;yM9g48r>YB4#KKQZZ9O+XU|4&`72M93gN)2IdKC};ec zVdsB&{$qDn1P7v8X6E?>thAgpRsf;+(LaQ(IczCJ=xvbLZ~FC;+5i5}s5OMJ%6%2+ zg|&Ra=MumQI3R|K#=fu`A2`!{-bV3zJ??m!>v)@;*Uo$%B?fHT-h3Hkh+nmlmPNx* zMXE$Ph@C?kP|_@n)-Tul2C)TUD#u ztmwQq;NMxk3fV79n=31y$-+gD;)Jy`QjRbFhJcG*evW+eK2z}K`}$wzGTmy&n9azg zE^(^azN2p%PyDT_iId>W)b{6*!|BXd5N&C{QXQ zdrR_vb=`Anldzl__&IzGxpr9GC`P76aJex7mc8}9W%^+mqL&%pr^rkMiLE%LzuAgD zKYU-aCohaw&2^zN4;!In1a<@|%$jEj1ifMjZ9Lacz4Zhqrn!H=(7*lOPPJ-F~J5bpSHiBmh5P}A+kVRbYwfZx@ig?iyDeQx>Rcb;1#1l zj4N9j4Jjq1*2_x?C}^aAP6fmuA%&h{b;wx>Wm*>mJjXB~etj;UrJN1NVTK@SjH+qL ztr9~I&?}7;H~yN-?2#ThQ7l9y(0w9FFFT4+9X9VJ2{(=m89lV~;cU_AuD||+EL}CR z+B@$BIXnfOMX&1Ky_K$`3l#-2|O)*6iSY3{lZ;9lbfw8MwAKMg+HX>gE=l;6;e@ySft0XeA@? 
zZM8z{PRFU@LAskp1Mw$vWG6y5ebx!bXA?=Y0FU!Ygp)MT*>pJYoqN8z^=me|j6lHr z-AmQsURQ@-)H|xESXXgPKg57(^-mi78*M%FtrG45IknJEmyPS= z_KJODWAh#tbYY2#)2NcD_YW`GDO?=LSyii7iy<1TcZa)ocJy48bZVx6aK*!>`|ECm z&@=bb_?mvh%h5WKahsV)&ej?6!xl=1rT@Q{p`p_EM)YoNP;mTWt!8*MoE8az)D!<7 z`2(^GzqXh_UF-M1UgXr?`WPV%RJ;~J5V|}(VO7V)&0UkES(Dt*`tvk&;1x^Px6OE1>{gB%eUche#TTF97Z}$MG13tC zb8&C%#|_C1^CR;eOV>$R54e1Sr-BV$TIG7B*$fnc3P0tA zUvb^0V;TJ#tA};}X9;}B;Am+IJBD}XZSlMBhX4w8RLK}&ef8Yd3^))AQAp$FRA-9k z{Dk{tU!V74`~Bx(gwb*3<*!LkAPoF>-h5g&Z%e21KV{1L)nSmKJi{rYfa-3X*xPgk5#J0m59x)H_pk~+LMy@g)w-CDgoPtOND zNHFKDIZ^yy<8;pagX;f_|NvMO`H&Fh1TB@N{;8uHhopl7?qb&`#`ohNZ8rr1U2fr#(*p zskkt+q^PKo4rZ6v`2op3{XInw&5q7IF8PsDmOFk9Zg_8A^OhWc?6tg9Jbq{e4}u_3 zc(9Rlv2r(g+8m z0n)ReJ_o_khm^nkF25@Zsuwuef70V5ot>T_NEr1$1`C<4#=o4-RTR-P(|X;UqbXq) z@^@_=4Vkdv!3W(D3o6^NO|0OF3_zYo*!yfp^8E#UO!lywU!mYqQ&UyxX33-YFISw$ zZP(jZt?DZD+EPsw#l?G%W=pm1>LiIgp>gbNs$G04pO-XP;UQM4tR|=7o=7&mLkc@? z2IY^2OwA<)4mC9~#TC>#rA>+l{XDg7b$7@^!r$fl|vU9Dh&tpP*tj^Ajm2y9&|+oXcaP^Hf5?92N^v5HVdWHIIqG&D*kqJ|EDL* zO8eCkQxNcz#$kVOVL;j*RJBuTT+yxZySc=mGem94n;ZUyuBjjAgb?Wm+f5Ajd*3%%;`f6;p&;_Q z^;qM+${_?XvAU@%H~)-M=$}fb3Cp&4t{6=VUhy>|J4z$?AY;YN@TI4Ef>`!OF$jyU zuAjYpKnw)*Z1{9+l?tY8WF*^^uo-4u+Zf`k88S|6yCB_dZeOW($-~K!Ak&8>!^xvi zp_(5Xs!0+5Q=I?LL9$Rq^stLeu~=6^0(R2;jClT%9D~dA!sL4UIa|O@X<{0ESkz4h zamr3=U^3K zwp*gM-y9{388VK1U#CV-k5q?vNEY`v>!--ZC_~YzRC)9BeV?e`7wLpCgY@@Q9Ef^D zS295(IPm6EhjLWN#arheH>*%8l%yynVX5Lm8+EgHctUlvV6C#6hGhH!qG zU*qSC&?(X8h11DkVhvBjBtZPRdi*o5C*YG;UAFU;k3sRwmpn#J{z-K1F`=o$pla}b z5~cO*j=3E@oVshWNQ5*_Hb1n?0@?omyZ|O)|JHBt?Of3;0=&^3xn0fvaj^I2qw(Ul zz3=UNAllmRrMach{@0}PLjt3rGp-~^B*u%daozdjkBN5kt2$uB zUu%A6|2pltwIC(TxUbb;w>2cb6l*2_6|HljSQI@n?lPS_9om3y0t^2d*qW5$#}B2N zLV0_<7j>`nI3fR_GMG2cPFbJ=fcCdBo*D<1I0cjlh3B*5!sd_ymBV)dsUkOfH}o3X z4Jm(@{FrQg?mw)1UmltZx!MZ^fZnYpo#k=NcI{F2NPl`-4hZ;QvO(2mH%HC@`?LGS zxDL;KB*@`Y{(?~zDRb!{x+3A zjC#FX``p=Zd+XhlOdOCbT{vyYdIAw0yo17gl7Jn51gc_euT_z6ryh@;hsNYzpVY@{ zp%UmgXJs{@3^JBg*#vQAD2(|J794my;Xq(Kr5wm)=<{U@Np|U;%o#3nbTGP6>r_g%CsrMBa-e4TE#B;0mX; 
zbaaj#crKk=LF;xjGok?p>af}7y6?4FemoS*kJx{doPlNNp7+~+?poDmej=aPc&D6q z&5tOoAUxAg_KJ!!`Ve&xDK1^yul-5-e&C{OR1DQIhd3XxxYwC9ih!RPyk#mft}>P^ ziHLp51pTv&B^#ay0ny`e(uEksBZdA`P0a;6aQ<$E>ZDi&)Z3HrK;$-vce)$OeZUj2 zsNE+9n?XHx;{6Z>N+SkS1NM4GWsH*+)^h{u>q$&Su&6(KKqqJ8O%<`=^Qu1RcB8=c zthu#)8$V-Up9g;QV(bS?V5=oUEr9J`d{AI?5JBKovtY)Eom>!pldGpA9r>PFwgTl_ z_+2F|P-nQ`K5eu4Foiyf5oNc#J9L?UEOr)+o4%tt&JGd>JE@usV~4{{Tf7v;#Zu?H zNqVoluWMFu_`>|iO(Ldx)(|YS?hHhiSeCoI`7XTi@O{c`wE8VY1j1k#LqoI1Nt^a` z_&qjK)BU}#;R7)i&xWxUyuo-et$&#R?c|gHxw-pVTlKf;WU7LK4CZ!^EfYny1lSf8 zZ4FJs&vdd@*LycZT`6_+AuVu3pe3=*|K&}f{ptZc>+G}VcO7+xH;7=9X=Y;c?O%^u z_$hhcu7Y)BwCmKJalNq-70_rdd*FKmYaxUM)>NfHW4+A{Bfbi47tt6uL->=H(txEr zegrJwfd>)@KMqwd6O!~_tFCscl^Fv(*xI)CmOWDlIv>cNfZq(5WP=`bxhkb}vPcjV z=%FqQJ%S7WI6e>Ug$r}{vLcpXsCFp}v$MxSK1n@ZfwtoM9lor#)5VU5jgsAA;5w7P zeLsPFAyPpZp&>`NVbO*KE6JinJ$Y^=zgYVZ{p`uI`x)Qmg1)%g6c;M{hn#CNl_ zi@DYx42yMBB7>%09bKuq!nz#L5re=42pJ#;LlUQu@z?8g7ef?K##5^JjHQ+W;Sr(` z*sGG3LpjP(1rMei$`roWeCPfbtiPfN42)~>Q` z7VFxH44!eVkndrcI|DX%k)!W=@b)v(@zT?6d9{E-tE!nHB`o6UJ>o!p`quS8-L>@aoM*S^mZYZ&vZ(Dy9>@a^gCLcz5 z4H0-LH~bQlAb|{^cIpfAU7ZAx0eR$I>o04PQx~W}^6biCaRScx*sLibpc%Y0OSqPG zaYu3Q(M&$$u4Z9?l5{{Jg$`8bFOe$}cHE7(`<`X_dT-aSW<3w|HYo%-<#XcMTN%bt z$)}3kw;PLEZ~Q&AR7Ap>el6WMf70o`)0>6^G-+N2#`W|*oVj;T^nq5#`Y%vGq^?>o zKZ=MknvM+PgRLkHHYW; ztgqgdoEj$U^7$>x7uDBo_i_K%f>CghqAI)1KTw^}9+$Eg3&(I;`&aHJKLEf-bv}C> z@xw(8D#?9y#$^~F%&-Oh&zIHB53pN&UOHU1i>-d+5u2{d2L|aR@IbPr?#rH#&>IT# zsGI%LZXMb zOkvr4&PM}5RZ7zwqLaATe>Z#}08L!KdLGw4@Do2x!}RvY>c89*#dbpcQpb>XXeAs7 ze%UQ75Nd2W>uzb5{}R_zJ+E|o9I{WumjDw0#Ru0)#b)HUn;nHh`TcoE`JxzOJ5IJY z)rXdkifkf7$#?Y*A&Yy|Pi!TQM+T&p*Z3>&tLap;XENkXYiCN>f8esQH89tF%^Nn+m_ zdJ&)Twq<(d&-x{$<5?M}*5MGs$%SmJn={H0ovb*2r0N}?66f~E%)kCJ6^AM2sCCV4 zP|MENI1H?1J5A>0xYW$S0jA0si!FZZGC>7HNq;YPdi`aq1x%opUe!1!S{m}Kv3zyp z?5{s+`j~q6ki!Tm%E95CUvPHvZnd`fZTTN!QdTI{;O87DFDIr(e8z z9}{6^Jl2-+!u5C_R6w+1(jf`0Cq&mui=30G<=i z`O7`Vx%c3pp|_ViVt$X`AOT!gr#A08jy5(rO33GSQq@oN^f=$~3hp9g=aUxv+fGcD zz~jzlOptN`ClLywU*byiWtLurF>WKgObN#Bd2ehW!emTtNd2vAp$4VGYk7m2|x 
zo?oI(j5ic-ec5B=$E5K(P`7+{(?GPaP=I?oV}%S1x$Z6m*+|E`y95e0*W@zx1bGa( z#@O%x!qYA#luhj&m35r*(=Kg#?q6^mx~OpFei(4*qaRZTE3P;C7ZnHn+q5VIQow;A zY0zXY)w-jYseL@?yyLZ!m^U(xKHlHU$AzE%J+hmmhnJ+}!A>kZzy#UbM_(6g+0ywK z^?V}fzT)(~5H3!fvQ!BY!HG6Jk1X*3=AE-H_jG2f>lwCW1=AjVz-B?kB*XrKn{~5% zEIxb_06-(u0>RMmgH2lcvN+WDA5#ZruXRP&6GHo;kDi@oYi%<7EQ7VCyGo20xyzOt z3dR}Z~k<$55KIEmRO7QOP>E1-P+TVc$_Z_5g?j(q>Eq-3i+gwV9 zL^Tv{eJS?1^0A)E8*I^n*B6R9*Wa1Q7A;iBSOO!?TM52$fWJ%m%S(8_pWl5C-kIW` z{ilIH0$DAd)PT=b@5eKTJ-R8`TxTF4ahw2G%J1guIz&Kmj?bIBBspY)|6!HT&JmMOM}@R{(aafV+!pU0p*4KdX%v6@Xf{A%#W3^z}HJqvo|! zK!n0_!LHrT#ty~C8invfh7fB8TtIzdAd1^c4qjg{iW;6vQP($MvYeDMU9Er zFZpR*ExpXNj2pyTfv!()W5{jp`W?=)?m3=YoR(8NtKDfM2Bm!@IbcdNqUh>*rin+2mJQ_7krZi9WvEPhQ2rEOz0+ z843ndAe4C$asAt)4fyqH)QX2LIV$q7oFv^VW`&iPJBmP@h{&v`15O=pUTsB1Cd**j z;mb)42}R~mZrf2z6C~gpG~0T>)wF75vLgj0(0Wm8hj(i#uP_L= zQ$#%CA=!jtqtULIirXZg9B+~V2Vw(vZ};-xy@XPpC2m25Cx)*Y5@|o{&XP|>Ykq?O zVF!+IqJ5C5b$`N3H>R29Anc#y^mQ|QI1i1f3E$mz-o?VIq%q^|;l!$v zj-xsgAYEy4-WNp?xZWmyyPpKh*}qKfbwK)MejOubie50dxP|g^FwLEt!#Q!Zs@%8#47lWL-+qcCQ2j<(S5w3)UP)?PF6on(~ zq1r&xV#8D_I~jqNiur_K)6|j`1J6Hog!5IUL9;W>5h66&!(+{nKd~wLmv?c#x*F_rrFBT(EGIz`&r}+;HM=5;yC)35-st93xjx$8;->1tA<@4>IKr~ z)=~?D6eAcgC`_9SNVoiboZPK{aNAPT(-oYie<}01yC3mUx^#BoCA-(WJh~Z-(w{XQ zo{g!JPC}v$HvSAD`2!mW8d?`8J8@TCmwCf~1l*uqdGCL+S)b9GvX!gCcDh2Czq z%T`Uv;#r@X`%`@)&Zr8gVYeb1+pbdmDaFn+`jp>lrL~tziwXHBG{{GXOqXuQcKVma zUg~%5xsn|4HjX7(APX?>I{ZKoR++1{R#c{$sysd_+5Gr1*iBb8anvLr6ecL3(=42` zyEYb0NEQuJ^fxBjRxyhr!aFPkSm;bsS^{Un%zc8+pI#qzN_s_JzmCo9YJ2$lZ&4TG zNQZF|xOh}wPiABggY&vOgkS(IB~IIY*9+xfvU)RZH`jar`>3~NHso|Hhp<1E?~!U7 zzhWL1t+EosCR-)zDV!*CY5e*APP{&-anEuwPde@{7!|Di5m8b8uUA<@u1E1n`l*8^ zL>?Po8*q$#nGrYZ?q2sh9@eIX1l;xt`^SF%RLme0(8sn`tJnA%=hKTLFM^MnmZ{)*|D`SSA3}3SQl3~NFrHoxj9xbu+2Pv?=dERe(B>X zYW<6PbA3Il*;u0?E7v*avd_bkDr2F|@_d}*+5+5np_(DQ7hbJqaKHzhC{J70Zrc{23t5(8M<+Mn;mNjEL5U_b39+Q(yV*o|({M6zv{YpA!3nu}2UPj3=G`SGC$#8$@y$(W zf65LcojD&hV%pp!oStU8exG-W7!Z+bQ$&xG#h!{ticDeOi-n_eal;m-@LJ3j?WxeB 
zIgVW~raUOFtJ3a>crUBzut&PeV$yn|!&xB}Sb2IIUjzFZz+bbgk{jmGNKA75Id&|j z^TYaodu4~y1lLHR{IIzvY$H%8X@*m9f$b#S8E=+WJ8nsezfHa2wiN+EJ z6$_i{4)2z)sVD)-ARi0`nRGA`k=3&$AXbD$Zn-S(!?Sq@SNmK>SqlgY>aSEgk{m;i zNWqF$Ow(97k)uXPZVi5>AS-@6uANY#d}}6^j3a0=It(M7C(rQZm*4qNP)-{fjlxpK zh*rY6^Q*HF-uj^4uqA9$3|eL`quORpwe&N#c$HlvkZAWIb*iYQHGsyiK93-iE@}@S zn8-r>dxGe#eb8tjkJN?}D-CV8JmUT>J%lar2||eL)CRY zYl)>iy{JB5J8OXtH6Lpoc?_m1W6ZX)Z>9%dE`+ZzZ9x!KDG$HyM7ZglO0O(=KM=Zm z)zpV`>#mi!IKjjm5cZ17mI|49NeW-Hvf%`oVJn)M~2C{!7k;8t(SM1jdLpH90T9t0# zJ9#yH&fgrWTGwtLEQd`VVVV7NP6@M7!-X!p#OTn-;nPVk6i@Be*U-FH|HN=yg~nY8 zd6)|7w0Us&Fo?@M9Q9m;Ulp>vBNcP_O31zbvCt7PuvRvH#5!|MK0I8kvNrc~<3E~% zZC(zpx4VA4ewyuyu$jdF+N8ncvwg9z z@|};ZoE1ps^EY$49tr+%mO@($IQaGzmK+^iKZnUc02PUMXNxyogRH+iDX=C_Sb-nr zTzdXb82}5n*HPC_tyPB^X~Z3z3KUoIG#rDZ7 z*Z#F)yz+Tt=lR^`BKG@erfB{mCnIz~l<%H1bxEtu!a!ABlS*m{Ak3E1dUVt#Q~o@Z zO&s34B5Z#*F&FXD_C-{O8`ekUYUbaO!NTck(2_h*Lk;dWjNEShdUF=4+(nO$|1$XS z&1M1kl{%KP`>0|Zv`;mx)_cinqUsShmzpwn-~awSp%8mwoYKO02nH?WN>GY4rWAxQ z8^!Y>q0ZaGexdwuJu9M;kE0)38Pi}~|J}NWPF2NnL@XV6H`B>|-GFCNhF794+Lgu1 z_ASv8H{z&MZGPBjF}}pA&cITps6516>ic!ZM>iha2sj@|d9*M?`!jeoOqD6yUI%!B zn}@j^fb7(btc8%$>@}t8Tht)0nqbe@87U&~}O< z;oqpv9+5AU8KR-Xpzhn0|jBmwP!beaL_Uc^h!w`1UifXNbY<)AzytT3LeF>mVBhQrBzVXTD%M z3TepW%Ksh*)0Dlu##n6_RViZ~1`iaIJT;fUw>kBF=#r@~2zv41SsYSIJ~VNvnZ&K9s5q)6my|6TN} z)77o#5HH8p?MKLOeug|gpBz+SKIBOX3vqlGhrGXZQ`HoklNxEn<2w7eFy^AgD*r(X zo*cf51NkQsIxdL{%np7+`l0aHFV5+QiM_5BquqD^`_X`#3X3GVC24fZOw+FavY@7w>$rKe>Wd1KicCvgbS%gPeE=FyJx@~vU5D%eq>X5xRsTC zXnyT$@p;$e|HVbe+u_YLMpT;p?0#u{u8x|mTnQ+e@2L^*cbgEFIj}|`5Om71iH!~V zYOF+G7${uS>_NJL@%Q<9zvBw!&0sDJm}t6sTz#s5322&B$u(CESNOf7jO}8tA48a;`-M58qo@v zyHv!l?;jrWnf>>*2BF;zaYrCJ{hl0yBoQ8%1-Nz%p@UaUB3<5PR zLHNYP9V|5)3HI$0*1lcZCoaDlT0HIkZI{MQMzF+suI}O(TL#)|w~q3=P(>W9=_a1q zs9c0$NBRB?S~^KHIwAhFaqf51j(p_&qG%A2ekb|TmA}53Ho+@mQ%jd#RhVCyGf5U7mDzZDX<;-mQ zTy|W)+h~8?156~q``wqc8;4|}U68;6;6?O}rIL~i{tI;VaaiqUT|rPR*C`>H|1!F( z+mF>%)f?{{SWY|d;HPGLXR94)s}&jk!$9`)Ms4uHQh^%t 
zCWW}uYpl`R`H7Vp>#Yk+79StZ^#!J9@x6U%1nY#Y-Y%Or!&DTt=#m7>wiKk%{AW>Wq=iDEkoyG~m3*PkhPRVE2=TqW*3C|X^A zpKY|SX0>05K84i7_y`6U2&B;r4)u&Y$855{UFT+b=2#?~F`(N;ivP=&%xGQosQ6go zj&^o`1(YN`#6@!7?(~Hixm3e)zCq|)b433P1OliW6RGsI%CIQAy*0bWYlA-aI;dM| zi`K|LZS8mrbAZ5{J$)o7t2>yMfFUKric zv6Rh_u1!7_-CRmOK9KkAB+Ku2M^3W;`FBTPBEPwW4(N2?kE(qf`f02KY$(=F7T@4N zAqB9gV11Z+9OD>g1tzffT!t^uRRoL#y}TtY^c6Ywyd&^m8tna^LJPCt0DYcBB7#)C zaZpVqZU6}VwQwy zT~Wr%WJKHbs%qCjf3yY>h3fJWNJrW`{1X;p)xRhdB8bA{*=+Y?epHKew~2H;>6? zO>+1e+A+o0I67Ey=qSQ0a$6ZTQ?k~&ZWA`FoL4Q|6CIq|SxcuDs8!5u%rd0yN4~em z(_NSA(18g0KQBPVKj_Hk8VK@|UK?NG^!DMqH_yT0J5Pd+f&Q1dSAv-@;&;3irTAQt z+f0!2;()pgR4LQ-?t@z1H?a_?<@q>TX}) zUt8Z6`-N!?!X5mO$oIs&P9k&iI}1rqBKhimw^fpi`iM-{bVCw4+k+;55AJMLsA&q$ zHb8>1SjUb`7$s_yK9$trTJ2b6(*Yjfm=HX;7n5lQB+coPi2ot- z*SvAr(k2%6w|@!|l8xOu2MPVrj0|45YgMQn6ip;mlY_{YjM=boKzL(ec3v+H9Guo! z&QOVx1NG$g<`kDSiW4F^rk|{JzCsPG+@*L6`tFCMCIx_cTC1-lkD%adKC|y(May3+ z-vkF9hOn)tel!JUGsP@Iq4g!q~Z&%d>Ar8=XEUttQ_yQPbouwQPa-sek)HvzxC@4ubO?DO2s zcrh?=Uu%eK-5zcBkQg~~d4Q1Q6~XL6qrblq`FI&`jF-?GYETk>_i&uLx{5xMa2#y8 zPls+wzv+-ncJA_{oKF2qjecsWUJX-oxd2bACb{JtmnshF@PuVzzYpuXenzcYTk51y zKMNJ3tLDh2*dr3L&$U*~8@vRfhlzY&R3pdK6`1EeQ&-~On;H3N{?a(+1s*ldG35bvq^iIt+Bny zVIfa?8m)cQ1pGaRtAE=cBT3YBT+#AR%{kxr6uAvH*0--{U6s$LLgP%k7CKLrS*@?Z zuXzmguB$O><*Ib$drq@5b*gk+_{h`MnBsqcgLyp%1Ogl$Dr`*ru*uJ^8{NH&S2ksg zJu2@)C4GMmOU98_FdPPK@_fpXfY^~ya8q}t#~GrE9$6bb7QFofMXeB}-t(A>UHy%D zajwbd)ikL+UeZchEeW~4{6o=56zM$JAI*w=Kb-KwlEd4o z#iL9)4-Ini3|`9qKld2vN$MytxNYUuMQ!69khCK!xrobURP;T#;a zj+Fn(X>sAu9r=iVDL)2z8p_Ts+@b>Gj~2xxgc;>!)(C!&w@?L^8H8DEjMjCUYweZ> zb($3#hzY%e9rxwSLKoNIw21@r zMW13_tZBwj?l9eg+*7b{n)eqD%`Sn}axkXqb?vWNa;xq{Q7&*jt8tWoKy;; z`x#r}RxfrSeUvL0rzh16qI2l7dZ~z+kulWO(zeurV&p*t`b@Dvd7$1lzwi7xxN%nubr&5k;IDDBTl zdPW1A!kQ2aCVwED#jO2}(U0aRo}G~Exl z2RhR>(hb%noY9x=K~0s!nKg{E6Zu%3|C{V{`h}1U1d~M4cEPtpn!lNAR#HBG}0~I4IB;KD)+MaKp^Y%tc&}`#5h5IX7&-ymj9DkF&YH-{y&G^6}zO zG*6}dPE=B`wKOk6mcz37QR6z4I4wB{Of=c!LcY%}hxBV`&sSM~Xy9W-N9Ylz61~rA 
z_Vb2Fs?Cn$fU~VQtK~led1|)(nM+e%E|=~&b;K1KO~xDNwh%4uBRVbiDk8u3TXoIADL)I4r~>MH?=Em2DwQp+1Dh_bSG5gk~< z@xD!~eZdC&DNe~HQ=_#LN80e)kI0u^=)&a|@>s8*TTLCBO0|73Vy)?2C#$nOPUXH1 zU!%Go;90C)sK|5ZzUd=;LyNZPI)0m)S3PU-vT~P2QT4kO@0^IK*GK&_X{hbJr(Ii4 z^cE2+>}t`kOK+?;siaPm`nzF@>6~Q9SW5n7Gg+&fvG;~t(ger!_*+rcEqrJ~5q`93 zC!;MA2(7q?kq))+^Yzw*MKpHyA6Owd7FEVjo;->!ti}DEJ1q(+C3z>PT0R37Yim6m zP+brrkuGwBe{&_|x11zHsDRLeD4hMtcYAl8NrdmNSqpJEjbslNp^DLqdOvgRcLyzj znZ}>~V%9EBg+`Gq%>1^>W0)@1AvdCm{QWI{^Fx<-ln?hCLn;1uYfxFt1(i+q#)WU6riI)cCY2P=`ujl`EZ&eRi`*mW?6nB1P=e*ld|aG>Yz=Wwr@XW2X4p72 zD`Olry{6OKSUw-a{}|(ofRTDz8&5ZvJUhxhaejFvymy!=R`D&G*tB0|ii@X+RkFRt z=}Vf%m8SeV=<~ge^lZYAkDj|xTUd&UciF|oyC}XX3%ItJj%~#p9O$qG>*N_)$6ITo zpQA@IW4}aUiI3m4AYZzVOD}J9)6PN?6EFP+HBFhBI_PX1KOYmiUPL#RMM{kp<6gP# z2}?y`mh%Epi<(?_uw$g~ksKZw@eIp7Mx{t(KM%frvBj(9iK0Vn`~dg^3W~$eJ3U{^ zmjns1XW;|ccFnRJU3tE6+v9uYYn}GI`i%94O_e2fd7#o2WnC0{ScwsgWAe>L2R_23 zlSb(v)W<*|4i}dr-;1dq+*CKQ)HHtkzz9Ou=Dvx_?l#tp4Z35D{nZoDZxChlK7~d+ zNGd+QUP&-|+^An9`GR)mT>KAgUe3C^KZm+mjeeq(aUPyFM;ocm9K-fhV>m&W^hq>E$BZ|ls%fOCP z4{j-3rDTq8^>|B&{wZ09vY}@j)72R=y06gqL)LQ}du8OGhs(V0nidqJBL~7i(OXdx zDYK7g&o*B+3DnyvP*Hegn`lL`iP$y9_OUSF$Bg6ssXU0=JsZm>^7_?kd;U+UIB3hK zzlu>F!C%Yql6H)A1Zus2>b;=#2KDN2bW_$@cFw8pYH~X!U;9mJG;Av``@dx0Mha4H z+0Dmo-mb2fqGVq6=6UVT6ee_QK2ZnDO`p@Ol1o0+wM14SbU1W4h(p`GEp9p&Cq}$s zfIn@hErkZ>&nB~77@xLA@7KIhXuo8|`-cn0ub_B?nZrh!Lq`@hP11iHBjR&s9Bq3+ zb(I>!#&)WTyt1q6G-l6-Di5p~Hvb0lE!W)(+C?Sek+V1@G#!xY0SXD6oyz8C_&QES zs#~3JJ1vtpn2J2=7MuJMjJ4A=D_!&A^yNwyjQ9!}-;d&#L`S@i*|b}nf>DsRSZdRZ z&vSCtq?A~dRM)5}6X)z`@eWlZN8@rU#BbPqOJ(|q6OHEZd*HizvT&kO)el-l#uk>U zJ>y*+X}tZ*X(QJ#dfuGXcStZeM0vs_y~q*eW;7d%rj6-p#u%GQ<)@{ASt9vdf6m zzlQ$5>|P+m}(9dU4qP%yFC@v z)9Opn=C&Sw1kfC(LK2^&oTT2|{BsS9IXcu|PES?+SVc1j>v@G3Vu;olj82QmnHi1} zglMh>%h9j)C8PofE^?~<6PO5Q3dvtl2a!JZbCNEU6KF758d`gqp8XWnDfJRVQ`e|j z(!Nf~x10)x=BK2Q*zLO_Xmz&vxqf)t73~NVW-I0oWm@3`_Xu?zP=We}FGlX_#+&7^dpX*v9b zdu}v9{dC@EKf!KmWTwX@7m`of$11m9M4%9vG(65ba@x`ra^dxs2d0KQ#L-yBD8Hg9 
zY_PDFEhG97o|LM7+H=a#SLObmaK7ICcqg3h>lw@zD<<-Bqmhb16moE-4HgID_wN!+ zHVAoCi0!r0}w=$+Fu{}!jSHMQSHIdxbTA=*f@+*udGyQI~>$*_PB z*ex}0xq;R0bCs3FJ@3Qq@Wnm->Z&!dKTI2Z49u3o=&}e$ zyT65r6;@E;oR2dc#YeN&3MS!lY)}7GCh1$yG6Dl~Px^VBbio&+N;7^YZ4AM`bg#NN z_(71Q&#Ktf1auLG7(Cty;@nJa`38XA3;OJszK&*{e@=vR7@^QvOC>PT})vB{`$cdz8}NpCiplc-rFizxae%F&^(VbKVCM2paL?%aw< z1mRM5=j)?%9AI`!q@3ImX3B>F;%>hCFxQ^ejEBe5&D81;T;zu6TAx3}=qTI>Iy*v3 zu8%O+-9QsL{Ec~Q@mh^!1;y>+>em2ufc@dIgoC46@b86sJGrCdD5kBF<)>S*?DBG3 z@f8n!ni1CF^nQmrmfJ;l(n;`HE9S1ZpEv7)m06e5n5S!1Cr1>3Tkob-(bquvgUb+# z|0oMzI->wr6Qv~$aKm%gmd;r${CKgmoAjC4l;Yt+-86cG=aG^PonkASw;dC+LPut{!k_%pqyU{@&IGM?hm=Dxbq7XGzMJocO?zeM|EsE@PeAiAaJH7O zF_-<+$wHHe?GyM3AAzE>7I&LQUR9{F=)CC(iutiUhQf77T~0EwmTG&;6s*2-)-Jl< zi!g=&V+Pby_SFUp23i3wC%Obbm*yMKMfvGkuRNvx?UWp(YzH!4#O4GttT$d%u1hn4$S~}+t4ex++E-%xmeV=&ECH|r{HKQ9n<~0itQd!%5{xMbolc;TG!nBF zm76puSCBz=KZrvS{QuarvY?hD0o-5yxh-WbKirUhLwWD(=;U#==JG9^8th=&qLJm_ ztfSw~1yUo607`sM0?o>AW{y{z>W}vgSkv<8UI7~WO?JG%O3K|v$!8C3R5uM@-TX~J zSet(emm)^+^oJ>tu+qQ?yw>G96{h-zSbZ1s^LNsr41rgmP7zceClD?A-nxH4Lbu-?w1uDr0XOb}GkLt9k) z$Jpp|^ZevwS&qDhgQd~ngrL^L)Xet)i@fv2L1W|GoNT)C0SJ|nMH~7Qa=SLUtp3i{ zAKq--O%p(;Z!BsQ2SC8$>G580;pg=N_%gnSlrCO}VtFlbzP~+&T)iF};gFogh|)u7 zgYe!bD`r+Rq63`uq4O$X$ zipH(}YX5*wgQoL&c={kbMO&Err4^ZU zGs~JPciClg`}wE2lI9s&U+W@vPJ^KyUg^mWl)60UezpUx09#_4lv%4r{4EElDNouj zG@W{#@3>%Z|FPY|GFiO zxtFK@!ritA(dk`1J59XX#wTtKMn3v?)@fHUxA3=}@?@BBGJR zp}*luLw}`Cm($y>BCrGE;SOP?rYMFMYGE#w z@BA?H9Imnf_h0*5P1Tt-tX0aars%cGOg>^(Vs^0n@+9A>6I@4@&Zm6BOudT#%?I5j zOtbkS9XwN*MUECo?juNnALH)kZadq+im!+qh>(__=T!#!uP#;Z0K^Ks!bH*93`v@4 zl+r5W>=g*|HHzQ>_6&P?Tvrr4yNN+qiPpBed7u?M8+D&OU~mq$d|BUiCD%l$0@I1eGy*P(J_zO-5v$MO{2YTF^_>wojB(5wEr z9+j?6fILx3R(8ESo7*WV8iquLXG$y$F7k^da7*GAukO5lf816WoFwvkZi~!!3rUjp zJpXV?y`;=OL1nZlAF7|Sh71A8zA8EPaic=3Je94-FGK9H)>M5MuaD{mhx$232E+N? 
zn=;cYpl3$%68SKvtzZ?qqEcKyc)cSIZ1=u}(+%YpYF4k(<%_;)u;nW@98Al82Rm#4 zuV+G|uqn~j2uRsU`dU~?k)s>YPTBvcaLcc;ORo?*;e=2ib00O8o0G|-s-f01qT3+F zq67XUmF#~%P$z$3qKJ<}d^b+%5=j*s#n8p7rkYfLX>PZW$jjOIX^?102{{_cpEmrE z^zac0Np~q#n5Dz($$4Wv`L@Y?NU#ED79r z>TCzTzleapu`BAS@4}0GVwR+21b^yEI;5H*&q+6cyxTu5>$9Hj9x>5D+ci{R*W*Jfge~YH`ui0$iZo2AR>LV=liNsR+N z$yrq+vesFebWwHoNP&oe98;qc3cwP$Sm*wH*zDDIettTj!*Q_P=_k0c-lW}07~Os( z%{cUFN9Oi6pO^C%{vfv%as6~e#$obd1vm3%p+dLlIM?1V>LdX5 zy~j>=sN2Ep>IkWhYB#t)bB-l=v*MjoGhd;KhOnXvIX^G5K><2qmx{7*F%)~6F}nbN z6E@H3Y46t5m&OGl=8}3^38IwOI{Sm-FLP=ptLrmbPAUPz9apWfoFhS_@PFs;%*q2UUiq%`k#0uSsI~_`@ZSc3t246X4z{LcS97EU0=aGESfLOKey7?7# zI01(ewR93QlcL@5Z%_;|$xh2GH#{=xm9a6iak9I5N=8O{+8q{^A7}hch6?AU+1)*g zz7hxNkR$0efgcUW5UVZRG1e+2zVwAB4MXfX^Fz%m3~v153Pj+90lQkf0HAf4EK-amK+aZeC4C;a2@z*2r!5v^Au10 zRT#x1L1vJ@^*g=1$T4;KVy*TLF`^sSFhB%aE`90KMn1EbhKWbdZGSU0iHv_f&hf2c z@RJh`@EyrVc;x{@dL_m2+dXlW77FMa$goh&xB&i^s#_plN-M}vR)!Kf8EYd4gYAw2 z`=$H+asSm*Pk^wE7zuY!CJ zSNnr-2d&TMDehQgnOEB@b$q2GpVG4X5I#W!Am{h!Fk`P4pP8gSPQz=J^;@Z`BJK(|25^5{!AmNP!HMZa4f9n zAr1Know%*3c$;7rGQHUfzT+`}FWJcUhq|(NlECcLq@Pclmqp8tE23AFR(;4M4h1RB zown$|WBIrn_c2iavHaA9+A2Fo_53(ie)F`fMY>ARc7no;?RB#9CzQ;uL8~mJYZbh& z2*SyHB(K|-zn^S{X6DmXtCg*D(^S^}CbE7r3&(Fi&i_N2fdicEA+`(gKPyVowooY~ z7L05v|I%cb*qv_enIz44Oh;naHLk3i@{w1U)EBIFTE*b<5J(tqh*qU1uvk2l>dDDg zV(&j^HzWdYP~4rMOEQRJ(Q3KBe=y)4e+5+A>GyLOCHF|dYaJ+Q{Vq|>Y52#AXBLw_ zhl#BC$8yRUZ@^NjDnQ_mWNCZlr< ziBn`5G|Z-l=j?MMZ_$W!ZeO|TF5#m1-`cEd#|Y_8C~=Rc9e5-pvQ~Zi6wWYI%`k8~ ztu8<5H_*f@wxowt4yrty~qxVIP(~(?z;E#{lEXRZrhvA=k4c61~vAy4x6Dw^$|`? zhoyfm7ah>o&>WYAkL-~{WJ1X$KcB#z>oGdM(Q~Hw!{l#L8Eoh&tVJR0&# z#GMy^8}GN2t~EyMg!bGn4*`}RiQ)^(6%yG^;41cA+qQSswP)7#-3x%6PNrYndG&)Z zD>W;gTkJ~r?8Z1B%TMxg{}+5oG7=6RyMr82WB^b{HJ9E-%O8w+Z5mqb7)?Q&(5?S? 
z`ME;RBTRVQTj+KU8=bgrFHM5lquA$iGb&x^sEwSN1{u1vJ#(e2I{ zxR;h&;syXs6ON>)v}hVX5d0}Q!1bctwD~xlW>g|kZj-g_2<=}}!dh5~yxhI7r;xRz zHbU>r?Tx#S%2oqJkzw1sWfGGJ4C7pH-hcgVhK+2*)=G@CPV#P=nTd&Ml-YL2iI@Qs z2vop9Ibl4M_w(hJE}(sFdPRCPwk`IeUe~^HO6L>TI)<{1C0h7a{MynzDxCUU!ghN( zlC}H3H{qUbar}>F!MDc(#RpV*E+F2*W?|M4ct8VbZvZhJT+`Q zZEJ)eFX#7LZEpztG5yzU)=K{U`lPQ=*nF^ki-w-{JIhQT3!3z;XM{$3UOUEE&}Crx zG2+A=LCjXU0Tr33Jg8vVWwB7cQ zz)urq#;;ke4R}TrK@Gk8%cnZ1t2NmV@xSN7MEgWnvVCy*>Dlaj9kWfj^;S~M3U-kT z7pMv%oi7ZS98DPbQdXW8;BB_0?S40--l*3jh{Dav8T$4cFC_NRE6Q3PFn}NY*Llw; zVmE98?w;$*&vQIomk8t*1POZQ0g8V308w~e$`NCc>~FoIQoBTkN#s6c?fp)QW)NfnbqB9 z60ePya~3cAO;^tr7qgvCiB&UIk5A;kSp|s2rArew#=tTE(SG0j1ijja+{$cl zd)RQ_wpDQ}3Rf`+xcilJS@AbQE&^~r$1FOlu^u$4c(>7g%>S&Pn6tbjK*@OtJGFrX z-Y=M{ZqpBvWh__}YTsvuUYV1n4OCNKVO7?9FN%<#KR@hpL5{7LYZ=e=A*=fQKG#)b zsMwi;zFsyq2d$cHH%ADolFvgm)Z#tJ|6GEgus*j;3fAauIudalu1x(B zzRZ|kbVE*Ci_6?2mr{bFu^ZhN49UBw0QJejWwn&C@%nm+yl)jP)2VHj%EIdvLqj71 z3+K}XTBqbs(CRxc3R2=!K{P<6%*FRf!fmTNQ&ZE%GV3{GTP7R{T-69vekzLboxlnN z4YR_tUFV_MO|U$AQsD1~!ismWh@sKd-~FMt(yV|1|42AueXT|!e9LD{Jie==fQk(# z((PAWMw$uk+0}L0^nt`|m*q3Wn5cQN-6+ z42|#hiT8z^utL_W^OLRO^X)0Kwvn5V4=Gpo+{~?af8TgGS$yJiBu`R!$w2Dy�?g@!+z2N|#9 zetV*-mw#3Deb>vj>yR*d*JDh-fz936v$Y}5@EtK(R%Ys!hu^ip1aca#MVc^}Qi^9j zD(W+9;7F#ow_$Te?FDwnA$`Wpoy4U&&W?X3eGr4hdmkJX#Vo^Y*CAJXXU^N(nO;vu z_BZ$at&)lvDZ7|H5sYPYmfQH&Jx~oB%k%WKI)S;V-KX!uxIzk?b=mXF9^ewlJWU}n zsX>#gJVV$LN8BAM_adtuO>C;R=96Okxny#=%VX4W%XZuH-B+P&CPYI`(s9}E?}hvx zrixUvo-PWOaeYyjb*(PypZCrlI>Qzea|q*poTeSp=a;0|$<{6DqBwAeupmnqpvfC# zuIuPCdSzk(KMCF8`k$ft2wGy@K2j`ZsC^+c*Y30t`J6KZHM?TfkWz4YrN_xTF}nO; z6-SQ*+VVnp)0uQ!&MpEDOm&Zq+x%H2JSTGXr%0r5A+ zNS@H_TC>y*vA``rIKIxP>@B{d9p+fT%%=&hQw&o{|NerrfIQFq7jioFOCn5s@i-s4 zc9;5*{8v3f4j0Ppx%upA9T=J$kBF=3;l6!NY5O=D!3%LuKKOuxMvI2cgT z`&$1Wk57|rrb7LUUili;@~Zn%({*{5#9v{Aojz9^|3wDXByO%ys@rO~A>=Z6Uok^F z+RIqLX~Cjl8!dGAW{jC#4o;>}t@`bsW+2Fh@!E8CN*9Db4FFWZ0OXSCt;|9bn~4688)>bw#NO3 z>fpFkQA0}vfPIiyrqu(!&I?LPuBE2D&<%7?noZn+#uk~z^`kXekq_XBmK#P(318){YLtN$JI^sqv%!!&ejfm2$ 
zVLAvv=YhS4$ym%u(p{r@dt(!#}4HUYP>DPwoV zD+#j_#>dn~uVXudPko_38dQn}FCQZc#>T|o5+TZF@v@$Ggr#cJ+f#AY(OKp}-9B@2 zP%E3?y*Wms8~vKN40&ogSOu3dwmQ$3m5PZ0K6r4fkb!FB!{1rdtLyd}2M;i1d6)P& zD+UbjerAU3UO%&M2sy-$Dv)xesx9;i3P?Yk8I$>b^>?tewDj>I0Xn^2&L&2N2tgkA z_4T#1c>Nx3yh}jy29(a2$MlM;w@wx9ZUeBxYf{4J5&b*K)dOF@>~C|41GDAzdi#Y; zYj^#bjv)O83(lciYaX;-cAQU=66|_absE(7F~4N|+P6YoD~s?*mKql@XneX=!ADeOz z>klb$AloP9VIieq=pt@pY1gxH0RTiHi!yN_BxmQpznhzGgQI{Vmym*|okfZMXH8g* zGDGkZZgYgUGmk|n@P&%-B{C*5W+qbR(xr_zDyjrFwYaKDg}asupSFLW!#J)RNpmCY zSNWJ{NDGy)+-rFR;g$RR`BMQ}2hwHdfL}P_SlGyb1qprkF83HYnhb7XX{sH7C!Lsf zCbPhLn67c!>?9y2wp^vtxXO#KBm%!PpwG0EK)ATLK?^ZVFz%uw;Nt%tRY00}f$%|- zm7zWR<;78)rm?S~pP{Lp;q5_5vO8pFxO>MypDF2wVoc2M1bbDn`K+)dx=i~PBFj^T$RcLFq@bjzlx!RMdVj3ya=mfM3cE@~od}DlfMzbC+J?n+@ zHH#kiAa{rGZGxO{-?)9?R&}`x?*V=%o0Txd#iw$>P?Ll3N&kmWjNqLGa(Fzoz-c$G z{_Wt`94g4Nf2A$AzFmp5VZCBKd+ zGly=!DA~DiLh(tEuUU!KaTWY;>*WU0iJf6Hj>C*$m^+8U3zkNIiRG}VdDsQ(j!(jl zcs-tm9~#MKXPQ+j191FpgY85g!CH%8P{@o0?3(ar{6Ok3fMn*?XDf@cK5v9_l_xc` z->XqPtz`cx>g1$`x@`AsLan@kj)d9MY!8KBG?J+0>fm{VIszO^#EikSE@`ObQb8tU zv*=j=fN>SFk(rxax>Z(PXHH;!o_94DJa4CjM|ig3;nu|L=S1m^uJlt|)9+|_E_sH- z%>tj+Qr0$E4^mZCTSw&y1|sB06)SWB?c229y8BaKeWv%586?Vd4k^LdkX=ZC)vIRe z_`%B4uI>#qe=$ezeB)_+406_%fcHp}NMj5hN}IHgD$I|E7ND39Y>H<(0EGNRC)_phOHa8!Sd zn%l*i0+7Qz&GOud)_>@3U42Oa=YyOMdYMcdH~7hOMv%%Lj+^9Q&%%O@kegb0g60@3 ziI=v52}hKX1=*bGFf0Pe!MSfB&M@06w+s|~)4%^i1Fz$&tGrDqvpcX~Pl z{`_HnntZ+lyN~MP$6;olDzlrN{s95e+N!pxs(}E;`Cn6untFvy`si}va@3O1OR}d+ zu_fS4KbTZwxb!}QRH6aT;bRP5&cVlULkwE6am{{`J?muoArTd-FV>`qRtEdrZ!m`c zK~uT$CS)R*eLlY#U!fkMah9Xp27!mB=<*XIesT-(Y0oc8y3{s0atR)hf3(Y>`we*V z#)L;roWM=naY`@aO03LfzzjRQVv#|(lmNb=A;3=GKDXlG=iuOAWj2pgnypn%U{O|V zy%&jnzLT<#5>3$1W^WZ9ddx89o(NqkN*z&g`EKGt6BpKFw35O;6tMl{Hv6Cc9|0=d zZHqJnpo}U5as-ihs7A))MvpXhm2`v-xOw-9kcPyacA>i*7I}OMObWFPi#q<7yWPV2QnK~kZ+xYEEkNEEBLV|XkCD#q~6If5BAf9y)WrUxyuil0C+`f+4suD!^flY zcw6wPgZ{&La4-$!&EZL!M2Vk|~nZvsi`+m8#EZP$Z=M)Szhad@PEQgUk( zr-ux!zwu5J&_{MlVdDF`q=g?++OB-7^f>egZEIa>#S}M^T;nQS&U`OLojy)}`QELi6*qKI)i! 
z6clv0$iyZF-NQ?v1QoMr@+U8O1qf~G^&M@7rqudil-$%lsMA)%J5@W#qDeSo0lhiK zg$NP|!hRMu2KrwGYESR5hLZg%x48moUx~H&zrkZNOF2+7D8AG{1*Axt?@t<5DM~Px z99lY@N6f|x%S5m-#z-iQ8j&UakE;RuTj;iQ5*u8cCV{jFU3@aTXMu0N+ylh4lNx_a;^R$X<7q!vq zL{Kid*&QO>@prsAG3-+_xM}=77B2(~BOm1rI-)Q_N6m-zAftkFD$Ji^@~% z*A%51zl%&Lvzv7p`2RA8phSlp&X{OhYHTS!7aeN?{%DOIDvC#Bq-5`J*4rOAL1&m6 z-Lxt9pe-x%vO@>eE@@sN@9N6-p;7Ph$r*;ZxdjAjS{0PRkL64-8EYL_`zl-UWH8B_ zJ|jwVgfQHVqe7{LhON1f6uB5O{`bY9CQHT1GmJ%RyH3%x0^#L z%S`g8I{7qV1OxZ%?abV!wu9KJoP(keQ#+kTaeqSOFWNJoA7F4qcqpJ&DcHG4PHMaQ0bT}b#(FYk{uhWr}!h_Z(kc^6Eh zX>+)223{<`SmPnNhBV?z5@HD6n}IXeUvizEzJ8%6NI@fZw9=M+O_tI6?O|LN>r?Sn zkad#107T(s)u4sB*t`YTTib#Zi@R>!(4#$0RNZ#R&CXr>%lM|p+=Sfwd=fGq@7vxZ z*XNEKa;n}PF*`0wUajo|>+@D|0)El<=h-K<_EO&igu#hn)vm2(*GsQJSRzDdjob2AzVduSWu%!2;i`LV>8bE<2S z$A^NWXKgG@#Vk*-WB!`y`}9G5`Id8neF?H4ipDrt=9R1R3Mq(mJy%juamAlK@I5FFpBeP8BI{GulC#!6SxbLrxErawzsly zlfV`(gM?X^pfKB5qvE&>* zA|k@ce4n)UU4YD`>M28@Ys8mp)5aF5bM~jsC^Kf>Gd#cRwkeQZ`2NEgeMr&w-aeD> zyt>p1%r4*fGYMYgn@)xe=F`QKJCJ+LkTR`*oR0=3;4@=(j{)H5F zs1c9A%PVxhF*SKhmugbUfLmQrcKH7<)A)|2f6!fJ(1Nxj}N(r zehT^$0iC_yPyUe{qhUX77wGwHU1E|^?TQ`kDCzui1T=NDbnS~L=Ibh_1mffl(o-~B z-5Ry#cj6Vao#XyZ3`%O!Z8c+3%6*8X=gOUDiN{GO`cvgg-Gv|=*}D!N2wWb|fSL`J zg{aYU@M(2)Q*n2b6EafQG%|#}u#7_h#;}S>RVOb?#-75aK^<;9(U%wX`C`AO0+VW1 zc+kGA8fnl)n%=!2{nX2bC+fni$GQYtZO{{@$P_j#oXckg;}m8I;?b_ub&Qs0^0Mi1 z9ztkyWlYM-f{|uu@vm&;^|K;i0fY(|a>L(Ce{FjvL2am}Muo9OmCD}HU3Mg1YN*?* z>;8kRvOIbL3EQk;3p(24PDt2N1pl8B?|q zH_QAy@A zc=h)E!w#*ns4ATTqCdrS8l1Ty-x~)yQjunQ(9c{<2&Dm0_k-bkPls zx>9=E#_1n98xGjWar?ymzg2@pn4D;pj9NY2c2X2J1tk#3SpaUEm9ydPGTPEPNvhBNR8Fu6hD@VR_P|nla>?Z;x@435q=`$N%Hq` zz3q^y#QQGqANytPk?)L*Fa#xEWMKmJb@WcY^VME;EHPoqPB8)&R`-Fz;X%}%M57U( zHnAs3A9&uAEnyW|d0*<#EARKs>xrKEUyX=|IUTHWhFQrO;UZ&buMxmU;}!T}Sm?YS z`L9?)6Afti$l4oBs$!Z3;BSRf#>w96sfDM{@u>Z$bt3Vw|63{~k=keQH{fmYTFlF!vK%OuF z6PniqW{MJHWh&N$2nd9$?C{IYi0qF6vBV@oe#{b8bPM8ky1V^lAvE=y8pfy)y54o) zU=hmH{w*g%vIA<_lr+xv7CjqFLzlPe`$w(2VmMt+jLauN9d1bA+6s9GBhNd6*p6fq zOu$D-Nv_XHr^n^t(2R(Ek18P4BRaRvSLo!STrVZpVamN;bL}v*6;Q(dq4v 
z;OTCz(9g%r=;B=H{#`orfqz#F8572bX6o=#D|7wU7V}fN4NekLkUoB%|C)95cHHMF z;rf~Tv_dFLz!iQ7PloHYc)YVfpy2sce|xO@bLH2CLwvc0brXoND`4-oWq0I~~6gFIkD^y7b5fIzIG;O$0o`@^Pcfx=@eV(0ba*|C=kh9d?= zQqMa>V*JeZ%71E(@l|QWtHT)D6ZIV091NKh>xdO%nb3q?QR^74bp30*wV z?nHcqxtAwPq^yH4?(5NSPBqC)V;5LG@u}l~d~`MtgePnBR1$C!4EtUT{pFgO;SIsX zma82h8LI4i?fRAuvnSq!qf^Ao0Gjm(*v$CfM{f?gh6w969?j*?csXN+%yI|Db#!-S zDF@QrAX8AkjNdaw`ACAYY|E(2Q5^A0?T@qnMy!Vr^!=5z-@P3ImE{Xj)(TY$9Y&dV zMBtVMKC;0a)Oar6)U$lwF0IFau(z6_3;axJA4(L2eji`>;+zUwSUd?J=ZnHtLkra? zDgBz<)fZvF{V^X&w^~F0ut(%y={uDuQXvJjqLVN0_LE00>HQQXSU6d}oK{s9Q6%h% zDodSW$gWa{yr~*~Sko#}b0Q(&C+0VIA0SVCw^bd~SoQ78;#?Y1uz#*Y90dxIJ!}(R zFmKFApgarx7LJ?&N27_{5S8K4y~XC3z=rkL-f}2)31~P6f*eUca5q|Xx@?dFZk=*^=^kGNba|(ptDp{dN2FJ5k7c zOic*2HElvt(BpXJM~Kx4FYyLWGAtG@|lF4IeRDf=ljJq&~msA7Eg zR{ya@x~q#hqd$1Blqb-IJP$r!qV8jwqCQoLFr@Ay$rR#$MJjviDQ$Ka`hv!+GfXJa zB>!9_jjG_hGsZ%v4Ha9j!D00eDp?}+$2ckerU68Ws|L||D5YkW?+U@O!R`W z<^Z;7=&Br98pTDSkV&vKxr1B<084HR42|>+cJ;3F=g(X^b9zSkx9e=pwf`)QfCJo%H`!=P3dEwdic{H z{qQq$wKEIN-EgSr=G;p8d{IBMs!7Hc7(2~%^~(41G5YgA`KVK zZ$EwEspnqmIB2ijBcF=hIXfJ6&I5p2M?ZW1qEX)n|2Z~3e(>-if*^Ifa^L|v*dJYy&VQ8LX6F;0xT2;OYVnjHW@LSUkD`;P9^BzgIVf88$b6!=hloZ@37 zMS;=jNN+-R@pcE7%n@Q*>2y4Gt6Co;X?)}amK?GjyVq3}02l&ICKCyf$5AAKT9cy# z-@CL_+$dVQ;u=a+9J2Gh$MC+X!>Z-U+E&IZXcwWmLh^CCe>$W+A@ChdR*Tnt6i09x zC~kKZ-FTKCOOV&fZ3&GN)aJrrX`}i3U8F!wNh_rIGki3n#s6c@XA{I<)ubhakj7QjwGv{j_jcX z#R^dzZ#6f3OT#Iaq#4Ttb+dPFQsgEEvf=6(1fW`OU9|}eA#s$#QHI3QJQY@_xL(ju zEyMPL03ruFt<$Qt^?()-j0gcwzy6|C>6(eq1GCK7v2dFb!C@X(Kk<==r)FlZtT)ST zt!V^`vv&YjUtiMXwx-BCnrnuIo{3~4oyoR4aAS-IEDVb^~@LW|FR3{1P=8V9=P-9PoJEQ=-+UJ4bNGIdgwsk7eDr{ zb)N{~FmzMf{hMQBW1s%?r{8lB_znW$hrjUOWJTq0P^7xc=OVdBLSXJWJWk+PILU(m zEXRBK(zWK=f@d4y+&g~TiCiw{dA`(^moL3sTw8INB$68461JddCK*Ffv{Y*X0FGhF z7~hvmVJI3_>9*FcdA5l$qHyGcxR?O|Uzbp=*{+p)(A>20HE96D{Jk`CHYcO-X3>)t!6`!T2gs+n|?jCm?3Bs{K*9R z@a@wF_YLo6Q+HUVVF?Ia?Z{C6*ih`jnQV+9E?l}|n}#k`msi$`m0Fwb z6NjeRWS+o~(8&H~&tYGd!%&pMQN!^pU0ah(&GxD~I7-V=rHCQe&7CHa34$bZx!jL= 
zQIcvUsajS$QdFVaU87SYNN#c~aenj6YxnUoA~SZ39_Z#@UAeAZf6-~KBLu}xzK_oD zLvSJ_^UaMbSZ)5Xk3aOePkrQVi)g}3WU*2|`|9PW3(B7Ocl!KXuUmRJj{;K$Qyx8e zitXFS(v;k4l$LL(?XDu``1HvC_Tl&K$P5=Owp=gRE2$I54hQv#aj-o}SV@M_$D`b)w zin_L0xpKC?@S1Cz+~l26L3)ZOtY&$K;|W2MBpY)Aa1F)ZOZ7)`@q6yMGZ7O3z_7iV zY&nh{67inr)m!aiZKv!L3tS5EL>iA%Aa^?TR@>eT6Hx@Egd`nHqA1d;*C6m_hq5Fe zFDp)fkzN40Y358ZkVIVgKQCul(t^Uob2;2z&q_)OsuGdd#A3Jeuzi9%9{ z$M{=ZUQ4rk;qaXrVZQbj2pR=XA+t^8#?j;u2${h5d#Vqy00O_Z5Y(<8I{E(T6L*Ej zH`dlXrO769HpX6Fsk(+D@El8%@nmX;mPD$R=|V~z$fXZXPh4wS z9m4?t0z*?QakVUWHr)qB8b?rMy=CauW@Z#eV+@X=2uortiQ^dJ2Z81Kj_dmYm^b-? zY|An=#kNc*KrJr_m^WUXG8{%&9p6w_R#r59kJ}+hGLcAdJWrBjgpRtdb0jvD-TUvi z4M(@Vw(eN27h3P3NH<{g{5=*}5=WVvIBQGwsw%flO>5PwYNw^+qQm93lBRpXAwp2> z=rMX=wwvvCEbaP>y>&j_bH+|EBS(4z>rq;NOCp-HuU?*8Se{>4Is5A6-IWhB6Qd`O z9Jm$!nQ~xjsB8JQ0RZX!cl6EP0iLt5Fc+;_{h9bp=K-Z@e=R;XnV+jkW7zNAB2v_=E{~Wp#1xg{NJ;g9uq9 zIg}Wh46)F5y%(>nx_}kZ35LY3&(A59?zUQp#c#j;WSF|INy<`HmThEr>sQOrHF=xj zfFN*&Bw2>SQE=Op_Vsff9pm?ZAe;cGF7+3(p6f`paA_jT)j_FvLcu`@(?z`P#Mi;WB!Ocof$nHl zWC&7-v$(6R70Y1AKynxx!-OORf#dGHqijM*2uv=)XA?ISXG<-~bsgJwXojXKn&Y`} zN8gl9*+F{soCHM+i8RgejF3=tt0gO@xph-GhQXNjW-N8ucWNwU^nn>W_4^g0Y+j7a7usDT;4S=%;U+j1QTLD9hXgVL2a zW}dk3Ly2qw036$X<>IBa<(;f}C|gM7`-q#gytu_ka%ALjqgn2bmMK5VaQ(pXe8aSD zAC0{1U8R8w>799EL-zGfip4WAF3F(#hq50yI+c%Y%YI#7ul?28`4=xQRvVIG+)DW@ zu=LDGp+6lLIC^HdAh7hF@yd;w_QFE*)%Es`nifX(*2+a!uCvJ;#mBj1Hl811;;AqK zrf?L;PznPQM@SsQ_9`&-gTSyoSu;!ZPO&c8yWUlbVHs|VzGe{Db6tU_DS}Aw^noO- zUVY`Ia~B$oX82aHlXp`2iQcI3f4T3-EkrbFkvlpRYd5NU+PL>N!c+|Bx$|?+oV$8` zt+I`)0MC(Yp(#vqc)GPZ4*>m#?uw1fHi|3t;#%~C_oQS)`?}L-UdCg&k-I-mp~1@B zrBD)e$C3TN_4!Bs0SNzgfbbLl=J(lHQfpQ8c9Y^_`I+OM7u>k`Lu2`s5C$ghdF0sP z{QzLQ-V3iSwl#~PNVl=-1%Mz(C~`q@VsdKg;KA7_m{_c7yBow?hOVn};NP@~0fqqK zn`AtT(>wt9rp6kT+}I3?;hv$iE9=FLjb3&X=i)*#o9P=Eh+!Q`s@9vWW=mH(5f*lV z&vx3f0tn`~j;3k=2tZJ8N+^Q(p52zU-eh5bkpYVPt^+uE({2*J2X5$v;ehKzq8Nlr zZDmF`TNo0dhS1jRRvC*Ie2fI89w=3?xAyNuYLLo}nHyJKxlZx1+|g4m7293K)w2i3 zSeo*DF9<;0>ztc`pVVqOmf3E1d;qB>Ikt`Ccru;J_jZNPnD0T&pIj8}&ALPN28JXh%B_jAY1omdOtD 
zEbaP%<#-h8W44w|B?y94Ot;x=s;ign%0h3ABas@U^5ax)B0zD+wp_>d4aI3~1h#ST z@S*9MZaGSrRJLs!NDgoU!3ZE|41pNW^`}LS=S?5ONa_t|2Zf_4fue9UT*MoSty%8d zo@;u$?s!2!iFSZkuJ3N|ZRU2Wcx;QwBm*0@WWTkpx2n6d-QCSlUteK(!l4r+7h99e z=5{X#a7=Iw#5M?4^lja@49|WG=;#7AJvOk~G63Mr`AaXo@V)ZtA^-r2(Xn)*e;jA{ zTD5B1b}CmW4302j*QdU_!y{0Z!f67-5Cn@N%I(9joTjX4hFz6Z&G89}MK=TY9oi)T zz%Z1fX_lb`o^hM&a(Ok>84#&KcJydB9S-0$*PV^ag?$I2LfCNS=?&CkiDWvL$1n`X zFqUOFhK>omDDbMPmMfK3Qz};~s=6&Bp33HZqtjY^EqsA3i?FGx}>wi;MGf^M+vn$>Hf? zKu`d{e2a-?T;KOSFYsL3GF^9%m4)R+n(GpGdZ&>UnM@{wV@Ot{i>u4kN_Atshjm0T zluKlCg@L{t;DqaXrG@!IJ~K5r@s=J%TQ$NOKGm?pxXO0D&`^WKG0k!b0(U)Mmm0If z*?f|p$Z!mS^~Y%dSZ}J&Ubt3lX^pn(lkAp?y0@cmcP*z8F&}nrLaTFrYt7%xhH|-~ zbgn-ZkAd&dwwSdyR!RAdO2#5AdX<=PS?heJ{%3`U1i3{7CgJukD#=yxD1zBt81*qmf`WGI+ai;liVjJ{l={#>U3J_O-9w8V=tP zAbj*Up9Y*3Ic7(OSM8_J&s|9Wx0;) z0LBTia7Njt5jf^hKFzaqk|zZk*Hm?Nb#4CIbUSXU1IHo> zBIJ;J(n7|@^{aE|&xF$q0JvCU=EPlN2e*Cow(D)w+HKX?sI?g}4OE|r?013Seq5UrlRXG&GGzUEomut!X$GyTIMy$JwewScK>n$k8D z*>jz4S(_pC_RIoC@g;}wV;dFEd04!atEMCR6I+dU30Bb32&yXFh9e7@Vpg1ESIDwHg zN-_X|GXek*!j>yj9rIu8!R(IcNBAEyYl58&L7}}*Xr`KquZvZZF{ywGc=B3n&kk1s;Zu2 zMQ;>N(EtcWu|7^i5Ll*;VK@Nb1c7aCD^Upk-ACUe5n*rtim!a-D^EP}#GY2Aw>T~A z4E*cA{_BrF{&?tm`s}mMo;r2v^y$-2KKW!=?|9+Dg(sePB2=cl+dz2dzx;h4L4o8j z3>8U)4NR6}#r*K@Ga1Cmr{e&iXyz-+^|q`zJs^a*majIzR4|-K6ov+JsYEvC2B_uu zdnQzD+g2=3cfs|7!1o-r?Yp+_1R}@gGVwUir3F$_^wpLX>JL1}HgzQ(2t5&SXo}$;p5&lcVOcb;+nDq00>4PBop8}*|3Db5IByZkPvv0qX_~daXhrPK~W^+ z#C|+u>Gcb}aL5R;%;4DY{-Zb-4=0{&YSQX_dHK3c2rkCB?Gi60xZJRZv#siez9~`L z#%J<;|KLzQmnCsTDzCR{rON7}F7Ju}a1x<-;8I9@U`Nr*tp~@DM@F-*4~psff#08w zcTqnisXV`~*e%Vj$rfz(W3t+{kUvOd3qm4gn?}937Ab^rg2ZtAW*QcfVF`-CaolQG zt;V`R-CK+=@sn}Gy zzHXaK{8%AzVBc`aYZV&K}ot%spr#LKP&7p9IRho=EWs^yCBShi_&+HC-!Q+;%H7$Y|;5Sk^oxfT}%UJz`@vUF3| zWW%&Q#|GaG91Fp4@IAorH||{(!f@aPo)cB+Wcr81WG33*Y_g^pvc*uJ001BWNkl?F zp$ML(^k(Jgftl&?;lKRWx98^Pqe_+IM-G1UqaUSM;Wz%jZ`RvNBs>pGy@|e3ByGFcJx?eS3H$JaAn@g}@6eJtVIX1YubwH#C;)8|iMt0U!v%ZyBLK7H* 
z0E!?OhP=tn3%27`B_%T0MNp(|_#{O;wq+Vx;0HBH*;3hb)(;j5Mu{we?1;Wm|SSo#tXmgkmF8O3!tQi}TG&(c49mQ!Gc(3@gOMWM;RE zSZ#T}Q(kE=zrHQ*$xo8$A*O!{$B>CEA3C!Gz)H1!<@GB#8a!}hJkvikUs1yHZI;B+ zgd;WU#fDsx^ns%1a9j}ap{_}>2FR635pZBWFDg^ zAURq{hJCihWcsbHx1se9f*>5rBohBWdv6wF$(d${eg77F#NG4eR+&|qwR3AW$!2T1 zH8YZiBWN)o+30~SS+XB&j~?U&65z1_88$2%@Pqsy!?5ADWCN0|m$5Cuvd1%W4@RRI z&C=Z@n=G=5#j47x-0zZijT`H~`5|uJytx(0X*Q_=7!QTas#|x9i2wZG`ObGv2>_6i z1OsEn>rMKnU)%h{0fd{In;RP&v+%;p;Oer6ZEbCBZf?#Ok(Z?*7KP*aXP$AY1+Dtl zTW|fi72#j}>tZxD8$+h6sfA4UtWT!qLABwQDqf|2b$zW^Hq*VQW|LI=K@&dnHJAYU7_Me4^PYcyay%KD$G*)WvQJ8u3A!Ux&T5L zv!o|GPv*z(%B$DcUi)Lkj%53z_q)4Kb<~^Jo>Z2vE^pqgFRcs?o(|fF?VYU)98;Y# z@)p1;BY)v!cdrD@6K?SkXCuoC%;h^-632V@)$WggS;(mZvjL&PO&A&O)K|UuI0#uL zGrjUG2En)Jl1~;&&b(xROBH8wobuvSiacxWYz;bxMF~hu2N5cg31|fX<;5#kH#Uc@ zorAqSsdP}SyFuA4mA#-c-+~f(I@o#Gee~Wb&pgwmwe`}~mlm4!JFE5L42G%r=r9aZ zK{0@mpd`V_cO9;#Z+K(DrTEk%pL52s%otLx>-dgM2`X-ivcsMaPD1K;A8e(>;c)V` zXeqK`=ZwG7$=wH9<}opBR$4G>tBA6Kbe(Qxv0O2u`(8H!+}OC}zL@khJeI2VO6jgYc+G$=wXT-&I4&gw#Gu^Q0R z=I?PX!&D4o-X6x9C*w(+B?;v56yDA)0!Rs+hB}W6@SAOy5kkbUR60>-82iQbt8f1N zdAr@qjw}v&5?Q9{QSG^QF7;7=G)lSRytT7cP-ukEQn|z^Elv<)Oe&pZ{Nzg1>WxNy5XRf>q0rd2497A7z%_|$lKQQill5!Eqi*Y9?}(>IgYja${OXOB zH?CDa*c*qbFq<2!ytw_|uZ|D5^Vfc6p%VBOEtP^l@$EN$@ckd`?HpStGaEon$Jl50 zxjI?e+N~A<(4g-40U#`Rp2o3~B1_`d?z7hJvxlB{<;s< zR&Cj;Ez2~@d%Ib0UrIIGoJl}nxDr5)j?&Ror&HvEGTYNCtyXBmB~R~lGhi98Zdz0x zB8?|NEwFr)O|-}$iy@D7owJ~l85mCnI&i} zAtgbiG&*83Ns{Em85kiY<+!^)?(R=n3DdG%&+$v9?V7f;xxO+ON8?HSWW0C#%EFB+ z3yby2(O}#ijP|<2z3vbIZmlicT3eXah>k%VgH$YJW&Ojjhx#3T&Ycxe!w71IUt@k1 z5y}rAqS3xgC#K~{o`pw;A`LZwm=h4G)u}b-rKbQ8hoizTv>130JU{+{pLd15 zv?!EfHcfQ8TeSRn;nYtY2)}^i?YH0l#b5lz%Q(;Z8u7&F3kN>Gy=%>4_bl62A zwGBcjHNEnMZNe#p3?{ivYh};3=&09y@^r_T@>g}kqpoeWjt2cvm~#PyDZ(D_b@p1_ z<>kf2lHI6yj1T~*_)f)lxRBee{-bAmj%`;QfxZqZ~Uvi8gKp9|24Vujg_mLb>F0v{E?sf=GH>(gCD)Cq-0dh&mLgI zW^S1au6PDY&Bz}i#CXs@zp>I|L zPhiu^{^W8I=?Ud~PWzz4E`;4eKT;?F!L1>;25G3%@wEP%ubZ*9ER#5;leJQEH9F)+ z+u_sqi>1U`zlEFCK_Va#P$U(Iq+TrpfNNW~Z{NIq`{wS}qxQ3p_Z~i-)vqcU1Ad0= 
z5-?ll(J+~G%9XFbbo=_PTkUpxZ)bPw$&)OJpM3Bm0Ij!H8vxE& z@&t?PZgtTr*NIaGgt8Y`z=|-R*Q$o@99fnD~v=oI1JGh4KyY#4;G-aT-nJ@zBy4MtTw_zxB@j z-+Jf%cmMHk+_=(w>y5iV|BV;^gFm)eU}5vh#=Vc;|Mq6>zxd04`uq2v{{3J5-S6L@ zx}*7y_2+-))qm?x{ZwHJ^PBHIe%y_I^tAn?-{+Z-lcADAD=B#z0)znD@15TC0L1&F z-1gm#*W8d7fM7R!)81ly4vQ`sr~MI7fjZk(dOTr3RT}=ft?1?tWN6H8rfIyKh$3 zZxwla$9&PGyT3E&wz4EP3}bb5rP-*jUfTeGD;qbiY}|PLkN=sidvCXQwx;qtC3Vz4 zu7}`h`=h7-oD#obP*Pu9SzibwwU7G4G|kds7sma3Tpjog({?P|rIecXV*P8&jTisu z;Mx8D&PQU{0)S!b2U#cYcqV_U$|0EMDlNAO7}7Klm3`WudvW z;?j_pM7!I6>t7anud7{&+GNMOi!%Spo~AcVikjxu+QJ}A-o3wtK`k{K*fk`O_9&GaXwaZR!v)Ak-K_I8&of{)Ux{m1vSL=(taWotZM`8Cjb{^<+w5RR3ez{^8%<{{IX*;E= z$kTCaJMOgy&+bR>{Ki@AHY}@DC#GYT7bIpfOIt@ra5R{H1g${p>Cc$BLD`b=aQESb zNtTou+P$-5UR5T>Wpve{C7Z?J=-Gq!3lt1W<=5W$2DaTW6=A}AlWeFFc3^(%!A$e> zTGhV@vCQ|r_q~~z^EqOf%N|y(R*Pwh^J1CLih`euSmrPN55M7;=M>&brfC)ngbB%n zj=H-^XIqTgyuW|;wleLX`PcqZF8{f5``nbRH`+X<|p(8&xc2CC1v5dP^5ZYtovvm<{BZx0p=8Y0dT574Z}1i zg3Pd=5|j?w;nRDvzdNr1sVppSzVw<~ssMoHdI#Ijs2i+ayK$LSn3OV#;$D9+9*6zG zu-87E57Y$0SapS%F0oup2qyHrv(m{^C1Vuz*BhQearba9j^Z#*ee7hfNz( zQ}9pa?PpkaP_4T`C8*Ym*UBctacev3wa2aP^Wj<4aWx@-#wfmX&4){`J?^?tIN;w7b7^u=5lMV_qewHi+$|{NmPW!3=?t zSIhK&{wqILjN%v9BmB9a`?+8KlF5>OC-~Odv`lXp7=IyuNE@UsydDwTp z^PTU0_q)ZUL@^`ro$q{ShJu9zX5J9>C;f@)CD(EoLCB!EXynYg`kgS_Cu_JoU8l0v zV>9dLN4w*rR=h17e4G_Wj-p{Kj;m8FJn(V$xU*1z%#KlrtOG&;zLVIH;i4coP)G=r*? zT4I*Pz3gZksLAT<%J38*FkED~GXoTzj``8{>e`yQaCK*APY8hlT-yxi=+^DSBY>kz zl%nE;t^Lcs^OZ(1hqLre|P-(<0 z$G-OUnImiQ;6A{3kl2hcf-FXgtw2#D28-)Uci#knG)<#4=TR;-D9jMT0LWOnTUokW z`TCzq$Gt2XXXF0aXk48`o`bfG+UjgjuecKO_LE}maI4MA?XN9d|JuA!%lN1@IO@#o zrU<2L>sRmGxy2aS`}lrq=SdM@JMG7mDz{(%_S(%Cm}yaF)R(S&?$>z!+U<)}`trZ~@;i;XBRw@|}(kz>@rj`$ukIE|u0Fsd& z?CN9$QLmW3&{0nUNKC-;?b?c6SujjHiNa_+n$6f0dIiTX&%iJ0xBE};#l7~Z_3YGO zaq;yyzFS)^HJ7W)*KE(%N~M|T9PUl}`>eV`EZ2YKPg+;Mo;-awZ$E-O(w&ciFjQR! 
zE%jgzo_qo7tw z2rfC7Qb@rC=cd7Gl@hZ89PfT!SNs<-P+#adXFxTZlz;23x4!qi@6F!8We*z+25-If z)(q-q3ex!j{0lpRKcR~7Z~WsAs^0S^U5YudgW(AwV1jUx7K)DcAK&L`I;#tR`0M{< z>)zX@Uv(E&QRPa`yeOI5*6~yi_kkG5tInDPMRmAXFO?m`H3&Z4gHc*{#@Vx@IL@VO z64xNEd1|jiFzQG7I2A%_n~}QDI9H9pYy`)m`H%0X<9^|pGH9Pf&r`#4s!hZ3l@wM` zJnAu<90Hz*Uz zC}TyQ;`~trk^w-J%ZJYn9v=>7W}&Xd7Rr9Lc1pEBO877q&-c2t8LdmrCF zVLs=I0AvMV1wa_3bY*pQA5BFiXufxv26rupbWsZ*!q68DKnh6OSI2YiQi~cLd#Hy{BUV!JuRAe&FTaVMd z`!XH@0JZ({=GV$g>tOn^rulO&%`flZdB*dMXQ`6%{L03{L&nERXL}Z_GQG#$innm3 zdgXd$X)PL$X29IremFeXihHd*8Yyvhgu^P;yvCK%(ps>%{+vm^UT-kbOdThH=KcM6 z`+Z2m(%P-&l?~T*I|sX3@FYp4$YaH*Un6tRMr;>TQzR2iFg7mUE?5jpA)`3X^SlUw z!5Ar}i*nYqogxHOCgZw|{^7s>^Pfva_=nzN?K2&p3lM(iZ~Ps}eBzc^Ww|g4$tMGq zMOsRAoGI@*j_a2ul7s@!-(n_`B0N%~);GTSrtA4LEIidecgo0HuqzGQG+c{Lx(EHk zT`=79aJWz{l^y0=6rYnMjADK`N((S<_@--~9cW0nj53+zs_rw3Vn*>w#lEn+6OM*? z8ut!%loXTEAQ}ydKjAp7(5)^4fY-c|YixPt_-J2eVccsMWaFt0!Q47NkImJ^mB#9| z^O$$>_WyWPadsRH#;v2lRCWsxxX#*g!!(YYC`l%yM3l{RQshZ=p@M&5_1d+U-)Jmfv0V=#v~}<8kMI2`lyo{{r$osy8aH0n#KhTn zCaCv(PY5|4jpAHrr&@qCrNl8=r7{(}jwkU!s}m+1@~EKkf3im&Llyx5u|3-hl;n!@ z9I)boS$4VBK!&}%*toV(Iy2*8Ajt?qO7@1MVnaz8GfdmGZ8S&RiE%gUY~=?Z%Vd1_ zjc>pBtrHF@$z`WM+Ij!?i*%PM73$TsJ%w0s%E+xv;TBZGGgcavwYnGB1_0b%F8j8z zb(H+dy@P|jX|b|tCl#6tp6_*rd;5ENnrJBiVN99ZylSn#qP=M`%KLlq_C1}&fDt<0 zEzL?*O$S-YHgsE2zHwsmq!-D>(})2~Uv(_u$m)TqNw8JQk(N~T{i2&M#4 ztp=0Wv>nGUQDzj|)AKiGn2LVHXFI~Bl69l$ogbR&hFP46qj4gYO1P46F^ew3o{R=6 z8Rxw{#1q}PF8!tBt*sHIIl{Q;XBPy%OvcH>f04Gg?)==p{`dc*Kl_IX2>&HHJ{KVT zcmLNP?G2Jot_Gx(Qb^7bAp{d@81rO^g~c~-uN0c*fBsK@rSl*-Ht)lf2{lgmI1x(gFcp(jD6NZhn<6gt)UH31Tq+IV zTcjD-jG%eG_c=#A3FB~-Oh(aYkf-UOb5Q&_kB-68Ix#HM4S1Gtsk0;oEz>NQfO*cj zRQc#=mJhd3tFEkFSzWynYgWYfe99vo9dRy>Vlj;JnX3g&`o?IRE#8cZli#f}nY&Pz9ggDeE&Tc>pfNB%EkWm|+@> z8syY>ISIo-d%wN?c&=yT)Ef<|lFs&%bkLfQ{X^8yn1Tk7*(Zm7%(bF7UwOT`wkdfY z4u_Aoo<)3mkL$40td*|6`cuen4Ep_HdpeT8xU!5<&rw~fX&k+ z35QZ?q!q>p5jtC}2}T$pf`Jk;+kXInkcvy0W}FK_F*GZ_=a`L>8)ZBbkR8|Y6q7IUkG0>X@gR)u@Wq;E3+i*Y9avB z3tF=nA&ihvGjdWQo@r2&QVkdYQG!iI5Mimpdb+m*np001BWNkl&>{tXm}5kZ 
z=cV*Xo(kk3{a6%w%M6@N9Sc*-7ElxJozP z%(!p8NlOSYp`=lS?5-pg^1=;P9;bE_PF&d z?zOc@DMoHkmYnBV0+?V-(#aT1Z^X&;K2OJb+;aWk&g(8Lr#3r83L@Gbi4TA32Wx%kcR{nP&56Nm?JeAgjN_1ru=vOJ@?iJWRSXca6C z4Ho1vSx=EFd+y@mLOrlulg_k!LaOcd;PHMB6KYweVOj;5$1uL$bTvSKlAjs&O1R90 z%7mH)-%x@W#*`pNkV%PS5{qJs5dbLa%gdyL=NT0)RhSBu#p7|POdk_uvA?G<(*R_ zPyWnTK=|bV;Wz*K-@Pmd8!5qLVgh$u*QUB&t>j6Za>3lPRb86j!grrNOt51v-3VO! zjcXME;8OkmdmnY4J{0}Ec_%Pzd7z}4rSK4g*4M%Gr@1^%*(fj0WX2|vO!I23wz{%Z z@vIp>Nu>{a;ltg|B*{VMmSt0d2qBgiScWl9d0^2qqZc#&ut3CIsN`HqxXlR0P^c~q zic1zF1Qo{3lpu^gEz5cahLe;lkt>mFB?tXJ!Ni~hX+CW24LT>n#mQ(;Z!CD_n&~*g zEOo*h?8-2Vb0H~Xj%DTw=HXBe_i+|tmE+n~!>=SH5VyXi%*?eyPZFpQ%6rfEh+$VvoVWlaJ=T+-$$> zE?>vhRl)pXUAEap0j`FTo#Zm%auD(2huNl6#zTu@EiwYi@qET8 zw*1eqzH4QGNOF;8`6T58V<9*-%*zJvT+1SJPHY$<*CZw*u1U_^zrAi}5{8}qz25#7 zam&alb8HdIn>_vj@B}f7E^M&!5(ZTux)^umxTE-4h78rNqUJ3y8~`v)Yq)jq!4H2sR@41ud0}be#aGXGOlraZ_P_CsuYmB&0m47>*ZwEvR0~EPcnaA> z=Ltxz;{h^V&6|A5*eKc4gL4Brtvl)gw4$76qh3aChJn!Z_jQLJ4o^N!Y;_fN7 z7-0n2?Bbl4TqvnQYdzb8SKR59TBKWKLZ!LnQe{F(snVQFrAw}bF)}D72z@4&^)nrV zFyTTV0IGR9?vIXIw6sV|3sS04IGH3T7T$~*gtEf0Mhead@?D#;X%kmUm8PjraQNL2bbA&22Os7<3mUEs}o*|;hGuL$s zGm*hyFrO)n2-DO6Opy{GGA+-xCtS~a{TpvAEKJLeUVr$(hxe07=-Ad|GD&mE{K{pZ z$R`6t=)5KAXIM;?Kw4nJ0CX;qob>X}mKb#az^1VF!t1M>Hv`xHWd2<1jH_HIE>%Iv zJa+{Lim?VwFj5LAMt}evpKGkeQR`sPAMEY!E;O4fo3~@l`bS+A4MyEVsdPbRntsKr zHmG3(p$5eU!6k=zb9~b5jYXMEa+&1vGRd^kI^%NwaxIo4ixGq3JWJEbIG>D^$VbB= zDlHXuKLDVU0Sx!Wu$6>E5-hM%g9Z(3mxvu??I&co6ShzCM^|pY`r>^kIR110>EFwf=oCa1dtsRu(CAj1jT^5tuD^1!`HjQw{%&_PQTD}+ zr5kIDuWU36^6=fAp^(}jV96-ldVhHEADBPvZJ65I1<)`gDX~DHp zh!BLyd1zz=&+m19634x*!-qSk^cSm*a>ch-8)erv3cMVI$?k(6jdmW$XmEUan$2ss zUj5b|n^CVP_}Uxw1|wwHXWB~?Ba?uf^j#t_LeCDn!{PX-H{jy5hRF1hVT0BN2#}fP z*=&aW_MJPo?%XyE0|17D;e&@C3ymGuExDG+)4ko@L4QCD`#GR^x)9H$l7R5cvOr3q zq%;}!Ew;8;t(Q)8#ofK9@Bg#^Cms)qvwZc1*MifVj3k#)CZbFhnl(o7nOWpBN6KZw zr4V|Y@-UT!PyK9whpE)HjFRuoY{U!XJMW-o7{;wTw{PFMlL^%trJdm#sC!yfW{VHRulpQ8pN# zO7pTP!h~|Eqz1#ZOxq!pEtjp^OFjTZnRt79D2{dcG#fEWGcEvOp)|IGGX^DKxTv&L zG|3S)Xl+drjb 
zOl8W2lnM~OtXPb5k!Czf(lDG9=BU1B1Au8TLU6&oWH~P!foBQYrol|ciU$)y3`!X# zm|)BJgy1xj;@QxM%yY-02xCeqXiYI91QCQWMqk9eIvPCq;cuVJzyMsi@j}0Ia324z z+`Kz)3iCxC-Qi^ZPA|yM@S5*omu@?ZamOIK zv9w%oG|#5mW+KUDlwD9x6}1<|$e@G~#3(8hn-{kdr5eXsnCmbVNiNT4eTXD1-g)r@ zkqH)Aq6qV=%&S;Ib!PrY30m;Ys%unTb|#);_tA&#ovov8R~ue+dA)k&`n*_raJVzt zdo(`S$|ghV2Eocr;@6oQxTPwhM$uHp2%T9XlvGLz$1(}|1Ll>AJZ*jW{rRz$#`Rae z@l!wbpZ~(Qz5>E82ME9QZ~u*a(3*NXF_Q)h%y!Ew8>N-aLNTEfq+G^nCWKT<6M~f1 zm@=h6N-_JwDZvO6nB$ISSU9T-pZ`wMy${oahhlIzZA81r1iDthY*bmDjqBmzuB&5A z&6ukfUV7>3wQF=1_>jve6JaVi&-? z)F~H99LM2!I2sD6in6tUJ;R_v$~eh#E_;kNJPOIg)=8zJQ$B@U2;L74kxz?Dmi&F3?Nu^Yv6^KkL0jCELOP;g; z@%!^|2mpp@uim^{sCcAOS;jf%QVOi3!3Y9y9b02s%zd9^ElHK6S(@i^u4JT?Sf*i8 z9EZa=9M3vrrsE2Y0MoIc2otT8R6=mx8B7Wd^94MD7LG}6+cA(hc>JL7AE_)XUA^;i zbac@9_(y}?k54{AeT|o{JV)kG@$H3*?^;GJa7vziW`D5v@%R^NN8pM7>F|;#sfX=ifjdbIDl-i$-y$)x-6ov=6E`ndIFx7w6E<8yWaO6hde8|^EJn!Fv{^5Fss6rgE9ZhG7s&DIsTCP=zl{p?WQDQQIIU#RVu1!sOxh!9j0w$|O~CNXJZN z3rj1Fm9?27bR5O)?r>bpiYWp#>AR}h$oXQ%MRO}EQyuiwZ?+y`_wQJfd`XhFiGXyo2vw#XOX0Lc%sB%78tJK z`pEPy+*@Emm|+q^7s~EW-(LLMmC`@^?ce*2_nst^kl9}0-vI!Gvdx!X3##>OGCch7 z`}3k!!wu--t<+zg5e9*0t1MPov{LoL&Yoq^JWH}TvRvN{N|lA>N^{9D?L14i?!Eow z-rKYKm>OpF>Ro8wI%>D|qyzB?MP14aGpHfUH=3IQQM*+0UAx%6fKmp<&k3)-?8qiV zohPd+OTIU~Bc42Z^!ULCK#VBKq~I4e-k53EmR7x>?0T+KSCjy6wm|@jIU{-8WvFVx4#h`OA8TQjS4ytv(S|1LEIWjx#!|^1El9bv$B8(Uo zbpk}_C(ttp6|PK_Ad}+$Fx+o-=beX3p8d+^QgL7Jb%*ahI;fNb*D^3dHihd6d~*^UBSoJB?um)bL_W0InrvJGM}d^Cy9CnQng!S|Yo$@#3qu@7|sHZwaZR zOcc-s02x1JCl)7}P_l?>3MWYc3^qe0*RGe`X1!AJZKZWL%#vJ6b#WRvmnzLgoTN#d zj7B5R_XuT%VE{smQb5RHpFoN}ZzS=#2S!jp;H>Le=vs?kd!ul#J8JdD^C_v6G1Ic^ zWxwiMvyT%-Nvk^;&y7wSmdy;aTJml!1!d>t{U7wB-J^+Ano+EkbZv`L^88L`mh+5r zEoBl#N{T4Sq?BALjR>C`Pe!1TRw|1%PZ42fBA);uLI^V~W}1|;GXo68%H^8G5D||? 
zJkJkzw$kGfAm4MA);EL2b&;pt$M1C?y`N2n;FJ+DaA}FzF6Ch|>dR>OoLKfLW-ni@ zu545mS1Oh2V1KLN+-Co}@zQI{cU}(#iPF4t)XSofAyhX&nN*>pY6_1sFeIwOHE&@>Fg zdij(BD9^I?{%-qlueG~f@to$hTdNy4Ec;ZWv@_27Q9g+H`La)EF(QOfVp*1JlG*;U z*l}v5i*R5e<$MqOwcBf}jS>KGA>V$ux7Qu6FE7+90RS9+@O!NXKPa|b)UaxsFT2h4 z$)JlsN0TVJ^t;la_0PZg;(z(?{rRte@XG&rBr&j5Ajfk zZNswdQoX#qv2bmgEszU!I7%@_>osS-FD;ciJZwGQ+Ubu%b?kBV^_#03%QXPlI_T|m z$BT_xl&dHg^V?FCO#qXV4A1#%MC2kGh%6OJDC1BjW5g4(bX%GyOH9Ma12rCX_D4}x zSz5pK(kqwUPC{xfmC|~ecQ2IEdVY^wKth~k2y{G|xVAxLy40vjpokgcgaaVP7`q4n z2IEOCwa_I0{Jb9L*;<3*GhK<~_>~Nb5dwpp_>0;GF({7W6Q&L;Q#TAKb5}Og%B$Gk!$-TFw;9$6*IA~?KW#p6u zGM?wHD!Gma7!v~t0OZ;h!_qPg!(iRc(O`7q_;usf&Fj~1M1#)mqYvi^jq^2AcUB3%29RmAlLW2r?D{KfCy z0DL*;kH}fQ?)-VQjf&?~e7pFeP%xb$Ka+f~I~;_`-sxNytR=ySGJ{dV7$q2cwlTZN zb0O1Qav?-A8IQwxpEiS0J1EW#ktVApl* z)n?5!=%}^x?Dzj^HW?Oapk}FosF~##Y;zsg^-E=hsU~cg>KskCAU}6^@xS^DU-byT zBq02yfArxr&j=&U)2Dlf!zknW^c7!t9mPXE-1nO6Bxrc`rS;7ltMyR3Y3q+e*3K5T0 zGSbl?L#8|)f~168U8>%_+B7J6a{peO=9R`mp{F;DPo)Yg1m9f9-Z+lae3Il`$|TDt zaVpPAA_>8P>o~UQ*ye0>Z;t&}ob#NEEKQ>*l2WE=3QD++1yV3(B4#+A511fKlm&2)2K08JkcV0U!hgC=!gMlu5?hy&*r|7iXLk%Bq1|FZ)H?f9BJ6S{iT)_RiGq zO3KT>PbJU!ZZJj*@sWtf`S?iZvF$jf?Gzd1gtDSf7LA8l5>v(+t5++H#gh^<8fS5o zW0M2TG?^iyR!T{ML24)+mZ>BM1p&>ys0hnNsz2h{dY5JD-DPGmAsmrNE)DM!Q6WD<^s zrz*2jxpeFHoz*L=10K==>8zqdb1{^Z@}%PYY@dvN%_{qqlf&nKoe7)Rays5LxmHh8T}Ru}8d#gnz^ z@Zg}=>ka!yg1Wg;DLQb5WjkJQz3Gph-b+|XG5hY-X4$g=Ac~XzBpXFZf1Gp%A;P#+ zDiLPRmLw!ne)w?I?}6bO^;H6zVnl)lVa7$c$VHw>1wwGaa}02$S$W}V(=sRkY(06h z_2kK*KX5(Ib6sW_^;%iyS*ctC0I#+X<$99jQJP0dUfg4!P@kEGZ8FcXl-4EJQGhCe zO)*9Y3hZ=j>nD2Bh17YLDk(Ie%bF{HfP)JOS{KfxN<+qFE)@W9sXtNW^l7ef!(R#Vn*Ar@SBlh13gyx!{}6*RbqvJ?kIVi-oLTyb32aosr0Tm3MM(>To$0t5wPefH%HO3u`CoC`v5 zmc&^Sh5arS$*8>t0N8M7bs15n(pXBVG0{qc%nP0X6N)h+gdl{7-vGlFJXbtdJd?-r zC1jp6VK@g%$Z#&B6E3@QNILoxfAozY2#z@z`E;){84t50ibjKQ*uRYIokzY|N&0Z7 zv)38Uj2()+0oi*t>KxvF^Jo9NfAgz>`Y#Cx|H=EUkJ@2xl8;Z<0Vc)1&0Lcl9E}e9 zlhGu((kRUatpFhBVr%Q^)(3A-4xUU&EyG2A16!V1TXmK;?>60!-u+!<*jXG!lMo~H 
z(?9#Om1^bO%C2%Q!Z@3xc`p(nM}?kE#RJvb8x`K%wyy{Kh{x>O>&)`7%!uP(I5Jf@ z0V*jh{N=<>08Rl#pZkD%DE05+(W8VQ;THoRxjEtmER>z;!$ei|+O$S8U-Lu>*Vb^#A;uf8i@2 z{BnTsul?^Iq!&qt(|95kBw)BEEjz3be@=OxFnM~|+dAl-+`nN5!mgflAdA5Ckm&(6 z&yy#HVQg$}R;m@(_0A*W$tFzc!VtccY!cKSa%*F zzk!-J0RYBrYJ1G95W^;xODy*y@=hx$@+>Q8IwAOl5`5q(2F;MbO6X+72Zwyv0szCb zE6pVUs4OfSmSvc>U#XdXIVgRCf96hm`0-vhjMKn%0?!FN$6%*~YEtlV|ETaEnORq# zbP@cLX*+IEp3Qze=e35Dtlb+-CX-{vPf(GVmW>dSJX4dt5;?-S*sY84^ftx_vZEg> z2(-v_ED)ub5O8YE;)gE=DfM*rP7A4|(p+gSp)fH~O3qL6Xr+?&Q;~!s9_wVRCtb)t z-ExUAtF9QeRaU!lChb;Bk&SyYjd>O;r3f>C*@#f7k=7dGq3%7^y=QYLwT$dC5CbR! zyNnPblJu9;rx|7Y)AdgRKobFsDG=M^X3 z8JS5Y$zrjptKX7^tCo#x10G%&@W2BDo*J+{@yH%|;DJY;d0+zu40zy?^%pGTLcUh_ zmDK8%`cYl#uC6K;nVDo}L}a|q`<(sqU`L*eeDG1Nl54^Zq7Wpr7?~&b-fOLIee3%) zkXIQ=Qi3{yN-47)sP(Yt?Zu*4PVb0+O%6&TFC;+^i_q{$le=Mgjx9r$Aj>I^59uF8T^+SH{8I~Y^%Qm_0E4v-hC4W>M8EC8%>2>{EyadwRL`RDC^?(ZG? z-2wAL))}pmba@%BhQlGH^oFd@8$#pW9-+L*GmMbdvMMX3?qV++8FxCP{e!~?4_iip zYhJqk2-})Q^QO(&bzeA@-kKAtLLuzxeO}lXrpen+b&f#sB_e>PNCF%5>dG zy_%oZWubu#p8W7=+?#a8t{1L0Hmy{nOkUPiBhzBrhH>gDF7SZSU^G}mDSg!quC>aO zji3ly6NUjnHVlK#tKC+$F-poT%ht=)+4Ij|{^oDd;djySJ*X0!UAP%!1VE7CodM}i zXm^5N#gMkvXla_N&JtZ^GMR&xgnOdXx75>h9z{_nipKXIB==dzi2r+Bd39Wx8})>WFgW#g+!+=Xq@F#i&OQU?f#1j6 zM@$$@LEL@r9Hb#Q3q0{gY~z*5inU59t+kPjFKNGEw;mQ|IlO_{#@ z?cc+W`8J6s#_zdgh!eOs86Dg|0_N3?%(MLE>2aFne@LvbhG4WenSAi!hm*sr62DRU zbhe(YvR(2{=zH3js=jIj_9EXVvszm|dWhK7Bdtz9k&9F6cmB=)^54D-gx^dc{LlZl zzx3kajifU6SjVUBb~M(e<=rE$U+)qWTipbL`yQtR(^qS$Y%XTWdXX!%mNZ*TSLdf! 
zSI_1Qw8Vo4?C>Mr=@UW;6X?~_Q@YAbm8)#2tBfK`R7tpJjnP_RLK!91YKD*tppC98 zYmHV?zjfe2!$)ZHVLK3M7texb)jxa?4)$+XPOS;i^7zxQ>#1VDL;QFvlfgvmQGhYT zqXXafIVAzdo*&#lI(p;3=A0S<;9@p)tl(_D005w#-T@Wc;CK1k?pLy*s5ci?x++$) zs*zQr>h%mR#Y!>8x=2*9`P#Q1_G8wakf6&(N34G(nxmUirYl`0^?GKt02JLMvmlEK8RcM+f84Xn1yZo+QcX(=RF-@Zk};)E%{2*Hu{-*==aVrZXtlQ0KN> zL$P+yaWI^?8MxbvU=(>Y6jU3NS30lN+usglp31sXQd(;W6-0Q1agXy>KB+1zgb^i( zAVe@i2pGW_LFkFwB+{E5hoUTsLMm0%3J6a{ByfJ z!349gZxH#*|MkE7S^^=3ck3%I9$sBfJ}04|yGoC>GU+WEp;j_aHn%^6x@iCtgQ(eQ zN7s#Pip(7?Tdez|ak^RrqXPhNZnwc;(%zW0G61xx%=(i4;O68Q!Gd8%aLd~2j`q5v zRv+XE0IXK)JWI1ITP>IQ#mnWFpVZ6aU6pv^$2jQn;eEt}GYa1Rxfc;H+Eo?MWui7S zQzmsX&x%b8zx;to@#2|Xzd(}@(eOPW3=JQZ@Ti#n-v0V;K+C~{Zw14H_~2349|J%* zn7AtHeSl1m#&S4}B|Fvfr|=mr_3$_%u`goeZY&g3ZQPYB~us*H3lR!NJ3 z3eP*3?D>NFocN3|Z2OUSyUpq3AKkcBCAHGlSgVw=2m!!0FF{@Ky8sY8+&;13=xf-A zTB)kY2%%mW0f6X^M0eD^|3SW-uAYA?o7{Ofex*g25d8^8V6-w#ZB(sg)7t3VQE-<@ z?WitKf4+U#+y}sxP|Q3g{Lb*6ANPZ9pM^03L@*`;3lKRG=*b+!a8IYG#3zrRN)!-Tos*qsiU zJ}1ubfm2LCzh*RIHJ`4|PqWQxdi-?P@aU@cjmISD(s+!6E{z5x?7!xUw5qYPvQ_Tf zr7c+4F;sAK>up3gTJCN|*)+xiV1mNIm2{lZP_Avdjk1KY!Q>$5^y0zj#t-fqipg@G zEH7?16afIK8mw{58X@@T-lLL(uGPFMpi{p)K$fTC{L5k^o6AY zhke9-=pR|k-P8@mNUvYoe5KYeb$W5*UA*y8(4PcxUl$1gY|fu)*)(}#jegtmkqOb8 z0D$Q2VFZ2<1cQA5=@Y>knX$~U?x zj8aA`tJT+VKY9@jy0kmNOt@{|>vmDGoIU?6zw9yh_YUI0s5_c)aV=F^UCfgt*)$;k z39wM$3yMkL3rD(fuit2E&% z&7Hkv&-21C>~sQd>O5H|Sy7f1@naIiF0KFo8y;amX<4-S)_B$|7CPlI7fw2fJUt4P&p!9`3lu z5fTi$l;Z8pr1Dy?3%Mz`6Wr36uL~`g$pXM2!s(^Xk+YF=1M)XQwa3Y>7}#pLnoiI2 zWK$Hy;`{}%O641w-@MdN(8t|<91X?feE?vb6M{WXBaiPo)z#T%`t*0{;;c!Ry3DMq z|Bx%m$@UWD_ePu`ztigtMi(!iyT?^!YX_}Y>E;ipbg$lpGHc8$-Oc& z9oiKVLT$^T|UR4%K+pKEpMmCMQK^_i#5qLsy*3S4^07g(Y zJvK#Bq^s=WJWtj|^2(lffA28r_M5ug6%X15#la-re-s})jQ1a6gvzRVetNcxnFT}> z%2;oI7h!77Kd(+cp#%>ee5?QHBjk0J)&$v(&oKhQ$X>tm$rmU8$A9%7zJo@;nbi}> zML}2WZ^7=Xo(GFZ`<-5o5L!1)Q(vTU_lt(L3Raygw&cf%W$X|vPCVVwct9_GG_k?qU|^*b(1`g_|x zpRF>245->RrPkV7(8lgc^bqL2utvA3nN2+`o6@7I39l zqm08XxgQMgJsRG7^!6XIEA+8$0KoS+!6@+F%zbd$Z|;OUbz=Ys1qlVY*Y^Nmy2>_r 
zb-qZ;vNTOez)I(Lj|g}jBs$94Z6nGkn_bw|3s99QHtnPxFb{TT^(aGxBg$FSr%|7e zAEmaFRtjg=BBHZa-W0Nx#{^YEzQ|rYy?t!~VIV>j452edR9NO|9u!$}D=mQiQGd|w z2*E|veei=HxsDH~Smw#u>1n<^uU6A4S#Fie0RSvu;@Dw?aeY15SXs-(i3Dg}9>3C> zJb2vR$lKlC?YH(%?cZLdM@>_tYiq1(YHPH$HrcE-W{oA1Ml9?m=g)Oh*XwCh7OL1Z z%NM(yDwWK4^Eko<>x_U0wMIdIf++vs@rQs>=nF@4{YGx8wj)CsbD3WhT@l5#)_GMo zjZ#{tS()bL{P=Td3aE2jZ=fl3`N|RDXt?jky{OxdM+e>hpgTTrgdJn;I?1QUU#w?m z>$7L+#kuR^=gX;+5=G$vx(6uQgP=z-;>3!0gy_|!SeV=$h7IrBmm3>?B2tl#K-jp^(*Q=A? zTcc&V1OO&{I=D|e14O7QbCWHY5C|bxIEEm7qDK8VEt^JZZPBKv>$=t!TSNe$9kgQn z&3AYH`@bo_{D*Dt^Wh(B8W?K;5y$5Y1Aj6e-dQi^WtC*5HpVjF%33zHX(~f_Ue>GG z%eu&S36r8bLX^AE_$mQkYYln}$r9PrRi3q~W{itLzdP}-98EeZ%*UIGe zR-Nt&%C^>MMX0j`K&q}NwgkbT|2lq3Q$Jj1`8BXCI>mmn$rml?9Mr);{mW`6H z(~ye?Bcrv^O3PZ+rIoeWL1?VeyEe<%i>UB<7{}v-xIgZU54MLfTY{3-D3x!9<2__` z`#G*=FRxhB2m>In`udjRV*dly-)F-Ii2GL7u}|aB^Er)t9&qCGyNfg@gOFBAX`5u_ z|M_43**j?Tn+b&f(f{<9hP{>Yg-$Op0)i|N0TDrUo6nsumS29-HZ#0Vzh;rU`#84v z;`93K%8s&aw2*iYf$a{*9glI&UA7x_`#{*q$;s!>PL;*P3$!*y*4Ahm4FO|BJ=Y3y z4M+fRa?C23r@#HN(Yht|VahP&*y|udH6}zvPOhon@q3-%wq~@_W}Q`1n^I~*n5*dO zvX~x!v6`OAOR5FN*qH_+LTM0-sOLvrD?)o~3`?$H7uRJ` z=4q={=0thi5A`aSQERQ#?w2vvItonN_SlVc+fQb*s>(tsskKh#XJ6M247+_75xMZ) zi?pc9TAiGnyb64NOY|7p$4D6>3;?L`2}ZWuK)I2N)2p)5eHHiO{fB;k&qm{#gg2nP zKVUfzOHN6@<1{t)^#9U9INRtvrgWO$m4Q8h4@91gOXT zAnXpP7sOEjO}<%P*g9t}#j%En08xf`fGI;FBqHp1^fsXF%#B~=Z(K@U)VfxtY*Zx; zLRU%)ZVDw|p6j~Km*;i%2IQ8;V>j&4HXWVFxXbOFz@GE|b^qPT!g2rrSy^MiYEW{U zHhmKdGr@#Mh+{ro^%|NyaS%GPM%N`E41Ub~jv-VjrJC9zVzmOw2a|iwOxS0{y?71K zQb{AVx!9DOqL$jUPl+_?|MNfmr|+QAZ)P!ycSDm(=2lj^OsuSptcj5{>?(_l-^W3J zyRFlTOT;~|EqdhoJU{!=^~o4xUUw*j_uiBD8TayJ^YW9wTke?PT&r?qZ5Ag0FkhTH z(#Y?A{*xPEy+>%Xl(NxW2(LR*rKyWVTU^T;5ZT@-7$ZPdmS8k^*UBKHq)`&89IC`N zxvbJ->Ya!X{2ox&-+wUJ+wa9uzY{p&7)tP{d(~!JCE0qN)SERjnj%fGQWh(_je-Dx ztd{9grpxTEA_f3>(q99>YsES@Bj$4T3JA?HWgw%P+_;%RtZK!eHCN!R}2@1-TpA@S{5ikg<(j9 zhzCf72mzyvT=wJEZ{xh7rA=p0teb3hqm$|y(tdxhd+=y<@4el4pp98?k}S*Dn*^Ft zUAh$6y0~^169$BbVGwnD{a%+d7KQ<5Y;X7mKbA)6by_q^=Vg;uO;$GUw^xEPzrMs-JsV21A3Vr?G9xc 
z!$EP*7(tFVSC>U=N@cCB%c7~?a5Rjp>+1=+vxmGhkX`F54aWo{CQJ*!$p*g25zToEurIrYaUw08mZk63^uP<{a3; zRlTwXssyPDsIWZR&U<+=@?wY&TttM6FTm*K%TL_aMx8z2!Q{c?y?YO$Fx(ptT>9US zqkbF#Y%wb*_`96Gn7uf02W+cPZ?*Hb)w0z2k}%#7hJ!AP`os&dA7duk&Z6tLVXH!h ztkE9BoKgUA_phU|<(qkSvYOXrzF9R@btmifErJ^X9$+dECaizZs(JC@eE={@X`_tj zwzhu=tRN^11F1}2RxQ;O-40I03bfotrgCj{vm2#e6(E6!l?Z_cKsbmHcz{HBXL)p` zRdg%((I5s9X&}ISRjzAgm#cMZUWC0-C`iaDC5T`=?DvNK-h=!1q&5y%?uXoG#7*OX zQN~(A$Qy5;ysY!8+2j?$B&!+=FkPeoa3{I{%JTQR_b=LszP8S(2#_iX_c8a7QN)X| zc!f05wuO16C!o>=Sgnf<0O(>94JSr67%avJBM71xV}vlp+{1z+LZyLCsWzo@EeuMq z8%b*C)+s@hkQQclF@BXcdDApyu}Vt-2D}Fm2Wud#q5l9i6%-3l(&}c*oVFUdD8)Pj zU|6Wi5&)X{i#H_Voq;lu&*rJwYS*M$oGzMeZ=BD3|areRFKMLZmLEGn_oIn5FCqHeY8et;3_u$fZH2={?9tu#>9 zl1&iv024}SC+bWdzAwU#**|>b zeg#1o`?RUc)8GH&a`tk1{4{^|4*;-XLQI~-`wxcqA9tdl>obgylc(`HxdVND^|IF1 z%2d)6wMy2@)ywBen&q|BM1;eG@o>i(jyhaT6YIR*s_SLxA4)ZLs6fjMaL=mr4HthC8PfYsJuG zX_9AXwOmeD^PP&;&6mD~hPA9({PU_~*W>_TAWs*(mV5n&6i`r`*z*Sac^RYW() zY5?%Uh;Yvk&n0l$Lb;=}nrX-iGcO9C5uabVbrOs^KJWN^Z_qK;mW`>Esidwiy|G++ z3ILHH1X%zZ`@-iWxRlm%t{j)${pR9gQPp)-RYvP9N!qTQQp&u-_QYB%n;M$hwL*7? zHO`-Z@#tIMc^3%3vGu3_-oHFcr8hWSmfH{Sc3*0lH_ztzy4>EFCSOyT#*7|)=X-v% zl__!m^ZfH)%|H89zMNj(&LV#J{iE;y$Pa=xM#7tPMqIxK0EYM8LkJpW#stM43k6L# z>(i5y=g+<@%W5~>#-<^l#yj&z_kH@kEkI-xs0Ii#J4Hr+(C_yLob$c?Nx$FEBrJ2e%A0wmK-$Yd0|Z&C zRb5wkmKpOE`sf(r$oD#t4**3~Ct0DlK?)_ z_25R0+vJ-j-@xozxlb7%9X;;fdoLV6K$mJbrGYZ57g^m6*yO_>-v9O=GlFGP%DN_$ zdBIzeamq$ZDK{7Ab)#}atUu7kgqLaq+xyfJ;t{YQ?%jV9Dy5A@0E}}$sFn(u?R0$L zi@@K%8*W)%z>Z9%@B{JBV@{GMYXksVBCr-LSSYi##Vpw*QkzTy5CKd8cmM!w006)t zFPpqhG zaoGdjZrVGNq*o!Ri29u_Mkw+H036=G@4UX z)3Z5fKy1Lk$2(LzUY6g)1m;tm*O7>G9M4p5vII-gtru!Gv}PdERs;Z@IYHJK7XcRJ^;ifA6AUwLaPYLFpx60)_Z+`b> zqOS)z=ZB!0`o|JfRsaAX07*naR8GE<~m#P ztJzJ$Dfgn#>qemyn7tb88(*XK8*7uIDH^@VWn=nLG#-uL@B3G7i!L;#tBaG*{;|%p zWOc1EqhwQO$@2N9^?DYM_Jtor-M$DqQRn7xCl~&0t0RXnH+5N-HR}v&#KX9EZR#-? 
zlK^)8cT_F6%)=}U8)_&w&CnZXy zH*d+rKM!aaWxA&coJ{Wbk6+p+MQn8}ZNhN`Md+&XF z@4ausj>$ujZf-WfWd&9fY%wC+A_o%)dMM}t972`aVtw0vdo??|-MoCeZXKmjA9z5b zq04h!2z0tx&EmBD@)yL9t*mKxjDs#=0ssuA)Qg-Nn)CWuHd3xOrc9|XUly1OpRz5I zshiT&r7=b;shriuCGVe8%qaCZt@4aBf)HW^gM{U5b;kXm{PO#(254SdKC!c-dLfY+7 zx!06;0FXDz7)UE+ti@mgMk2=Y&1#FCIz*i?@8C)I;E{-W{-w20o2R!a{7#v*(Mo>3 z$4x7Bqk&39!2O8(UexJ^Q5?o`u+KbB1jT}0ox4?9N~HkcY>_H$q?GHdl(JDuYxLTC zugM)IiBuf~KDMUc?+NA&hXclW6o!o@PhT#Vd1bSlB++$Dt)$k*W|eY#*zKj=Ob#Oi@P<7gm2RY}s!}Tn7D*+I z0p@wI<3`Y0Y{vT!$ zL3A_AW&{O-3W}|T^XdHbH-Ec${%IQm|(iICk5fcNg25^4kJ`1i_KzQm&I~Em)jOP=q;=ZjB{&@QVIaMdJ%Jv zMLkM6;Q|1f#i{EA*eZ32?X~gnEvhp{%p-n`C_~I6VV?wD+8N$VsAbx0rrEFm?Z$yOMER4GAVvw1#$`j(%vBNVv%KL7eQPhFLD z_0`y_@nCdydBr_S5JiX*G#U*V=R6+m;%~BPoKLzk)>>;VSZlSl%Ge^$o4V1Le`t&W zu+~`PlrwjizmG1V%UZ$g(OxU062@I+V3lM^l37_| zT^rfRDz7ew`gL8Dby2Qia|QqqMp3`tkD|zVy3!~DfGkb(i}}U*>FQ!$)sn}G_3=_jl6lOjts z5O9pMop~yZ5%VLzzi<7nr9w-&v@m_P{PmZ!xDyTf{WxH|JusgUM(~ZkAwsAhym=^@ z3;)gp$Z0n^blv3;w`tOCdeQbBZu8H%=UEG#(Y~=3s1V(W1-sj(G1j^&aQm}g$9}B^ zWo#{Vqf}kXvZ~6ms_It=4{h!~XM!_luE{wS!5$6v4j=syl0`m0kzVgzApFMG$?@}b zZ)B-JgaQx%ix9NnmD^#I;KTjB$RijbYvKIG>1QAR{QT2jwqG-T^26~5KXiDe)+S$` zAOGU7${mI$9zEvwJ`#g_09H$>mC{C8eW-U86dli{++5RpBn+rPgjm_Y`qVbXmWl2n zH^B1O@C$&+gU3f7eE0r`-{HcKJ5k(;0O0B8pEY^GP$WEWJRSo8i~@Lk)V98-)9Grt zTCJAuww1S3K|7($nbzs!-!ii849JAcr z)Uz)USlg5qpeeJaD(%G?yv1z(AF!NYUKmA34~}-_Z2J8^=X{wrX{9da^Yf>_J2=?y z4+aPOlQPen%Sp;S%hu~nR#s)z6h#SI0Rn`uHA>a590=R3%XeRY_USiv1Q;d05Zv?K zL+B3%yU~tairHGgJnA~s6eENHJce&i7G`O>+$3eAfe0!*Oeg`^ANBV~{TqNvlc!a- zX{s_^uUf;rJj-@RFfk?o*3>1^waFK4j9-5Cb1`{BM-Q+UV_eE6Z=}|k0T3VztWg>v zA{LvR`DjxupFH6AMx9nk@t1%9_b*RQs;bZ}Y>KarJjE!8I=#u^@bKXcg+J9)b-Ds9 zs?q=jEvt)lQ>^EdAOH+Z2+wyt;~P4ncaF+9oxj`Jb|{#~u-3Y&Dk&w1$O4boGG+k48TwUAVaT-q0 zn9UnDw(T^wZQE$dXXZQK0kJNW0?2Qx=&?`zgx>v`5K710i@vU#sRT~9VI z$D8S+-@e?V3~z-a6XS>${2o84I?kY*WLZy46;%0E6ZS^G(}_PJ1~B@-10}=6$nE(Z zS*xdF^BI{$lS#*Sn{WDEAH!XppWE)S{7+S*--!iU@WV727))kq4`*AcPR1igTL+O* zya$Ye9O|?-rYY^qoF3XU_y)&zJNTxv6}p174?3)BXmFd*Q+n=T<4}Uc4B#Pyj_u;4 
zISN&kIKchsdV<>b;(jPVu-!KK@F8AAFeQv?@p+~Mmdjg9l$IpN9@M_-M`-{A1P<#H zCCmCjT)h=fZSrLBdhl}kFt&US>AuI(y*WIA_*-uxZ=m4Yf z+GMHUYRNMv1B3_^v=~EC5CE=e`PfNO8edRymq_ro2Q3Bh6^U>#+68I!abY16`O!Ax z-^JUQr0mZJjzb2y#dMc2BhSWvzwF>Gge>hpK~3~p71Bu@Ym`)vM^^yg26F;;t$u4b z5rtkMmO>@?a8omvtRo{KaE;3qe%v916T12!p~^wYFv0rm=T#n6ySs-VHH60{mP*x( z5}%S;;;3udeEBh!H8_ut@t6Q&jXK}i+wJr``90F#&s~_JZ_%~93o%&VcAP^@Ik6Wt zy0&obCSrCOk$A-aFwSr4`6#mIt$)z}^O(UuiR+pU80onFLww=?C{Jf5)~f0Af_j=f zXsW86pX(n?L5lD)p*8@_DM9G8r9jH3Ff+MsoQ)+y0o(kS=hqt>wvE;%F-!wgmbQwDI3=a=uxdiLJ7zFxeqPsgc{9Ejk1Z=6S0J@`=M>LhB|%u zIUHv$-f*Zyd~aK;boA~_*9w2L6S=)|VaqroW65+_ShQXRko8nZ~$*;DRW&sT>PaU8W#;R$=x~e>)t4hf* zy76fK%cxp((^%nzhuUFrbWfQ;lKdUqyfH7<{T>b(S**_JU9h+2K}i96wNn+Wiw+NG z>yx!Lh-fvS76OwN5_RCADE|C=rC+5?Kn^7vXi-Hc-Recm!(sYKC8C1FU5#lb3@?#M z1&Pzu&#`oRZ=)Rjx=PofNhcE$SyzFdYUZhGqX>$khVCFd2P{oAv)*8p=|dm;f-ZT| zzLHxA1X#JfJs&doryh>m(y`>{4p&y>3=64Ybpthj^~YQ|Q&NiPk)SiL`)Nw?hj9{> zAb`Uk*6U53J2Ghv3lv=qa;vQ(8u^h0avTGATZ%FL$VI41Uz;e7`k;}7!7@W8xE~p= z7S#8%A^dtA-h|fe34!-Bxa8D*tXTe#OQsvo1@CtM?Q)UpeII{S!5ui>TlRb>3PO^l8q zb7-kTY;#JQ%{^B~!hDR59>$9}0+~)$Xu-pm))JiFa06i~Kvb^;!p~Wa-)8DxuKbse zg`kdnsn6X>{VWExDMDy3)dFO9i6t zx-6D|f73nyq*ek01|zMrU0be zW3LbpiDnt|q}d>awoapywv7YpfetH{Ekb`jPCI??UuI~)U=(vPcI+c^s+C2shQ(SQ6=lt9i+qACYjM&djc%hk7F>O6w>*hV0Mj0PZuhjkC$USuftaU1-G1o(xu_H9}O^7d4TkwY2>_}bq`qC<9<06R* zM_}@1-f3C5R50Zg4M{MSd|%*0I}&MMn3}D_7AcQ>u0^$wg`_WmNuW*$X@^Jw;yO#T zpvOfP(t$ShjSk1q6;|A^CQ$hzERzad4=4EN7LtL-Gx_QL^K}u}H-!D08^(2#*UNAH z5y5`6ilQU&@c*gQd*G}u}QT>LK&IAO9#;WYCuJuaKBbufsG$J@qMJpP{ zl@LaNopMox>fBrteJwbKVM^B4B!*z)n=nwrrc=6fb}>Inb(EmKT9$Lpj82+Ca7w@S z@}jDmXk*3bg&(vD)BE|kXy3_cSw|i#-Xu*N6BtJ9=gtBBabf=Xa6BkrPjBy!H87n| z`~*i#q3QRq9N`-Coe6LxyQN5Q>{%+oKYI*vlweZld)9%Fl`q9}82-u7JW+(}`wzR#Vel23W zN?h{ScLR`u{-JHmk{hO-OrF98+5iN`2xoDxQ5j%8HeOg6TpH!&r!%?PYXV^85E&o^ z+mq*)xW8U>vZQvau>_uDO)|q{jKJj1yX2G*AYSC;}6}I3Xs;b*PAH=+wDPlo2ZdN4krXgPHF@YE(IKt z@2f~3u<+X(+#Ja>od;on=y;|iP{l|Cy>lXcgHpgSJ+%}R7>wv{9p2T;R#;f)xyvWI 
z=S6c9n@i_i=GH$PJ$`xfOv4hJUozMV%!6(nN)vNK0jBoYGev6aj@N1-dV5S#?eKqLH#VR2gldeULAZum>S`eLu{ z1eHs=y+*9-^_vyaJO6DRn#lfGIG2i&GzR5mc#v)jeV3U@iUNL^4}pD`_=5vy}ifm1{hE*rQ4gR`vW_W9;FuOn{K>f6jBM+77!C4?Km)C z+2MTdkJfK~tn0s@pZ%zog8+KBo92EcR-%{u=17*V2C+Z`eZd0l#4!|!i|=~`MX!&Y z??(YjJKW#8c;wBc##`_ziFn{fqMI~jHi zD@hro!$!FVzbB4aA3&g@-3Gt~M9i4!vYyH|!qB*ifcFYap|f>|PxLIx^n<$og63w* zxIDo-IH)bZrDOJRrh!@MLNVRBaXYQ8y_HL7cKhhTTnh7qG8zn8m1jI4(q(o9YB_Tz z<%Mu#)i@L4o~J6%vX-qunWQz>9>~G@UB|uxu06CWF=1CFRTBz3&U{1pq=RS|zWJ6o!NPsN)vqq;H7ghP@6;UnPixgT9yvI~=6Y<{qLs-AdRw zPH?ub&M1`|X4wE2&G~;^RoO?}c2IB#U0My-I)tiPT4E~iy}bWyY%FQfr9Yc@{h0Jq zq_!5@8;b@2kMK}ZBw|v)reG2TrMq7q7552CpGe}3+6l9*Fx?$4R|w>I?_NlVr}})^ z$D4UpL%E85fP(6cxJtcBA%0P8X5pTaZ6P1+g923Act9+b<6=sX32oH9G0?q@+gy@J zu=pW8%;5O?IZ5=sBzGI-)S};Jum*JZv_;|1;mqBbKuIZh$+WlWaGxqp$ja)J#7{S0 zK6~sMlB_dd#U0bGv~2Gawiw|^{x0C6(?|DD& z6sTwF*^EfP2r-_(1}U7y@)ahgk>AsnuCs{-+TpC(! z91v0bsGl-2(sfBrTs(HCK<(ZB59e$%2jAxNnt2lYa3$^A=!~@H0Y~?R1wRfN0(RqfyZH;Bn7h68mF?j5cmxKv8ONy<0&-i!pbfKzIc zA7hB4VWA z;NhJr*e^re^6U?kucTQweO`0<$eFLMdUrYmGx3~Fo zk+!x5ZYTf9D(_=c>W|P=|4EfT=;o#DoyhxH4oS$j1kOChjArlGGM91a{a!6J9Ypkv z7itL15`hI6-Z#F+zSoVTQ=vZ|?>>5*er`!)1h~Y5%0anyO+P_ri1fiYsI6I7fS@)a zP*>H+q-9O6isNqoSm|D@zY&mZ_+4h&iHyJ%|i?z6XyzMt*fjpBbZxA745hI3Wbs6^|9cW}H$N6dC2y+uam))1{+t6{G>y>Is=BZU;= z6E9$g&KEtA)$?N-jbU3#iDx7tS`9j#&^OyLZH-oHo|2KyAR(|+8Xb5pN zjz-#Bn$$#3VGyzZqds$FOvO!qgNE#FKNpTJ-DKImCO)PoZy6is`p`F7yN3oBsJgn= zFM(jA(5z^RQzecI{D%?&102I%jgYGhCP1SIFd+GE*N9`CIF?ZvDSaTm;2!C18_#qB zlP}W6gZ1ur)pLqT@~7RwtVT?8QAy1-rVaJoxBAk%LTc~r+008P!(U4&6n=9>MpwiF zEm#B)+}Gz zN+&nWuWV8%lIm;;Vvwc6-}EL4RPgvo?Ake#@=Hx=xM-xFx#Ks*l9$jFfJ_7ckfS(= zd2;3ro5yj1a*MKQ=25q@b*0EwL3vG79oMzopvH+@oo09J%RBrZBn01n1bA){z3vB? 
z`A==Koo?v1&EAa{Drr?V@%_r|Dh!_y(g(U7rrH#-i!_&wM%xne7R(--)#H*4)H4w$ zQpz^|x47=lGMqmrpR)@m++LI669c#4v*Qr^Kes!hU8M~6$f9B%PkTK6H|B9fL|_iT zyx;q-KA7Q`rE7iFV(7r$4vqn8m7d+*)T-?9bRR(gT$DE?eM>im5^76O#ivOIc>hMm zVpaW=0a9*K6iNR^#l}phEAgf4vr3~u$CH2!T)8TI zLq=tN#Y|f*gy9$$*^jAVFl@JD>n@XOy$TLp>NCVQ*P1m{c9F6-WZh-EDK-} zU{!g3e^JyVsSOAU`ds;5!CllbK+G^ViblSFY`psS)+DBDHg-xDP1?hX^;#@pXX+)I zw~kbSCN>zCy6p3rPLY@%Wy#9^dxlUHHZ{RE8b1DoWUmn50*1pTloi%A7F=jVm}{e z(t&9VJ_6C+iv9!x>+10__|&H_zo$$pJ5XpWG_mXdSn)9&P|kZv6sI$ba-ySwiE=*) z*p%4H=UMPRF_@UZ#G~5ClvVKQ0g`>YZ}@CDf#aTjR1LZDS}ZH3C&9NSTbS->tS;aiE9Y* z%L9*&jjsm%{Izt4@H&;5RGQ@9-iNqrU1OyzJDe8k%){eN#w|EmpoehL?Q`tAE?(8< zKNQ!oV+Mkbwvpg~(?>Y@`d7Ez4Qn-sltkitljYC zq1cK7CONp0qyPO;)<`jy))fGNzS5()9<*g8LA;(c%-~BTmhULKe@9Ng0#huWLl1)2 zf31)$LLKP}4l7<#dKyd+hF?IYrku)Z z)LN9%>XnZbiP~k1K?%W&RJ77i2 z_uOEKhp=O%FF(%!>?&f4=P(Y`*rAVO@)2_I@GMhENv$J@@{{5~7(@rBFVNBT=c4BR zTO`nE>uE-hw9T|wA+u%_`gWQgWQJ3aN>OH2Hhq?0dz2=O_qE*vv%r-cCFo`)@({gP zt4ehQ06|o6@~BPB{isM6WvMZXWC}Dh&Rh_}6hahM4LWmN$2;lz^hH4KouS**b35-J z={*(9|JzC%`2#W_l}_W4Z{7D~n()8b*l@#hSWXlGxIDdk6~cj0-18Cwi5udm6JkGq z4Tm~N%5AC>sxX?la%5jLs`Yjbav0sh0|HiH_W_I$P+_WK7$-oIAj-pp7p6zxRRv|h zq2ioZ#4JuDqC%&Crl`_rpqb8)tHBG#**Kj>t(V0SfL@neZzr>dmyEWVl0C#{(W~R9cqDR!LyXVQa!b zRr(|IvR0##9b1gm%76p_b@*_z2A(%x@_W^GgrV>*`3du0uM}s0C_^$SWrQ=Rh)k&@ zj6lgj+0C(p;gHxNRS83pAP6Z;)u5oo_{imKpVVsuv2TbMLaybmD~(1mS+@(>i9v;UOU{=s8OWQa?3zbDNoj$?_1 zHcAq!DYg6u%dp3Zq*1APSF+~`4qc&a zFwL%eQVrlv>B2(@XD~7zT#kq;qA3D@bm)_V1sp5KE3&wNcgC& z=m_MPl4Mi>K{#GNjBzLH{1pZCwdNa5%5Fo*eS?u%L>~HhUsT04Yle zcC7oRJS-+bqTK@arh|0Uu~C=n*-tvca(iOc!NLR>!~azH7Vk-J@ZM+1Hdz}ggzsFW zIS>V73z#*<=9P{cl(nQ!5Ml3~6w2XmMKG0{^$G(PPn$}3#PIEE(JxaX#&MBP6KnJ! 
z0PhPg=YR2v_X%0j)@IUdKbqUIp7pj=@+z&i48>-ZaYyTZ%nsq>h-FbJT&SEaoS4!g zhnX#6)b&_XBGJ|IVn<`X?~R(;QYE}8SXHh*kNiD-WxF*g zDJjFNL$7+3ylrg^^~*m8OH0czJO5{4B8(6waq4n){MRP&Jf3N08bg zUY?_JWNiGY(s*>*#D42b9gK|Xyq6`lixt?P=pfYk7bI0q2Dwe8Eh-LV70HA}1$rLPFqG=JuW7Gy{Qr$0Zst1>u3oxu|z#--@LNNW)P7f_IxK{`P)AF8S9jm}GG_2PdcSS3Hc^=E$q$zk(jRDdY_wfwiY{8gcmA zloC9~NJg|OPPR<>UU&#W(jk=y>0d=#|B+HJ`WFXXcnPgVD}c(oq<;m*2cnt9N0L?= z0z(Bko3tyVen-v#bPiO&yu0M=c%6jtfHlND85WD)gT*SI9E8Of2}(Dr3k;e8EJR?7 z5J1gMi6_x#3=KdQ!IDA-3!cx*-#xC$!-wMJ=s0tkcT*WaMnedDqz}0UgM`9Kx^eLO z`#5Cq1_&pe*GSA*k%UJ)>SAFOF+E)|UxWL*HMva>SZ6Y)?_3y25>7Xc-I}FOsm_WG zN!pDvZnYYB+T_;ttjkk^zL3IWsw%6fDyjxM?D5#-Ct*FiPT-nnI8Wf1{%ot7GJpvW z4AvmWqtZgRB=EjpD)(MB&6t7zCSQ<-(`sf`m!=8?oiuJ|C%{2LMH8M@RbLYQplcik;rB3F>m|*Cjq+xMm z`X!XV6r;uUE>%Ot1c*a~b>EYO+h+Hy1z!ElS7FeAGO+#alL1j2F3#(aqP{P?x|qZk z6U-*VR3Bq1t=L?GtCaO8@QP4GNJljAsoHz>(uppukt9HKVoqH4oxr3b0dL-d^c z+lU}y7sstt1+TuQ9j>rA1m}p0WApDb1F=M0rU)<@YPHM@!C9YR<}b9Iln1jQ+b=7U zdC{^SkdSy&Z7*bwje`IKyCdEw{4TZh^V3xmtmvwSOudNdx2S}{5TYl z0#%HUDpe^el08dRwz~RIVD=+8;^qXs&o3YHZ@dgWa?|ExL`bwmCnJmmtF$?Ds>{j< z7MFwUVD~N+5~<6%(icYRdEe`+x$xf_*eop>hAA{cMJtS!0|{Y@{~^RFO8XYWM%Qu% zn+3Jnu!9)^GgHn+BuEb7m?+_pu_U$qwx+kDIo}OHz)(?428J_mMXjSBPNoJnR*bxR zE?WITyWLiLCp#9MbZ+F>ou{z$Pdla@NTf@S8xMaaV+ZARkql6!%b9YR!Bi}<0Yhd= zww3B`Dy4RXg>tv1g&1y81P|pZ6{xl-Ggufg?dwE|xU1MiFhiFKyYZu5}$MM^fl1 zHW$|Pb-xQfxw;t-K7fU2F5RyqaKZg|aofFFhUAgAU*i}QCH;Dy-zfT_x)7V9VZiLK zHvFcdyar=%i5uWzP-}Tq6lm2*11uFPVTvnmRnyXmDNcn?g zSwgcuzVC{GU}b!5b+VkDQw`aoy82zFB52*SqJ$~YH}p+aBoemlOC;>Lqzs!jCWTD> z2VP>8d-p+{2e@GhLI^ z&Yph|_{}as_*a#IZV$phYv3Q+G$L#3R2tTs6sG|$(-@$)D6I;zB&S#u?MuO;2SFjC z1o|g2vd#v#Drdf2s@J#;2yY;j$S9ZuEP9Z1qW^aEGC(_QdxcH<`RZ2#G6TnJw5r@EHa5~El&e&HB3-Lx!< zO|_oyJKSV*2eqg!g?*;q&buvPZ19f9ZDVq<7M9`q!%lbs$4kA8(ZSVWCn~^MacTw& z%+RQ_RU%sOXS2*K2cX%Qy{D|QRcpDw7sUjZl|JB#Z%*{rZga=<-P!%|!7&!a%sc7E z0rft&{%2h<=%HsF@DfHe zhtQ$5SxUnp1~?vhD}n(rRL7w*nXSd&F!62d%+5pHBbIAvL5?mCS3}cezVBg4BXV4# z0vgC^u+FuCyGB~dPEcGTieLC2NG4O$%0S+p9|vRQg^8x%mj|n30R$noe}A9CJvEbq 
z7xO|p&cPSiUe;yzr}&r5aSjf(vOG3ey4jccU%CbNN-=)(k`Mu5KWcF?<6pK{=p?0~IZx4ycu|bJQGmSt z(}^?K&+^}={bd^*9!itgaVP}pY;>l02nyADFaSdVlj%2dfmlr(G5GHRil(+~(HyLK zkE`D`D<0b{)=GyU0QhnO!>w*iLeK#abA&367WGpR2;BjZf|9FKG!^hY@VDJk z(=GPmC%+LpBCtkgNP|KEP!d*fOSPg7GRLm)JrXms7fpV#^RkT^(+urRbj)ymC7%R`;7|ZZnifq} zfzpjl+@G)zRwrfnqT&PJ(ul~u0>D0LG8~W*a&bXA!U;DeKNEh#Xtr#Y?s}q{sto~P z!AK<#+?K@@8SA;$D>m3WNVb={6O*UX7o=0ohp+_IUfy7;0UCSEuKDx9CEvkOmpeR37m5xn|S@ zZb{LhW2+fcrG-LFf^47xDd|x7z75p^*`w>BucJ0|lK++ZOz-Blv<2|F6a?Hg07 zhSkEe$Y7t_>5kCZtex9dj=r7u(m=+o8LC`_+B}Sg$X}!WsnLfEQ$6-hGs9BifWvvH z2Sa9(5_!ad8w!I4BJ+|mKtNJCzFWTLKNUD40mm*c zaA>Bw3`f<%0u~4F&oYQ`lL|Y&MNa?IixTTnTu<=%2+odu5Vh3EVvA-;|MDqinYNq5@W~;UL03D#M7vi@S^WXi= z7)NX*5`m?MR5e`)ywWrjR$*SVfRd^~MG$ABD3sl8JA@hp#uk#OK4Nr$g&=FyQFqD` z;cW_nRP}Lao>hi&G{|lxGQKuZQ<}{|`Eo7|Mc(;JH+`Sg-csRfzsvE#{Q>*>{CISh z^nT0{eGykm1NoG_mziOVDN0>tV{ z@Q_wVk|oE%r(t4$U{rEZo|dg0IlL;Ve`cf2`%a zI}_{1Lm;?FtWEuw*Gg&*WE3C(3T5iiWT^2fMCn`^W7^{57x}icoUz{~aHD!9Spy9c z`k90UrP@ruH6=cqZHiwy86HrBVz2rSmJV4fDn=!k;y?Tzo_9wK%LCik;Ro^B3fkKV z$D^mxR$XqNv)~0^(aFaz`;*9}*orxhrlw5CEGQFr?$X^so@1n*QR zUrCA(ev20ffIXe>0?qXus%~JKr!(lm0gS9b_o|!TO45JwL1GZMW#TkJUtyjTZP-zP zW0#SULI}Z0EkpS(UB?WP3hxFTW72RvMT!Dh&_uh-BJ3^2q2RxyaAJR%TZ^-XHf@7f z$)D`_W_farh1xccmU(H_r-6;=f`8R?^K#8`Ld!t9s;vL!Sn1}^*8ecyFI0I5v$pzy zl(ovesg;|ZQk$UiQk3Mri5kJ2wlr+qoh#xE8iEGVVvLDu9gJDcO#eJVPJo;Aa15lf zBeAeMl7oONST1QW2*#0oBNc3Cjpsc?a%ah+@3?+dPOZ`Zx6w!GM+^MaJsDn~b_qQW zsYri}D7Uz;z*W%HJv;Z1E22vPEfA1vBqGFFQYxt0=Yl!i?OKyPk)9?85Nsn};1fjR zR>KhmB-cikmC-HV{~}dj0v<%d8}dq2#yC-XHF)pA1`dNjH<$U(f4GGN9t4iTTpQ=T zu^Q1D(IA81h@uF|{e)1qKs0gS*TmOg$M41^orlj^s6e9@GH{j{LTK|EnP*bJtOBl& z4pd|tBgG;y#nzjyaZN#e6)Kf1u+U_Jzc3lC(&9OVLCR{6!oUQV$$bKNH^(`K4pVr06(;|6_c2B>F-!oq-qXcc zc{Mv$XD6J|2yS8S<#X0Qs(H4?u2t#ORz{h~zC9CLbor`NXh)WM4O*<>-)JE;1t_H< zy9e=<(Nyq@cH-8}J2RC_mGGaz9lQ(#AK-Esd<6xGT>4$@wL;}C|d-D7U6!gvnZ`;=a2ic@>V2W z)AlBA9U!u~1qzrxD2t$uqe3*sQaotQIt?2#%y6C>OE5c#;*-#xsl~S60EV6#m2ys) zW{vgt;RhX|S*=_$BAP1QNV#Oq_$|SEMHBi8XeI 
zDo?SGQ>JPqE`?Z#MFIm5oOW=#@~i!F)vM8W+|IpfBd3V;_MZj6>WJ~MT$&8+Y!o#Hb*8;o=m4vWHLb#Teq-uKe=z+6*Z)~!4sPuq5D1^ zt_Wg6W)o2R;ugqRkfDUdLVuH7lhp&1t4j3eeCJGFkX40%u7u#0LO43vY5!jh$+jqe zx|c2Mb8;8u-!WRvyU25O{P^jsOco>}A>V1unb~|`xBk*U{p$89aTvS#X#aW5Fh+7Grlm6%4?-Fr5g@p+# zFo2xpbuz=pM4R?+6SspCGWpMi!{WCz!WP#qH54oMlB+@ew@dIa*`5dHUEVur(M)QX zUpc^XBsV z7acya!1Qzuo%Jll`zPAlQkTESX&?^r5HP=lZxhd-I*a z2b=*xPI)IX!s#p`j{AAo^>B;Y(XeWI-XE6ra?|xB0FYQ)Sc#v?SWCnbom*(Oao)^! zz3vKR<8*rv@L!)KuFD@x^4`{4qY6;NZ1ae)n}XIfxu9fcH#ly_reBG&+SoYi6|_3^ zEYI6p@nP-SAD&aY0vO(UU$m2DQ%?4n{_{vtR)7ih*2Wo`-a% zFRXvIo!$rUTE3GUI;n(x7{OK(~&_x=*UQ z{5Yj9eqs4MQ2)IA+>aP4XOcGFjDTW5XgETz`P<+H_uClwr;Ym_4{88}1qZEpBa zZ*Gx$7U_p7(<=&;;Kx)>8Hp+f`skG%qs#qW#LoqHHwRn0;p32H{~LAxHwOQ=Lo|xk zB9h>`)QJk8a{ktRgKc||-qo)4#Vq(qn)UQ7z0GB;ubQ;TOzkVUrw8ocj3mf##Hu>b zy&FqrsGiraO*S;F^$>d7M)0dUm&}G*$6GD$D?(1bZ+65sVD`3f99J}*^Kcz5+KtKP zTj%M~0iZz3Fx`BqB6uJGgr4Q_LqVeQIGjQnw)&7-!oJMFu0AXFydv?S%Js$eW99C# zV}7rTIK{UxzxwoN44blyIYa;x5lbw0sRYJfRsVftRqZbOR9*FJ>dvMi4&`ovzU zA2c?u6aoO$^v{Md;W%<1b0LV1Eq95ywJQ{toMblY5wwDspT%+LG~XWLv`2O;AHF)BEQ zZJID`addsK2%-gXUUBAkr&=Ddo}ER+p2653JR^tPacf}+yPux8D}go$)zoG5K<6J% zT7oi5y~9BRB^-~^gZ{2IXL1wI{D_-#y{d7l>GC$?=d@eWr`!V;f~VvMi>dl>@#h~D z77uWjDu`P`F{}21GGgg+ugdDG=W#D6>%+DxV4SDeI>>2i_{Lasp>5;+C^Msbx~FGe zxfsK=K#iI>b72cVvmzaz%!0Alie`11>up)!X?al2{s^YJl#rI}r*b8ICsCN6g8hFJ zt=2M`>%p(Yr6`$L??-I^YP?XhKSr<(55nASVXmz94?jjfVF2meY?DZ zumJFVXecmiV&HE5XF1yM;X~zCqa(bYzj}I6>}uPk%jF476Waq@)B@ThWm!#Ly_^&7 z_oy)URf(QJFe9WO@;wUaq@LQBretie#9a>xxPECdd#?e*Bn2h!lwf)_r~01CbhOI| zN%LsoZGB7OE0NR=40O<}KW?*V0wb*5XfcyX`uB)&UV#$z z^s2nPY0V73n7#n>UJ4H;!f;kbFa=w0~p7 z=Y;1hb_De+VlH-c+pk{2x?UubI+Vq~IyJ5Tm!%UJ-&j+IxBN{}l}uY@yuqqUVAoEP zI{!PLM3OhGiZCOr;t1uw3Y`14O_3tboK2USSL0IGo|@$mcpo((fMZWZ_^K+iAN5lk zam}r|&pADEn@@wd2q5bYwa=n62-;*w?3-QU$qY~&xGmenX$-*vpqETqSIC6?fchfixjpQsag zzZ9|^j%L4Y5e^Fk4-5`cQcx6-oTYTV%jm~JOB3w1@0IHV*SU1e?ngMZ3Rcy*)?Szxgw)r?fOSoYf>MKmb9n z)Ut|93`K0SQ*j4#`C+edSjyD4Q6U3}yBrHAxAA_v72@UCvH; zkbuM)tM=Ra;z9@v5}I#X*3C 
z-65@?iei5taMxXQ?Wnwy5mAPJ`U(tT;c;zj5SH?t%g8As3(q?5RL?-bY_yqsV&=mMJdflU571sdvvJfQNJP1!YJq!^bDFK zEpf&ZCRi`pYG#DTehoMK@4VcungcHWQYB+QofcuI+uU&oH>P97_Cxy7kj>oML{y z*g{;%uDSp22)61Ur*kGZ0FH{J@}gWJW^UEqHQBQ*!3+9oVj?&R#Pm=@ATueV6@Rl! zP2sv0R-G5l4N&%!opoMLsG@=YnKeO)7$wbaQoojzn3$;lYQN!5S^pg^c!qU_>AKv1 zLRkO5B{VYMy`O^(`IlN{f6Jq)%u>d)0&>t3W)d=T=V3>4%j+5MU-lLZkfMX8)^M> zc5)gO0dv=XssHtTY$k+$w63D6%E)l_S5oV##PmYw1!Ls7G_uy%Ur{#3fj;i9&O|Ac zjHJ4#Z#Yw&RrFE4-|pc@=-69L&KHnaR_8T7T*{M6LKE#CYnlrE_mCR%#UOyd@P+rG z`}{QG7gD-Kf&q9qVP%O_EQoJjmq7ud6cksSCiiSWc$!l@5BXMf%YG9Lsqm1PHAg1C zy`MCWCL&rO6KP}~Xojc8a3o@8&ktvrt@;8ccUtw7{O5)3iV2?bm_g}D%)2o! zw$6zUR>k<*>v$=5TOSp=IVC0>w93kHO=}MM6&pQ5prVW>mZ`61tM%`68><4VuF4ID z>icg|`88Itl<}1&9I`;P!wg48w%%dwreJBIrDrnTBWLU?G?rEOPj8J!1+E?G@CK|P z)ddBYK*(aF$;t<;t<=FwV$Yop;ObcaQ)~PlV#}TX_4;R4FGQ!9rn2{m2}4G3wuh7N zaLRWs=ce05hH?6;3O(QR)>BWzA1t9SLn6hl>rbn3nRY(b-g3mJOaC4_mJ5%U`&;S+ z^dIu5l=57mrbkX%h3o5@u%Nfo{Ub&3hk6YGNkT6{#d(iE4s@@6`TT9p7{JQNf%bES zFqCqCk>$65AM|ZH|1cebT^aK1B+-P`tb)!kGmC+oyv?4RV)5XmyhGaeE&$~sb|@VMD$mAn;Ks>TZgsAesq*tRMrDOTM;tu;P`mi zhxvdnI4$C?^!)&kV8yy=c}BBxA&#WBaMoC7DRNq0XIURl=VhZiTezQw7=7=yf9WG! 
zq^jbc(HR}cI84JDnYg=-JM!ezg5^kFl;B9@Sb7;MBj(MG}XZtl{ zqjW<={(UI7*bf@m8USZMx4(&ao95QJ{IO+s1tA}$XPZe*0*XNVdV<{-p*txXgr-h7 zhp1zs!&mJ}wWXml-10`bEUM+&^|&Y-MPTSyW4rd0(+@Mu5IV!KPqhyXQe+_DO8?1^NLvR7G%+ z$C+a5+7reizH!K|3nlVLnH-BJ>W8ELENu?HwbTq^vl0OfRX7M*^@~vj_RFwN|0YOG zd41kMKnDN96k>=P)0FYKlT3rgHFlX0U%0-u8?d$F)Irq8Fu|5l`gmUy_jMfnzmi^E zoXVQt(Gi^&6LCWT#9n7AtFO-Fhdc}uKDUL>{rjpsef&^zcjEI^X6IGS6);vTAp?5O zXGNEcnaqeS(TTE}AwK%{9DoJFaibeT2*`5{xmK$*>Q)(ZzH|{j(1<-zwip5SVge5} zn>SKY-U+oIQE~4msTu-+AmLHttAx9Dx3^oh_uQ4Bx$8{7>uh^s?3+(CcotXDEZX;^ z=YC#SL$IgQBOsqF{_9|RWl1nAwWo^F97M1+F5@Hy)=^`ROmW^~7zI zv#dN1(-}5ex%K@nR&M4!@zX+nDEfCP7!va-4V6~#tS(*iKHl7i2A|Rc-9dw>9pvm+ zAMMlG9KaS-aqGp~VzArC+ZZ#TlUWZI>@BvN`SQ;xHPJf&q+`Njvm$Fmo7Fc8`|-J_ zKsW9@2qr*Ev`I5tQQ#{4zOR)GrgcBtfk^qt#7z*@%7O7;<)6jdgM7YABdJykbFh)eJ_vHe z^lyD_t#B20f=nHm8LLk7a(|n2HifupTC)1siPY8e8q*CundryA+-+pI0S)T^HSv=82Cl*d(YBlH*R%qzB1hQ>K*VvpD2t4 z=0>?!Pn10^WVgWcGV1>T>g~a3Fe?&Ii1sG4vCzx(_!1@i|rv_0#(u(o` zW6Kjd!0Q5B=4Znyy$@@>?94RB zj{oWyW>D@B>UsO8bXlW)aL_Z@+_WFaag|O z3o~1LgxtoEx7Ext%C!a~w3g9{7585QiCJrw9ddnp^ioOpx^>=k=5YI#kC&T;+Amm{ zN_ioavpmZT8*D+r{~I4s;67LXsT6hHPk!$u`}Kx6l5v{B=Y;Zd97y&4SyB3BwR8#* zoU2*MwYuCc?beEEM>lWAt)Us*dbKyKw+;J;tZ_ePMd-%{8@gwE>S1&IcV}{QnT`__ z@(V(Q1KV>dwN_Ps*}b)mMWWoRp6~IN~Eg#U*L9 zWjP)+_Kp}YDHqobzY~*cM%;+|{SC!?lpNj*(MPY@G1rn0irMOe%d&>OKa}2vT`jzY zBtG5+Tf{!6ah>nFz|#@j|8;hqVNGRQJB~1*;{_>0HPjF+6a_&7Bn;)!K}AJEwLqdk zWC$9NIwD;W2)zUxAs`AUEmT2i25BM)#855}2_+DUhAwvpzwgKWcb|KHWuKG1&)RFf z>wVYWNls+<+BDh8=S(UVG50IW2zKKVoi2IzYNoIGyndY{=7X@vm<8(u)Ou_JRazm* z90qn7jGaS5Cas$HR3Q?xCw6+V25LSrCg7T3fM602l~wHdOgI*_!)OxGz;W^x_hBI8wa3K%4Of}j1X*6`j$^HzLN!B`o%^D*-!JC95n z_ds`5L`6qWj9woY7@*(;|G_<~L+);_dO##(WGE#KF8xeDw;!!Rjr%>#G?_?UjKXJR$!pxvCQDFOWsiT1{dIKgn8rQJB`Z9%IsDr zLf8nV_%=H2bUY=b%B7%tcJ|ilR;0M=2*Qy`p%NN?1`U;eW!0sDuqY6wnyn$DKNR-; zJMYga708YL-f)r6U&p&^J@WUv>CO6Q&LlO84Gros{PH%a(Rl$E_T!6?Xk0|KOioVP zulQ}QepTfiAw+|Kq0>?w3~_W@l<*7J)zU}e>f_Z7aPrfA%+)%9$$QgLL7WmC9rzZ+ 
zS>X>+NH0k*U1yn~!vc;JS9i_H$pL?b4l|EQFmW%Y7G{u0+y^CZANtOld(@D~$kJ=O;=y^QS5H_i{;Q?Jj!IWnR#v^v_|wA6iRu>^TtauV!vAeb!oqUQuqaKDgopw zanJuRP2$52mls;~X0!A2CsQr)gWQ3Bfq*a92Q{ye*vHogwf1Iwe!!VKAOAM?UsY6Z zTyELe*dSY_3ykkd%E~sgo3=xjn4wF5xeQ-zT1|Zww!6TGr<$6X8F0!Ipj7#w#p+?W zva%o}(Z<%ciHT$*0$%b#Sg+)RCL_gjL$_|%{sx_27d6-&Fxa)<9ehyo%|%5lLDnF2 zSjODEF+Wbu>ce0WT?)j%+1}VQ=+4y1RNs5add!?*H_>lK?bl`B_8OYA(7mi zo#o}_nbQ%_DC+`=JDa6QtqEOd`Yy_RkG81L6&IE~yQ&-Di{;d@y z#l^))9rx1V&jcbpzUxO88QYs9#B9#?rCm>364Jh^?)heKJA~@UHh?(S-<^Y$$i(y6 z#MO*xYHR-r5fKp~+gUW)7Pf>6MF2LLO7aQ`!(dEVO;`*@&86NQZC`QP#>VEr+mR86 z;>%eX86(-}=Q1_1CjAY-fSAa8VBeEstbO*ZJ9-Gf0fWIr%#@#twsszl{7t+q%27p0 z?{OC@i!83@Y@SAC^elFGM5F76c_sm+k>Q$}nl&LQoc6@ncU(pi!FzgII)4duLvlBu zhR%q!Hvs*ZjuSasLN3>`;`bb82HtG--h zHh;^|CS|Oab=qoO3a|wrd1fz3WT}cIOWxIR2Sag#kxt;SXpBz+9rN)ImjRaAZt(0 zc{$#1PvYZU>hD_Ivh71{mt45Nu0_G+A%nDiugb!Jv{{kurl3_S3qWP~X66A+Wo8PE z8+KI)ONK!b!~d-DuA|62R=&?yUY-E^K&!;|NXqs%)5X(|z{ta@?L zEHG@TeF-w-!bBPghqfom*0t8M8qkNcg+T$#<%L|LSm|^{w%KjBBWS0gu!T+b!e?VS z`ssU*Rmus8QQisZ4pzVNy2n=2dp;qzT=jP@L+70RA$q$Q)ijoaZzJNX2iyJNtBUdjw zc|GW195Xq$R&cO?WMl+Y+NWHnoJg)xBEGqBHn{6WPpl*S#YN?3r|w@z+uKb$-12!A zWW3<-R9&yyTuWZ@TdTvB#gJk?7i3gyIAwc9#O0RZOOz_MBTN+P;^nN}J7;8!jhsmc zKU4FR0*~`}cZFpWX+>Y_;>-2;uOBux0u-W&>bVHN!cttzOcNiD31`d#{)u2%F>b83 zGme4eiF|6~cq*n0`F|7d;co#b;}0n-O^Q#nHe$exW+`JVfE2pySiGEQvk%&S0pVQx zr(q)v&hOM`c&w6-&Swexg{x?sX!Zp_E@=}}Bv5t^4Nbnja_ZQmvncYeaB2CXQ-gjq zU?}@aCa~ZsEmC>qm$;=89CLs$BOH4q4HN@Vxrj6>hi+%06ppPF&B@J`fkJKC#-WAA z-b-ocG&Jy_9zshh@ z$}EUT26U2SK2C^$WasAQW@XKPYfzBAdE7P5p9^{)I-b8FgRRrbXVD{O`Y z2M6Qvt1Mcjf=YXZihMG-*6m-ys|&0Kq}Jcxzp~=nRRL(Owk|Y#!x-SF2Imx0H3ZvY zNcai>4puNkZ5FEz$SM&-9v;a_HRoL3;$a4RdcIIS;o3OiIc^PV^>M@jUks+OCsqfA zQX6a-YW)&HBQS>u86N0MmoC-Vgr~fWcUM5(VjysFX-akQPcva?kUUINvS*!Tsf1>zkz}Z+Xf&`|Q2XbL+5)@x0$x z{4OISGY`LS&runf-*?H#{QBo_v*B+}DXabk|C!~q>k$0q%aFNKCx3-MuXfyb^pK2< zpNfpkyhlci{`ZKU6b9dv z!SC5+=F`$Iq+DX1iPiLDB;7jsWy8k?)|!1bwJpjGwO@8koNQh2`B6!ThFnR>!K(h| z!1S8r!Y^7S{Z5)XE;)4uP8UD0T8Gy^cIpf&h~n@i{QqVU2;cXeGk6xH3`T{J)*sQPEA-TiE) 
zf$jiBEI9V}pQ+?OCD-`!+f)Mq#UeA(A{iOuR61RsE#!F*#}4?by1Ke5XPrHE?3jU| znb}P}u%Y7WuX9t$d!#?zczyMZ%kBx;*;)>Qmd2r+OtMDe(VkGjfr#}LB({K69jmK7 zcj*SjOR<&?j&~fw+Llp66EC-Jv@4E9DAqdAn}xlU#3E0=nKNf`HG>?~1UI zo1?x>TbpxJ!L9u5jk$GAO+E()&JhUAkbsc{e_@}4;nS?n%5Zg$;Sbt#jU=J+^)lsI zb7X?7;Qc4QEoC%mPWkAmZSnH*>Q)jDDh*pwMlFSECagFI$;ESgN4j7#$rwq)i#twt98$lY4tvyEi+QA?jmfm<3)qcdp#Bcqn|m zUBRoC_&JitSJ7LuPv*qI=3+9*VC1p9x!T5UTeh6$w&av;o!TBMyUJ&Eyl5mzUEu8I zW^&-bf!ps-U{RlZ9nx@Ztk;`lywT*E+q3CB)>oL_UN32GRXz#1^kCUOV*c1 zXxP-YwZ-79UY`2LBHcP1zUoYXRfm04{tSmXOYNDrGdDL^WqJJK#g_1u#p)I~eDuSI z!(Xn+KZAd0X_d^+|2ttlST z6lb+Ol#x-1Gxh(rNOh0QX8eZyPU^vU1ATde@e{dj?a}x!l`dG9DL>!m%XRIP76tSh z^E3d#=XaTH}yv8Q?0{-#g88N2Z$ENe=yKP7BmRty% zL*4ZSLo}LcNzyGl1k08BG(BCzn@#r4q0N-cy=n}5@&mfRGvODR?e3o5-cCcE6?keP zuR#4kIaG3|jc-w8I8s+v7si-OW9+B;cd0u>g__Twdt)~?s}p^&HJu5vlJA@R z-EwlKOv2fcoAa!SeB7x|)6&#$?0(#0lX| z*allp&J4vLUB9Fbt^Ns~H^YX@mE7R(;gSsW}LOqfeO$dfa!OUS1ZZzl6$u zh|)^h%oE(5{zsY130S1fyQ`qBf=}#Sa|XtD^M>&K^(^V@29eNO`nNUP$&`9pG&NDr zs(gTk@@d-&*r3>F`!?(=EoOdO(?fFC;+Jf&JAXL+2~VVyRn)Ukg@8Vii%k~B z>qqJ{?Ba1z+gz=4XjxgOOShs>yobvR$ZoM&R<{Ah8dpIWEs7Eg z?ydZvCv?GXKRKAIAI0wrb|yGhr}_4Mcr`V3!bPDas%K8F!D&KATF=bA)Erd+6_n^~Q@zg0x@3`j<$GPc2RuBj(o}O}KrTQB4 z#VS@bPtzRQEXhwgBG6VnW#38@e=%fDdBR(%=nF^ceEXtQ)x@I85SlsZfaJRy-4NDM z?(PZBz*T$}lEQEydPDniXj`@kyVGN=kI4mPutixBO_Hx)ypQ(Hg5k)UXyG zFnGtojC8=UMxHWWf~%12?Ne(kW^}!GCd`%mrgSYDyLWndzTVW%qNutsTzYzXJ*)OX zE$qIdDfe+!eyru;ed5uw)-?K7$&Z#rVmH#J9ZwD_EG$f;$8K`u)Wn7lE*AH-RgQmk zaxfVy7S^*?NxmPXi;cWht9=I1N7QB&?qVpOV5ZU2H#v5c-o;ZPu*L*b;&n!!Mpx1a zh9cV>nsje>1f${l#`0{zTVJMS|#&)+5I0# zdCL}r=n06_&RU&1Dh`(Sp+kr8)J!thrJiM9-%cKDhx>MS|M~k3Tt)LSQ=T?QA%ypw zzfFoyY!{$a^?78s-I=L)Q5|LfU$dHFI$GTdHSj}`l!-_oPs2Ip=yLB%+^T1_Y4xlP z(t%dM2HNN1Fb`MpKHDVIws5jHEew}kiR-%nF*FMaw)$P==rv?hgufdD=X zwsbLc+(d)@mzVdz0qQ~hiIw3lH+HW9c!#I@f1T@)%wLlBBq>Q%o9*vGJ>69qo|2x< z^=9-`*TIE?1HGR*iEZjx+oX>&7kiZBu=pV!EDsadF(A!^;Q(bW%39Hh@D*BK;Tumo z9CC{DE6*wO;-&L*e+~hHXu($O${S&(y$(GuUcPjHYjMq=w77itIk(bg@yP?rH)H)rmO*|8o~8nma6ANz}s#ZQ{$(DYIv 
zTN6j?EmiRVJ&6EZ=IMy(qhay}f(8N=r@pG?DA!SPvDXh&Aw?)=N7_?{UwAmXxMX{A zTX1+67Z(eJZCpd59}IXX5Z1 z44o!JhnHwro1MQy?zpRsGH5+(@?X-IE3(8^SK8Wjv^Q=SE+{s5kkihY=yt7y>$e=J zH~1Oz&!ais)JmSUcrF1zJU}B=djP0E|8W38=p-NSbKv$o_go(A)Wp|JZgt6zBI0D!~wOdhQ6!iwg_rw1N2ew$^XQ+$ zaXsH#BC&09M7rV+qBs{^5gh3NQLymQ*W|c?))l&VglqweB_<^$wHPay0Zfh9U>Dt& z$uPhpaIayLt9T6Xbq%HQyii2$i8&7w8@JwEP%eFI73{4$ElKte2{em3k{kbCatGCO z7?M=CcwR1R@Z-v+=2eAIL25bGR7Y=hq2jdLFCAOu|Kkk=Hb;BG)bgY;!C3V=Ml3GB zR#o`+HGOkCr-x2%4qi|9lZP{?F{X=690%R70Ag@7}%Z z`|Xx0vC+;uaeC!yvwR#?OyjT#$#Knzu)e|znP3nJ+&8_vl18H zJV=F7>}hea0VmemewELcD+}iBfmN!XW|>(7usagoJK$>>0_?h8{pC;oxwCilVU1ye zq6*nV3^B*hmmmJwbOWtI#P49t1a_Q+8Sw4AXN&^O z%FRho+d7DbB8LY{vSb)u z`&{%FHTuhZxju5qIDfGhA@S`eGaH+Si3cLj!dgLPdGTmEq07ZVV7))_=>Ejo%WF#Q zQ7&_GaAO1WI^fZEOmHT_cFg~FTb=lRv87oCfc9sFg}Qh(jm)sqU*Fyk#wpCb zQg$UqoB{A7bFAN#+BRn9B%1PGG2D}5+6`diPgu6t=kiRn8542~1s`w~ziOFe@+~IU zEx9AKw$jAv#~puhl*Z#Si;u|d%4jW6WlvGUnIAvpXg=JZc{ZRVrJg0_w9VQvLVFn_ zpvV<6e)s*bG8rp4i=dUD=|_yu+5qxZIeF8=|2O8mg{h zm>kV0n5fMp4dvf)Qo)P(sCM{&+-#LEzYA9}rKJKzf$G!j$yduLW3R%(aE5I|r!sVC zYw%gQ^!q%6U1pN|J@^+3b(=HoerJjCoWb+?v zO;*D|Oc5!1e&@OTOEtP-_e(iNJaG5hW-v_TwVTQ9V*PlNJ+yR; zP!nrNHi2Y{eJ;;Afb!}oX3>b>8UF_NKXyTA=NKDK9w{D4)Jz1(8Y%!R5oJgTNPOD? 
z3@5QSC5iuVxHm2H8E`5`Hxc3e!gCs)TOFITciYyjT~!VuA}ZS|Q1%!2;nA`8NWJ{+ej~#QUupldC#2F^nF9;Zt>4zCK#*%!_*08<8l~9OJR9Y6J zAU#sZ0tkYt7iO6O|0N0uN?ym^-(39_Pd!r=Vcr1s5hV^$0#LFPhXJT~m6Ov|sh$b5 z0i+&+;O1{bmsW4r_dadh;M#sH<@bs48q1+u9=BoIUp+@#m6U4q-=LfyIW|}3^&HMU zsLiOhJDggQi<2N^q*Ht2Mabw{BtqLxk zXKXwT1YNkLeN@$kLPVvMol&AM*7j!b&vvyyc^L-})>Dw(+M_X7q zw~*vOJ>6R|u@Kj1mTNG|0?9N%0b(Vpj7fMK#%eVGoZ`GFz32Fao0`9qW zjv6533^U3F7ndqa#$-KOxJtAqhyQ*GD5I}2Gt=zG1!w2V-ug7-Q99IK5jZH!E9WWc zspE<4V*q3z4Wv~IL=j8VgtsKK^(7Ajn)}$!*TPJ9@Oy(G=Ue4Cn_E0Teh+uv&8-G# zV~jUd@;nTyWj`=~7*a&o2bRZH=K+@;!=EdCY9LzPzn0uC;3wstJ?b{`JqB0NdzWb; zsFU32wfkCB>fGb-i;rNHd)X@s3Cp@)h6qKc@At=nrDmgLZvbWMjeog@_*-lxqS=Cy zYVVZYje_XUrWpsQMWYX|HMZLO0EJrNJBpW^tt`!_Pq2KtuTeqPC?IER89(V=SENH+ zLqh|uOQsW6dK$S%8Q9B*T~(2mS3_MIo>zR(X6va96m+*~QE}-u_SI0*QE`2_5v>kP z!uFhn)G2^W=7Q{>Z-MAC#d{DFs81KZ)3t_umO5)Sfg$9(FTLEKMwma8+v=te-q7^H zgAKFV=&IQz&s3)ffJ?|GinW&3?4!0#9D`6f{ZWxaQS;xTk;#W;MWrxvf}Gd(0A_PS zJgX;Xud#$#h=_<-V229Gu=_^I^p4;Ra!)@i5;swOLrZ;(lqq@h*SAoh-2T_}CK&Q~ zrc^%+dI6F^!h)DfnvAT9{e59AQgNv2%_a-@3v2}aDXNyGP^To+iY5bXDm7l-xlvFJ zurYY5G(k62xK8qrU-Ga~EkCx>&RBTC+uPefG#CQa?Zbzt^feP~SLQ**W&zbM6{j@$ zF%TFgty3;zxvL4JlY~U!=(UI5mOSyqz!sGrwu+Q9G&zlhiRM>nidTyuxF`nzWD{Nh zNVj6eirU&*Din3jN>1|C*F?*1ath*+=Yj`;P;|!I^ny3ie1kje2T&CpOG6`tNm*uv%3)DKMYe-c2 zb8{M1@Mxr=$avCXy!=0&9x(FgzpviA#dz&MGu6h@R=ZL*pkIF^zxcw}=eUZM@&n0c z|4Px@qHx66T)>LRxIzHoP=!))4#Hk2gM&J{`y9l6QYwFJQ>JBd8Uin>c!XnYw`STN z8fG1mH40UFD5$9%e;G?k6H1Z62{TjymgIJHHHP6DH=J9>*FrFVq8o$+i2r(zTTd|T zeljHR*Tzp*bPiN+puG!#Qf#8#qmoSo?qh%2P)js^qV2sF=nsPOU}uUX8p|v*M-%%( z=*)D%Q+sZB{~?Gf26m<-bP;|a>OkVrRZt^1+GkGeOe{lS4hAj;faEmG(rxSErt`EQ zmu@!4SaQABxAtM9?BEq)gz_S}i8+$~ixtv7uXkhsou0ZY0647e-J>;zKe)ODe8m3P zdhnLxsSO&M7Tx~e``699DGd-`_MrXyM=LC4JhhHe=yiM;!uQ6Co9fqO~93BE( zE`c3z&}H#l&v5tFxtaX5KbjkzHpD>s8T=UGtR&@?_TI&2=%H7*QRqN=yx#K(B&$$C zpjo>p>-W={P(5HwJgQb!4*NB3_?uQ}PaA!|K2Bc)pTD^iQJ7J~D1KEMX4mk@^&-jK z%WPCbIp$=K!c*PEjP4i1Bv@65w84u?DC6?=tbls*L=|Lvf3AyzZg&olJ$}^F9sI~+ 
z*ea-e{24NeRg@Ov>*vZ*jMg!btk@ug(M}u(ikh*R*Gl!>h*;+{{-6F?YEBZEdsnQD z#DQK@yNULWH%}W?1;B~_%pbAB?ooRNM(vDCt|xg=--3j;-wOkVzJks&Bpjd^;-PK> zypcn5@B}%bb|DGK;IdZ@I`6NgzmIPf>|LU5a)`L`&*r}9nXcK|fF=>w`QPh*!(QLF zs}Tqzd1l~nMpyDncF~!+PScc6fcamN$opjvl6n59;_(3Q48(bj z&p!>-;+eLlCXft5mV0zprvwQC5)#}+g(e56Q$DS6xQZ^$LDQphy8N8+on18$(S@md zelGW&Xt_tQUbI)QQ&sZfX+pV)!-IIAL(_lu=kKzu7LofC9j*I-gi&1=sGwUo)}`#h zZXRHVdpFdZrS4%3cfO>91pxb$uD~XIM?vMA0`FL_+LIi&_JXbW%@om;Gl5hM`Q3#2 zibhka+cAyA*Wz>g1Jc}WpmHQSS+Vd zs_y*Hgr7naUg9Sy-n2T~5G1NOR1hl3Z}(DlES9dFPD+6}*A)C#9$>IN4fp(}S%@{C>Q1 zUzXs0_E23cDFjCFZRt9IPv(@d7~*dztQ)o1Q@YesV|9^^nx+4;eNLr=PI^lw?qLP4$0z(L{|s=D)G`p5gdgD&@j`~l|v$aq`fKiPt|bo@IB*IDg21C=iS~Bj8EFD+I}$I6s4iRf4dm7ZajQK9qKYN|k287dCLh}4FWqZ)Jufe`C(=QTYcXD?1)0fyIrG6LWvIPMo_OmOA=Pq6uczjNBO=zATkpQ3ayx3E zLZ@CcWEAi1rAwatuq#@$q4s`4;DSbWw}%eLK@^6-FJRd%7b6{;zo0b8Vd~pbOu+x# z&|smqO{D@DmQ|w#KX(UUdk_KQ9Nb94u@FN_`BcctbCKld1IW|~pUcjOGQBx6EkMM1 zf&dX`IREPNc-u%o3jd?=g={>F@o3b}g1(FcM>Cqn5ig^Y8`0!ss-M4^Rm{o3Rj6_x zt-{{ZdD`OW`r&CMNsMS4(fa9o$z1%5LdF^{Zv!na&TmNfW(*AjG)e}C+kd2{Wq`GS zqsvY~uT?lutcxGIW$OQg$c^?;pemAFdRbmweKf{>oKDj5g1|WvPYVml8_&7JgtlgU-Xvg6_JUl?I=<;JXd@ie9SlCldd_Z&+yxA(^ zP7Lg)wiTv0zIHP3_zH{fc3`fVU|4da*GJH6%^pSA6GN<>ZHEdg*OY@QlVU6AcQSm4 zn@9s^hO<&460x#Qy}ymCh!-};uOu=E2l^9S54B(FP6;2>(hRo=qnzde3q{!$E87x1 zHVQb#h$RiZFELICJ%YDsXrMSc?mOux8*n^>7soSrL7{BR|{8K~f9Grydcc&Q>A z{4WY7$${}2w8O=~ZW`5)`gf*v0bW)lX1;v+^60!Frg(~(Mt<6n3S!UEUx|P#!Q`QM zi8pogn0k5|_$>JUWIw~LlU+f&b|0#J>wLq*t+pxQUS@!&vY*IxD$;2AdIQvJXHOkW<8R5L^v(G7TzT=Dg#%;#t~1eCmr%Q9Fy1 zcF6jl9qcFW%YxXssx_b15--&#ZyrJs^W#M$A2O#t-mmu2up51}Vat{+u*SMjDVY*`cAbZy8LEKMtrtFX` zf@Ry#nC&b?G&H$&z5Le9_O2&)gu=6p-er>RSu4>PqR!yHEI^9w-7w#1kJ37 zCNYQMW|-c0;aDzTzXnkN)#UHG=xMMk;Q&S6=Dr zL24$rvXCT4_5W_9wBN~z^CC_`euu6ursN=*QLlen6M5A27THvAkM?ZC=X$eWQs^`4 zap&U%HAB&+ycIw4A>!wQ#6!iUX&czkL!{!lF$QiA6Y|{szOIDZo^kSJ0TxzjC;(Ot zoUVs}eFO#oLS8M>RHB(0(%e8gHU2!iFjLH2n;nMp7LDX5_=&vFM%tV* z1Xy30Gwc)-hqP4fb2lnjYWERzLw1!3ssHTaEbPTcZz^bRpC6}C-7>TOcPA4a-zJx( 
zcivBRsv6>{-~$9I{GzSpb$^mf7I32PPYotF7M|T@L`@*v(mz~ZcIt7)(KSPX*IpI! zY;Fv-hA7#|Na1#{Dz;<{S-Gls;v_-d{=*BfPJ|B@n&|fY+km#ggKk|Aa8ky?&q0f_$Cx(4d?pQ_4Y-14s-jV1YSTgW=AvuXFs{X zkixyfX|->%$@hW~sn+o!wtc2SBklJt0C}5w*%F#xCin(K#*6?8BFG>F~{u9k?Vq_m* z-~C2bsT$16n3*aiGm{C79k>~faZRb z#X89CT6JjIV9d8SC%Q2kjV4mZ@;`Wa$KQ@8F9t-qSXs<-0(E$~w5~mll5C*Q1*iPC z9Z)d;p_2(QnA!vK;6Z8MesN68?L9pZxXXvRP6#?7Jmix+Y2& zViQtW$UaCdBDKK4IobQsHyeQ1wAWuA4GVCl1qB6a8d*Rzg{hj>oQbrHn5VEU=-lco zP(zTD1N}`!nCjSEL*$;ev$I3IGIAPB6Qm3E zJFxX$f|D#rsWpErFIFl81YgG%DQjD!R?;7=?#T%QjR+T)y#uDVcRvnA~($&W??YoCcFTGKL_fWw=IH6Mga9`#W_Y7a$u;@^z(1 zO@}yeMm2dFw|twaQ4#WNEq`KQhepMlc&AL48VyPbC?l|pp)&j_Z@%G3rf}!hm5j}M zWnSlfYVDr1GcxmO8zkm0>;#HCxGK)CBorwxz=;ya+PyhITLbq25C`c%7SF$r9h?T4 z3>VN+!b3YwAZY_xdodrqPHQN~;NwCPn0h8y$5uwMFZRBO*Qb>B0GY)EGHcxuNKg-J z^zurPl#-n!r|3oRZo3J2FZj8u=9$qXRwSg{97Cxaa0C_WBXzV7qa!mP=U*AgJsw~> zc)uh=rxr|1_rYJ@g?u591mnP{+FdN*RYBD?`9asV>188$I*9e=mS;d82Y{r<+hwEq_$jW?0EkkJJ>deTN(nVB^KGnD7QR7t8=i)*CW?s5_jrl)2)^gnX?2+R?DS zT2R12vaL|9Str*gEuStHVQ_Or>JXd9Bj%KQo9c?zlozj1P=IcR2VhKi2Koz-sLlBv zPC&$!-koWXeevsCTt(1wt)n)&e6eD$>#bNEkW&xgHfNfc_Jxnu0BvmrY+wMhT`G2x zP3NtG^b6IzGEH>C#X|2DavWlx%bT3~#hD_otB;mvYpUYA<2U)1`(o0N?G~5)IkYxY z;M)?TJB~l^_1Q9+`>vQ%QM=&AYE7U98*(GF+yzy_48&|do$}~s(I_8Z_o;H~ISon| zJ^(TmT02V}JjXW{+259VeO)Uv;K%&aoGWUf6(2RSm+R2{N7|=FxHutUo*3Wf*PxY`&EHwo!CF#2 z(-%l?(B>PKrqbKzbVYkqoqVV4dW-(TeLwY@xdAEGuww*=H*PEr(5PW&98XT0wT^)y zZK=)J3@w$J(f&udkH);b8Bw_WFFXGY?nr?0X@KDC-o&)q0XBKL*|sF$ms6lN#!Y-Z z*UTWETW#V z>}C7ew?jgd&YfYU5yHzKtLZK2btZDJ4iyggW=UI&md!;Mx-A{`ZQo<9(P{{;#5OIZ?F{HWceK&%q3Tymg)8zO0TB+qh`f|_JM9)_Jj%K z*5LDW02{XC(Rwfu96-q}HI91`l`&}hCa(2*4CFl%?N2H(hY9;;)Wtgh${JAh!*nT= zwxW@5R@X9)fD@VKf2n4H>_*9iop^=T&|-ReMpE2V@Gj0&RyIgvP$QZ*JCaBw^ay7` zpoh8&x-pCmhVD&!H|Di9kg6gyd?$O7z?l?XnFF~8xNM7taUgi&nH16Rx+P`y9|^Ec z(YgcH*^1%HeDU^QvP~ob_)g2gL2u@|Zi(YTP9aqP1N06q-I2LQ8CKaUqobo%biYye z*0K}9ul{nZi;}fD!O$<`g#`P1aRvtM-Nb4mTviD z2PfPi@)}jiVEzMz^QgJ+wX8uX=6=uTj=~!Ulg0P#{9)k^q{aIiWwRCkL 
zt8bP|LYwN@QT;6Kk1b>NgcpPH+mwoHyW+(rj?_+MwQre_LLW#x`>0bOLI%bTV}7eU zPO%2x2G{_$di(m+>y-w8eMBB5taBzQ6s#l?+&ey_BuT2x7N@UiT=%&i8WOs0Kcb9ff2B(W_Px>QDLPNQcK@5M zkN#5 zr6qF?Nz>y#w4~)ye%l_(83RMf(q8A_iCFT?P>`^zkvGPD@#TJV8ynKT`9nUmQl9HA zAwmEy0fdk4LcWpBsj*xmr5fv>putV}CNsD%O0sxXqf9{U=hf}aCnUZu_A0W4ysGd> zcL~%H-z{elCx*H}y(;U3e#iHBmL}2Z!aO>Iw$=-9!38zCU(>`-i$Y+Vp!`a2@Jc(u z&3b!5vG!4gRh+>6@#4yOzqH}w6ntuch_4~E6KZ|llNGJy9$lYzOq5g}7= zAo|ybG=Hc=N$dmW>5!^Ipfg1Py#r&BPo7G==>90jk0QUy@Q0RCCD;#-Mlk|f<4S6z zoa{YR!<(h*1;CEo#nEd&A~ymk>QmrqP?y!#yaAUOGTn4!{mMT9OR)|N81PrXhWPg$ zi{IfkVDH=9hAVM#Y;WQ?jaMJYRFarkrb-qio}?Fgi-2fWqzy|6S|k(VCNNg1ZtlRc zOD2T+*U0>lC}|8w9#mAmi2x0y2OR)7oliuxWV=1T+gkbDC+#C6BE(I&?;*ZGWp-p(`B zYGegUEpmGB1}~YpWys#TGoUElwrg;Ny((|BV_6yyFzb;VfNq9sQ@*{7K@)Qhwz|3MOKG*85 zBUK%7=w_7~_rp;2EnCDmZ)r(2yXtP73OTlRwtz(%NawqU+yPLs-=-g$NKc~_CaQs5 zMF9h>Q4LpU1(ebOAj4oJo$aA34^Xv2Lt&=X$HP{H?f>Z>IDj6nXKBY+G%{L_xkUXZ<86BgkQnTp;(|X3SCbbL9O=Nx4arJ zu7Q37WH$`;$kabff62W;4{X!5-A7FJMn=RyvQ(=JngxL6 z!IwDR-?V}_5Px@ZpzI7>hRD9-F8q3%8SW6~*Zmn>MD6rx1f@P@_C%K^#rHFNOj_)F zm*+>uYfT%CX0pV{=%QGhdOEY1nj9Re-n+ix;0iCFm)he6ARNq4*WRY|n;EG08L*we z!JM)|r|pZBkW!qBz0RuE32->-zdts+hyez~Gi|hghA#S#6|xVZwwWZ54bXj{S_ZOI zQLYFG1!Ez(F~%mK@vHTz_+xsB|b%_Mj*sYS+ z-cC@#v-DlR%%D<-7D8LtG9s^j7+6$!H$N!Aj1{kr`_@l{bI-S-C0*C*_Ce7l$fY6PMrF+!- zi@|FgvK9~KX06_V-qwYrtHg5)+EsR$QWD?F|27?#}~3S0{i zfK!GFaik1eA?hp1$uVx&E7)VPeu;VqWWOU62X^jfqnvk09m7-awmPi(Au{eR*23$2 z+tDx|#75TlN^Tc-?fn)lRFQ z=&rfaddWdzPMb-qUR540%W9wQx>(idA-VR(O;{ApMRtqu-1h_^7%Zu8jHom7ITd#6 z7N3PigPK?y5T1~sLx=y*&Bwql2;zH}T{Ho)*>*>gE!P9Q95XoJrZd7CEdCI*gLk&F z001HQ=Q*zKC#p`t+{YoCotDy0+kk{`Jp|a>R)}cG*8mRYC*mI-cWF`qr z6~va}$W(ICCYP31#53;-gn}W5fFf>QQ8sNYA9yJ6wQ7HfIlw8>s(91;h|ow&1;MdI zJP~k#r>ovk=Ec+Bz>$~@PJF{MNNV5RxXQfTv$dgPkitu>vtjB`FWuDM_ly$X!A`9q zHY&7zJvhk>7sO9RaK`>vNQ}`H-mmm|@u<^$_%@y@h+83w1HP!sV<{7)xyfq|cPFj> zcZg10?RE8Rw`nK55MYMM=!d%C1u%^EJOmQsSuS)_;ExVlm?BR_@ejj*P3OK}#7&n7 zNTQlC0ERTExHHBl##d{6SPI@Z*oy_KV5g85v^U1IUNmD$?)LgCHu0mieFT-|MfIbO 
zvR5x6_{5KM~X9-tx)y)1>|P4eT1xQ$l3$X&MzbiO70+T69X zzk?~?mg|RJ8o3zd1no*YNc-2G$Olw-L|;|1D;M2C0qn56p^co5J+@&C+mB@Q`hY+a zNbd&Yt}6bSu%M0T3(P{jLH7gC+NYf)F>eZ50d9%A%3Al1k^%CZgz6-qW(1Bac(`M> zT@r2ccYV_cB2MZ+m@<*Z03sHYTgZ=AeEA$En=z8-=8}KPfpF1ndw~C=N_H8O1kLI_ zN%0x{O=U5M9&R%)1@->}g+2ss@^r(d=tGHLEiLs{1iIWJe|o;IOP|-f;??FZrMO0g zt8Jcp21HP?SGJTT2&FJ!5voZJ9;UA;V|}r4AR9T4CdF-eJ_;5qQ)A1C{P*GmI)9Iq zD2ZM`jP?NZgn0oEQc-6M>SmbZaH8jDlU{GFF+9Bd zRe}q9!Hwk{=nZMw|5KZLieB@V70)XjG8-pn4c>9z zd+-U2Ny|ZdtQ=?9(N;_NXNpfF)0itH+nk~dq!`)QH#QthowXTkk~{iWKtIyA1yE}& z?4pQ1ea{t1h*rsV_%0Cv^(R6FSf9GtFYYCv@3F8><;{+zL6tB7eyJ^GE42c^LTXWp zo%XP#cZh4Kr-{$s%@GTKWGCbXvQ~csQBLQJFs(bhBnoBP?<*{snzURIG&dPO@gB2y z(ho9gZ#qM|PnKb?-0SD~y*G%_0)~OplN;0E$eR*oF<8!h8hW6o{17M#)J{YQ<%H;- zZNs|dAA+n9Nx@W+ljiH0o!sPz1AvKzTt7Yf@wJp{cF2|j(k)|0tT!ZU+-TgK7 zHs5^xXY;E$GtHk!T(g&zQGdar!-TXB7kRU8QXys-1_(!Pa^c_X_A|N!_G?dS=QLI> z3u4XhR5cj9HDf?rCeO@x6)pPCHt^dsqvWgF=lVaE@40|=HN05ejd~`KolohLAW|bH z{Hg6N;udL9_4*yEbKstdWp74iuR!Q>dlhQLI@6}`q!36!Xemff@8KC_J!-p^3?0si zM<)vHFTVM8W=%?Z)P1pUi7fTL`qlux?!&!jG|AWgC4i9|4=%x9KPKrG%x>p=vbbcc zN3!nn2S!&{jU&kqlG#3&ygvkjh-K*jbi|)b9~-YBs`o=o06GAN@BZ*G^5#uqB3|QV z7&@N-JBtC#nY%c^J6j2i2f%-tz!JWjt6UseyzAW+>r>LSq$yWW%Fq`BNkr0KY4uwc z!lFC6+QqINfX1&`Z9ST{5V; zG@R)L9@OA*D@hdjj{3k<9%0TU)@mCEx<)7m^pK`L*ESK6YNUQZ#2OIVV-BrW8pz)- z(@?v5xJ8q8+r^R>aT7xW#YwdhlkB#MZ0NRg*)#UJb)f+0WBt?Iud@`HsON|R8j#Kt zZ-&MlrBi`THV@uEexd5ec;!n zc1GB^rSLzcbt5kl<_ll<=kFzc&j20={7_x0vMz7|W5kr9_(@-bcPwWZT>66cKARbr z1aXbC?HsqeX3OffYAD5q;jDo0U!X6Q9;ZY0Mslki7JgAYx$d(CHQiBKJlXD-N?PrNem%dzgr}_aAQY2)l1%EFSK@CkOz50FCBr z@I+*<siB7S3aI1sCUo=gID8sa{l_gT}Gz(H&+M_ePeC{(w*p+0J0rB!r> z)I!PWqR#?k0|lmLXkft25ck68)wDvzk7&B1P-V!>;U?~i*91z!`jNQqh*`=MGD(36 z#SHF^eWs7nS#*$a4!K(b+xl!i3?SvE!B1R}-nJp%m!~Tmn| zpOQ^i9{Me7iU+Dpa2{2P=@DPB@oPUX0Q}pqvE=g`Y3BT$>FMkY(0(qo)B|!4S4PJ| zVFxG?%T{`0&w+~k>GQ0@vz6|EOw!G;_cftn+6q|om9vn&>dLigxMG0LTW=Ofz+K55S6IUnTXdbNJf*(o>bO{%FwT=~8EJtkr8qT(D z+q%Il0jGM%C`8afeUlfBlsufkOqU7!OYDDB!PMo=5D! 
z&<*RD-kIYrqZU{C=XI~F;TVkN1A`Jmnd*QVJ%i6#=8ifE(J4^(TTf`(6=SktTmlEN5^HoAdRYJ()GNeuy)$X=C-lMpG&JF|^nMy1sLNBqpD}`%qG_C3uvYRM zivkfUSq&CzwT@%G&T{!;Gh?04`dE)H04KR7%|!Iow~G_< zEZPz1^My~4_gMQITY}gxFG9xVgR3(~=VU>02y7cj@Am;0yf$A?NA93`9UT zG&^uqHsBdvyo6(vQ(%CZ1x{8J?`r*O{lAUi2}2(wD<60={XPO$S!b}uai~#cO_urC> zhYbUKyt+8!pBUtk2XC)QWJ8?JtF>N)w7%S>e7Q;gmxnFq91&ftx<`8_YHGW-rUlg; z-=uDe2YDFAfxeWkM_-F3jf_H9Li_ykrL)h$f=1SYMEe&)-`8KLla|`%)#}=j z`xmr>aMsKy2U_OtBnj0S6xNR`0HA~7g*DF{tp`w>fg-r(zgcyug^cOf>vRs&*Hp5W z^ced^FzxhU3-`$fZ0%)BjE|Qk%SXx;sENmdwApJjuND zp>Bv?`6Sq;|5)}`3iKXhMG`dRD>gmU>FPA%=>wUaVYkuI=W!{k@-jXAmyq{x1V&bX zaB|uWng~Zxz)2aa!niMiF~Q_6OMBllRCEB%%9Sh4=fDFYP_S_Il(x61KUr;)imc5k zLJJ%patyh}rnD-#bxP1#w<|t9J<2TQuYv%<0pn58mECYwG4|Q`CyFeW(P37Bwte%q z@@KPd*3$ku(II2LHl!3D2AzEZVll4bt~Tp|y#JS(j;U&$Gv)O{qqLm5G3yO#6NbY( z_PqoPM=@$TlBEz&Qi+!b4~R#q60DOT#*pM@oek>*Q{cr;h+Xm%Q|7~!?5{QP2Hs`^ zN`GIMcK8_q7k+up6T(GzzsDJDIvg^h-Z6R1bVt9nsrRLDj!DLW#~Eke@sF=xg3fiE zj=jkE1EU1T{e1w!JdMvq4rgRYLIXA3lZhRC0!9}QQO#Q|9l)!v!!F{tVU3IBSSv+C zPkL#ScDawJurfV^;{RY%tt`^kkp#FU_)N?nh|I^UpYc)=owkeMC=pR264;ulmR)p`T_Q@}LpjeIBOorkq zuhREBESR3p%Y&#LL*-x0k?@+E5m=xdO49a(gO#?=4_2552Y(@1xdD#8L?_O4Z9ENl z58Ba+kOL65oU(|8&Xa;;XLN?EwFEbmBB34BwKW$rF@YC7IaYA|{{c8mfz}KDJ^?g& zsB;kc+DnKh+aM0SX@VxkRrR7wo%!FO;)+DaAPp~&6kV8c_{7yHOUk*AgWy$A7-6-J z!ub)jk0|L zZG|J4Jn4&78eLcJXlv3Ni$%u{wZUmiz@lO8FjO#Gy2mi2=f-s$a?ODnT~P^2OE^Tx zr{QtE0;SnXZkIjK2qhWA(+vvXn$6=fwEt*^BRVNlWt3_MVZ8&g0)s^jSYhNNIO8_$ zQloZ4(?_Z;8xAQ1O78)5bDckySlQ9K70!7u6efx90}<b>w`HmGNCw8M^m4u~P4j68}5I5!}nRxzXE0Pw!GShnQM8d*VI6RB8!(@4(x<(c{UI_22J+pZ=cz zJm2?tuNR&3p7)%y_g-u5b=KN^jA`Zu)(CP=@Bgrc=`wPIz{*s|{bw*Bp2>_n;Ye)ii zy>sAEV~*X}5hA?+`eqa?V<4~H`5c!D z8pI)Of&U?_Um93rrS$YB4Xj*gAMFKAn8=o2HDb8Z2Q_fvObM6wk_8;Gpo)P$4ITt_ zf;I|7L+Cov9pb&AwFb}!fKvsveM$U(XY_eCR>FplAU%uIGxT}Fb zoYvrnVRh@|1Kr^uR4wtOw-`)9dRyI<)=M8$eGQx@?4R!0Jp_JC6SIBBc| z>7sGBw_$iaBI%7UDY8IWESp6M`2o<*EEoi^ZE6Y-?v1Eg$nQc&9+)qEMO