Removing tensorflow bottlenecks #177

Merged
merged 58 commits on Apr 1, 2022
Changes from all commits (58 commits)
1b3fa63
Types and supported ops for graph mode.
nwittler Jan 18, 2022
f3fd7fe
Switched test to unitary fid.
nwittler Jan 19, 2022
6852972
Temp disabled logging for graph mode.
nwittler Jan 19, 2022
5975f8f
Saving signal stack per channel
nwittler Feb 22, 2022
d2b2d1b
Temp disabled logging for graph optim
nwittler Feb 22, 2022
fe44058
Refactoring
nwittler Feb 22, 2022
8224add
Graph mode tweaks
nwittler Feb 22, 2022
1b65798
Fixed device signatures, updated tests.
nwittler Feb 22, 2022
d8deaf2
Restored check for vanishing grads, storing best goal.
nwittler Feb 22, 2022
1e56804
Fixed types.
nwittler Feb 22, 2022
5ad7334
Graph compilation doesn't like mangled names (e.g. __fieldname)
nwittler Feb 23, 2022
2458c01
Removed questionable response function
nwittler Feb 24, 2022
34ccd48
Added experimental tree multiplication.
nwittler Feb 24, 2022
a5297b1
Used experimental matmul
nwittler Feb 24, 2022
fb3be66
Binary tree matmul for @tf.function mode
nwittler Mar 1, 2022
1d5e1d4
Enabled graph mode for goal function eval.
nwittler Mar 1, 2022
fa7d329
Modifications for graph mode, to be cleaned
nwittler Mar 1, 2022
69b735c
Docstring.
nwittler Mar 1, 2022
c734587
Merge branch 'dev' into speed
nwittler Mar 2, 2022
ac67cfe
Removed left over debug line
nwittler Mar 2, 2022
f52430b
Re-blacking
nwittler Mar 2, 2022
c417c11
Removed mangling
nwittler Mar 2, 2022
8dbf642
Merge branch 'dev' into speed
nwittler Mar 7, 2022
d08e797
Added options for tree matmul and fixed signal gen to start at 0
nwittler Mar 8, 2022
4fe3df0
Deleted obsolete logging.
nwittler Mar 8, 2022
e1762d2
Graph mode compatible logging of params
nwittler Mar 8, 2022
ae5861c
Handled envs starting at 0 for netzero pulses
nwittler Mar 8, 2022
9ee62b9
Separated getters for other values and correct flattening of matrices
nwittler Mar 14, 2022
63e10ea
Added support for multiple inputs.
nwittler Mar 14, 2022
502013f
Correct masking of multi component signals.
nwittler Mar 14, 2022
a207461
Logging of params outside of comp graph.
nwittler Mar 14, 2022
8314d82
Added list method and flattening of matrices
nwittler Mar 14, 2022
9f3f2a9
convert x_init to numpy array before appending
lazyoracle Mar 20, 2022
e20d2d5
signal_in to process() must be List[Dict]
lazyoracle Mar 20, 2022
41db44b
Using tf maximum func.
nwittler Mar 21, 2022
e163170
Precompute the signal stack for all gates.
nwittler Mar 22, 2022
0d985a7
Using min of t_final for masking
nwittler Mar 23, 2022
6d676da
Support multiple folding stacks.
nwittler Mar 23, 2022
1fa91f1
Set prop method when doing quick setup or loading from file.
nwittler Mar 23, 2022
85befe1
Logic for correct masking of envelopes
nwittler Mar 23, 2022
9014efd
Made default compatible with previous versions
nwittler Mar 23, 2022
bc70b63
Merge branch 'dev' into speed
nwittler Mar 28, 2022
cafc7af
Typing for devices
nwittler Mar 29, 2022
8bad733
No switching based on type
nwittler Mar 29, 2022
83b5840
Default simulation resolution
nwittler Mar 29, 2022
f8e7786
Deleted annoying double checks.
nwittler Mar 29, 2022
a0ac371
Logging of parameters
nwittler Mar 29, 2022
73a8181
Comparing angular freqs.
nwittler Mar 29, 2022
ba9e153
Switched to unitary fidelity.
nwittler Mar 29, 2022
7ff7770
Initial rework to enable graph mode.
nwittler Mar 29, 2022
bb8293d
Added explicit loading of simulation resolution
nwittler Mar 30, 2022
90de000
Saving simulation resolution to file and loading.
nwittler Mar 30, 2022
836f17b
Logging for different param types (tf, np, list)
nwittler Mar 30, 2022
2d66f1b
Removed response function from chains.
nwittler Mar 31, 2022
098d46e
Prefactor in units (like 2pi)
nwittler Mar 31, 2022
e3f620b
Loading optimized point into param map
nwittler Mar 31, 2022
202fdf2
Updated examples.
nwittler Mar 31, 2022
2815cb4
Comparing technical frequencies.
nwittler Mar 31, 2022
31 changes: 22 additions & 9 deletions c3/c3objs.py
@@ -1,6 +1,7 @@
"""Basic custom objects."""

import hjson
from typing import List
import numpy as np
import tensorflow as tf
from c3.utils.utils import num3str
@@ -121,6 +122,13 @@ def asdict(self) -> dict:
"symbol": self.symbol,
}

def tolist(self) -> List:
if self.length > 1:
tolist = self.get_value().numpy().tolist()
else:
tolist = [self.get_value().numpy().tolist()]
return tolist

def __add__(self, other):
out_val = copy.deepcopy(self)
out_val._set_value_extend(self.get_value() + other)
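Note: the new tolist always returns a plain Python list, wrapping scalar quantities so callers can iterate uniformly. A standalone sketch of the same branching (tolist_sketch and the sample arrays are illustrative, not library code):

import numpy as np

def tolist_sketch(value: np.ndarray) -> list:
    # vectors flatten to a plain list; scalars are wrapped so the result is always a list
    return value.tolist() if value.size > 1 else [value.tolist()]

tolist_sketch(np.array([1.0, 2.0]))  # -> [1.0, 2.0]
tolist_sketch(np.array(3.0))         # -> [3.0]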
@@ -236,7 +244,7 @@ def numpy(self) -> np.ndarray:
# TODO should be removed to be consistent with get_value
return self.get_value().numpy() / self.pref

def get_value(self, val: tf.float64 = None, dtype: tf.dtypes = None) -> tf.Tensor:
def get_value(self) -> tf.Tensor:
"""
Return the value of this quantity as tensorflow.

@@ -245,13 +253,18 @@ def get_value(self, val: tf.float64 = None, dtype: tf.dtypes = None) -> tf.Tensor:
val : tf.float64
dtype: tf.dtypes
"""
if val is None:
val = self.value
if dtype is None:
dtype = self.value.dtype
value = self.scale * (val + 1) / 2 + self.offset
return tf.cast(value, dtype)
return self.scale * (self.value + 1) / 2 + self.offset

def get_other_value(self, val) -> tf.Tensor:
"""
Return an arbitrary value of the same scale as this quantity as tensorflow.

Parameters
----------
val : tf.float64
dtype: tf.dtypes
"""
return (self.scale * (val + 1) / 2 + self.offset) / self.pref

def set_value(self, val, extend_bounds=False):
if extend_bounds:
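For context, both getters apply the same affine map from the internally stored value (kept on a [-1, 1] scale) to physical units; get_other_value additionally divides by the unit prefactor pref. A minimal worked example, with hypothetical numbers for scale, offset and pref that are not taken from this diff:

import numpy as np

scale, offset, pref = 1e9, 4e9, 2 * np.pi  # hypothetical range, lower bound, unit prefactor
internal = 0.0                             # midpoint of the internal [-1, 1] range

physical = scale * (internal + 1) / 2 + offset            # 4.5e9, as get_value() would return
in_units = (scale * (internal + 1) / 2 + offset) / pref   # same point rescaled, as in get_other_value()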
@@ -289,9 +302,9 @@ def _set_value_extend(self, val) -> None:
self.set_limits(min_val, max_val)
self._set_value(val)

def get_opt_value(self) -> np.ndarray:
def get_opt_value(self) -> tf.Tensor:
"""Get an optimizer friendly representation of the value."""
return self.value.numpy().flatten()
return tf.reshape(self.value, (-1,))

def set_opt_value(self, val: float) -> None:
"""Set value optimizer friendly.
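The switch from self.value.numpy().flatten() to tf.reshape(self.value, (-1,)) presumably keeps the optimizer-facing value inside the TensorFlow graph: .numpy() requires eager execution and breaks @tf.function tracing, while tf.reshape works in both modes and preserves gradients. A minimal sketch of the difference (assumed motivation, not stated in the diff):

import tensorflow as tf

value = tf.Variable([[0.1, -0.3], [0.5, 0.0]], dtype=tf.float64)

@tf.function
def flat_opt_value():
    # tf.reshape stays a symbolic tensor, so this traces and differentiates fine;
    # value.numpy().flatten() here would raise, since .numpy() needs eager execution
    return tf.reshape(value, (-1,))

flat_opt_value()  # tf.Tensor of shape (4,)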
37 changes: 33 additions & 4 deletions c3/experiment.py
@@ -27,6 +27,8 @@
tf_state_to_dm,
tf_super,
tf_vec_to_dm,
_tf_matmul_n_even,
_tf_matmul_n_odd,
)

from c3.libraries.propagation import unitary_provider, state_provider
@@ -54,7 +56,7 @@ class Experiment:

"""

def __init__(self, pmap: ParameterMap = None, prop_method=None):
def __init__(self, pmap: ParameterMap = None, prop_method=None, sim_res=100e9):
self.pmap = pmap
self.opt_gates = None
self.propagators: Dict[str, tf.Tensor] = {}
@@ -67,6 +69,7 @@ def __init__(self, pmap: ParameterMap = None, prop_method=None):
self.compute_propagators_timestamp = 0
self.stop_partial_propagator_gradient = True
self.evaluate = self.evaluate_legacy
self.sim_res = sim_res
self.set_prop_method(prop_method)

def set_prop_method(self, prop_method=None) -> None:
@@ -76,6 +79,8 @@
"""
if prop_method is None:
self.propagation = unitary_provider["pwc"]
if self.pmap is not None:
self._compute_folding_stack()
elif isinstance(prop_method, str):
try:
self.propagation = unitary_provider[prop_method]
@@ -84,6 +89,22 @@
elif callable(prop_method):
self.propagation = prop_method

def _compute_folding_stack(self):
self.folding_stack = {}
for instr in self.pmap.instructions.values():
n_steps = int((instr.t_end - instr.t_start) * self.sim_res)
if n_steps not in self.folding_stack:
stack = []
while n_steps > 1:
if not n_steps % 2: # is divisible by 2
stack.append(_tf_matmul_n_even)
else:
stack.append(_tf_matmul_n_odd)
n_steps = np.ceil(n_steps / 2)
self.folding_stack[
int((instr.t_end - instr.t_start) * self.sim_res)
] = stack

def enable_qasm(self) -> None:
"""
Switch the sequencing format to QASM. Will become the default.
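The folding stack is the core of the speed-up: instead of multiplying the per-time-slice propagators one by one, the precomputed stack pairs them up and halves the count at each step, so the full product takes roughly log2(n_steps) batched matmuls inside the compiled graph, and compute_propagators below looks the right stack up by step count. The actual _tf_matmul_n_even / _tf_matmul_n_odd helpers are imported from the library and their signatures are not shown in this diff; the following is only a sketch of the pairwise-folding idea, with illustrative helper names and an assumed [n_steps, dim, dim] tensor layout:

import tensorflow as tf

def fold_even(mats):
    # [U1, U2, U3, U4, ...] -> [U2 @ U1, U4 @ U3, ...]; batch size halves, time order kept
    return tf.matmul(mats[1::2], mats[0::2])

def fold_odd(mats):
    # fold the first n-1 matrices pairwise, carry the last one over unchanged
    folded = tf.matmul(mats[1:-1:2], mats[0:-1:2])
    return tf.concat([folded, mats[-1:]], axis=0)

def multiply_all(mats, folding_stack):
    # apply the precomputed folds; one matrix, the ordered product of all slices, remains
    for fold in folding_stack:
        mats = fold(mats)
    return mats[0]

# e.g. for 5 slices the loop above would build the stack [odd, odd, even]
us = tf.eye(2, batch_shape=[5], dtype=tf.complex128)
multiply_all(us, [fold_odd, fold_odd, fold_even])  # identity, as expected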
@@ -177,7 +198,9 @@ def quick_setup(self, cfg) -> None:
)
instructions.append(instr)

self.sim_res = 100e9
self.pmap = ParameterMap(instructions, generator=gen, model=model)
self.set_prop_method()

def read_config(self, filepath: str) -> None:
"""
@@ -207,6 +230,8 @@ def from_dict(self, cfg: Dict) -> None:
for k, v in cfg["options"].items():
self.__dict__[k] = v
self.pmap = pmap
self.sim_res = cfg.pop("sim_res", 100e9)
self.set_prop_method()

def write_config(self, filepath: str) -> None:
"""
@@ -231,6 +256,7 @@ def asdict(self) -> Dict:
"overwrite_propagators": self.overwrite_propagators,
"stop_partial_propagator_gradient": self.stop_partial_propagator_gradient,
}
exp_dict["sim_res"] = self.sim_res
return exp_dict

def __str__(self) -> str:
@@ -438,7 +464,7 @@ def compute_states(self) -> Dict[Instruction, List[tf.Tensor]]:
f" Available gates are:\n {list(instructions.keys())}."
)
signal = generator.generate_signals(instr)
result = self.propagation(model, signal)
result = self.propagation(model, signal, self.folding_stack)
states[instr] = result["states"]
self.states = states
return result
@@ -472,7 +498,10 @@ def compute_propagators(self):
)

model.controllability = self.use_control_fields
result = self.propagation(model, generator, instr)
steps = int((instr.t_end - instr.t_start) * self.sim_res)
result = self.propagation(
model, generator, instr, self.folding_stack[steps]
)
U = result["U"]
dUs = result["dUs"]
self.ts = result["ts"]
@@ -482,7 +511,7 @@
framechanges = {}
for line, ctrls in instr.comps.items():
# TODO calculate properly the average frequency that each qubit sees
offset = 0.0
offset = tf.constant(0.0, tf.float64)
for ctrl in ctrls.values():
if "freq_offset" in ctrl.params.keys():
if ctrl.params["amp"] != 0.0: