Bugfix numpy125 deprecations #335

Merged 2 commits on Aug 10, 2023
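Context for the change: NumPy 1.25 deprecates the long-standing np.product alias in favor of the canonical np.prod (the two are aliases with identical behavior), so every call site in this PR is a one-line substitution. A minimal sketch of the pattern, using a hypothetical element shape:

    import numpy as np

    elshape = (3, 4)                  # hypothetical element shape
    size = int(np.prod(elshape))      # replaces the deprecated int(np.product(elshape))
    assert size == 12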
10 changes: 5 additions & 5 deletions pygsti/baseobjs/basis.py
@@ -333,7 +333,7 @@ def elsize(self):
int
"""
if self.elshape is None: return 0
- return int(_np.product(self.elshape))
+ return int(_np.prod(self.elshape))

@property
def first_element_is_identity(self):
@@ -957,7 +957,7 @@ def __init__(self, elements, labels=None, name=None, longname=None, real=False,
if elshape is None: elshape = el.shape
else: assert(elshape == el.shape), "Inconsistent element shapes!"
self.elements.append(el)
- dim = int(_np.product(elshape))
+ dim = int(_np.prod(elshape))
self.ellookup = {lbl: el for lbl, el in zip(self.labels, self.elements)} # fast by-label element lookup

if vector_elements is not None:
@@ -1459,7 +1459,7 @@ def to_elementstd_transform_matrix(self):
number of vectors).
"""
assert(not self.sparse), "to_elementstd_transform_matrix not implemented for sparse mode"
- expanddim = self.elsize # == _np.product(self.elshape)
+ expanddim = self.elsize # == _np.prod(self.elshape)
if self.sparse:
toSimpleStd = _sps.lil_matrix((expanddim, self.size), dtype='complex')
else:
@@ -1617,7 +1617,7 @@ def dim(self):
spans. Equivalently, the length of the `vector_elements` of the
basis.
"""
- dim = int(_np.product([c.dim for c in self.component_bases]))
+ dim = int(_np.prod([c.dim for c in self.component_bases]))

#NOTE: this is actually to restrictive -- what we need is a test/flag for whether the elements of a
# basis are in their "natrual" representation where it makes sense to take tensor products. For
@@ -1635,7 +1635,7 @@ def size(self):
"""
The number of elements (or vector-elements) in the basis.
"""
- return int(_np.product([c.size for c in self.component_bases]))
+ return int(_np.prod([c.size for c in self.component_bases]))

@property
def elshape(self):
2 changes: 1 addition & 1 deletion pygsti/baseobjs/errorgenbasis.py
@@ -194,7 +194,7 @@ def _count_uptriangle_labels_for_support(cls, support, left_support, type_str, t
right_offsets = [(i + 1 if ii < ifirst_trivial else 0) for ii, i in enumerate(left_inds)]
if n1 == n: right_offsets[-1] += 1 # advance past diagonal element
start_at = _np.dot(right_offsets, placevals)
- cnt += _np.product(right_lengths) - start_at
+ cnt += _np.prod(right_lengths) - start_at

return cnt

2 changes: 1 addition & 1 deletion pygsti/baseobjs/opcalc/slowopcalc.py
@@ -259,4 +259,4 @@ def compact_deriv(vtape, ctape, wrt_params):


def float_product(ar):
- return _np.product(ar)
+ return _np.prod(ar)
4 changes: 2 additions & 2 deletions pygsti/baseobjs/polynomial.py
@@ -278,7 +278,7 @@ def evaluate(self, variable_values):
#FUTURE: make this function smarter (Russian peasant)
ret = 0
for ivar, coeff in self.coeffs.items():
- ret += coeff * _np.product([variable_values[i] for i in ivar])
+ ret += coeff * _np.prod([variable_values[i] for i in ivar])
return ret

def compact(self, complex_coeff_tape=True):
@@ -768,7 +768,7 @@ def to_rep(self): # , max_num_vars=None not needed anymore -- given at __init__
# #FUTURE: make this function smarter (Russian peasant)
# ret = 0
# for ivar, coeff in self.items():
- # ret += coeff * _np.product([variable_values[i] for i in ivar])
+ # ret += coeff * _np.prod([variable_values[i] for i in ivar])
# assert(_np.isclose(ret, self.fastpoly.evaluate(variable_values)))
# self._check_fast_polynomial()
# return ret
4 changes: 2 additions & 2 deletions pygsti/baseobjs/resourceallocation.py
@@ -321,7 +321,7 @@ def gather_base(self, result, local, slice_of_global, unit_ralloc=None, all_gath
#OLD: gathered_data = gather_comm.allgather(local) # could change this to Allgatherv (?)
slices = gather_comm.allgather(slice_of_global if participating else None)
shapes = gather_comm.allgather(local.shape if participating else (0,))
- sizes = [_np.product(shape) for shape in shapes]
+ sizes = [_np.prod(shape) for shape in shapes]
gathered_data = _np.empty(sum(sizes), dtype=local.dtype)
gather_comm.Allgatherv(local.flatten() if participating
else _np.empty(0, dtype=local.dtype), (gathered_data, sizes))
@@ -331,7 +331,7 @@ def gather_base(self, result, local, slice_of_global, unit_ralloc=None, all_gath
slices = gather_comm.gather(slice_of_global if participating else None, root=0)

if gather_comm.rank == 0:
- sizes = [_np.product(shape) for shape in shapes]
+ sizes = [_np.prod(shape) for shape in shapes]
gathered_data = _np.empty(sum(sizes), dtype=local.dtype)
recvbuf = (gathered_data, sizes)
else:
10 changes: 5 additions & 5 deletions pygsti/baseobjs/statespace.py
@@ -610,7 +610,7 @@ def udim(self):
"""
Integer Hilbert (unitary operator) space dimension of this quantum state space.
"""
- return _np.product(self.qudit_udims)
+ return _np.prod(self.qudit_udims)

@property
def dim(self):
@@ -1060,17 +1060,17 @@ def is_label(x):
self.tpb_dims = []
self.tpb_udims = []
for iTPB, tpbLabels in enumerate(self.labels):
- float_prod = _np.product(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd'))
+ float_prod = _np.prod(_np.array([self.label_dims[lbl] for lbl in tpbLabels], 'd'))
if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer
self.tpb_dims.append(_np.inf)
else:
- self.tpb_dims.append(int(_np.product([self.label_dims[lbl] for lbl in tpbLabels])))
+ self.tpb_dims.append(int(_np.prod([self.label_dims[lbl] for lbl in tpbLabels])))

- float_prod = _np.product(_np.array([self.label_udims[lbl] for lbl in tpbLabels], 'd'))
+ float_prod = _np.prod(_np.array([self.label_udims[lbl] for lbl in tpbLabels], 'd'))
if float_prod >= float(_sys.maxsize): # too many qubits to hold dimension in an integer
self.tpb_udims.append(_np.inf)
else:
- self.tpb_udims.append(int(_np.product([self.label_udims[lbl] for lbl in tpbLabels])))
+ self.tpb_udims.append(int(_np.prod([self.label_udims[lbl] for lbl in tpbLabels])))

self.tpb_index.update({lbl: iTPB for lbl in tpbLabels})

2 changes: 1 addition & 1 deletion pygsti/evotypes/densitymx_slow/effectreps.py
@@ -96,7 +96,7 @@ def __init__(self, povm_factors, effect_labels, state_space):
self.factor_dims = factordims
self.max_factor_dim = max_factor_dim # Unused
state_space = _StateSpace.cast(state_space)
- assert(_np.product(factordims) == state_space.dim)
+ assert(_np.prod(factordims) == state_space.dim)
super(EffectRepTensorProduct, self).__init__(state_space)
self.factor_effects_have_changed()

4 changes: 2 additions & 2 deletions pygsti/evotypes/densitymx_slow/opreps.py
@@ -329,15 +329,15 @@ def __init__(self, state_space, target_labels, embedded_rep):
# final map just acts as identity w.r.t.
labelIndices = [tensorProdBlkLabels.index(label) for label in target_labels]
actionInds = _np.array(labelIndices, _np.int64)
- assert(_np.product([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \
+ assert(_np.prod([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \
"Embedded operation has dimension (%d) inconsistent with the given target labels (%s)" % (
embedded_rep.dim, str(target_labels))

nBlocks = state_space.num_tensor_prod_blocks
iActiveBlock = iTensorProdBlk
nComponents = len(state_space.tensor_product_block_labels(iActiveBlock))
#embeddedDim = embedded_rep.dim
- blocksizes = _np.array([_np.product(state_space.tensor_product_block_dimensions(k))
+ blocksizes = _np.array([_np.prod(state_space.tensor_product_block_dimensions(k))
for k in range(nBlocks)], _np.int64)

self.embedded_rep = embedded_rep
2 changes: 1 addition & 1 deletion pygsti/evotypes/densitymx_slow/statereps.py
@@ -143,7 +143,7 @@ def __reduce__(self):
class StateRepTensorProduct(StateRep):
def __init__(self, factor_state_reps, state_space):
self.factor_reps = factor_state_reps
- dim = _np.product([fct.dim for fct in self.factor_reps])
+ dim = _np.prod([fct.dim for fct in self.factor_reps])
super(StateRepTensorProduct, self).__init__(_np.zeros(dim, 'd'), state_space)
self.reps_have_changed()

2 changes: 1 addition & 1 deletion pygsti/evotypes/stabilizer_slow/statereps.py
@@ -104,7 +104,7 @@ class StateRepTensorProduct(StateRep):
def __init__(self, factor_state_reps, state_space):
self.factor_reps = factor_state_reps
n = sum([sf.nqubits for sf in self.factor_reps]) # total number of qubits
- np = int(_np.product([len(sf.pvectors) for sf in self.factor_reps]))
+ np = int(_np.prod([len(sf.pvectors) for sf in self.factor_reps]))

super(StateRepTensorProduct, self).__init__(_np.zeros((2 * n, 2 * n), _np.int64),
_np.zeros((np, 2 * n), _np.int64),
4 changes: 2 additions & 2 deletions pygsti/evotypes/statevec_slow/effectreps.py
@@ -98,15 +98,15 @@ def __init__(self, povm_factors, effect_labels, state_space):
factordims = _np.ascontiguousarray(
_np.array([fct.state_space.udim for fct in povm_factors], _np.int64))

- #dim = _np.product(factordims)
+ #dim = _np.prod(factordims)
self.povm_factors = povm_factors
self.effect_labels = effect_labels
self.kron_array = kron_array
self.factor_dims = factordims
self.nfactors = len(self.povm_factors)
self.max_factor_dim = max_factor_dim # Unused
state_space = _StateSpace.cast(state_space)
- assert(_np.product(factordims) == state_space.udim)
+ assert(_np.prod(factordims) == state_space.udim)
super(EffectRepTensorProduct, self).__init__(state_space)
self.factor_effects_have_changed()

4 changes: 2 additions & 2 deletions pygsti/evotypes/statevec_slow/opreps.py
@@ -203,7 +203,7 @@ def __init__(self, state_space, target_labels, embedded_rep):
# final map just acts as identity w.r.t.
labelIndices = [tensorProdBlkLabels.index(label) for label in target_labels]
actionInds = _np.array(labelIndices, _np.int64)
- assert(_np.product([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \
+ assert(_np.prod([numBasisEls[i] for i in actionInds]) == embedded_rep.dim), \
"Embedded operation has dimension (%d) inconsistent with the given target labels (%s)" % (
embedded_rep.dim, str(target_labels))

@@ -212,7 +212,7 @@ def __init__(self, state_space, target_labels, embedded_rep):
iActiveBlock = iTensorProdBlk
nComponents = len(state_space.tensor_product_block_labels(iActiveBlock))
embeddedDim = embedded_rep.dim # a *unitary* dim - see .dim property above
- blocksizes = _np.array([_np.product(state_space.tensor_product_block_udimensions(k))
+ blocksizes = _np.array([_np.prod(state_space.tensor_product_block_udimensions(k))
for k in range(nBlocks)], _np.int64)

self.target_labels = target_labels
2 changes: 1 addition & 1 deletion pygsti/evotypes/statevec_slow/statereps.py
@@ -123,7 +123,7 @@ def actionable_staterep(self):
class StateRepTensorProduct(StateRep):
def __init__(self, factor_state_reps, state_space):
self.factor_reps = factor_state_reps
- dim = _np.product([fct.dim for fct in self.factor_reps])
+ dim = _np.prod([fct.dim for fct in self.factor_reps])
# FUTURE TODO: below compute a tensorprod basis instead of punting and passing `None`
super(StateRepTensorProduct, self).__init__(_np.zeros(dim, complex), state_space, None)
self.reps_have_changed()
2 changes: 1 addition & 1 deletion pygsti/extras/idletomography/pauliobjs.py
@@ -405,7 +405,7 @@ def ri_sign(pauli1, pauli2, parity):
sign = (-1)**((num_i + 1) / 2) * _np.prod([ri_sign(pauli1, pauli2, p)
for pauli1, pauli2, p in zip(s1, s2, parities)])
if isinstance(other, NQPauliOp): other_sign = other.sign
- elif isinstance(other, NQPauliState): other_sign = _np.product(other.signs)
+ elif isinstance(other, NQPauliState): other_sign = _np.prod(other.signs)
else: raise ValueError("Can't take commutator with %s type" % str(type(other)))

return NQPauliOp(op, sign * self.sign * other_sign)
2 changes: 1 addition & 1 deletion pygsti/extras/interpygate/core.py
@@ -565,7 +565,7 @@ def compute_data(self, comm=None, mpi_workers_per_process=1, verbosity=0):

if rank in root_ranks:
#Only root ranks store data (fn_to_interpolate only needs to return results on root proc)
- flat_data = _np.empty(len(my_points) * int(_np.product(expected_fn_output_shape)), dtype='d')
+ flat_data = _np.empty(len(my_points) * int(_np.prod(expected_fn_output_shape)), dtype='d')
data = flat_data.view(); data.shape = (len(my_points),) + expected_fn_output_shape
if (comm is not None):
printer.log("Group %d processing %d points on %d processors." % (color, len(my_points),
2 changes: 1 addition & 1 deletion pygsti/forwardsims/distforwardsim.py
@@ -456,7 +456,7 @@ def _compute_processor_distribution(self, array_types, nprocs, num_params, num_c
else self._pblk_sizes[0:len(param_dimensions)] # automatically set these?

if self._processor_grid is not None:
- assert(_np.product(self._processor_grid) <= nprocs), "`processor_grid` must multiply to # of procs!"
+ assert(_np.prod(self._processor_grid) <= nprocs), "`processor_grid` must multiply to # of procs!"
na = self._processor_grid[0]
natoms = max(na, self._num_atoms) if (self._num_atoms is not None) else na
npp = ()
4 changes: 2 additions & 2 deletions pygsti/forwardsims/mapforwardsim.py
@@ -250,9 +250,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
array_types, nprocs, num_params, len(circuits), default_natoms=2 * self.model.dim) # heuristic?

printer.log("MapLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." %
- (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp)))
+ (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp)))
printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes)))
- assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"
+ assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"

layout = _MapCOPALayout(circuits, self.model, dataset, self._max_cache_size, natoms, na, npp,
param_dimensions, param_blk_sizes, resource_alloc, verbosity)
4 changes: 2 additions & 2 deletions pygsti/forwardsims/matrixforwardsim.py
@@ -1093,9 +1093,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
na, npp = 1, (1, 1) # save all processor division for within the (single) atom, for different timestamps

printer.log("MatrixLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." %
- (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp)))
+ (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp)))
printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes)))
- assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"
+ assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"

layout = _MatrixCOPALayout(circuits, self.model, dataset, natoms,
na, npp, param_dimensions, param_blk_sizes, resource_alloc, verbosity)
6 changes: 3 additions & 3 deletions pygsti/forwardsims/termforwardsim.py
@@ -326,9 +326,9 @@ def create_layout(self, circuits, dataset=None, resource_alloc=None, array_types
array_types, nprocs, num_params, len(circuits), default_natoms=nprocs)

printer.log("TermLayout: %d processors divided into %s (= %d) grid along circuit and parameter directions." %
- (nprocs, ' x '.join(map(str, (na,) + npp)), _np.product((na,) + npp)))
+ (nprocs, ' x '.join(map(str, (na,) + npp)), _np.prod((na,) + npp)))
printer.log(" %d atoms, parameter block size limits %s" % (natoms, str(param_blk_sizes)))
- assert(_np.product((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"
+ assert(_np.prod((na,) + npp) <= nprocs), "Processor grid size exceeds available processors!"

layout = _TermCOPALayout(circuits, self.model, dataset, natoms, na, npp, param_dimensions,
param_blk_sizes, resource_alloc, printer)
@@ -950,7 +950,7 @@ def _achieved_and_max_sopm_jacobian_atom(self, layout_atom):
Eops = [self.model.circuit_layer_operator(elbl, 'povm') for elbl in elabels]
partial_op_maxmag_values = [op.total_term_magnitude() for op in partial_ops]
Eop_maxmag_values = [Eop.total_term_magnitude() for Eop in Eops]
- maxmag_partial_product = _np.product(partial_op_maxmag_values)
+ maxmag_partial_product = _np.prod(partial_op_maxmag_values)
maxmag_products = [maxmag_partial_product * Eop_val for Eop_val in Eop_maxmag_values]

deriv = _np.zeros((len(elabels), Np), 'd')
10 changes: 5 additions & 5 deletions pygsti/forwardsims/termforwardsim_calc_generic.py
@@ -72,7 +72,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p
# use get_direct_order_terms(order, order_base) w/order_base=0.1(?) instead of taylor_order_terms??
# below: replace prps with: prs = _np.zeros(len(elabels),complex) # an array in "bulk" mode
# use *= or * instead of .mult( and .scale(
- # e.g. res = _np.product([f.coeff for f in factors])
+ # e.g. res = _np.prod([f.coeff for f in factors])
# res *= (pLeft * pRight)
# - add assert(_np.linalg.norm(_np.imag(prs)) < 1e-6) at end and return _np.real(prs)

@@ -227,7 +227,7 @@ def prs_as_polynomials(fwdsim, rholabel, elabels, circuit, polynomial_vindices_p

# #DEBUG!!!
# db_nfactors = [len(l) for l in factor_lists]
- # db_totfactors = _np.product(db_nfactors)
+ # db_totfactors = _np.prod(db_nfactors)
# db_factor_cnt += db_totfactors
# DEBUG_FCOUNT += db_totfactors
# db_part_cnt += 1
@@ -347,7 +347,7 @@ def circuit_achieved_and_max_sopm(fwdsim, rholabel, elabels, circuit, repcache,

ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \
[fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit]
- max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops])
+ max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude
for elbl in elabels], 'd')
@@ -459,7 +459,7 @@ def find_best_pathmagnitude_threshold(fwdsim, rholabel, elabels, circuit, polyno

ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \
[fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit]
- max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops])
+ max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude
for elbl in elabels], 'd')
@@ -838,7 +838,7 @@ def _prs_as_pruned_polys(fwdsim, rholabel, elabels, circuit, repcache, comm=None

ops = [fwdsim.model._circuit_layer_operator(rholabel, 'prep')] + \
[fwdsim.model._circuit_layer_operator(glbl, 'op') for glbl in circuit]
- max_sum_of_pathmags = _np.product([op.total_term_magnitude for op in ops])
+ max_sum_of_pathmags = _np.prod([op.total_term_magnitude for op in ops])
max_sum_of_pathmags = _np.array(
[max_sum_of_pathmags * fwdsim.model._circuit_layer_operator(elbl, 'povm').total_term_magnitude
for elbl in elabels], 'd')
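A note on catching this class of regression going forward (not part of this PR, just a sketch): escalating DeprecationWarning to an error in a test run makes any remaining np.product call fail loudly under NumPy 1.25+.

    import warnings
    import numpy as np

    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        total = np.prod([2, 3, 4])    # np.product([2, 3, 4]) would raise here on NumPy 1.25+
    print(total)                      # 24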