fixes for new presets
jcmgray committed Sep 7, 2023
1 parent b84c23a commit e4bef97
Showing 4 changed files with 45 additions and 12 deletions.
4 changes: 2 additions & 2 deletions cotengra/core.py
@@ -2531,9 +2531,9 @@ def get_contractor(
         What library to use to actually perform the contractions. Options
         are:
-        - "auto": let cotengra choose
+        - None: let cotengra choose.
         - "autoray": dispatch with autoray, using the ``tensordot`` and
-          ``einsum`` implementation of the backend
+          ``einsum`` implementation of the backend.
         - "cotengra": use the ``tensordot`` and ``einsum`` implementation
           of cotengra, which is based on batch matrix multiplication. This
           is faster for some backends like numpy, and also enables
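The ``implementation`` options above also accept a pair of callables. As a rough sketch of that form (not part of the commit; it assumes a built ``ContractionTree`` called ``tree`` whose ``get_contractor`` method takes ``implementation`` as documented above):

    import numpy as np

    def traced_tensordot(a, b, axes):
        # wrap numpy's tensordot, e.g. to instrument each pairwise contraction
        print(f"tensordot {a.shape} x {b.shape} axes={axes}")
        return np.tensordot(a, b, axes)

    # manually supply the (tensordot, einsum) pair, per the last option above
    contract = tree.get_contractor(implementation=(traced_tensordot, np.einsum))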
45 changes: 38 additions & 7 deletions cotengra/interface.py
@@ -17,8 +17,7 @@


 def register_preset(preset, optimizer, register_opt_einsum="auto"):
-    """Register a preset optimizer.
-    """
+    """Register a preset optimizer."""
     _PRESETS[preset] = optimizer

     if register_opt_einsum == "auto":
@@ -31,13 +30,11 @@ def register_preset(preset, optimizer, register_opt_einsum="auto"):
         pass


-
 @functools.lru_cache(None)
 def preset_to_optimizer(preset):
     try:
         return _PRESETS[preset]
     except KeyError:
-
         if not opt_einsum_installed:
             raise KeyError(
                 f"preset {preset!r} not found and can't "
@@ -233,17 +230,51 @@ def contract_expression(
     ----------
     eq : str
         The equation to use for contraction, for example ``'ab,bc->ac'``.
-    shapes : tuple of int
-        The shapes of the tensors to contract.
+        The output will be automatically computed if not supplied, but Ellipses
+        (`'...'`) are not supported.
+    shapes : sequence[tuple[int]]
+        The shapes of the tensors to contract, or the constant tensor itself
+        if marked as constant in ``constants``.
     optimize : str, path_like, PathOptimizer, or ContractionTree
         The optimization strategy to use. If a ``HyperOptimizer`` or
         ``ContractionTree`` instance is passed then the expression will make use
         of any sliced indices.
     constants : sequence of int, optional
         The indices of tensors to treat as constant, the final expression will
         take the remaining non-constant tensors as inputs.
+    implementation : str or tuple[callable, callable], optional
+        What library to use to actually perform the contractions. Options
+        are:
+        - None: let cotengra choose.
+        - "autoray": dispatch with autoray, using the ``tensordot`` and
+          ``einsum`` implementation of the backend.
+        - "cotengra": use the ``tensordot`` and ``einsum`` implementation
+          of cotengra, which is based on batch matrix multiplication. This
+          is faster for some backends like numpy, and also enables
+          libraries which don't yet provide ``tensordot`` and ``einsum`` to
+          be used.
+        - "cuquantum": use the cuquantum library to perform the whole
+          contraction (not just individual contractions).
+        - tuple[callable, callable]: manually supply the ``tensordot`` and
+          ``einsum`` implementations to use.
     autojit : bool, optional
-        Whether to use ``autoray.autojit`` to compile the expression.
+        If ``True``, use :func:`autoray.autojit` to compile the contraction
+        function.
+    via : tuple[callable, callable], optional
+        If given, the first function will be applied to the input arrays and
+        the second to the output array. For example, moving the tensors from
+        CPU to GPU and back.
+    sort_contraction_indices : bool, optional
+        If ``True``, call ``tree.sort_contraction_indices()`` before
+        constructing the contraction function.

     Returns
     -------
     expr : callable
         A callable, signature ``expr(*arrays)`` that will contract ``arrays``
         with shapes ``shapes``.
     """
     if constants is not None:
         fn = _contract_expression_with_constants(
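Putting the updated docstring together, a small illustrative call (values invented; shown with the string form of ``implementation`` from the options above):

    import numpy as np
    import cotengra as ctg

    expr = ctg.contract_expression(
        "ab,bc->ac",            # eq
        (2, 3), (3, 4),         # shapes
        optimize="greedy",
        implementation="cotengra",  # or None, "autoray", or a (tensordot, einsum) pair
    )

    x = np.random.rand(2, 3)
    y = np.random.rand(3, 4)
    out = expr(x, y)            # signature is expr(*arrays)
    assert out.shape == (2, 4)

The ``via`` pair documented above slots into the same call, for example ``via=(cupy.asarray, cupy.asnumpy)`` to move tensors to GPU and back (assuming cupy is installed).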
6 changes: 4 additions & 2 deletions cotengra/pathfinders/path_basic.py
@@ -494,10 +494,12 @@ def local_score(sa, sb, sab):

     def local_score(sa, sb, sab):
         score = sab - costmod * (sa + sb)
-        if score < 0:
+        if score > 0:
+            return np.log(score) - temperature * gmblgen()
+        elif score < 0:
             return -np.log(-score) - temperature * gmblgen()
         else:
-            return np.log(score) - temperature * gmblgen()
+            return - temperature * gmblgen()

     # return sab - costmod * (sa + sb) - temperature * gmblgen()
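The rewrite above fixes the degenerate case: with the old branching, ``score == 0`` reached ``np.log(score)`` and yielded ``-inf``. The new version splits positive, negative, and zero scores, keeping the sign-symmetric log transform finite. A self-contained sketch of the corrected logic (``costmod``, ``temperature``, and the Gumbel generator are closed over in the real code; here they are made explicit):

    import numpy as np

    rng = np.random.default_rng(42)
    costmod, temperature = 1.0, 0.1
    gmblgen = rng.gumbel  # draws Gumbel-distributed noise, as in the diff

    def local_score(sa, sb, sab):
        score = sab - costmod * (sa + sb)
        if score > 0:
            return np.log(score) - temperature * gmblgen()
        elif score < 0:
            return -np.log(-score) - temperature * gmblgen()
        else:
            # score == 0 previously hit np.log(0) == -inf
            return -temperature * gmblgen()

    print(local_score(4.0, 4.0, 8.0))  # the score == 0 case is now finite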
2 changes: 1 addition & 1 deletion cotengra/presets.py
@@ -116,7 +116,7 @@ def __call__(self, inputs, output, size_dict, **kwargs):
     optimal_cutoff=650,
     methods=("greedy", "kahypar"),
     reconf_opts={"subtree_size": 8, "maxiter": 500},
-    parallel=True,
+    parallel=False,
 )
 greedy_optimize = GreedyOptimizer()
 optimal_optimize = OptimalOptimizer()
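Since the preset now defaults to ``parallel=False``, parallel tree search can still be enabled by constructing an optimizer explicitly. A sketch, under the assumption that these kwargs are accepted by ``HyperOptimizer`` directly:

    import cotengra as ctg

    opt = ctg.HyperOptimizer(
        methods=("greedy", "kahypar"),
        reconf_opts={"subtree_size": 8, "maxiter": 500},
        parallel=True,  # opt back in explicitly
    )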
