This repository was archived by the owner on Nov 7, 2024. It is now read-only.
Add opt_einsum contractors #173

Merged: chaserileyroberts merged 9 commits into google:master from stavros11:opt_einsum_path_contractors on Aug 7, 2019.
Changes from all commits (9 commits):

- 43f69a6 Add path contractors (stavros11)
- a59850a Remove disconnected network test (stavros11)
- 27e33e7 Fix linting errors (stavros11)
- faa2a1b Add random_greedy and tests (stavros11)
- 5d81c18 Remove random_greedy because of opt_einsum error (stavros11)
- 429a627 Check connected and fix getattr (stavros11)
- fb659c3 Add disconnected network test (stavros11)
- 5e1dff4 Add typing to optimizer (stavros11)
- 4bd4beb Change optimizer type to Any (stavros11)
tensornetwork/contractors/__init__.py (modified, 5 additions, 1 deletion):

@@ -1,4 +1,8 @@
 from tensornetwork.contractors.bucket_contractor import bucket
 from tensornetwork.contractors.naive_contractor import naive
 from tensornetwork.contractors.stochastic_contractor import stochastic
-from tensornetwork.contractors.opt_einsum_paths.optimal_path import optimal
+from tensornetwork.contractors.opt_einsum_paths.path_contractors import optimal
+from tensornetwork.contractors.opt_einsum_paths.path_contractors import branch
+from tensornetwork.contractors.opt_einsum_paths.path_contractors import greedy
+from tensornetwork.contractors.opt_einsum_paths.path_contractors import auto
+from tensornetwork.contractors.opt_einsum_paths.path_contractors import custom
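As quick orientation (not part of the diff), here is a minimal usage sketch of the re-exported contractors. It assumes the same TensorNetwork API exercised by the tests added in this PR (`add_node`, `^` to connect edges, `get_final_node`) and that a numpy backend is available; the shapes are made up.

```python
# Minimal usage sketch; backend choice and shapes are illustrative assumptions.
import numpy as np
import tensornetwork
from tensornetwork.contractors import greedy  # optimal, branch, auto, custom are also re-exported

net = tensornetwork.TensorNetwork(backend="numpy")
a = net.add_node(np.ones((2, 3)))
b = net.add_node(np.ones((3, 4)))
a[1] ^ b[0]  # connect the shared bond of size 3
result = greedy(net).get_final_node()
print(result.tensor.shape)  # two dangling edges of sizes 2 and 4 remain
```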
tensornetwork/contractors/opt_einsum_paths/optimal_path.py: deleted (29 lines removed).
tensornetwork/contractors/opt_einsum_paths/optimal_path_test.py: deleted (19 lines removed).
tensornetwork/contractors/opt_einsum_paths/path_contractors.py (new file, 158 additions):
"""Contractors based on `opt_einsum`'s path algorithms."""

import functools
import opt_einsum
from typing import Any, Callable, Dict, Optional, List, Set
from tensornetwork import network
from tensornetwork.contractors.opt_einsum_paths import utils


def base(net: network.TensorNetwork,
         algorithm: Callable[[List[Set[int]], Set[int], Dict[int, int]],
                             List]) -> network.TensorNetwork:
  """Base method for all `opt_einsum` contractors.

  Args:
    net: a TensorNetwork object. Should be connected.
    algorithm: `opt_einsum` contraction method to use.

  Returns:
    The network after full contraction.
  """
  net.check_connected()
  # First contract all trace edges
  edges = net.get_all_nondangling()
  for edge in edges:
    if edge in net and edge.is_trace():
      net.contract_parallel(edge)

  # Then apply `opt_einsum`'s algorithm
  nodes = sorted(net.nodes_set)
  input_sets = utils.get_input_sets(net)
  output_set = utils.get_output_set(net)
  size_dict = utils.get_size_dict(net)
  path = algorithm(input_sets, output_set, size_dict)
  for a, b in path:
    new_node = nodes[a] @ nodes[b]
    nodes.append(new_node)
    nodes = utils.multi_remove(nodes, [a, b])
  return net


def optimal(net: network.TensorNetwork,
            memory_limit: Optional[int] = None) -> network.TensorNetwork:
  """Optimal contraction order via `opt_einsum`.

  This method will find the truly optimal contraction order via
  `opt_einsum`'s depth first search algorithm. Since this search is
  exhaustive, if your network is large (n>10), then the search may
  take longer than just contracting in a suboptimal way.

  Args:
    net: a TensorNetwork object.
    memory_limit: Maximum number of elements in an array during contractions.

  Returns:
    The network after full contraction.
  """
  alg = functools.partial(opt_einsum.paths.optimal, memory_limit=memory_limit)
  return base(net, alg)


def branch(net: network.TensorNetwork, memory_limit: Optional[int] = None,
           nbranch: Optional[int] = None) -> network.TensorNetwork:
  """Branch contraction path via `opt_einsum`.

  This method uses the DFS approach of `optimal` while sorting potential
  contractions based on a heuristic cost, in order to reduce time spent
  in exploring paths which are unlikely to be optimal.
  For more details:
  https://optimized-einsum.readthedocs.io/en/latest/branching_path.html

  Args:
    net: a TensorNetwork object.
    memory_limit: Maximum number of elements in an array during contractions.
    nbranch: Number of best contractions to explore.
      If None it explores all inner products starting with those that
      have the best cost heuristic.

  Returns:
    The network after full contraction.
  """
  alg = functools.partial(opt_einsum.paths.branch, memory_limit=memory_limit,
                          nbranch=nbranch)
  return base(net, alg)


def greedy(net: network.TensorNetwork,
           memory_limit: Optional[int] = None) -> network.TensorNetwork:
  """Greedy contraction path via `opt_einsum`.

  This provides a more efficient strategy than `optimal` for finding
  contraction paths in large networks. First contracts pairs of tensors
  by finding the pair with the lowest cost at each step. Then it performs
  the outer products.
  For more details:
  https://optimized-einsum.readthedocs.io/en/latest/greedy_path.html

  Args:
    net: a TensorNetwork object.
    memory_limit: Maximum number of elements in an array during contractions.

  Returns:
    The network after full contraction.
  """
  alg = functools.partial(opt_einsum.paths.greedy, memory_limit=memory_limit)
  return base(net, alg)


def auto(net: network.TensorNetwork,
         memory_limit: Optional[int] = None) -> network.TensorNetwork:
  """Chooses one of the above algorithms according to network size.

  Default behavior is based on `opt_einsum`'s `auto` contractor.

  Args:
    net: a TensorNetwork object.
    memory_limit: Maximum number of elements in an array during contractions.

  Returns:
    The network after full contraction.
  """
  n = len(net.nodes_set)
  if n <= 0:
    raise ValueError("Cannot contract empty tensor network.")
  if n == 1:
    edges = net.get_all_nondangling()
    net.contract_parallel(edges.pop())
    return net
  if n < 5:
    return optimal(net, memory_limit)
  if n < 7:
    return branch(net, memory_limit)
  if n < 9:
    return branch(net, memory_limit, nbranch=2)
  if n < 15:
    return branch(net, nbranch=1)
  return greedy(net, memory_limit)


def custom(net: network.TensorNetwork, optimizer: Any,
           memory_limit: Optional[int] = None) -> network.TensorNetwork:
  """
  Uses a custom path optimizer created by the user to calculate paths.

  The custom path optimizer should inherit `opt_einsum`'s `PathOptimizer`.
  For more details:
  https://optimized-einsum.readthedocs.io/en/latest/custom_paths.html

  Args:
    net: a TensorNetwork object.
    optimizer: A custom `opt_einsum.PathOptimizer` object.
    memory_limit: Maximum number of elements in an array during contractions.

  Returns:
    The network after full contraction.
  """
  alg = functools.partial(optimizer, memory_limit=memory_limit)
  return base(net, alg)
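A side note on how `base` replays a path (an illustration, not code from the PR): the path is a list of index pairs into the current node list, and after each contraction the two consumed nodes are removed while the new node is appended at the end. The toy loop below mimics that bookkeeping with strings standing in for nodes and a made-up path.

```python
# Toy replay of the bookkeeping in `base`; strings stand in for nodes and
# the path below is made up rather than produced by opt_einsum.
nodes = ["T0", "T1", "T2", "T3"]
path = [(0, 1), (1, 2), (0, 1)]

for a, b in path:
  new_node = "({}@{})".format(nodes[a], nodes[b])  # stands in for nodes[a] @ nodes[b]
  nodes.append(new_node)
  # Same effect as utils.multi_remove(nodes, [a, b]).
  nodes = [n for i, n in enumerate(nodes) if i not in (a, b)]

print(nodes)  # ['(T2@(T3@(T0@T1)))']; a single fully contracted node remains
```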
tensornetwork/contractors/opt_einsum_paths/path_contractors_test.py (new file, 80 additions):
import numpy as np
import pytest
import tensornetwork
from tensornetwork.contractors.opt_einsum_paths import path_contractors


@pytest.fixture(name="path_algorithm",
                params=["optimal", "branch", "greedy", "auto"])
def path_algorithm_fixture(request):
  return getattr(path_contractors, request.param)


def test_sanity_check(backend, path_algorithm):
  net = tensornetwork.TensorNetwork(backend=backend)
  a = net.add_node(np.eye(2))
  b = net.add_node(np.ones((2, 7, 11)))
  c = net.add_node(np.ones((7, 11, 13, 2)))
  d = net.add_node(np.eye(13))
  # pylint: disable=pointless-statement
  a[0] ^ b[0]
  b[1] ^ c[0]
  b[2] ^ c[1]
  c[2] ^ d[1]
  c[3] ^ a[1]
  final_node = path_algorithm(net).get_final_node()
  assert final_node.shape == (13,)


def test_trace_edge(backend, path_algorithm):
  net = tensornetwork.TensorNetwork(backend=backend)
  a = net.add_node(np.ones((2, 2, 2, 2, 2)))
  b = net.add_node(np.ones((2, 2, 2)))
  c = net.add_node(np.ones((2, 2, 2)))
  # pylint: disable=pointless-statement
  a[0] ^ a[1]
  a[2] ^ b[0]
  a[3] ^ c[0]
  b[1] ^ c[1]
  b[2] ^ c[2]
  node = path_algorithm(net).get_final_node()
  np.testing.assert_allclose(node.tensor, np.ones(2) * 32.0)


def test_disconnected_network(backend, path_algorithm):
  net = tensornetwork.TensorNetwork(backend=backend)
  a = net.add_node(np.array([2, 2]))
  b = net.add_node(np.array([2, 2]))
  c = net.add_node(np.array([2, 2]))
  d = net.add_node(np.array([2, 2]))
  # pylint: disable=pointless-statement
  a[0] ^ b[0]
  c[0] ^ d[0]
  with pytest.raises(ValueError):
    net = path_algorithm(net)


def test_auto_single_node(backend):
  net = tensornetwork.TensorNetwork(backend=backend)
  a = net.add_node(np.ones((2, 2, 2)))
  # pylint: disable=pointless-statement
  a[0] ^ a[1]
  node = path_contractors.auto(net).get_final_node()
  np.testing.assert_allclose(node.tensor, np.ones(2) * 2.0)


def test_custom_sanity_check(backend):
  net = tensornetwork.TensorNetwork(backend=backend)
  a = net.add_node(np.ones(2))
  b = net.add_node(np.ones((2, 5)))
  # pylint: disable=pointless-statement
  a[0] ^ b[0]

  class PathOptimizer:

    def __call__(self, inputs, output, size_dict, memory_limit=None):
      return [(0, 1)]

  optimizer = PathOptimizer()
  final_node = path_contractors.custom(net, optimizer).get_final_node()
  np.testing.assert_allclose(final_node.tensor, np.ones(5) * 2.0)
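To round out the custom-optimizer protocol from `test_custom_sanity_check`, here is a slightly larger hedged sketch. `SequentialOptimizer`, the backend choice, and the tensor shapes are made up for illustration, and the `custom` docstring recommends inheriting from `opt_einsum.PathOptimizer` in real code.

```python
# Hedged sketch of the custom-optimizer protocol; names and shapes are illustrative.
import numpy as np
import tensornetwork
from tensornetwork.contractors.opt_einsum_paths import path_contractors


class SequentialOptimizer:
  """Always contracts the two oldest remaining nodes."""

  def __call__(self, inputs, output, size_dict, memory_limit=None):
    # `base` appends each new node at the end of its node list and removes
    # the contracted pair, so (0, 1) repeatedly picks the two oldest nodes.
    return [(0, 1)] * (len(inputs) - 1)


net = tensornetwork.TensorNetwork(backend="numpy")  # assumes a numpy backend
a = net.add_node(np.ones((2, 3)))
b = net.add_node(np.ones((2, 4)))
c = net.add_node(np.ones((3, 4)))
# Fully connected triangle, so every pair of nodes shares an edge.
a[0] ^ b[0]
a[1] ^ c[0]
b[1] ^ c[1]
result = path_contractors.custom(net, SequentialOptimizer()).get_final_node()
print(result.tensor)  # 24.0 for all-ones tensors (2 * 3 * 4)
```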
Review comment: Add a disconnected network test.