Skip to content

Commit

Permalink
Add block collection options split_layers and collect_from_back (#…
Browse files Browse the repository at this point in the history
…10254)

* adding reverse collection direction and test

* propagating options to CollectAndCollapse, CollectLinearFunctions and CollectCliffords passes

* reno

* test for clifford collection pass

* adding option split_layers

* propagating options and adding tests

* improving tests and release notes

* temporarily removing release notes

* reinstating release notes

* reno fix

* change suggestion in review comments

* correct fix this time

* using condition_resources instead of condition_bits

* Update qiskit/dagcircuit/collect_blocks.py

This is nice!

Co-authored-by: Matthew Treinish <mtreinish@kortar.org>

---------

Co-authored-by: Matthew Treinish <mtreinish@kortar.org>
  • Loading branch information
alexanderivrii and mtreinish committed Jul 20, 2023
1 parent e595066 commit 94cb65e
Show file tree
Hide file tree
Showing 8 changed files with 555 additions and 21 deletions.
115 changes: 98 additions & 17 deletions qiskit/dagcircuit/collect_blocks.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ def __init__(self, dag):
self.dag = dag
self._pending_nodes = None
self._in_degree = None
self._collect_from_back = False

if isinstance(dag, DAGCircuit):
self.is_dag_dependency = False
Expand Down Expand Up @@ -86,22 +87,48 @@ def _op_nodes(self):
return self.dag.get_nodes()

def _direct_preds(self, node):
"""Returns direct predecessors of a node."""
"""Returns direct predecessors of a node. This function takes into account the
direction of collecting blocks, that is node's predecessors when collecting
backwards are the direct successors of a node in the DAG.
"""
if not self.is_dag_dependency:
return [pred for pred in self.dag.predecessors(node) if isinstance(pred, DAGOpNode)]
if self._collect_from_back:
return [pred for pred in self.dag.successors(node) if isinstance(pred, DAGOpNode)]
else:
return [pred for pred in self.dag.predecessors(node) if isinstance(pred, DAGOpNode)]
else:
return [
self.dag.get_node(pred_id) for pred_id in self.dag.direct_predecessors(node.node_id)
]
if self._collect_from_back:
return [
self.dag.get_node(pred_id)
for pred_id in self.dag.direct_successors(node.node_id)
]
else:
return [
self.dag.get_node(pred_id)
for pred_id in self.dag.direct_predecessors(node.node_id)
]

def _direct_succs(self, node):
"""Returns direct successors of a node."""
"""Returns direct successors of a node. This function takes into account the
direction of collecting blocks, that is node's successors when collecting
backwards are the direct predecessors of a node in the DAG.
"""
if not self.is_dag_dependency:
return [succ for succ in self.dag.successors(node) if isinstance(succ, DAGOpNode)]
if self._collect_from_back:
return [succ for succ in self.dag.predecessors(node) if isinstance(succ, DAGOpNode)]
else:
return [succ for succ in self.dag.successors(node) if isinstance(succ, DAGOpNode)]
else:
return [
self.dag.get_node(succ_id) for succ_id in self.dag.direct_successors(node.node_id)
]
if self._collect_from_back:
return [
self.dag.get_node(succ_id)
for succ_id in self.dag.direct_predecessors(node.node_id)
]
else:
return [
self.dag.get_node(succ_id)
for succ_id in self.dag.direct_successors(node.node_id)
]

def _have_uncollected_nodes(self):
"""Returns whether there are uncollected (pending) nodes"""
Expand Down Expand Up @@ -142,16 +169,29 @@ def collect_matching_block(self, filter_fn):

return current_block

def collect_all_matching_blocks(self, filter_fn, split_blocks=True, min_block_size=2):
def collect_all_matching_blocks(
self,
filter_fn,
split_blocks=True,
min_block_size=2,
split_layers=False,
collect_from_back=False,
):
"""Collects all blocks that match a given filtering function filter_fn.
This iteratively finds the largest block that does not match filter_fn,
then the largest block that matches filter_fn, and so on, until no more uncollected
nodes remain. Intuitively, finding larger blocks of non-matching nodes helps to
find larger blocks of matching nodes later on.
The option ``split_blocks`` allows to collected blocks into sub-blocks over
disjoint qubit subsets. The option ``min_block_size`` specifies the minimum number
of gates in the block for the block to be collected.
After the blocks are collected, they can be optionally refined. The option
``split_blocks`` allows to split collected blocks into sub-blocks over disjoint
qubit subsets. The option ``split_layers`` allows to split collected blocks
into layers of non-overlapping instructions. The option ``min_block_size``
specifies the minimum number of gates in the block for the block to be collected.
By default, blocks are collected in the direction from the inputs towards the outputs
of the circuit. The option ``collect_from_back`` allows to change this direction,
that is collect blocks from the outputs towards the inputs of the circuit.
Returns the list of matching blocks only.
"""
Expand All @@ -160,6 +200,8 @@ def not_filter_fn(node):
"""Returns the opposite of filter_fn."""
return not filter_fn(node)

# Note: the collection direction must be specified before setting in-degrees
self._collect_from_back = collect_from_back
self._setup_in_degrees()

# Iteratively collect non-matching and matching blocks.
Expand All @@ -170,13 +212,26 @@ def not_filter_fn(node):
if matching_block:
matching_blocks.append(matching_block)

# If the option split_layers is set, refine blocks by splitting them into layers
# of non-overlapping instructions (in other words, into depth-1 sub-blocks).
if split_layers:
tmp_blocks = []
for block in matching_blocks:
tmp_blocks.extend(split_block_into_layers(block))
matching_blocks = tmp_blocks

# If the option split_blocks is set, refine blocks by splitting them into sub-blocks over
# disconnected qubit subsets.
if split_blocks:
split_blocks = []
tmp_blocks = []
for block in matching_blocks:
split_blocks.extend(BlockSplitter().run(block))
matching_blocks = split_blocks
tmp_blocks.extend(BlockSplitter().run(block))
matching_blocks = tmp_blocks

# If we are collecting from the back, both the order of the blocks
# and the order of nodes in each block should be reversed.
if self._collect_from_back:
matching_blocks = [block[::-1] for block in matching_blocks[::-1]]

# Keep only blocks with at least min_block_sizes.
matching_blocks = [block for block in matching_blocks if len(block) >= min_block_size]
Expand Down Expand Up @@ -235,6 +290,32 @@ def run(self, block):
return blocks


def split_block_into_layers(block):
    """Splits a block of nodes into sub-blocks of non-overlapping instructions
    (or, in other words, into depth-1 sub-blocks).

    Args:
        block (list): the DAG nodes to split, assumed to be given in a valid
            topological order.

    Returns:
        list: a list of layers; each layer is a list of nodes from ``block``
        whose instructions act on pairwise-disjoint sets of bits.
    """
    # For every qubit/clbit seen so far, the number of layers already using it
    # (i.e. the current "depth" of that bit within the block).
    bit_depths = {}
    layers = []

    for node in block:
        # Collect every bit the instruction touches: qubits, clbits, and the
        # clbits referenced by the instruction's classical condition (if any).
        cur_bits = set(node.qargs)
        cur_bits.update(node.cargs)

        cond = getattr(node.op, "condition", None)
        if cond is not None:
            cur_bits.update(condition_resources(cond).clbits)

        # The node belongs to the earliest layer past every layer that already
        # uses one of its bits. ``default=0`` guards against an instruction
        # that touches no bits at all (it goes into the first layer) instead
        # of raising ValueError from max() on an empty sequence.
        cur_depth = max((bit_depths.get(bit, 0) for bit in cur_bits), default=0)
        while len(layers) <= cur_depth:
            layers.append([])

        for bit in cur_bits:
            bit_depths[bit] = cur_depth + 1
        layers[cur_depth].append(node)

    return layers


class BlockCollapser:
"""This class implements various strategies of consolidating blocks of nodes
in a DAG (direct acyclic graph). It works both with the
Expand Down
15 changes: 13 additions & 2 deletions qiskit/transpiler/passes/optimization/collect_and_collapse.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,12 +88,23 @@ def run(self, dag):
return dag


def collect_using_filter_function(
    dag,
    filter_function,
    split_blocks,
    min_block_size,
    split_layers=False,
    collect_from_back=False,
):
    """Corresponds to an important block collection strategy that greedily collects
    maximal blocks of nodes matching a given ``filter_function``.

    Args:
        dag: the DAG (either :class:`.DAGCircuit` or :class:`.DAGDependency`)
            whose nodes should be collected into blocks.
        filter_function (callable): predicate selecting which nodes may join a block.
        split_blocks (bool): if True, splits collected blocks into sub-blocks over
            disjoint qubit subsets.
        min_block_size (int): the minimum number of gates a block must contain
            to be collected.
        split_layers (bool): if True, splits collected blocks into layers of
            non-overlapping instructions (depth-1 sub-blocks).
        collect_from_back (bool): if True, collects blocks starting from the
            outputs of the circuit instead of the inputs.

    Returns:
        list: the list of collected blocks of matching nodes.
    """
    return BlockCollector(dag).collect_all_matching_blocks(
        filter_fn=filter_function,
        split_blocks=split_blocks,
        min_block_size=min_block_size,
        split_layers=split_layers,
        collect_from_back=collect_from_back,
    )


Expand Down
15 changes: 14 additions & 1 deletion qiskit/transpiler/passes/optimization/collect_cliffords.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,14 @@ class CollectCliffords(CollectAndCollapse):
object.
"""

def __init__(self, do_commutative_analysis=False, split_blocks=True, min_block_size=2):
def __init__(
self,
do_commutative_analysis=False,
split_blocks=True,
min_block_size=2,
split_layers=False,
collect_from_back=False,
):
"""CollectCliffords initializer.
Args:
Expand All @@ -39,13 +46,19 @@ def __init__(self, do_commutative_analysis=False, split_blocks=True, min_block_s
over disjoint qubit subsets.
min_block_size (int): specifies the minimum number of gates in the block
for the block to be collected.
split_layers (bool): if True, splits collected blocks into layers of
non-overlapping instructions (i.e. into depth-1 sub-blocks).
collect_from_back (bool): specifies if blocks should be collected started
from the end of the circuit.
"""

collect_function = partial(
collect_using_filter_function,
filter_function=_is_clifford_gate,
split_blocks=split_blocks,
min_block_size=min_block_size,
split_layers=split_layers,
collect_from_back=collect_from_back,
)
collapse_function = partial(collapse_to_operation, collapse_function=_collapse_to_clifford)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,14 @@ class CollectLinearFunctions(CollectAndCollapse):
"""Collect blocks of linear gates (:class:`.CXGate` and :class:`.SwapGate` gates)
and replaces them by linear functions (:class:`.LinearFunction`)."""

def __init__(self, do_commutative_analysis=False, split_blocks=True, min_block_size=2):
def __init__(
self,
do_commutative_analysis=False,
split_blocks=True,
min_block_size=2,
split_layers=False,
collect_from_back=False,
):
"""CollectLinearFunctions initializer.
Args:
Expand All @@ -37,13 +44,19 @@ def __init__(self, do_commutative_analysis=False, split_blocks=True, min_block_s
over disjoint qubit subsets.
min_block_size (int): specifies the minimum number of gates in the block
for the block to be collected.
split_layers (bool): if True, splits collected blocks into layers of
non-overlapping instructions (i.e. into depth-1 sub-blocks).
collect_from_back (bool): specifies if blocks should be collected started
from the end of the circuit.
"""

collect_function = partial(
collect_using_filter_function,
filter_function=_is_linear_gate,
split_blocks=split_blocks,
min_block_size=min_block_size,
split_layers=split_layers,
collect_from_back=collect_from_back,
)
collapse_function = partial(
collapse_to_operation, collapse_function=_collapse_to_linear_function
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
---
features:
- |
Added two new options to :class:`~BlockCollector`.
The first new option ``split_layers`` allows to split the collected blocks into
layers of non-overlapping instructions, i.e. into depth-1 sub-blocks.
The second new option ``collect_from_back`` allows to greedily collect blocks starting
from the outputs of the circuit. This is important in combination with ALAP-scheduling passes
where we may prefer to put gates in the later rather than earlier blocks.
- |
Added new options ``split_layers`` and ``collect_from_back`` to
:class:`~CollectLinearFunctions` and :class:`~CollectCliffords` transpiler passes.
When ``split_layers`` is `True`, the collected blocks are split
into layers of non-overlapping instructions, i.e. into depth-1 sub-blocks.
Consider the following example::
from qiskit.circuit import QuantumCircuit
from qiskit.transpiler.passes import CollectLinearFunctions
circuit = QuantumCircuit(5)
circuit.cx(0, 2)
circuit.cx(1, 4)
circuit.cx(2, 0)
circuit.cx(0, 3)
circuit.swap(3, 2)
circuit.swap(4, 1)
# Collect all linear gates, without splitting into layers
qct = CollectLinearFunctions(split_blocks=False, min_block_size=1, split_layers=False)(circuit)
assert qct.count_ops()["linear_function"] == 1
# Collect all linear gates, with splitting into layers
qct = CollectLinearFunctions(split_blocks=False, min_block_size=1, split_layers=True)(circuit)
assert qct.count_ops()["linear_function"] == 4
The original circuit is linear. When collecting linear gates without splitting into layers,
we should end up with a single linear function. However, when collecting linear gates and
splitting into layers, we should end up with 4 linear functions.
When ``collect_from_back`` is `True`, the blocks are greedily collected from the outputs towards
the inputs of the circuit. Consider the following example::
from qiskit.circuit import QuantumCircuit
from qiskit.transpiler.passes import CollectLinearFunctions
circuit = QuantumCircuit(3)
circuit.cx(1, 2)
circuit.cx(1, 0)
circuit.h(2)
circuit.swap(1, 2)
# This combines the CX(1, 2) and CX(1, 0) gates into a single linear function
qct = CollectLinearFunctions(collect_from_back=False)(circuit)
# This combines the CX(1, 0) and SWAP(1, 2) gates into a single linear function
qct = CollectLinearFunctions(collect_from_back=True)(circuit)
The original circuit contains a Hadamard gate, so that the `CX(1, 0)` gate can be
combined either with `CX(1, 2)` or with `SWAP(1, 2)`, but not with both. When
``collect_from_back`` is `False`, the linear blocks are greedily collected from the start
of the circuit, and thus `CX(1, 0)` is combined with `CX(1, 2)`. When
``collect_from_back`` is `True`, the linear blocks are greedily collected from the end
of the circuit, and thus `CX(1, 0)` is combined with `SWAP(1, 2)`.

0 comments on commit 94cb65e

Please sign in to comment.