Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

[WIP] Discriminator as an experiment option #1177

Open
wants to merge 8 commits into
base: main
Choose a base branch
from
Original file line number Diff line number Diff line change
Expand Up @@ -302,6 +302,16 @@ def _transpiled_circuits(self) -> List[QuantumCircuit]:

transpiled.append(circ)

if self._experiment_options["use_discriminator"]:
transpiled = self._add_spam_circuits(transpiled)
if self._experiment_options["discriminator"]:
self._add_discriminator_to_experiment(self._experiment_options["discriminator"])
# assuming the analysis uses curve_analysis, so the SPAM circuits can be filtered out
# using filter_data
filter_data = self.analysis.options.filter_data
filter_data["experiment_type"] = self.experiment_type
self.analysis.set_options(filter_data=filter_data)

return transpiled

def _map_to_physical_qubits(self, circuit: QuantumCircuit) -> QuantumCircuit:
Expand Down
164 changes: 164 additions & 0 deletions qiskit_experiments/data_processing/nodes.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,6 +311,170 @@ def train(self, data: np.ndarray):
)


class TrainableDiscriminatorNode(TrainableDataAction):
    """A class to discriminate kerneled data, e.g., IQ data, to produce counts.

    This node integrates into the data processing chain a serializable :class:`.BaseDiscriminator`
    subclass instance which must have a :meth:`predict` method that takes as input a list of lists
    and returns a list of labels and a :meth:`fit` method that takes as input a list of lists and a
    list of labels and trains the discriminator. Crucially, this node can be initialized with a
    single discriminator which applies to each memory slot or it can be initialized with a list of
    discriminators, i.e., one for each slot.

    .. note::

        This node drops the uncertainty of the unclassified data.
        Returned labels don't have uncertainty.

    """

    def __init__(
        self,
        discriminators: Union[BaseDiscriminator, List[BaseDiscriminator]],
        validate: bool = True,
    ):
        """Initialize the node with an object that can discriminate.

        Args:
            discriminators: The entity that will perform the discrimination. This needs to
                be a :class:`.BaseDiscriminator` or a list thereof that takes
                as input a list of lists and returns a list of labels. If a list of
                discriminators is given then there should be as many discriminators as there
                will be slots in the memory. The discriminator at the i-th index will be applied
                to the i-th memory slot.
            validate: If set to False the DataAction will not validate its input.
        """
        super().__init__(validate)
        self._discriminator = discriminators
        # Shape of the most recently formatted data, cached by ``_format_data``
        # so that ``_process`` and ``train`` can reshape consistently.
        self._n_circs = 0
        self._n_shots = 0
        self._n_slots = 0
        self._n_iq = 0

    @classmethod
    def _default_parameters(cls) -> Options:
        """Default parameters.

        Trainable parameters:
            trained: whether the discriminator is trained or not
        """
        params = super()._default_parameters()
        params.trained = None

        return params

    def _format_data(self, data: np.ndarray) -> np.ndarray:
        """Validate the input shape and cache its dimensions.

        Args:
            data: Single-shot level-1 (IQ) data, expected as a 4D array with shape
                ``(circuits, shots, slots, 2)``.

        Returns:
            The nominal values of ``data`` (uncertainties stripped).

        Raises:
            DataProcessorError: If the data is not a 4D array of single-shot IQ data,
                if the last dimension is not 2, or if the number of discriminators
                does not match the number of memory slots.
        """
        self._n_shots = 0

        # identify shape
        try:
            # level1 single-shot data
            self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape
        except ValueError as ex:
            raise DataProcessorError(
                f"The data given to {self.__class__.__name__} does not have the shape of "
                "single-shot IQ data; expecting a 4D array."
            ) from ex

        if self._validate:
            if data.shape[-1] != 2:
                raise DataProcessorError(
                    f"IQ data given to {self.__class__.__name__} must be a multi-dimensional array"
                    "of dimension [d0, d1, ..., 2] in which the last dimension "
                    "corresponds to IQ elements."
                    f"Input data contains element with length {data.shape[-1]} != 2."
                )

            if isinstance(self._discriminator, list):
                if self._n_slots != len(self._discriminator):
                    raise DataProcessorError(
                        f"The Discriminator node has {len(self._discriminator)} discriminators "
                        f"which does not match the {self._n_slots} slots in the data."
                    )

        return unp.nominal_values(data)

    def _process(self, data: np.ndarray) -> np.ndarray:
        """Discriminate the data.

        Args:
            data: The IQ data as a list of points to discriminate. This data should have
                the shape dim_1 x dim_2 x ... x dim_k x 2.

        Returns:
            The discriminated data as a list of labels with shape dim_1 x ... x dim_k.

        Raises:
            DataProcessorError: If the discriminator has not been previously trained on data.
        """
        if not self.is_trained:
            raise DataProcessorError(
                "The trainable discriminator must be trained on data before it can be used."
            )

        # Case where one discriminator is applied to all the data.
        if not isinstance(self._discriminator, list):
            # Reshape the IQ data to an array of size n x 2
            shape = data.shape
            data = data.reshape((-1, 2))  # the last dim is guaranteed by _process

            # Classify the data using the discriminator and reshape it to dim_1 x ... x dim_k
            classified = np.array(self._discriminator.predict(data)).reshape(shape[0:-1])

        # case where a discriminator is applied to each slot.
        else:
            # NOTE: dtype=str yields single-character elements; labels are assumed
            # to be one character ("0"/"1") per slot.
            classified = np.empty((self._n_circs, self._n_shots, self._n_slots), dtype=str)
            for idx, discriminator in enumerate(self._discriminator):
                sub_data = data[:, :, idx, :].reshape((self._n_circs * self._n_shots, 2))
                sub_classified = np.array(discriminator.predict(sub_data))
                sub_classified = sub_classified.reshape((self._n_circs, self._n_shots))
                classified[:, :, idx] = sub_classified

        # Concatenate the bit-strings together (slot 0 is the least significant bit).
        labeled_data = []
        for idx in range(self._n_circs):
            labeled_data.append(
                ["".join(classified[idx, jdx, :][::-1]) for jdx in range(self._n_shots)]
            )

        return np.array(labeled_data).reshape((self._n_circs, self._n_shots))

    def train(self, data: np.ndarray):
        """Train the discriminator(s) on the SPAM calibration data.

        The first two circuits in ``data`` are assumed to be the SPAM calibration
        circuits: the all-|0> circuit first, followed by the all-|1> circuit.

        Args:
            data: The full job data; only the first two circuits are used for training.

        Raises:
            DataProcessorError: If the discriminator cannot be fit on the data.
        """
        if data is None:
            return
        # assuming the training circuits are the 2 first circuits in the job
        training_data = self._format_data(data)[:2]

        try:
            if not isinstance(self._discriminator, list):
                # Flatten both calibration circuits (all slots) into an n x 2 array.
                # The reshape is circuit-major so the first half of the points comes
                # from the all-|0> circuit and the second half from the all-|1>
                # circuit; labels must therefore cover shots * slots points per
                # circuit, not just shots (fixes a label/sample count mismatch
                # whenever there is more than one memory slot).
                # assuming only "0" and "1" states are used
                n_points = self._n_shots * self._n_slots
                flat_data = training_data.reshape((2 * n_points, 2))
                labels = ["0"] * n_points + ["1"] * n_points
                self._discriminator.fit(flat_data, labels)
            else:
                # Train each discriminator only on the data of its own slot so the
                # number of labels matches the number of training points.
                labels = ["0"] * self._n_shots + ["1"] * self._n_shots
                for idx, discriminator in enumerate(self._discriminator):
                    slot_data = training_data[:, :, idx, :].reshape((2 * self._n_shots, 2))
                    discriminator.fit(slot_data, labels)
        except Exception as ex:
            raise DataProcessorError(
                "The discriminator class must have a fit method in order to train it."
            ) from ex
        self.set_parameters(trained=True)


class IQPart(DataAction):
"""Abstract class for IQ data post-processing."""

Expand Down
83 changes: 81 additions & 2 deletions qiskit_experiments/framework/base_experiment.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@

from abc import ABC, abstractmethod
import copy
import warnings
from collections import OrderedDict
from typing import Sequence, Optional, Tuple, List, Dict, Union

Expand All @@ -29,6 +30,12 @@
from qiskit_experiments.framework.experiment_data import ExperimentData
from qiskit_experiments.framework.configs import ExperimentConfig
from qiskit_experiments.warnings import deprecate_arguments
from qiskit_experiments.data_processing import DataProcessor
from qiskit_experiments.data_processing.nodes import (
MemoryToCounts,
Probability,
TrainableDiscriminatorNode,
)


class BaseExperiment(ABC, StoreInitArgs):
Expand Down Expand Up @@ -309,14 +316,82 @@ def circuits(self) -> List[QuantumCircuit]:
# values for any explicit experiment options that affect circuit
# generation

def _add_spam_circuits(self, circuits):
    """Prepend the two tagged SPAM calibration circuits to ``circuits``.

    The SPAM circuits are:
        * a circuit with only a measurement on all the qubits, and
        * a circuit with an X gate on every qubit followed by a measurement.
    """
    # Both SPAM circuits carry the same metadata tag; each circuit gets
    # its own dict instance so the objects are not shared.
    def _spam_metadata():
        return {
            "experiment_type": self._type + ",SPAM cal",
            "qubits": self.physical_qubits,
        }

    ground = QuantumCircuit(self.num_qubits)
    ground.measure_all()
    ground.metadata = _spam_metadata()

    excited = QuantumCircuit(self.num_qubits)
    excited.x(list(range(self.num_qubits)))
    excited.measure_all()
    excited.metadata = _spam_metadata()

    # Insert so that the final order is [ground, excited, *original circuits].
    circuits.insert(0, excited)
    circuits.insert(0, ground)
    return circuits

def _add_discriminator_to_experiment(self, discriminator=None):
    """Add discriminator training and usage as data-processing nodes to the experiment.

    If a discriminator object is not supplied, uses sklearn LinearDiscriminantAnalysis.

    Args:
        discriminator: Optional :class:`.BaseDiscriminator` instance. When ``None``,
            a default ``SkLDA(LinearDiscriminantAnalysis())`` is used.
    """
    if not discriminator:
        from qiskit_experiments.data_processing import SkLDA
        from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

        discriminator = SkLDA(LinearDiscriminantAnalysis())
    # add return_data_points option in order to have both of level 1 and level 2 data
    self.analysis.set_options(return_data_points=True)
    data_processor = DataProcessor(
        input_key="memory",
        data_actions=[
            TrainableDiscriminatorNode(discriminator),
            MemoryToCounts(),
            Probability("1"),
        ],
    )
    exp_data_processor = self.analysis.options["data_processor"]
    # Only when the analysis already has a processor is there anything to
    # preserve: warn the user and append their nodes after the discrimination
    # chain. (The original guard was inverted and dereferenced ``None``.)
    if exp_data_processor:
        warnings.warn(
            "Using a discriminator inserts nodes at the start of the data processing "
            "chain. Your data processing nodes will be appended after a 'Probability' "
            "node."
        )
        for node in exp_data_processor._nodes:
            data_processor.append(node)
    self.analysis.set_options(data_processor=data_processor)

def _transpiled_circuits(self) -> List[QuantumCircuit]:
    """Return a list of experiment circuits, transpiled.

    This function can be overridden to define custom transpilation.

    Returns:
        The experiment circuits transpiled for ``self.backend``. When the
        ``use_discriminator`` experiment option is set, the SPAM calibration
        circuits are prepended before transpilation and the analysis is
        configured to filter them out of the fit.
    """
    transpile_opts = copy.copy(self.transpile_options.__dict__)
    transpile_opts["initial_layout"] = list(self.physical_qubits)

    circuits = self.circuits()
    if self._experiment_options["use_discriminator"]:
        circuits = self._add_spam_circuits(circuits)
        if self._experiment_options["discriminator"]:
            self._add_discriminator_to_experiment(self._experiment_options["discriminator"])
        # assuming the analysis uses curve_analysis, so the SPAM circuits can be filtered out
        # using filter_data
        filter_data = self.analysis.options.filter_data
        filter_data["experiment_type"] = self.experiment_type
        self.analysis.set_options(filter_data=filter_data)

    # Transpile exactly once; the original implementation transpiled the
    # circuit list a second time in the discriminator branch, discarding
    # the first (expensive) transpilation result.
    return transpile(circuits, self.backend, **transpile_opts)

Expand All @@ -327,13 +402,17 @@ def _default_experiment_options(cls) -> Options:
Experiment Options:
max_circuits (Optional[int]): The maximum number of circuits per job when
running an experiment on a backend.
use_discriminator (Optional[bool]): Whether to use discriminator to classify the
measured kerneled data into counts which will be used by the analysis class
discriminator (Optional[BaseDiscriminator]): If use_discriminator is True, this is the
discriminator class which will be used to classify the data
"""
# Experiment subclasses should override this method to return
# an `Options` object containing all the supported options for
# that experiment and their default values. Only options listed
# here can be modified later by the different methods for
# setting options.
return Options(max_circuits=None)
return Options(max_circuits=None, use_discriminator=False, discriminator=None)

@property
def experiment_options(self) -> Options:
Expand Down