Fix/svc kernel #220

Merged: 20 commits, Dec 8, 2023
12 changes: 4 additions & 8 deletions README.md
@@ -96,7 +96,8 @@ This library is part of the [Qiskit Ecosystem](https://qiskit.org/ecosystem)
_We recommend the use of [Anaconda](https://www.anaconda.com/) to manage Python
environments._

`pyRiemann-qiskit` currently supports Windows, Mac and Linux OS with Python 3.9 - 3.11.
`pyRiemann-qiskit` currently supports Windows, Mac and Linux OS with **Python 3.9 -
3.11**.

You can install `pyRiemann-qiskit` release from PyPI:

@@ -105,13 +106,8 @@ pip install pyriemann-qiskit
```

The development version can be installed by cloning this repository and installing the
package on your local machine using the `setup.py` script:

```
python setup.py develop
```

Or directly pip:
package on your local machine using the `setup.py` script. We recommend doing it with
`pip`:

```
pip install .
4 changes: 2 additions & 2 deletions examples/ERP/plot_classify_P300_bi.py
@@ -67,7 +67,7 @@

# Reduce the number of subjects, as the Quantum pipeline takes a lot of time
# if executed on the entire dataset.
n_subjects = 5
n_subjects = 2
for dataset in datasets:
dataset.subject_list = dataset.subject_list[0:n_subjects]

@@ -78,7 +78,7 @@
# A Riemannian Quantum pipeline provided by pyRiemann-qiskit
# You can choose between classical SVM and Quantum SVM.
pipelines["RG+QuantumSVM"] = QuantumClassifierWithDefaultRiemannianPipeline(
shots=None, # 'None' forces classic SVM
shots=512, # 'None' forces classic SVM
nfilter=2, # default 2
# default n_components=10, a higher value renders better performance with
# the non-quantum SVM version used in qiskit
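For reference, a minimal sketch of how the `shots` argument switches this pipeline between the quantum and classical SVM back-ends (values are illustrative; only `shots` and `nfilter` mirror the example above, the remaining parameters keep their defaults):

```python
from pyriemann_qiskit.pipelines import (
    QuantumClassifierWithDefaultRiemannianPipeline,
)

# shots as a positive integer: circuits are sampled on the (simulated or
# real) quantum backend, i.e. the quantum SVM is used.
quantum_pipe = QuantumClassifierWithDefaultRiemannianPipeline(shots=512, nfilter=2)

# shots=None: the quantum backend is skipped and a classical SVM is used instead.
classical_pipe = QuantumClassifierWithDefaultRiemannianPipeline(shots=None, nfilter=2)
```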
22 changes: 15 additions & 7 deletions examples/MI/multiclass_classification.py
@@ -99,13 +99,21 @@
# A confusion matrix is reported for each classifier. A perfectly performing
# classifier will have only its diagonal filled and the rest will be zeros.
names = ["aud left", "aud right", "vis left", "vis right"]
title = (
("VQC (" if idx == 0 else "Quantum SVM (" if idx == 1 else "Classical SVM (")
if idx == 2
else "Quantum MDM ("
if idx == 3
else "R-MDM (" + acc_str + ")"
)
if idx == 0:
title = "VQC"
elif idx == 1:
title = "Q-SVM"
elif idx == 2:
title = "SVM"
elif idx == 3:
title = "Q-MDM"
else:
title = "MDM"

title = f"{title} (" + acc_str + ")"

print(title)

axe = axes[idx]
cm = confusion_matrix(y_pred, y_test)
disp = ConfusionMatrixDisplay(cm, display_labels=names)
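Side note: the title selection above can also be written as a lookup table; a purely illustrative equivalent of the new if/elif chain:

```python
# Equivalent lookup-based formulation (illustrative only).
titles = {0: "VQC", 1: "Q-SVM", 2: "SVM", 3: "Q-MDM"}
title = f"{titles.get(idx, 'MDM')} ({acc_str})"
```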
6 changes: 1 addition & 5 deletions examples/MI/plot_compare_dim_red.py
@@ -39,11 +39,7 @@
# Determine the number of "runs" on the quantum machine (simulated or real):
# the higher this number, the lower the variability.
"shots": [1024], # [512, 1024, 2048]
# This defines how we entangle the data into a quantum state
# the more complex is the kernel, the more outcomes we can expect from
# a quantum vs classical classifier.
"feature_entanglement": ["linear"], # ['linear', 'sca', 'full'],
# This parameter change the depth of the circuit when entangling data.
# This parameter changes the depth of the circuit when entangling data.
# There is a trade-off between accuracy and noise when the depth of the
# circuit increases.
"feature_reps": [2], # [2, 3, 4]
4 changes: 2 additions & 2 deletions examples/other_datasets/plot_financial_data.py
@@ -443,7 +443,7 @@ def transform(self, X):


class ERP_CollusionClassifier(ClassifierMixin):
def __init__(self, row_clf, erp_clf, threshold=0.5):
def __init__(self, erp_clf, row_clf, threshold=0.5):
self.row_clf = row_clf
self.erp_clf = erp_clf
self.threshold = threshold
@@ -453,7 +453,7 @@ def fit(self, X, y):
return self

def predict(self, X):
y_pred = self.row_clf.predict(X)
y_pred = self.row_clf.predict(X).astype(float)
collusion_prob = self.erp_clf.predict_proba(X)
y_pred[y_pred == 1] = collusion_prob[y_pred == 1, 1].transpose()
y_pred[y_pred >= self.threshold] = 1
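The `.astype(float)` cast added above matters because the row classifier returns integer labels; assigning probabilities into an integer array would silently truncate them. A small standalone illustration (values are made up):

```python
import numpy as np

y_pred = np.array([1, 0, 1])                  # integer labels from row_clf.predict(X)
collusion_prob = np.array([[0.3, 0.7],
                           [0.9, 0.1],
                           [0.6, 0.4]])       # hypothetical erp_clf.predict_proba(X)

# Without the cast, the probabilities are truncated to 0 on assignment:
y_int = y_pred.copy()
y_int[y_pred == 1] = collusion_prob[y_pred == 1, 1]
print(y_int)    # [0 0 0] (probabilities silently lost)

# With the cast, thresholding on the stored probabilities works as intended:
y_float = y_pred.astype(float)
y_float[y_pred == 1] = collusion_prob[y_pred == 1, 1]
print(y_float)  # [0.7 0.  0.4]
```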
125 changes: 59 additions & 66 deletions pyriemann_qiskit/classification.py
@@ -5,6 +5,7 @@
quantum computer.
"""
from datetime import datetime
from scipy.special import softmax
import logging
import numpy as np

@@ -145,11 +146,11 @@ def _map_classes_to_indices(self, y):
return y_copy

def _map_indices_to_classes(self, y):
y_copy = y.copy()
y_copy = np.array(y.copy())
n_classes = len(self.classes_)
for idx in range(n_classes):
y_copy[y == idx] = self.classes_[idx]
return y_copy
y_copy[np.array(y).transpose() == idx] = self.classes_[idx]
return np.array(y_copy)

def fit(self, X, y):
"""Uses a quantum backend and fits the training data.
@@ -237,6 +238,49 @@ def _predict(self, X):
self._log("Prediction finished.")
return result

def predict_proba(self, X):
"""Return the probabilities associated with predictions.

The default behavior is to return the nested classifier probabilities.
In cases where no `predict_proba` method is available inside the classifier,
the method predicts the label number (0 or 1, for example) and applies a
softmax on top of it.

Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input vector, where `n_samples` is the number of samples and
`n_features` is the number of features.

Returns
-------
prob : ndarray, shape (n_samples, n_classes)
prob[n, i] contains the probability that the nth sample belongs to class `i`.
"""

if not hasattr(self._classifier, "predict_proba"):
# Classifier has no predict_proba
# Use the result from predict and apply a softmax
self._log(
"No predict_proba method available.\
Computing softmax probabilities..."
)
proba = self._classifier.predict(X)
proba = [
np.array(
[
1 if c == self.classes_[i] else 0
for i in range(len(self.classes_))
]
)
for c in proba
]
proba = softmax(proba, axis=0)
else:
proba = self._classifier.predict_proba(X)

return np.array(proba)


class QuanticSVM(QuanticClassifierBase):

@@ -256,6 +300,8 @@ class QuanticSVM(QuanticClassifierBase):
Fix: copy estimator not keeping base class parameters.
.. versionchanged:: 0.2.0
Add seed parameter
SVC and QSVC now compute probabilities (may impact performance).
Predict now uses predict_proba with a softmax when using QSVC.

Parameters
----------
@@ -360,39 +406,15 @@ def _init_algo(self, n_features):
gamma=self.gamma,
C=self.C,
max_iter=max_iter,
probability=True,
)
else:
max_iter = -1 if self.max_iter is None else self.max_iter
classifier = SVC(gamma=self.gamma, C=self.C, max_iter=max_iter)
classifier = SVC(
gamma=self.gamma, C=self.C, max_iter=max_iter, probability=True
)
return classifier

def predict_proba(self, X):
"""Return the probabilities associated with predictions.

This method is implemented for compatibility purpose
as SVM prediction probabilities are not available.
This method assigns a boolean value to each trial which
depends on whether the label was assigned to class 0 or 1

Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input vector, where `n_samples` is the number of samples and
`n_features` is the number of features.

Returns
-------
prob : ndarray, shape (n_samples, n_classes)
prob[n, 0] == True if the nth sample is assigned to 1st class;
prob[n, 1] == True if the nth sample is assigned to 2nd class.
"""
predicted_labels = self.predict(X)
ret = [
np.array([c == self.classes_[0], c == self.classes_[1]])
for c in predicted_labels
]
return np.array(ret)

def predict(self, X):
"""Calculates the predictions.

@@ -407,7 +429,12 @@ def predict(self, X):
pred : array, shape (n_samples,)
Class labels for samples in X.
"""
labels = self._predict(X)
if isinstance(self._classifier, QSVC):
probs = softmax(self.predict_proba(X))
labels = [np.argmax(prob) for prob in probs]
else:
labels = self._predict(X)
self._log("Prediction finished.")
return self._map_indices_to_classes(labels)


@@ -514,24 +541,6 @@ def _init_algo(self, n_features):
)
return vqc

def predict_proba(self, X):
"""Returns the probabilities associated with predictions.

Parameters
----------
X : ndarray, shape (n_samples, n_features)
Input vector, where `n_samples` is the number of samples and
`n_features` is the number of features.

Returns
-------
prob : ndarray, shape (n_samples, n_classes)
prob[n, 0] == True if the nth sample is assigned to 1st class;
prob[n, 1] == True if the nth sample is assigned to 2nd class.
"""
proba, _ = self._predict(X)
return proba

def predict(self, X):
"""Calculates the predictions.

@@ -664,22 +673,6 @@ def _init_algo(self, n_features):
set_global_optimizer(self._optimizer)
return classifier

def predict_proba(self, X):
"""Return the probabilities associated with predictions.

Parameters
----------
X : ndarray, shape (n_trials, n_channels, n_channels)
ndarray of trials.

Returns
-------
prob : ndarray, shape (n_samples, n_classes)
prob[n, 0] == True if the nth sample is assigned to 1st class;
prob[n, 1] == True if the nth sample is assigned to 2nd class.
"""
return self._classifier.predict_proba(X)

def predict(self, X):
"""Calculates the predictions.

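To make the new `predict_proba` fallback above easier to follow, here is a minimal standalone sketch of the idea: hard predictions are one-hot encoded against the known classes, then a softmax turns them into pseudo-probabilities. The class values and predictions below are made up, and the softmax here is taken over the class axis; this illustrates the technique rather than reproducing the exact implementation.

```python
import numpy as np
from scipy.special import softmax

classes = np.array([0, 1])                 # stand-in for self.classes_
predicted = np.array([1, 0, 1])            # stand-in for self._classifier.predict(X)

# One-hot encode each hard prediction against the known classes.
one_hot = np.array([[1 if c == cls else 0 for cls in classes] for c in predicted])

# Softmax over the class axis yields rows that sum to 1, with most of the
# probability mass on the predicted class.
proba = softmax(one_hot, axis=1)
print(proba)
```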
24 changes: 12 additions & 12 deletions pyriemann_qiskit/pipelines.py
@@ -10,6 +10,7 @@
from pyriemann_qiskit.utils.filtering import NoDimRed
from pyriemann_qiskit.utils.hyper_params_factory import (
gen_zz_feature_map,
gen_x_feature_map,
gen_two_local,
get_spsa,
)
@@ -175,14 +176,8 @@ class QuantumClassifierWithDefaultRiemannianPipeline(BasePipeline):
shots : int | None (default: 1024)
Number of repetitions of each circuit, for sampling.
If None, classical computation will be performed.
feature_entanglement : str | list[list[list[int]]] | \
Callable[int, list[list[list[int]]]]
Specifies the entanglement structure for the ZZFeatureMap.
Entanglement structure can be provided with indices or string.
Possible string values are: 'full', 'linear', 'circular' and 'sca'.
See [2]_ for more details on entanglement structure.
feature_reps : int (default: 2)
The number of repeated circuits for the ZZFeatureMap,
The number of repeated circuits for the FeatureMap,
greater or equal to 1.
spsa_trials : int (default: None)
Maximum number of iterations to perform using SPSA optimizer.
@@ -206,12 +201,15 @@ class QuantumClassifierWithDefaultRiemannianPipeline(BasePipeline):
Notes
-----
.. versionadded:: 0.0.1
.. versionchanged:: 0.2.0
Changed feature map from ZZFeatureMap to XFeatureMap.
The unused parameter `feature_entanglement` was therefore removed.

See Also
--------
XdawnCovariances
TangentSpace
gen_zz_feature_map
gen_x_feature_map
gen_two_local
get_spsa
QuanticVQC
@@ -236,7 +234,6 @@ def __init__(
C=1.0,
max_iter=None,
shots=1024,
feature_entanglement="full",
feature_reps=2,
spsa_trials=None,
two_local_reps=None,
@@ -248,7 +245,6 @@
self.C = C
self.max_iter = max_iter
self.shots = shots
self.feature_entanglement = feature_entanglement
self.feature_reps = feature_reps
self.spsa_trials = spsa_trials
self.two_local_reps = two_local_reps
@@ -261,7 +257,11 @@ def _create_pipe(self):
is_vqc = self.spsa_trials and self.two_local_reps
is_quantum = self.shots is not None

feature_map = gen_zz_feature_map(self.feature_reps, self.feature_entanglement)
# Different feature maps can be used.
# Currently the best results are produced by the x_feature_map.
# This can change in the future as the code for the different feature maps
# is updated in the new versions of Qiskit.
feature_map = gen_x_feature_map(self.feature_reps)

if is_vqc:
self._log("QuanticVQC chosen.")
@@ -320,7 +320,7 @@ class QuantumMDMWithRiemannianPipeline(BasePipeline):
shots : int (default:1024)
Number of repetitions of each circuit, for sampling.
gen_feature_map : Callable[int, QuantumCircuit | FeatureMap] \
(default : Callable[int, ZZFeatureMap])
(default : Callable[int, XFeatureMap])
Function generating a feature map to encode data into a quantum state.

Attributes
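As a usage sketch of the pipeline after this change (parameter values are illustrative, not taken from the PR): the VQC branch is selected when both `spsa_trials` and `two_local_reps` are given, otherwise an SVM is built, quantum when `shots` is an integer and classical when it is None; in all cases the feature map is now generated with `gen_x_feature_map(feature_reps)`.

```python
from pyriemann_qiskit.pipelines import (
    QuantumClassifierWithDefaultRiemannianPipeline,
)

# VQC branch: both SPSA and TwoLocal parameters are provided.
vqc_pipe = QuantumClassifierWithDefaultRiemannianPipeline(
    shots=1024, feature_reps=2, spsa_trials=40, two_local_reps=3
)

# Quantum SVM branch: no variational parameters, shots is an integer.
qsvm_pipe = QuantumClassifierWithDefaultRiemannianPipeline(
    shots=1024, feature_reps=2
)

# Classical branch: shots=None disables the quantum backend.
svm_pipe = QuantumClassifierWithDefaultRiemannianPipeline(shots=None)
```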
6 changes: 4 additions & 2 deletions tests/conftest.py
@@ -100,8 +100,9 @@ def _get_dataset(n_samples, n_features, n_classes, type="bin"):
samples_0 = make_covariances(
n_samples // n_classes, n_features, 0, return_params=False
)
samples_1 = samples_0 * 2
samples = np.concatenate((samples_0, samples_1), axis=0)
samples = np.concatenate(
[samples_0 * (i + 1) for i in range(n_classes)], axis=0
)
labels = _get_labels(n_samples, n_classes)
else:
samples, labels = get_mne_sample()
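The multi-class branch above builds one block of covariance matrices per class, each block scaled by the class index plus one. A minimal self-contained sketch of that construction (shapes and the identity base matrices are illustrative, standing in for the `make_covariances` output):

```python
import numpy as np

n_classes, n_per_class, n_features = 3, 4, 2
base = np.stack([np.eye(n_features)] * n_per_class)  # stand-in for make_covariances(...)

samples = np.concatenate([base * (i + 1) for i in range(n_classes)], axis=0)
labels = np.repeat(np.arange(n_classes), n_per_class)

print(samples.shape, labels.shape)  # (12, 2, 2) (12,)
```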
@@ -179,6 +180,7 @@ class BinaryFVT(BinaryTest):
def additional_steps(self):
self.quantum_instance.fit(self.samples, self.labels)
self.prediction = self.quantum_instance.predict(self.samples)
self.predict_proba = self.quantum_instance.predict_proba(self.samples)
print(self.labels, self.prediction)

