# Q3a_variational-classifier-parity.py
"""Variational quantum classifier
This example shows that a variational quantum classifier
can be optimized to reproduce the parity function.
"""
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import NesterovMomentumOptimizer
# 4-qubit statevector simulator; target device for the qnode defined below
dev = qml.device('default.qubit', wires=4)
def layer(W):
    """Apply one trainable layer of the variational classifier.

    Every qubit gets an arbitrary single-qubit rotation, then the four
    qubits are entangled by a ring of CNOTs (0->1, 1->2, 2->3, 3->0).

    Args:
        W: array of variables with one (phi, theta, omega) row per qubit
    """
    for wire in range(4):
        qml.Rot(W[wire, 0], W[wire, 1], W[wire, 2], wires=wire)
    for wire in range(4):
        qml.CNOT(wires=[wire, (wire + 1) % 4])
def statepreparation(x):
    """Encode the binary input vector x as a computational basis state.

    Args:
        x: single input vector of bits, one per wire
    """
    qml.BasisState(x, wires=list(range(4)))
@qml.qnode(dev)
def circuit(weights, x=None):
    """Quantum circuit of the variational classifier.

    Encodes x as a basis state, then applies one trainable layer per
    entry of `weights`.

    Args:
        weights (array[float]): stack of per-layer variable arrays
        x: single input vector

    Returns:
        Expectation value of the Pauli-Z operator on qubit 0.
    """
    statepreparation(x)
    for layer_weights in weights:
        layer(layer_weights)
    return qml.expval(qml.PauliZ(0))
def variational_classifier(var, x=None):
    """Full model: quantum circuit output shifted by a classical bias.

    Args:
        var (array[float]): pair of (circuit weights, scalar bias)
        x: single input vector

    Returns:
        Continuous (real-valued) model output.
    """
    weights, bias = var[0], var[1]
    return bias + circuit(weights, x=x)
def square_loss(labels, predictions):
    """Mean squared error between labels and predictions.

    Args:
        labels (array[float]): 1-d array of labels
        predictions (array[float]): 1-d array of predictions

    Returns:
        float: mean of the squared residuals
    """
    # sum() over a generator replaces the manual accumulator loop;
    # zip() pairs elements exactly as the original loop did.
    return sum((l - p) ** 2 for l, p in zip(labels, predictions)) / len(labels)
def accuracy(labels, predictions):
    """Share of equal labels and predictions.

    Pairs are compared with a small absolute tolerance so that float
    predictions numerically equal to the +/-1 labels count as matches.

    Args:
        labels (array[float]): 1-d array of labels
        predictions (array[float]): 1-d array of predictions

    Returns:
        float: fraction of matching (label, prediction) pairs
    """
    # Renamed the accumulator: the original called it `loss`, but it
    # counts matches, not a loss.
    matches = sum(1 for l, p in zip(labels, predictions) if abs(l - p) < 1e-5)
    return matches / len(labels)
def cost(var, X, Y):
    """Training objective: mean squared error of the model over (X, Y)."""
    model_outputs = [variational_classifier(var, x=sample) for sample in X]
    return square_loss(Y, model_outputs)
# load parity data: each row holds a 4-bit input followed by its parity label
data = np.loadtxt("data/parity.txt")
X = data[:, :-1]  # input bit-strings, one per row
Y = data[:, -1]   # labels, initially in {0, 1}
Y = Y * 2 - np.ones(len(Y))  # shift label from {0, 1} to {-1, 1}
# initialize weight layers: small random rotation angles plus a scalar bias
np.random.seed(0)  # fixed seed so training runs are reproducible
num_qubits = 4
num_layers = 2
# var_init is a (weights, bias) pair as expected by variational_classifier
var_init = (0.01 * np.random.randn(num_layers, num_qubits, 3), 0.0)
# create optimizer (step size 0.5)
opt = NesterovMomentumOptimizer(0.5)
batch_size = 5
# train the variational classifier
var = var_init
for it in range(25):
    # Update the weights by one optimizer step on a random mini-batch
    # (sampled with replacement from the full data set)
    batch_index = np.random.randint(0, len(X), (batch_size, ))
    X_batch = X[batch_index]
    Y_batch = Y[batch_index]
    var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)
    # Compute accuracy over the full data set; the sign of the continuous
    # model output is the predicted {-1, +1} label
    predictions = [np.sign(variational_classifier(var, x=x)) for x in X]
    acc = accuracy(Y, predictions)
    print("Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} ".format(it + 1, cost(var, X, Y), acc))