# testRaw.py — 94 lines (89 loc) · 4.8 KB
# (GitHub page chrome and the line-number gutter were removed from this scraped copy.)
import torch
import time
from torch.utils.data import DataLoader, TensorDataset
from torch import nn
from pybci import PyBCI
import numpy as np
num_chs = 3 # 8 channels are created in the PseudoLSLGenerator, but we drop 5 to save compute (real-time CNN can be computationally heavy!)
sum_samps = 125 # sample rate is 250 in the PseudoLSLGenerator, so 125 samples is half a second per window
num_classes = 4 # number of different triggers (can include baseline) sent; defines whether we use softmax or binary output
class ConvNet(nn.Module):
    """Two-layer 1D CNN classifier for multichannel time-series epochs.

    Expects input of shape (batch, num_channels, num_samples) and returns
    raw class logits of shape (batch, num_classes).
    """
    def __init__(self, num_channels, num_samples, num_classes):
        super(ConvNet, self).__init__()
        # kernel_size=5 with padding=2 leaves the temporal length unchanged
        self.conv1 = nn.Conv1d(num_channels, 64, kernel_size=5, stride=1, padding=2)
        self.relu = nn.ReLU()
        self.pool = nn.MaxPool1d(kernel_size=2)
        self.conv2 = nn.Conv1d(64, 128, kernel_size=5, stride=1, padding=2)
        # two halving pools leave floor(num_samples/4) time steps of 128 filters;
        # adjust this if the pooling/stride configuration above changes
        self.fc = nn.Linear(int(num_samples/2/2)*128, num_classes)
    def forward(self, x):
        hidden = self.pool(self.relu(self.conv1(x)))
        hidden = self.pool(self.relu(self.conv2(hidden)))
        flat = hidden.reshape(hidden.size(0), -1)
        return self.fc(flat)
def PyTorchModel(x_train, x_test, y_train, y_test ):
    """Train a fresh ConvNet on the training epochs and score it on the test split.

    Builds the model from the module-level num_chs/sum_samps/num_classes settings,
    trains for 10 epochs with Adam + cross-entropy, then evaluates test accuracy.
    Returns (accuracy, model) — PyBCI's torchModel hook requires both.
    """
    net = ConvNet(num_chs, sum_samps, num_classes)
    net.train()
    loss_fn = nn.CrossEntropyLoss()
    opt = torch.optim.Adam(net.parameters(), lr=0.001)
    dataset = TensorDataset(torch.Tensor(x_train), torch.Tensor(y_train).long())
    loader = DataLoader(dataset=dataset, batch_size=32, shuffle=True)
    for _ in range(10):  # 10 training epochs
        for batch_x, batch_y in loader:
            opt.zero_grad()
            batch_loss = loss_fn(net(batch_x), batch_y)
            batch_loss.backward()
            opt.step()
    net.eval()
    with torch.no_grad():
        logits = net(torch.Tensor(x_test))
        _, predicted = torch.max(logits.data, 1)
        hits = (predicted == torch.Tensor(y_test).long()).sum().item()
        accuracy = hits / len(y_test)
    return accuracy, net  # must return accuracy and model for pytorch!
class RawDecode():
    """Pass-through feature extractor: hands raw (channels x samples) epoch data to the classifier."""
    desired_length = sum_samps
    def ProcessFeatures(self, epochData, sr, target):
        """Transpose the epoch and pad/truncate it to desired_length samples per channel."""
        data = epochData.T
        # Windows may differ in size due to timestamp variance on LSL;
        # a configured length of 0 means "lock to the first window seen".
        if self.desired_length == 0:
            self.desired_length = data.shape[1]
        if data.shape[1] != self.desired_length:
            data = np.resize(data, (data.shape[0], self.desired_length))
        return data
dropchs = list(range(3, 8))  # drop the last 5 of the 8 generated channels to save on compute time
streamChsDropDict = {"PyBCIPseudoDataStream": dropchs}
streamCustomFeatureExtract = {"PyBCIPseudoDataStream": RawDecode()}  # we select the pseudo LSL generator example stream
if __name__ == '__main__': # Note: this line is needed when calling pseudoDevice as by default runs in a multiprocessed operation
    # Build the BCI with raw-feature extraction and the custom PyTorch training hook;
    # createPseudoDevice=True spins up the synthetic LSL marker/data streams.
    bci = PyBCI(minimumEpochsRequired = 4, createPseudoDevice=True, streamCustomFeatureExtract=streamCustomFeatureExtract, torchModel = PyTorchModel,streamChsDropDict=streamChsDropDict)#, loggingLevel = Logger.TIMING)
    while not bci.connected: # check to see if lsl marker and datastream are available
        bci.Connect()
        time.sleep(1)
    bci.TrainMode() # now both marker and datastreams available start training on received epochs
    accuracy = 0
    try:
        # Training phase: poll received-marker counts until every marker type
        # has enough epochs, then switch to test mode.
        while(True):
            currentMarkers = bci.ReceivedMarkerCount() # check to see how many received epochs, if markers sent to close together will be ignored till done processing
            time.sleep(0.5) # wait for marker updates
            print("Markers received: " + str(currentMarkers) +" Accuracy: " + str(round(accuracy,2)), end="         \r")
            if len(currentMarkers) > 1: # check there is more then one marker type received
                if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired:
                    classInfo = bci.CurrentClassifierInfo() # hangs if called too early
                    accuracy = classInfo["accuracy"]
                # require 10 extra epochs beyond the minimum before trusting the model enough to test
                if min([currentMarkers[key][1] for key in currentMarkers]) > bci.minimumEpochsRequired+10:
                    bci.TestMode()
                    break
        # Test phase: continuously report the classifier's current marker guess.
        # NOTE(review): currentMarkers is not refreshed inside this loop — the
        # guess label is looked up in the snapshot taken during training.
        while True:
            markerGuess = bci.CurrentClassifierMarkerGuess() # when in test mode only y_pred returned
            guess = [key for key, value in currentMarkers.items() if value[0] == markerGuess]
            print("Current marker estimation: " + str(guess), end="           \r")
            time.sleep(0.2)
    except KeyboardInterrupt: # allow user to break while loop
        print("\nLoop interrupted by user.")
        bci.StopThreads()