/
ccnn.py
78 lines (63 loc) · 3.49 KB
/
ccnn.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
from typing import Tuple
import torch
import torch.nn as nn
class CCNN(nn.Module):
    r'''
    Continuous Convolutional Neural Network (CCNN). For more details, please refer to the following information.
    - Paper: Yang Y, Wu Q, Fu Y, et al. Continuous convolutional neural network with 3D input for EEG-based emotion recognition[C]//International Conference on Neural Information Processing. Springer, Cham, 2018: 433-443.
    - URL: https://link.springer.com/chapter/10.1007/978-3-030-04239-4_39
    - Related Project: https://github.com/ynulonger/DE_CNN
    Below is a recommended suite for use in emotion recognition tasks:
    .. code-block:: python
        dataset = DEAPDataset(io_path=f'./deap',
                    root_path='./data_preprocessed_python',
                    offline_transform=transforms.Compose([
                        transforms.BandDifferentialEntropy(),
                        transforms.ToGrid(DEAP_CHANNEL_LOCATION_DICT)
                    ]),
                    online_transform=transforms.ToTensor(),
                    label_transform=transforms.Compose([
                        transforms.Select('valence'),
                        transforms.Binary(5.0),
                    ]))
        model = CCNN(num_classes=2, in_channels=4, grid_size=(9, 9))
    Args:
        in_channels (int): The feature dimension of each electrode. (default: :obj:`4`)
        grid_size (tuple): Spatial dimensions of grid-like EEG representation. (default: :obj:`(9, 9)`)
        num_classes (int): The number of classes to predict. (default: :obj:`2`)
        dropout (float): Probability of an element to be zeroed in the dropout layers. (default: :obj:`0.5`)
    '''
    def __init__(self, in_channels: int = 4, grid_size: Tuple[int, int] = (9, 9), num_classes: int = 2, dropout: float = 0.5):
        super(CCNN, self).__init__()
        self.in_channels = in_channels
        self.grid_size = grid_size
        self.num_classes = num_classes
        self.dropout = dropout
        # Each conv stage uses asymmetric zero padding (1, 2, 1, 2) with a 4x4 kernel and
        # stride 1 so the spatial size (grid_size) is preserved through all four stages.
        self.conv1 = nn.Sequential(nn.ZeroPad2d((1, 2, 1, 2)), nn.Conv2d(self.in_channels, 64, kernel_size=4, stride=1),
                                   nn.ReLU())
        self.conv2 = nn.Sequential(nn.ZeroPad2d((1, 2, 1, 2)), nn.Conv2d(64, 128, kernel_size=4, stride=1), nn.ReLU())
        self.conv3 = nn.Sequential(nn.ZeroPad2d((1, 2, 1, 2)), nn.Conv2d(128, 256, kernel_size=4, stride=1), nn.ReLU())
        self.conv4 = nn.Sequential(nn.ZeroPad2d((1, 2, 1, 2)), nn.Conv2d(256, 64, kernel_size=4, stride=1), nn.ReLU())
        self.lin1 = nn.Sequential(
            nn.Linear(self.grid_size[0] * self.grid_size[1] * 64, 1024),
            nn.SELU(),  # Not mentioned in paper
            # Fix: the input here is the flattened (batch, features) tensor, so element-wise
            # nn.Dropout is correct. nn.Dropout2d expects 4-D channel maps and is deprecated
            # (warns, will error) for 2-D input since PyTorch 1.12.
            nn.Dropout(self.dropout)
        )
        self.lin2 = nn.Linear(1024, self.num_classes)
    def feature_dim(self):
        # Flattened size after conv4: spatial dims are preserved, channels end at 64.
        return self.grid_size[0] * self.grid_size[1] * 64
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        r'''
        Args:
            x (torch.Tensor): EEG signal representation, the ideal input shape is :obj:`[n, 4, 9, 9]`. Here, :obj:`n` corresponds to the batch size, :obj:`4` corresponds to :obj:`in_channels`, and :obj:`(9, 9)` corresponds to :obj:`grid_size`.
        Returns:
            torch.Tensor[number of sample, number of classes]: the predicted probability that the samples belong to the classes.
        '''
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.conv4(x)
        x = x.flatten(start_dim=1)  # (n, 64, H, W) -> (n, 64*H*W)
        x = self.lin1(x)
        x = self.lin2(x)
        return x