transformer.py
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


class CausalSelfAttention(nn.Module):
    """
    A vanilla multi-head masked self-attention layer with a projection at the end.
    It is possible to use torch.nn.MultiheadAttention here but I am including an
    explicit implementation here to show that there is nothing too scary here.
    """

    def __init__(self, H):
        super().__init__()
        assert H.bert_n_emb % H.bert_n_head == 0
        # key, query, value projections for all heads
        self.key = nn.Linear(H.bert_n_emb, H.bert_n_emb)
        self.query = nn.Linear(H.bert_n_emb, H.bert_n_emb)
        self.value = nn.Linear(H.bert_n_emb, H.bert_n_emb)
        # regularization
        self.attn_drop = nn.Dropout(H.attn_pdrop)
        self.resid_drop = nn.Dropout(H.resid_pdrop)
        # output projection
        self.proj = nn.Linear(H.bert_n_emb, H.bert_n_emb)
        self.n_head = H.bert_n_head
        self.causal = True if H.sampler == 'autoregressive' else False
        if self.causal:
            block_size = np.prod(H.latent_shape)
            mask = torch.tril(torch.ones(block_size, block_size))
            self.register_buffer("mask", mask.view(1, 1, block_size, block_size))

    def forward(self, x, layer_past=None):
        B, T, C = x.size()

        # calculate query, key, values for all heads in batch and move head forward to be the batch dim
        k = self.key(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        q = self.query(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)
        v = self.value(x).view(B, T, self.n_head, C // self.n_head).transpose(1, 2)  # (B, nh, T, hs)

        present = torch.stack((k, v))
        if self.causal and layer_past is not None:
            past_key, past_value = layer_past
            k = torch.cat((past_key, k), dim=-2)
            v = torch.cat((past_value, v), dim=-2)

        # causal self-attention; Self-attend: (B, nh, T, hs) x (B, nh, hs, T) -> (B, nh, T, T)
        att = (q @ k.transpose(-2, -1)) * (1.0 / math.sqrt(k.size(-1)))
        if self.causal and layer_past is None:
            att = att.masked_fill(self.mask[:, :, :T, :T] == 0, float('-inf'))

        att = F.softmax(att, dim=-1)
        att = self.attn_drop(att)
        y = att @ v  # (B, nh, T, T) x (B, nh, T, hs) -> (B, nh, T, hs)
        # re-assemble all head outputs side by side
        y = y.transpose(1, 2).contiguous().view(B, T, C)

        # output projection
        y = self.resid_drop(self.proj(y))
        return y, present
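

# Shape reference for CausalSelfAttention (a sketch derived from the code above;
# nh = H.bert_n_head, hs = H.bert_n_emb // H.bert_n_head):
#   x:       (B, T, C)          input embeddings
#   q, k, v: (B, nh, T, hs)     per-head projections after reshape/transpose
#   att:     (B, nh, T, T)      scaled dot-product scores (causally masked when H.sampler == 'autoregressive')
#   y:       (B, T, C)          heads re-assembled and passed through the output projection
#   present: (2, B, nh, T, hs)  stacked (k, v), returned for optional key/value caching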


class Block(nn.Module):
    """ an unassuming Transformer block """

    def __init__(self, H):
        super().__init__()
        self.ln1 = nn.LayerNorm(H.bert_n_emb)
        self.ln2 = nn.LayerNorm(H.bert_n_emb)
        self.attn = CausalSelfAttention(H)
        self.mlp = nn.Sequential(
            nn.Linear(H.bert_n_emb, 4 * H.bert_n_emb),
            nn.GELU(),  # nice
            nn.Linear(4 * H.bert_n_emb, H.bert_n_emb),
            nn.Dropout(H.resid_pdrop),
        )

    def forward(self, x, layer_past=None, return_present=False):
        attn, present = self.attn(self.ln1(x), layer_past)
        x = x + attn
        x = x + self.mlp(self.ln2(x))

        if layer_past is not None or return_present:
            return x, present
        return x
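

# Note on layer_past / present (a sketch of incremental decoding in the autoregressive
# configuration; the Transformer class below recomputes the full sequence and does not
# use this cache itself). `present` holds only the keys/values produced by the current
# call, so a caller has to grow the cache across steps, e.g.
#
#     _, past = block(x_prefix, return_present=True)   # cache K/V for the prefix
#     x_out, new = block(x_next, layer_past=past)      # new token attends to cached K/V
#     past = torch.cat((past, new), dim=-2)            # append along the time dimension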


class Transformer(nn.Module):
    """ the full GPT language model, with a context size of block_size """

    def __init__(self, H):
        super().__init__()

        self.vocab_size = H.codebook_size + 1
        self.n_embd = H.bert_n_emb
        self.block_size = H.block_size
        self.n_layers = H.bert_n_layers
        self.codebook_size = H.codebook_size
        self.causal = H.sampler == 'autoregressive'
        if self.causal:
            self.vocab_size = H.codebook_size

        self.tok_emb = nn.Embedding(self.vocab_size, self.n_embd)
        self.pos_emb = nn.Parameter(
            torch.zeros(1, self.block_size, self.n_embd))
        self.start_tok = nn.Parameter(torch.zeros(1, 1, self.n_embd))
        self.drop = nn.Dropout(H.embd_pdrop)

        # transformer
        self.blocks = nn.Sequential(*[Block(H) for _ in range(self.n_layers)])
        # decoder head
        self.ln_f = nn.LayerNorm(self.n_embd)
        self.head = nn.Linear(self.n_embd, self.codebook_size, bias=False)

    def get_block_size(self):
        return self.block_size

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def forward(self, idx, t=None):
        # each index maps to a (learnable) vector
        token_embeddings = self.tok_emb(idx)

        if self.causal:
            token_embeddings = torch.cat(
                (self.start_tok.repeat(token_embeddings.size(0), 1, 1), token_embeddings),
                dim=1
            )

        t = token_embeddings.shape[1]
        assert t <= self.block_size, "Cannot forward, model block size is exhausted."
        # each position maps to a (learnable) vector
        position_embeddings = self.pos_emb[:, :t, :]

        x = token_embeddings + position_embeddings
        x = self.drop(x)
        for block in self.blocks:
            x = block(x)
        x = self.ln_f(x)
        logits = self.head(x)

        return logits
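

if __name__ == "__main__":
    # Minimal smoke test (a sketch, not part of the original module). It assumes H is an
    # attribute-style namespace; the field names below mirror the ones read above, but the
    # values are illustrative only. With H.sampler != 'autoregressive' the model attends
    # bidirectionally and the vocabulary includes one extra (mask) token; with
    # H.sampler == 'autoregressive' a start token is prepended, so the input should then
    # hold at most block_size - 1 indices.
    from types import SimpleNamespace

    H = SimpleNamespace(
        sampler='absorbing',   # any value other than 'autoregressive' disables causal masking
        codebook_size=1024,
        bert_n_emb=256,
        bert_n_head=8,
        bert_n_layers=4,
        block_size=256,
        latent_shape=[1, 16, 16],
        attn_pdrop=0.1,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
    )
    model = Transformer(H)
    idx = torch.randint(0, H.codebook_size, (2, H.block_size))  # batch of token indices
    logits = model(idx)
    print(logits.shape)  # expected: torch.Size([2, 256, 1024])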