-
Notifications
You must be signed in to change notification settings - Fork 0
/
sample.py
210 lines (192 loc) · 8.06 KB
/
sample.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
"""
Sample from a trained model
"""
import os
import pickle
from contextlib import nullcontext
import torch
import tiktoken
from model import GPTConfig, GPT
import chess
import chess.pgn
# -----------------------------------------------------------------------------
# Default sampling configuration. Every plain variable below can be overridden
# from the command line or a config file via configurator.py (see exec() call).
init_from = 'resume' # either 'resume' (from an out_dir) or a gpt2 variant (e.g. 'gpt2-xl')
out_dir = 'out' # ignored if init_from is not 'resume'
start = "\n" # or "<|endoftext|>" or etc. Can also specify a file, use as: "FILE:prompt.txt"
num_samples = 1 # number of samples to draw
max_new_tokens = 500 # number of tokens generated in each sample
temperature = 0.8 # 1.0 = no change, < 1.0 = less random, > 1.0 = more random, in predictions
top_k = 200 # retain only the top_k most likely tokens, clamp others to have 0 probability
seed = 1337 # RNG seed for reproducible sampling
device = 'cuda' # examples: 'cpu', 'cuda', 'cuda:0', 'cuda:1', etc.
dtype = 'bfloat16' if torch.cuda.is_available() and torch.cuda.is_bf16_supported() else 'float16' # 'float32' or 'bfloat16' or 'float16'
compile = False # use PyTorch 2.0 to compile the model to be faster
# NOTE: exec of a local file — deliberate nanoGPT-style config override, but do
# not point it at untrusted input
exec(open('configurator.py').read()) # overrides from command line or config file
# -----------------------------------------------------------------------------
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cuda.matmul.allow_tf32 = True # allow tf32 on matmul
torch.backends.cudnn.allow_tf32 = True # allow tf32 on cudnn
device_type = 'cuda' if 'cuda' in device else 'cpu' # for later use in torch.autocast
ptdtype = {'float32': torch.float32, 'bfloat16': torch.bfloat16, 'float16': torch.float16}[dtype]
# autocast context for mixed-precision inference; no-op on CPU
ctx = nullcontext() if device_type == 'cpu' else torch.amp.autocast(device_type=device_type, dtype=ptdtype)
# model
if init_from == 'resume':
    # init from a model saved in a specific directory
    ckpt_path = os.path.join(out_dir, 'ckpt.pt')
    checkpoint = torch.load(ckpt_path, map_location=device)
    gptconf = GPTConfig(**checkpoint['model_args'])
    model = GPT(gptconf)
    state_dict = checkpoint['model']
    # strip the prefix torch.compile() adds to parameter names when a compiled
    # model was checkpointed, so keys match the uncompiled module
    unwanted_prefix = '_orig_mod.'
    for k,v in list(state_dict.items()):
        if k.startswith(unwanted_prefix):
            state_dict[k[len(unwanted_prefix):]] = state_dict.pop(k)
    model.load_state_dict(state_dict)
elif init_from.startswith('gpt2'):
    # init from a given GPT-2 model
    model = GPT.from_pretrained(init_from, dict(dropout=0.0))

model.eval()
model.to(device)
if compile:
    model = torch.compile(model) # requires PyTorch 2.0 (optional)

# look for the meta pickle in case it is available in the dataset folder
load_meta = False
if init_from == 'resume' and 'config' in checkpoint and 'dataset' in checkpoint['config']: # older checkpoints might not have these...
    meta_path = os.path.join('data', checkpoint['config']['dataset'], 'meta.pkl')
    load_meta = os.path.exists(meta_path)
if load_meta:
    print(f"Loading meta from {meta_path}...")
    with open(meta_path, 'rb') as f:
        meta = pickle.load(f)
    # TODO want to make this more general to arbitrary encoder/decoder schemes
    stoi, itos = meta['stoi'], meta['itos']
    encode = lambda s: [stoi[c] for c in s]
    # NOTE(review): this decode returns a LIST of vocabulary items, not a joined
    # string — presumably each token is a whole chess move; confirm against
    # how the dataset's meta.pkl was built
    decode = lambda l: [itos[i] for i in l]
else:
    # ok let's assume gpt-2 encodings by default
    print("No meta.pkl found, assuming GPT-2 encodings...")
    enc = tiktoken.get_encoding("gpt2")
    encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
    decode = lambda l: enc.decode(l)
# If there is no need for the string-status legality report, this boolean
# variant can be used instead. (Translated from the original Finnish comment.)
def validate_move(moves: list):
    """Return True iff the final SAN move in *moves* is legal.

    The earlier entries of *moves* are assumed to be valid SAN moves already;
    they are replayed onto a fresh board to establish the current position.
    Boolean twin of validate_moves().
    """
    # An empty list has no move to validate; the original's bare except
    # turned the resulting IndexError into False — keep that behavior.
    if not moves:
        return False
    game = chess.pgn.Game()
    board = chess.Board()
    game.headers["Event"] = "Example"
    # replay the already-accepted history
    for move in moves[:-1]:
        uci = board.push_san(move).uci()
        mv = chess.Move.from_uci(uci)
        game.end().add_main_variation(mv)
    try:
        legal_moves = list(board.legal_moves)
        uci = board.push_san(moves[-1]).uci()
        mv = chess.Move.from_uci(uci)
    except ValueError:
        # Illegal, ambiguous and unparseable SAN all raise ValueError
        # subclasses in python-chess; catching ValueError instead of a bare
        # `except:` no longer hides unrelated bugs (NameError, KeyboardInterrupt).
        return False
    if mv in legal_moves:
        game.end().add_main_variation(mv)
        return True
    # explicit fall-through (original implicitly returned None)
    return False
def generate_move(moves: list):
    """Sample from the model until it produces a legal next move.

    *moves* is a list of SAN move strings; the accepted generated move is
    appended in place and the list is returned.
    """
    while True:
        # Encode SAN strings to token ids before building the model input.
        # The original passed the string list straight to torch.tensor, which
        # cannot work once moves holds SAN strings.
        ids = [stoi[m] for m in moves]
        x = torch.tensor(ids, dtype=torch.long, device=device)[None, ...]
        y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
        # first token generated after the prompt
        generated_token = y[0][len(ids)].item()
        generated_move = itos[generated_token]
        # BUG FIX: validate_moves returns status STRINGS; the original
        # `if check:` was truthy for "move not allowed" too, so illegal
        # generated moves were accepted. Compare against the exact status.
        if validate_moves(moves + [generated_move]) == "legal move":
            moves.append(generated_move)
            return moves
# ------------------------------------------
def validate_moves(moves: list):
    """Check that the final move in *moves* is legal in the position reached
    by replaying all earlier moves.

    Returns:
        "legal move" when the last SAN move is legal, "move not allowed"
        otherwise. Callers compare against these exact strings.
    """
    # no move to validate
    if not moves:
        return "move not allowed"
    game = chess.pgn.Game()
    board = chess.Board()
    game.headers["Event"] = "Example"
    # replay history; these moves are assumed to have been validated on
    # earlier calls (push_san would raise here otherwise)
    for move in moves[:-1]:
        uci = board.push_san(move).uci()
        mv = chess.Move.from_uci(uci)
        game.end().add_main_variation(mv)
    try:
        legal_moves = list(board.legal_moves)
        uci = board.push_san(moves[-1]).uci()
        mv = chess.Move.from_uci(uci)
    except ValueError:
        # IllegalMoveError, AmbiguousMoveError and InvalidMoveError all
        # subclass ValueError in python-chess. The original caught only the
        # first two, so unparseable input (e.g. "\n") crashed instead of
        # being rejected.
        return "move not allowed"
    if mv in legal_moves:
        game.end().add_main_variation(mv)
        return "legal move"
    # BUG FIX: the original implicitly returned None here, which matched
    # neither status string the callers test against
    return "move not allowed"
def generate_game(moves: list):
    """Validate the user's last SAN move, then extend *moves* with one legal
    model-generated move.

    Returns the extended list, or None when the user's move was rejected
    (a message is printed in that case).
    """
    last_move = moves[-1]
    if last_move not in stoi:
        # BUG FIX: the original used `pass` here and then fell through to
        # stoi[last_move], crashing with a KeyError on unknown moves
        print("Invalid move, try again")
        return None
    check = validate_moves(moves)
    if check == "move not allowed":
        # BUG FIX: `pass` after the print left the failure unsignalled;
        # return explicitly so the caller can re-prompt
        print("Move not allowed, try again")
        return None
    if check != "legal move":
        # defensive: validate_moves returned something unexpected
        return None
    while True:
        # encode SAN strings to token ids (original fed strings to torch.tensor)
        ids = [stoi[m] for m in moves]
        x = torch.tensor(ids, dtype=torch.long, device=device)[None, ...]
        y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
        generated_token = y[0][len(ids)].item()
        candidate = itos[generated_token]
        # keep sampling until the model emits a legal continuation
        if validate_moves(moves + [candidate]) == "legal move":
            moves.append(candidate)
            return moves
"""
game = chess.pgn.Game()
board = chess.Board()
game.headers["Event"] = "Example"
def add_move_to_game(gamemove, status):
if gamemove == "\n":
return "game over"
try:
legal_moves = list(board.legal_moves)
uci = board.push_san(gamemove).uci()
mv = chess.Move.from_uci(uci)
if mv in legal_moves:
game.end().add_main_variation(mv)
return "legal move"
except chess.IllegalMoveError:
return "move not allowed"
except chess.AmbiguousMoveError:
return "move not allowed"
all_ids = []
while True:
with torch.no_grad():
with ctx:
# Haetaan uusi siirto käyttäjältä
start = input("Next move: ")
if start == "":
break
elif exit_game:
break
start_moves = start.strip().split(", ")
for start in start_moves:
if start not in stoi.keys():
print("Invalid move, try again")
break
start_id = stoi[start]
check = add_move_to_game(start, "given")
if check == "move not allowed":
print("Move not allowed, try again")
break
elif check == "game over":
exit_game = True
break
elif check == "legal move":
all_ids.append(start_id)
x = (torch.tensor(all_ids, dtype=torch.long, device=device)[None, ...])
while True:
y = model.generate(x, max_new_tokens, temperature=temperature, top_k=top_k)
generated_token = y[0][len(all_ids)].item()
check = add_move_to_game(itos[generated_token], "generated")
if check == "legal move":
all_ids.append(generated_token)
# Tässä lähetetään pelin siirrot eli all_ids? Dekoodataan ensin
# all_moves = decode(all_ids)
break
"""