/
NeuralNestAIPlayer.py
115 lines (93 loc) · 3.99 KB
/
NeuralNestAIPlayer.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
from NeuralNest import NeuralNest
from random import randint
import pygame
import pygame.surfarray
import numpy as np
import pandas
from TrainingData import TrainingData
from GamePlayerNetwork import GamePlayerNetwork
class NeuralNestAIPlayer:
    """Trains and runs a neural-network player for the NeuralNest game.

    Workflow: gather_data() records (screen, best-action) pairs while the
    built-in player runs, learn() trains a GamePlayerNetwork on saved
    training data, and play() runs the game with the trained network
    supplying the moves.
    """

    # Mode values checked by on_screen_update() and get_ai_action().
    LEARNING = 'learning'
    PLAYING = 'playing'

    def __init__(self):
        self.nnest = None          # active NeuralNest game instance
        self.surface_array = None  # scratch slot for a captured frame
        self.network = None        # trained GamePlayerNetwork (set by play())
        # Default to LEARNING so gather_data() records frames even when the
        # caller forgets to set a mode.  (The previous default, 'training',
        # matched neither constant and silently disabled data collection.)
        self.mode = self.LEARNING
        self.training_data = TrainingData()

    def get_neural_nest(self):
        """Return the current NeuralNest instance (None before a game starts)."""
        return self.nnest

    def _create_nest(self):
        """Build a NeuralNest with the standard 20x20 configuration.

        Shared by gather_data() and play() so both phases run an
        identically configured game.
        """
        return NeuralNest(observer=self,
                          window_width=800,
                          window_height=800,
                          surface_width=20,
                          surface_height=20,
                          drop_height=0,
                          drop_threshold=17,
                          basket_width=5,
                          min_speed=1,
                          max_speed=2,
                          egg_radius=1)

    def gather_data(self, FPS):
        """Run the game for 300 eggs, recording training frames, then save a CSV.

        FPS: frames-per-second the game loop should run at.
        """
        self.nnest = self._create_nest()
        self.nnest.FPS = FPS
        # Intercept pygame's screen update so we see every rendered frame.
        pygame.display.update = self.function_combine(pygame.display.update, self.on_screen_update)
        print("Loading game")
        caught, dropped = self.nnest.run(300)
        self.training_data.save_csv("one_thousand_run.csv")
        print("Game complete: caught={0} dropped={1}".format(caught, dropped))

    def function_combine(self, screen_update_func, our_intercepting_func):
        """Return a wrapper that calls screen_update_func, then our_intercepting_func."""
        def wrap(*args, **kwargs):
            # Call the intercepted update first so the screen buffer is
            # current before our hook reads it.
            screen_update_func(*args, **kwargs)
            our_intercepting_func()
        return wrap

    def on_screen_update(self):
        """Record (grayscale frame, best action) per frame while in LEARNING mode."""
        if self.mode == self.LEARNING:
            surface_array = self.nnest.display.get_surface_grayscale_array()
            assert(len(surface_array) > 0)
            best_action = self.nnest.get_best_player_action()
            self.training_data.append_training_data(surface_array, best_action)

    def get_ai_action(self):
        """Called by the game when it is time for a move.

        Returns the network's chosen action in PLAYING mode, else None.
        """
        if self.mode == self.PLAYING:
            surface_array = self.nnest.display.get_surface_grayscale_array()
            return self.network.get_player_action(surface_array)
        return None

    def caught(self):
        """Observer callback: an egg was caught.  Intentional no-op."""
        return

    def dropped(self):
        """Observer callback: an egg was dropped.  Intentional no-op."""
        return

    def learn(self):
        """Train a fresh network on the saved training data and persist it."""
        network = GamePlayerNetwork(20, 20)
        network.train("synthetic_training_data.txt")
        network.save_model("trained_model")
        network.display_training_results()
        network.plot_model()

    def play(self):
        """Load the trained model and run the game with the AI choosing moves."""
        self.network = GamePlayerNetwork(20, 20)
        self.network.load_model("trained_model")
        self.nnest = self._create_nest()
        self.nnest.FPS = 20
        # Route the game's action requests to the trained network.
        self.nnest.get_player_action = self.get_ai_action
        pygame.display.update = self.function_combine(pygame.display.update, self.on_screen_update)
        print("Loading game")
        caught, dropped = self.nnest.run(100)
        print("Game complete: caught={0} dropped={1}".format(caught, dropped))
if __name__ == "__main__":
    ai_player = NeuralNestAIPlayer()
    # To regenerate training data and retrain the model, uncomment:
    # ai_player.mode = NeuralNestAIPlayer.LEARNING
    # ai_player.gather_data(60)
    # ai_player.learn()
    # Use the class constant rather than the raw 'playing' string so the
    # mode always matches the value get_ai_action() compares against.
    ai_player.mode = NeuralNestAIPlayer.PLAYING
    ai_player.play()