# rl.py
from collections import namedtuple
from functools import cached_property  # stdlib (Python 3.8+) replaces the cached_property package

import numpy as np
import torch.nn.functional as F

class Environment(object):
    """Abstract environment interface; subclasses supply the dynamics."""

    @property
    def state(self):
        raise NotImplementedError

    @property
    def reward(self):
        raise NotImplementedError

    @property
    def is_terminal(self):
        raise NotImplementedError

    @property
    def exceed_max(self):
        raise NotImplementedError

    @property
    def current_raw_screen(self):
        raise NotImplementedError

    def receive_action(self, action):
        raise NotImplementedError

    def reset(self):
        raise NotImplementedError
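
# Illustrative only: a minimal concrete Environment, assuming a toy
# one-dimensional random walk. RandomWalkEnvironment, its grid size, and
# its reward scheme are hypothetical, not part of the original module.
class RandomWalkEnvironment(Environment):
    """Toy environment: step left/right on [0, size); reward at the right edge."""

    def __init__(self, size=10, max_steps=100):
        self.size = size
        self.max_steps = max_steps
        self.reset()

    @property
    def state(self):
        return self._position

    @property
    def reward(self):
        return 1.0 if self._position == self.size - 1 else 0.0

    @property
    def is_terminal(self):
        return self._position == self.size - 1 or self.exceed_max

    @property
    def exceed_max(self):
        return self._steps >= self.max_steps

    @property
    def current_raw_screen(self):
        # One-hot rendering of the position stands in for a pixel observation.
        screen = np.zeros(self.size, dtype=np.float32)
        screen[self._position] = 1.0
        return screen

    def receive_action(self, action):
        # Action 0 moves left, action 1 moves right, clipped to the grid.
        step = 1 if action == 1 else -1
        self._position = int(np.clip(self._position + step, 0, self.size - 1))
        self._steps += 1

    def reset(self):
        self._position = 0
        self._steps = 0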

class SoftmaxPolicy(object):
    """Abstract softmax policy class."""

    def compute_policy(self, state):
        raise NotImplementedError

    def logits2policy(self, logits):
        return SoftmaxPolicyOutput(logits)
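
# A minimal sketch of a concrete SoftmaxPolicy, assuming states arrive as
# fixed-size float tensors of shape (batch, state_size). LinearSoftmaxPolicy
# and its single linear layer are hypothetical, for illustration only.
import torch.nn as nn


class LinearSoftmaxPolicy(SoftmaxPolicy, nn.Module):
    def __init__(self, state_size, n_actions):
        nn.Module.__init__(self)
        self.fc = nn.Linear(state_size, n_actions)

    def compute_policy(self, state):
        # Map a batch of states to logits, then wrap them for sampling.
        logits = self.fc(state)
        return self.logits2policy(logits)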

class SoftmaxPolicyOutput(object):
    """Wraps a batch of action logits and lazily derives quantities from them."""

    def __init__(self, logits):
        self.logits = logits

    @cached_property
    def most_probable_actions(self):
        # Greedy (argmax) action per batch element.
        return np.argmax(self.probs.detach().numpy(), axis=1)

    @cached_property
    def probs(self):
        # Explicit dim: implicit-dim softmax is deprecated in PyTorch.
        return F.softmax(self.logits, dim=1)

    @cached_property
    def log_probs(self):
        return F.log_softmax(self.logits, dim=1)

    @cached_property
    def action_indices_var(self):
        # Sample one action per batch element; detach so sampling does not
        # enter the autograd graph.
        return self.probs.multinomial(1).detach()

    @cached_property
    def action_indices(self):
        return self.action_indices_var.numpy().squeeze(1)

    @cached_property
    def sampled_actions_log_probs(self):
        # Log-probabilities of the sampled actions, shape (batch, 1).
        return self.log_probs.gather(1, self.action_indices_var)

    @cached_property
    def entropy(self):
        # Per-example entropy of the action distribution.
        return -(self.probs * self.log_probs).sum(1)

EvalResult = namedtuple('EvalResult', ('reward', 'duration'))
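
# Usage sketch (hypothetical): sample actions from a batch of logits and
# form an entropy-regularized policy-gradient surrogate. The random logits,
# placeholder returns, and 0.01 entropy coefficient are illustrative only.
if __name__ == '__main__':
    import torch

    logits = torch.randn(4, 3, requires_grad=True)  # 4 states, 3 actions
    out = SoftmaxPolicyOutput(logits)
    print('sampled actions:', out.action_indices)
    print('greedy actions: ', out.most_probable_actions)

    # REINFORCE-style surrogate loss with an entropy bonus.
    returns = torch.ones(4, 1)  # placeholder returns
    loss = -(out.sampled_actions_log_probs * returns).mean() - 0.01 * out.entropy.mean()
    loss.backward()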