n-step semi-gradient off-policy sarsa.kt
@file:Suppress("NAME_SHADOWING")
package lab.mars.rl.algo.func_approx.off_policy
import lab.mars.rl.algo.ntd.MAX_N
import lab.mars.rl.model.*
import lab.mars.rl.util.buf.newBuf
import lab.mars.rl.util.log.debug
import lab.mars.rl.util.math.Π
import lab.mars.rl.util.math.Σ
import lab.mars.rl.util.matrix.times
import org.apache.commons.math3.util.FastMath.min
import org.apache.commons.math3.util.FastMath.pow
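/**
 * n-step semi-gradient off-policy Sarsa for episodic tasks: the tabular
 * off-policy n-step Sarsa update (cf. Sutton & Barto, §7.3) with the
 * action-value table replaced by a semi-gradient step on the weights of [q].
 *
 * @param q parameterized action-value function with weights `w` and gradient `∇`
 * @param π target policy being learned
 * @param b behavior policy used to generate actions
 * @param n number of bootstrapping steps
 * @param α step size for the weight update
 * @param episodes number of episodes to run
 * @param episodeListener called after each episode with (episode, steps)
 */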
fun <E> MDP.`n-step semi-gradient off-policy sarsa episodic`(
    q: ApproximateFunction<E>, π: Policy, b: Policy,
    n: Int,
    α: Double,
    episodes: Int,
    episodeListener: (Int, Int) -> Unit = { _, _ -> }) {
  val _R = newBuf<Double>(min(n, MAX_N))
  val _S = newBuf<State>(min(n, MAX_N))
  val _A = newBuf<Action<State>>(min(n, MAX_N))
  for (episode in 1..episodes) {
    log.debug { "$episode/$episodes" }
    var step = 0
    var n = n
    var T = Int.MAX_VALUE
    var t = 0
    var s = started()
    var a = b(s)
    _R.clear(); _R.append(0.0)
    _S.clear(); _S.append(s)
    _A.clear(); _A.append(a)
    do {
      step++
      if (t >= n) {
        //slide the window: drop R, S, A at time t-n so that index 0 is always time τ
        _R.removeFirst()
        _S.removeFirst()
        _A.removeFirst()
      }
      if (t < T) {
        val (s_next, reward) = a.sample()
        _R.append(reward)
        _S.append(s_next)
        s = s_next
        if (s.isTerminal) {
          T = t + 1
          val τ = t - n + 1
          if (τ < 0) n = T //the episode ended before n steps; shrink n to T so buffer offsets stay valid
        } else {
          a = b(s)
          _A.append(a)
        }
      }
      val τ = t - n + 1 //time whose estimate is being updated
      if (τ >= 0) {
        //importance sampling ratio ρ over steps τ+1..min(τ+n-1, T-1)
        val ρ = Π(1..min(n - 1, T - 1 - τ)) { π[_S[it], _A[it]] / b[_S[it], _A[it]] }
        //n-step return G_{τ:τ+n}, bootstrapping from q(S_{τ+n}, A_{τ+n}) unless the episode ends first
        var G = Σ(1..min(n, T - τ)) { pow(γ, it - 1) * _R[it] }
        if (τ + n < T) G += pow(γ, n) * q(_S[n], _A[n])
        //semi-gradient update of the weight vector
        q.w += α * ρ * (G - q(_S[0], _A[0])) * q.`∇`(_S[0], _A[0])
      }
      t++
    } while (τ < T - 1)
    log.debug { "n=$n,T=$T" }
    episodeListener(episode, step)
  }
}
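/**
 * Differential (average-reward) n-step semi-gradient off-policy Sarsa for
 * continuing tasks: rewards are measured relative to a running estimate of
 * the average reward, which is itself nudged by β times the TD error.
 *
 * @param q parameterized action-value function
 * @param π target policy
 * @param b behavior policy
 * @param n number of bootstrapping steps
 * @param α step size for the weight update
 * @param β step size for the average-reward estimate
 */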
fun <E> MDP.`n-step semi-gradient off-policy sarsa continuing`(
    q: ApproximateFunction<E>, π: Policy, b: Policy,
    n: Int,
    α: Double = 1.0, β: Double) {
  var average_reward = 0.0
  val _R = newBuf<Double>(min(n, MAX_N))
  val _S = newBuf<State>(min(n, MAX_N))
  val _A = newBuf<Action<State>>(min(n, MAX_N))
  var t = 0
  var s = started()
  var a = b(s)
  _R.clear(); _R.append(0.0)
  _S.clear(); _S.append(s)
  _A.clear(); _A.append(a)
  while (true) {
    if (t >= n) {
      //slide the window: index 0 is always time τ = t - n + 1
      _R.removeFirst()
      _S.removeFirst()
      _A.removeFirst()
    }
    val (s_next, reward) = a.sample()
    _R.append(reward)
    _S.append(s_next)
    s = s_next //advance the state so the next action is chosen from s_next, not the stale initial state
    a = b(s)
    _A.append(a)
    val τ = t - n + 1
    if (τ >= 0) {
      //importance sampling ratio ρ over steps τ+1..τ+n
      val ρ = Π(1..n) { π[_S[it], _A[it]] / b[_S[it], _A[it]] }
      //differential n-step TD error: rewards measured relative to the average-reward estimate
      val δ = Σ(1..n) { _R[it] - average_reward } + q(_S[n], _A[n]) - q(_S[0], _A[0])
      average_reward += β * δ
      q.w += α * ρ * δ * q.`∇`(_S[0], _A[0])
    }
    t++
  }
}
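/*
Sketch of how these functions might be invoked. The names `mdp`, `qFunc`,
`target`, and `behavior` are hypothetical stand-ins for whatever problem,
ApproximateFunction<E>, and policies the caller has constructed; they are not
defined in this file:

  mdp.`n-step semi-gradient off-policy sarsa episodic`(
      q = qFunc,      //parameterized action-value function over (state, action)
      π = target,     //target policy to evaluate/improve
      b = behavior,   //exploratory behavior policy
      n = 4,
      α = 0.1,
      episodes = 1000)

Because β follows the defaulted α in the parameter list, the continuing
variant must name it at the call site:

  mdp.`n-step semi-gradient off-policy sarsa continuing`(
      q = qFunc, π = target, b = behavior,
      n = 4, α = 0.1, β = 0.01)
*/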