-
-
Notifications
You must be signed in to change notification settings - Fork 282
/
player_management.go
219 lines (189 loc) · 6.83 KB
/
player_management.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
package repl
import (
"fmt"
"strings"
"time"
log "alda.io/client/logging"
"alda.io/client/system"
"alda.io/client/transmitter"
"alda.io/client/util"
)
// How long to wait for an available player process before giving up.
const findPlayerTimeout = 20 * time.Second

// Minimum time between attempts to (re)fill the player pool in the
// `managePlayers` loop.
const playerPoolFillInterval = 10 * time.Second

// How long to wait for a `/ping` transmission to succeed before declaring the
// player process unreachable.
const pingTimeout = 5 * time.Second

// Minimum time between `/ping` messages sent to the current player process.
const pingInterval = 1 * time.Second
// findAvailablePlayer polls the system for an available player process,
// retrying until one is found or `findPlayerTimeout` elapses.
func findAvailablePlayer() (system.PlayerState, error) {
	var found system.PlayerState

	// Capture the discovered player state as a side effect of a successful
	// poll; util.Await retries the poll function until it returns nil.
	poll := func() error {
		state, err := system.FindAvailablePlayer()
		if err == nil {
			found = state
		}
		return err
	}

	if err := util.Await(poll, findPlayerTimeout); err != nil {
		return system.PlayerState{}, err
	}

	return found, nil
}
// transmitter returns an OSCTransmitter aimed at the server's current player
// process, or an error if the server has no player to talk to.
func (server *Server) transmitter() (transmitter.OSCTransmitter, error) {
	if server.hasPlayer() {
		return transmitter.OSCTransmitter{Port: server.player.Port}, nil
	}

	return transmitter.OSCTransmitter{},
		fmt.Errorf("no player process is available")
}
// Player management happens asynchronously (see the loop in `managePlayers`),
// so at any given moment, it is probable, but not 100% certain, that a player
// process will be available. This function handles the boilerplate of waiting
// for a player process to be available, constructing an OSCTransmitter that
// will transmit to that player's port, and then running `execute`, a function
// that uses the OSCTransmitter.
func (server *Server) withTransmitter(
	execute func(transmitter.OSCTransmitter) error,
) error {
	// NB: deliberately not named `transmitter`, which would shadow the imported
	// `transmitter` package for the rest of the function body.
	var osc transmitter.OSCTransmitter

	if err := util.Await(
		func() error {
			oe, err := server.transmitter()
			if err != nil {
				return err
			}
			osc = oe
			return nil
		},
		findPlayerTimeout,
	); err != nil {
		return err
	}

	return execute(osc)
}
// Boilerplate to overcome the slight awkwardness of Go's zero value semantics
// for structs. We can't set `server.player` to nil because a struct can't be
// nil, so the best we can do is set it to an empty struct
// (`system.PlayerState{}`), which means all the struct fields have zero values
// (ID="", Port=0, etc.)
//
// For practical purposes, if Port is 0, then we can be reasonably certain that
// the server doesn't have a player to talk to.
func (server *Server) hasPlayer() bool {
	// A zero port is the sentinel for "no player" (see comment above).
	const noPlayerPort = 0
	return server.player.Port != noPlayerPort
}
// The `managePlayers` loop regularly checks to see if the player process that
// the server is using is still reachable. If the player process ever disappears
// or becomes unreachable, the `managePlayers` loop recovers by finding another
// player process to replace it.
//
// To signal that part of the loop, we "unset" `server.player` by setting it to
// the zero value (`system.PlayerState{}`). At that point, `server.hasPlayer()`
// will return false, and the player process will be replaced and
// `server.player` will be set to the current state of the new player process.
func (server *Server) unsetPlayer() {
	// Assign the zero value; `hasPlayer` treats Port == 0 as "no player".
	var none system.PlayerState
	server.player = none
}
// The server has two responsibilities when it comes to managing player
// processes:
//
// 1. Ensuring that the "player pool" is full, i.e. that there is always a fresh
// player process available to use if needed, e.g. if the one that the server
// is using falls over / becomes unavailable.
//
// 2. Ensuring that there is one specific player process available for the
// server to use, and that that process remains available for as long as the
// server needs to use it. The server does this by sending a `/ping` message
// to the player at regular intervals. If the player becomes unresponsive,
// the server is responsible for recovering by switching to use another
// player process.
//
// This function loops forever and is intended to be run in its own goroutine.
func (server *Server) managePlayers() {
	// Initialize both timestamps to the epoch so that the first loop iteration
	// immediately fills the pool and pings the player.
	playerPoolLastFilled := time.Unix(0, 0)
	lastPing := time.Unix(0, 0)
	for {
		now := time.Now()
		// Fill the player pool.
		if now.Sub(playerPoolLastFilled) > playerPoolFillInterval {
			if err := system.FillPlayerPool(); err != nil {
				log.Warn().Err(err).Msg("Failed to fill player pool.")
			} else {
				log.Debug().Msg("Filled player pool.")
			}
			playerPoolLastFilled = now
		}
		// If the server already has a player process that it's using, fetch updated
		// state information about that player process.
		if server.hasPlayer() {
			updatedState, err := system.FindPlayerByID(server.player.ID)
			// FIXME: We are brittly depending on the verbiage in the error messages
			// returned by `system.FindPlayerByID`.
			//
			// TODO: Maybe UserFacingErrors could have an optional error code that we
			// can depend on here?
			if err == nil {
				server.player = updatedState
			} else if strings.HasPrefix(err.Error(), "No player was found") {
				// If the state information tells us that the player process no longer
				// exists, then we forget about that player process and a new one will be
				// found to replace it shortly.
				log.Warn().
					Interface("player", server.player).
					Msg("Player process is offline.")
				server.unsetPlayer()
			} else {
				log.Warn().Err(err).Msg("Failed to update player state information.")
			}
		}
		if !server.hasPlayer() {
			player, err := findAvailablePlayer()
			if err != nil {
				log.Warn().Err(err).Msg("No player processes available.")
			} else {
				log.Info().Interface("player", player).Msg("Found player process.")
				server.player = player
			}
		}
		if server.hasPlayer() && now.Sub(lastPing) > pingInterval {
			// We can safely ignore `err` here because it should always be nil, given
			// that we just checked that `server.hasPlayer()` is true.
			//
			// NB: the local is named `osc` rather than `transmitter` to avoid
			// shadowing the imported `transmitter` package.
			osc, _ := server.transmitter()
			if err := util.Await(
				func() error { return osc.TransmitPingMessage() },
				pingTimeout,
			); err != nil {
				log.Warn().
					Err(err).
					Interface("player", server.player).
					Msg("Player process unreachable.")
				server.unsetPlayer()
			} else {
				log.Debug().
					Interface("player", server.player).
					Msg("Sent ping to player process.")
			}
			lastPing = now
		}
		// Throttle the loop so it doesn't spin the CPU.
		time.Sleep(100 * time.Millisecond)
	}
}
// shutdownPlayer tells the current player process to shut down (after waiting
// for one to be available, via `withTransmitter`), then un-sets
// `server.player`. Returns an error if no player becomes available in time or
// the shutdown message can't be transmitted.
func (server *Server) shutdownPlayer() error {
	if err := server.withTransmitter(
		// NB: the parameter is named `osc` rather than `transmitter` to avoid
		// shadowing the imported `transmitter` package inside the closure.
		func(osc transmitter.OSCTransmitter) error {
			return osc.TransmitShutdownMessage(0)
		},
	); err != nil {
		return err
	}
	// Now we un-set the player so that we don't accidentally keep trying to use
	// the same player process while it's in the process of shutting down. (This
	// might also speed up the process of the `managePlayers` loop discovering
	// that there is no player available, prompting it to find a replacement.)
	//
	// (Technically, there is still a potential race condition here where the
	// `managePlayers` loop un-sets the player before we get to this line, so
	// we double-unset it. But the risk is low because even if that happens, the
	// worst case scenario is that we would end up replacing the player twice, and
	// even if that happens, we would still end up with a player to use below.)
	server.unsetPlayer()
	return nil
}