/
proc_async.go
223 lines (189 loc) · 4.75 KB
/
proc_async.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
package api_ws
import (
"sync"
"time"
"github.com/gorilla/websocket"
)
//------------------------------------------------------------
// WebSocket messages async processor
//------------------------------------------------------------
const (
	// Buffer sizes for the in/out, broadcast and failed channels.
	CHAN_MESSAGE_MAX           = 1024
	CHAN_MESSAGE_BROADCAST_MAX = 100
	CHAN_MESSAGE_FAILED_MAX    = 100
	// How long a failed ("lost") message is retained for retry before
	// the periodic cleanup discards it.
	LOST_MESSAGE_MAX_LIFE = 30 * time.Second
)
var (
	// Incoming requests and outgoing responses, one message each.
	_chanIn  = make(chan *Message, CHAN_MESSAGE_MAX)
	_chanOut = make(chan *Message, CHAN_MESSAGE_MAX)
	// Messages to be fanned out to every active connection.
	_chanBroadcast = make(chan *Message, CHAN_MESSAGE_BROADCAST_MAX)
	// Messages that could not be delivered/processed; feeds _failed.
	_chanFailed = make(chan *Message, CHAN_MESSAGE_FAILED_MAX)
	// Failed messages awaiting retry, keyed by agent id.
	// Guarded by _procFailedMutex.
	_failed          = map[string][]*Message{}
	_procFailedMutex = &sync.Mutex{}
)
// Starts every async processor goroutine at package load time.
func init() {
	// The go statement evaluates proc at spawn time, so sharing the
	// loop variable across iterations is safe here.
	for _, proc := range []func(){
		procChanIn,
		procChanOut,
		procChanBroadcast,
		procChanFailed,
		procFailedCleanup,
	} {
		go proc()
	}
}
// Processes incoming channel messages. One message at a time.
func procChanIn() {
var conn *Conn
for msg := range _chanIn {
msg.isProcessed = false
//fmt.Printf("chan IN ---> %v = %v\n", msg.agentId, string(msg.req))
// Attempt to get active connection
if conn = GetConn(msg.isAuthd, msg.agentId); conn == nil {
_chanFailed <- msg
continue
}
// Process message
// Must be done via goroutine to prevent channel block
// due to processing delay
go processAndRespond(conn, msg, _chanOut)
}
}
// Processes outgoing channel messages. One message at a time.
func procChanOut() {
var conn *Conn
for msg := range _chanOut {
//fmt.Printf("chan OUT <--- %v\n", string(msg.res))
// Attempt to get active connection
if conn = GetConn(msg.isAuthd, msg.agentId); conn == nil {
_chanFailed <- msg
continue
}
// Write response
conn.m.Lock()
err := conn.conn.WriteMessage(websocket.TextMessage, msg.res)
conn.m.Unlock()
if err != nil {
// Close connection on write error
_chanFailed <- msg
DeregisterConn(conn)
conn.conn.Close()
continue
}
}
}
// Fans each broadcast message out to every active connection in the
// matching pool (authenticated or public).
func procChanBroadcast() {
	for msg := range _chanBroadcast {
		// One shared sender closure per message.
		send := func(conn *Conn) {
			broadcaster(conn, msg)
		}
		if msg.isAuthd {
			applyToAuthd(send)
		} else {
			applyToPublic(send)
		}
	}
}
// Writes a broadcast message to a single connection. On write error the
// message is queued to the failed channel and the connection is
// deregistered and closed. Any panic during the write is swallowed.
func broadcaster(conn *Conn, msg *Message) {
	defer func() {
		// Swallow panics from writing to a dead connection.
		// NOTE(review): consider reporting the panic (err, stack) via
		// the panic handler instead of dropping it silently.
		_ = recover()
	}()
	conn.m.Lock()
	err := conn.conn.WriteMessage(websocket.TextMessage, msg.res)
	conn.m.Unlock()
	if err == nil {
		return
	}
	// Write failed: queue for retry and drop the connection.
	_chanFailed <- msg
	DeregisterConn(conn)
	conn.conn.Close()
}
// Listens on the failed channel, stamps each message with its failure
// time, and appends it to the per-agent failed-message store for later
// retry or cleanup.
func procChanFailed() {
	for msg := range _chanFailed {
		_procFailedMutex.Lock()
		t := time.Now()
		msg.failTime = &t
		// append on a missing key yields a fresh slice, so no
		// existence check is needed for first-time agents.
		_failed[msg.agentId] = append(_failed[msg.agentId], msg)
		_procFailedMutex.Unlock()
	}
}
// Runs forever, sweeping the failed-message store every 15 seconds and
// pruning per-agent entries whose messages have exceeded
// LOST_MESSAGE_MAX_LIFE. Agents left with no messages are removed.
func procFailedCleanup() {
	ticker := time.NewTicker(15 * time.Second)
	for range ticker.C {
		_procFailedMutex.Lock()
		now := time.Now()
		for agentId, agentFails := range _failed {
			// Cheap pre-check avoids rebuilding the slice on every
			// sweep when nothing has expired.
			if !anyExpiredFails(agentFails, now) {
				continue
			}
			if unexp := getUnexpiredFails(agentFails, now); len(unexp) > 0 {
				_failed[agentId] = unexp
			} else {
				delete(_failed, agentId)
			}
		}
		_procFailedMutex.Unlock()
	}
}
// Reports whether at least one message in msgs has exceeded
// LOST_MESSAGE_MAX_LIFE as of time t. A nil failTime counts as expired
// (safety guard; nil is not allowed here).
func anyExpiredFails(msgs []*Message, t time.Time) bool {
	for _, msg := range msgs {
		if msg.failTime == nil {
			return true // safe guard, nil not allowed
		}
		// Expired when failTime + max life is at or before t.
		// BUGFIX: the previous condition used After(t) unnegated, which
		// is true for messages still WITHIN their lifetime — the exact
		// opposite of "expired".
		if !msg.failTime.Add(LOST_MESSAGE_MAX_LIFE).After(t) {
			return true
		}
	}
	return false
}
// Returns the subset of msgs still within LOST_MESSAGE_MAX_LIFE as of
// time t. Expired messages, and any with a nil failTime (not allowed),
// are dropped.
func getUnexpiredFails(msgs []*Message, t time.Time) (unexp []*Message) {
	for _, msg := range msgs {
		if msg.failTime == nil {
			continue // safe guard, nil not allowed
		}
		// Keep only messages whose lifetime extends past t.
		// BUGFIX: the previous version skipped unexpired messages and
		// collected the expired ones — inverted relative to its name
		// and doc comment, causing cleanup to retain stale messages
		// and discard fresh ones.
		if msg.failTime.Add(LOST_MESSAGE_MAX_LIFE).After(t) {
			unexp = append(unexp, msg)
		}
	}
	return
}
// Re-queues all failed messages for one agent: already-processed
// messages go back to the outgoing channel, unprocessed ones to the
// incoming channel. The agent's entry is removed from the store first.
func retryFailedMessages(agentId string) {
	_procFailedMutex.Lock()
	defer _procFailedMutex.Unlock()
	agentFails, ok := _failed[agentId]
	if !ok {
		return
	}
	delete(_failed, agentId)
	for _, msg := range agentFails {
		// Clear the failure stamp before re-queueing.
		msg.failTime = nil
		if msg.isProcessed {
			_chanOut <- msg
		} else {
			_chanIn <- msg
		}
	}
}