forked from docker-archive/classicswarm
-
Notifications
You must be signed in to change notification settings - Fork 0
/
events.go
144 lines (121 loc) · 3.33 KB
/
events.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
package api
import (
"encoding/json"
"fmt"
"io"
"net/http"
"sync"
"time"
"github.com/docker/swarm/cluster"
)
// EventsHandler broadcasts events to multiple client listeners.
//
// Each listener is keyed by its client's remote address. The two maps
// are kept in lockstep (same key set): ws holds the destination writer
// for the event stream, cs holds a signal channel that is closed to
// wake a pending Wait and tear the listener down. The embedded RWMutex
// guards both maps.
type eventsHandler struct {
	sync.RWMutex
	// ws maps remote address -> writer the event stream is written to
	// (typically a streaming http.ResponseWriter).
	ws map[string]io.Writer
	// cs maps remote address -> teardown signal channel; closed by
	// Handle on write failure or by Wait on `--until` timeout.
	cs map[string]chan struct{}
}
// newEventsHandler returns an eventsHandler ready for use, with empty
// writer and signal-channel registries.
func newEventsHandler() *eventsHandler {
	eh := &eventsHandler{}
	eh.ws = make(map[string]io.Writer)
	eh.cs = make(map[string]chan struct{})
	return eh
}
// Add registers the writer for remoteAddr and creates the signal
// channel that will later be used to tear this listener down.
func (eh *eventsHandler) Add(remoteAddr string, w io.Writer) {
	eh.Lock()
	defer eh.Unlock()

	eh.ws[remoteAddr] = w
	eh.cs[remoteAddr] = make(chan struct{})
}
// Wait blocks until the listener registered for remoteAddr is torn
// down: its signal channel is closed (by Handle after a failed write),
// the HTTP client disconnects, or the `--until` deadline (a Unix
// timestamp; <= 0 means no deadline) passes. The listener's entries
// are removed from the handler before returning.
func (eh *eventsHandler) Wait(remoteAddr string, until int64) {
	// A nil channel blocks forever in a select, so leave timeout nil
	// unless a deadline was requested. The previous NewTimer(0)+Stop
	// trick raced: the zero-duration timer could fire before Stop,
	// leaving a value buffered in timer.C and making the select
	// return (and close the channel) immediately.
	var timeout <-chan time.Time
	if until > 0 {
		timer := time.NewTimer(time.Unix(until, 0).Sub(time.Now()))
		defer timer.Stop()
		timeout = timer.C
	}

	// Snapshot the writer and signal channel under the read lock; the
	// maps are mutated concurrently by Add, Handle and cleanupHandler.
	eh.RLock()
	w := eh.ws[remoteAddr]
	c := eh.cs[remoteAddr]
	eh.RUnlock()

	// subscribe to http client close event
	var closeNotify <-chan bool
	if closeNotifier, ok := w.(http.CloseNotifier); ok {
		closeNotify = closeNotifier.CloseNotify()
	}

	select {
	case <-c:
		// closed by Handle after an unsuccessful write
	case <-closeNotify:
		// client hung up
	case <-timeout: // `--until` timeout
		// Guard against a listener that was never Add'ed (nil channel):
		// closing nil would panic.
		if c != nil {
			close(c)
		}
	}
	eh.cleanupHandler(remoteAddr)
}
// cleanupHandler removes all bookkeeping for remoteAddr.
func (eh *eventsHandler) cleanupHandler(remoteAddr string) {
	eh.Lock()
	defer eh.Unlock()

	// both maps are kept in lockstep; drop the entry from each
	delete(eh.ws, remoteAddr)
	delete(eh.cs, remoteAddr)
}
// Handle writes information about a cluster event to each remote address in the cluster that has been added to the events handler.
// After an unsuccessful write to a remote address, the associated channel is closed and the address is removed from the events handler.
//
// The event is temporarily mutated (From and node.* actor attributes)
// while marshaling; From is restored before returning.
func (eh *eventsHandler) Handle(e *cluster.Event) error {
	// remove this hack once 1.10 is broadly adopted
	from := e.From
	e.From = e.From + " node:" + e.Engine.Name

	// Attributes will be nil if the event was sent by engine < 1.10
	if e.Actor.Attributes == nil {
		e.Actor.Attributes = make(map[string]string)
	}
	e.Actor.Attributes["node.name"] = e.Engine.Name
	e.Actor.Attributes["node.id"] = e.Engine.ID
	e.Actor.Attributes["node.addr"] = e.Engine.Addr
	e.Actor.Attributes["node.ip"] = e.Engine.IP

	data, err := json.Marshal(e)
	// restore the caller's From: the event may be shared/reused
	e.From = from
	if err != nil {
		return err
	}

	// remove the node field once 1.10 is broadly adopted & interlock stop relying on it
	node := fmt.Sprintf(",%q:{%q:%q,%q:%q,%q:%q,%q:%q}}",
		"node",
		"Name", e.Engine.Name,
		"Id", e.Engine.ID,
		"Addr", e.Engine.Addr,
		"Ip", e.Engine.IP,
	)

	// insert Node field: splice the legacy "node" object in before the
	// closing brace of the marshaled JSON
	data = data[:len(data)-1]
	data = append(data, []byte(node)...)

	var failed []string

	eh.RLock()
	for key, w := range eh.ws {
		// Write the raw payload. The previous fmt.Fprintf(w, string(data))
		// treated the JSON as a *format string*, so any '%' in an event
		// (image names, labels, URLs) corrupted the stream.
		if _, err := w.Write(data); err != nil {
			// collect them to handle later under Lock
			failed = append(failed, key)
			continue
		}
		if f, ok := w.(http.Flusher); ok {
			f.Flush()
		}
	}
	eh.RUnlock()

	// Only take the write lock when there is cleanup to do.
	if len(failed) > 0 {
		eh.Lock()
		for _, key := range failed {
			// closing the channel wakes the listener's Wait, which
			// removes the ws/cs entries via cleanupHandler
			if ch, ok := eh.cs[key]; ok {
				close(ch)
			}
			delete(eh.cs, key)
		}
		eh.Unlock()
	}

	return nil
}
// Size reports how many remote listeners are currently registered.
func (eh *eventsHandler) Size() int {
	eh.RLock()
	n := len(eh.ws)
	eh.RUnlock()
	return n
}