// Copyright 2018-2020 Burak Sezer
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package transport

import (
"context"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/buraksezer/olric/internal/bufpool"
"github.com/buraksezer/olric/internal/flog"
"github.com/buraksezer/olric/internal/protocol"
multierror "github.com/hashicorp/go-multierror"
"github.com/pkg/errors"
)
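
// Connection status markers. A connection is marked busy while a request is
// being processed and idle otherwise; the graceful shutdown logic polls this
// state to decide when a connection can be closed safely.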
const (
idleConn uint32 = 0
busyConn uint32 = 1
)

// bufferPool is used to recycle memory while reading messages from the
// socket, which avoids allocating a fresh buffer for every request.
var bufferPool = bufpool.New()

// ErrInvalidMagic means that an OBP message was read from the TCP socket,
// but its magic number is not valid.
var ErrInvalidMagic = errors.New("invalid magic")

// ServerConfig is a composite type that bundles the server's configuration parameters.
type ServerConfig struct {
BindAddr string
BindPort int
KeepAlivePeriod time.Duration
	// GracefulPeriod is the amount of time to wait for busy connections
	// to become idle before they are closed forcibly during shutdown.
GracefulPeriod time.Duration
}

// Server implements a concurrent TCP server.
type Server struct {
config *ServerConfig
log *flog.Logger
wg sync.WaitGroup
listener net.Listener
dispatcher func(w, r protocol.EncodeDecoder)
StartCh chan struct{}
ctx context.Context
cancel context.CancelFunc
}

// NewServer creates and returns a new Server.
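//
// A minimal usage sketch; the address, port, durations, logger and
// dispatcher below are illustrative values, not defaults:
//
//	srv := NewServer(&ServerConfig{
//		BindAddr:        "127.0.0.1",
//		BindPort:        3320,
//		KeepAlivePeriod: 300 * time.Second,
//		GracefulPeriod:  5 * time.Second,
//	}, logger)
//	srv.SetDispatcher(func(w, r protocol.EncodeDecoder) {
//		// Evaluate the request r and write the response to w.
//	})
//	go func() { _ = srv.ListenAndServe() }()
//	<-srv.StartCh // closed once the server starts accepting connections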
func NewServer(c *ServerConfig, l *flog.Logger) *Server {
ctx, cancel := context.WithCancel(context.Background())
return &Server{
config: c,
log: l,
StartCh: make(chan struct{}),
ctx: ctx,
cancel: cancel,
}
}
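
// SetDispatcher sets the function that evaluates incoming requests and
// produces responses. It must be called before ListenAndServe, which
// returns an error if no dispatcher is set.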
func (s *Server) SetDispatcher(f func(w, r protocol.EncodeDecoder)) {
s.dispatcher = f
}
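
// controlConnLifeCycle closes the given connection when the server shuts
// down or the connection's main loop quits. If the connection is still busy,
// it polls connStatus and waits up to GracefulPeriod for the in-flight
// request to finish before closing the connection anyway.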
func (s *Server) controlConnLifeCycle(conn io.ReadWriteCloser, connStatus *uint32, done chan struct{}) {
// Control connection state and close it.
defer s.wg.Done()
select {
case <-s.ctx.Done():
// The server is down.
case <-done:
		// The main loop has quit. The TCP socket may have been closed, or a
		// protocol error may have occurred.
}
if atomic.LoadUint32(connStatus) != idleConn {
s.log.V(3).Printf("[DEBUG] Connection is busy, awaiting for %v", s.config.GracefulPeriod)
ticker := time.NewTicker(100 * time.Millisecond)
defer ticker.Stop()
		//
		// WARNING: This context was added to fix a deadlock that showed up while
		// an Olric node was being closed. Debugging that kind of error is pretty
		// hard. Normally SetDeadline should fix the problem, but it doesn't, and
		// it's unclear why. This time-bounded wait works well in practice.
		//
ctx, cancel := context.WithTimeout(context.Background(), s.config.GracefulPeriod)
defer cancel()
loop:
for {
select {
			// Wait for the current request to finish. When it marks the
			// connection as idle, break the loop.
case <-ticker.C:
if atomic.LoadUint32(connStatus) == idleConn {
s.log.V(3).Printf("[DEBUG] Connection is idle, closing")
break loop
}
case <-ctx.Done():
s.log.V(3).Printf("[DEBUG] Connection is still in-use. Aborting.")
break loop
}
}
}
// Close the connection and quit.
if err := conn.Close(); err != nil {
s.log.V(3).Printf("[DEBUG] Failed to close TCP connection: %v", err)
}
}
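
// closeStream closes the given stream message when the connection's main
// loop quits or the server shuts down.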
func (s *Server) closeStream(req *protocol.StreamMessage, done chan struct{}) {
defer s.wg.Done()
defer req.Close()
select {
case <-done:
case <-s.ctx.Done():
}
}

// processMessage reads a single request from the connection, dispatches it
// to the request handler and writes the response back.
func (s *Server) processMessage(conn io.ReadWriteCloser, connStatus *uint32, done chan struct{}) error {
buf := bufferPool.Get()
defer bufferPool.Put(buf)
header, err := protocol.ReadMessage(conn, buf)
if err != nil {
return err
}
var req protocol.EncodeDecoder
	switch header.Magic {
	case protocol.MagicDMapReq:
		req = protocol.NewDMapMessageFromRequest(buf)
	case protocol.MagicStreamReq:
		req = protocol.NewStreamMessageFromRequest(buf)
		req.(*protocol.StreamMessage).SetConn(conn)
		s.wg.Add(1)
		go s.closeStream(req.(*protocol.StreamMessage), done)
	case protocol.MagicPipelineReq:
		req = protocol.NewPipelineMessageFromRequest(buf)
	case protocol.MagicSystemReq:
		req = protocol.NewSystemMessageFromRequest(buf)
	case protocol.MagicDTopicReq:
		req = protocol.NewDTopicMessageFromRequest(buf)
	default:
		return errors.WithMessage(ErrInvalidMagic, fmt.Sprint(header.Magic))
	}
	// Decode reads the incoming message from the underlying TCP socket and
	// parses it into the request type selected above.
err = req.Decode()
if err != nil {
return errors.WithMessage(err, "failed to read request")
}
// Mark connection as busy.
atomic.StoreUint32(connStatus, busyConn)
	// Mark the connection as idle again before waiting for the next request.
defer atomic.StoreUint32(connStatus, idleConn)
resp := req.Response(nil)
	// The dispatcher is defined by the olric package and is responsible for
	// evaluating the incoming message.
s.dispatcher(resp, req)
err = resp.Encode()
if err != nil {
return err
}
_, err = resp.Buffer().WriteTo(conn)
return err
}

// processConn reads requests from the connection in a loop and calls the
// request handlers to generate responses. Connections are reused for
// subsequent requests.
func (s *Server) processConn(conn io.ReadWriteCloser) {
defer s.wg.Done()
	// connStatus tracks whether this connection is busy; the graceful
	// shutdown logic uses it to decide when the connection can be closed.
var connStatus uint32
done := make(chan struct{})
defer close(done)
s.wg.Add(1)
go s.controlConnLifeCycle(conn, &connStatus, done)
for {
		// processMessage blocks until it reads a message from the TCP socket,
		// then calls the message's handler to generate a response.
err := s.processMessage(conn, &connStatus, done)
if err != nil {
			// The socket has probably been closed by the client.
if errors.Cause(err) == io.EOF || errors.Cause(err) == protocol.ErrConnClosed {
s.log.V(5).Printf("[ERROR] End of the TCP connection: %v", err)
break
}
s.log.V(5).Printf("[ERROR] Failed to process the incoming request: %v", err)
}
}
}

// listenAndServe calls Accept on the server's net.Listener and starts a
// goroutine to serve every accepted connection.
func (s *Server) listenAndServe() error {
close(s.StartCh)
for {
conn, err := s.listener.Accept()
if err != nil {
select {
case <-s.ctx.Done():
				// The server is shutting down; just quit.
return nil
default:
}
s.log.V(3).Printf("[DEBUG] Failed to accept TCP connection: %v", err)
continue
}
		if s.config.KeepAlivePeriod != 0 {
err = conn.(*net.TCPConn).SetKeepAlive(true)
if err != nil {
return err
}
err = conn.(*net.TCPConn).SetKeepAlivePeriod(s.config.KeepAlivePeriod)
if err != nil {
return err
}
}
s.wg.Add(1)
go s.processConn(conn)
}
}

// ListenAndServe listens on the TCP network address built from BindAddr
// and BindPort and serves incoming connections.
func (s *Server) ListenAndServe() error {
defer func() {
select {
case <-s.StartCh:
return
default:
}
close(s.StartCh)
}()
if s.dispatcher == nil {
return errors.New("no dispatcher found")
}
addr := net.JoinHostPort(s.config.BindAddr, strconv.Itoa(s.config.BindPort))
l, err := net.Listen("tcp", addr)
if err != nil {
return err
}
s.listener = l
return s.listenAndServe()
}

// Shutdown gracefully shuts down the server without interrupting any active connections.
// Shutdown works by first closing all open listeners, then closing all idle connections,
// and then waiting indefinitely for connections to return to idle and then shut down.
// If the provided context expires before the shutdown is complete, Shutdown returns
// the context's error, otherwise it returns any error returned from closing the Server's
// underlying Listener(s).
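//
// A typical call site might look like this; the timeout value is
// illustrative:
//
//	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
//	defer cancel()
//	if err := srv.Shutdown(ctx); err != nil {
//		// Handle or log the shutdown error.
//	}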
func (s *Server) Shutdown(ctx context.Context) error {
select {
case <-s.ctx.Done():
// It's already closed.
return nil
default:
}
var result error
s.cancel()
err := s.listener.Close()
if err != nil {
result = multierror.Append(result, err)
}
done := make(chan struct{})
go func() {
s.wg.Wait()
close(done)
}()
select {
case <-ctx.Done():
err = ctx.Err()
if err != nil {
result = multierror.Append(result, err)
}
case <-done:
}
return result
}