server.go
package server

import (
	"fmt"
	"io"
	"os"
	"os/signal"
	"strings"
	"sync"
	"syscall"
	"time"

	gocontext "golang.org/x/net/context"

	log "github.com/sirupsen/logrus"

	gofig "github.com/akutz/gofig/types"
	"github.com/akutz/gournal"
	glogrus "github.com/akutz/gournal/logrus"

	"github.com/rexray/rexray/libstorage/api/context"
	"github.com/rexray/rexray/libstorage/api/registry"
	"github.com/rexray/rexray/libstorage/api/server/services"
	"github.com/rexray/rexray/libstorage/api/types"
	"github.com/rexray/rexray/libstorage/api/utils"
	apicnfg "github.com/rexray/rexray/libstorage/api/utils/config"

	// import and load the routers
	_ "github.com/rexray/rexray/libstorage/api/server/router/help"
	_ "github.com/rexray/rexray/libstorage/api/server/router/root"
	_ "github.com/rexray/rexray/libstorage/api/server/router/service"
	_ "github.com/rexray/rexray/libstorage/api/server/router/snapshot"
	_ "github.com/rexray/rexray/libstorage/api/server/router/tasks"
	_ "github.com/rexray/rexray/libstorage/api/server/router/volume"
)

var (
	servers []*server
)
type server struct {
	name             string
	adminToken       string
	ctx              types.Context
	addrs            []string
	config           gofig.Config
	authConfig       *types.AuthConfig
	servers          []*HTTPServer
	closeSignal      chan int
	closedSignal     chan int
	closeOnce        *sync.Once
	routers          []types.Router
	routeHandlers    map[string][]types.Middleware
	globalHandlers   []types.Middleware
	logHTTPEnabled   bool
	logHTTPRequests  bool
	logHTTPResponses bool
	stdOut           io.WriteCloser
	stdErr           io.WriteCloser
}
func newServer(goCtx gocontext.Context, config gofig.Config) (*server, error) {

	adminTokenUUID, err := types.NewUUID()
	if err != nil {
		return nil, err
	}
	adminToken := adminTokenUUID.String()
	serverName := randomServerName()

	ctx := context.New(goCtx)
	ctx = ctx.WithValue(context.ServerKey, serverName)
	ctx = ctx.WithValue(context.AdminTokenKey, adminToken)

	if lvl, ok := context.GetLogLevel(ctx); ok {
		switch lvl {
		case log.DebugLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.DebugLevel)
		case log.InfoLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.InfoLevel)
		case log.WarnLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.WarnLevel)
		case log.ErrorLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.ErrorLevel)
		case log.FatalLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.FatalLevel)
		case log.PanicLevel:
			ctx = context.WithValue(
				ctx, gournal.LevelKey(),
				gournal.PanicLevel)
		}
	}

	if logger, ok := ctx.Value(context.LoggerKey).(*log.Logger); ok {
		ctx = context.WithValue(
			ctx, gournal.AppenderKey(),
			glogrus.NewWithOptions(
				logger.Out, logger.Level, logger.Formatter))
	}

	if config == nil {
		var err error
		if config, err = apicnfg.NewConfig(ctx); err != nil {
			return nil, err
		}
	}
	config = config.Scope(types.ConfigServer)

	s := &server{
		ctx:          ctx,
		name:         serverName,
		adminToken:   adminToken,
		config:       config,
		closeSignal:  make(chan int),
		closedSignal: make(chan int),
		closeOnce:    &sync.Once{},
	}

	if logger, ok := s.ctx.Value(context.LoggerKey).(*log.Logger); ok {
		s.PrintServerStartupHeader(logger.Out)
	} else {
		s.PrintServerStartupHeader(types.Stderr)
	}

	if lvl, err := log.ParseLevel(
		config.GetString(types.ConfigLogLevel)); err == nil {
		context.SetLogLevel(s.ctx, lvl)
	}

	logFields := log.Fields{}
	logConfig, err := utils.ParseLoggingConfig(
		config, logFields, types.ConfigServer)
	if err != nil {
		return nil, err
	}

	// always update the server context's log level
	context.SetLogLevel(s.ctx, logConfig.Level)
	s.ctx.WithFields(logFields).Info("configured logging")

	authFields := log.Fields{}
	authConfig, err := utils.ParseAuthConfig(
		s.ctx, config, authFields, types.ConfigServer)
	if err != nil {
		return nil, err
	}
	s.authConfig = authConfig
	if s.authConfig != nil {
		s.ctx.WithFields(authFields).Info("configured global auth")
	}

	s.ctx.Info("initializing server")

	if err := services.Init(s.ctx, s.config); err != nil {
		return nil, err
	}
	s.ctx.Info("initialized services")

	if err := s.initEndpoints(s.ctx); err != nil {
		return nil, err
	}
	s.ctx.Info("initialized endpoints")

	if logConfig.HTTPRequests || logConfig.HTTPResponses {
		s.logHTTPEnabled = true
		s.logHTTPRequests = logConfig.HTTPRequests
		s.logHTTPResponses = logConfig.HTTPResponses
		s.stdOut = getLogIO(s.ctx, logConfig.Stdout, types.ConfigLogStdout)
		s.stdErr = getLogIO(s.ctx, logConfig.Stderr, types.ConfigLogStderr)
	}

	s.initGlobalMiddleware()

	if err := s.initRouters(); err != nil {
		return nil, err
	}

	servers = append(servers, s)

	return s, nil
}
// Serve starts serving the configured libStorage endpoints. This function
// returns a channel on which errors are received. Reading from this channel
// is also the prescribed way for clients to block until the server is shut
// down, since the error channel is closed when the server stops.
func Serve(
	goCtx gocontext.Context,
	config gofig.Config) (types.Server, <-chan error, error) {

	if goCtx == nil {
		goCtx = context.Background()
	}

	ctx := context.New(goCtx)
	if _, ok := context.PathConfig(ctx); !ok {
		pathConfig := utils.NewPathConfig()
		ctx = ctx.WithValue(context.PathConfigKey, pathConfig)
		registry.ProcessRegisteredConfigs(ctx)
	}

	s, err := newServer(ctx, config)
	if err != nil {
		return nil, nil, err
	}

	errs := make(chan error, len(s.servers))
	srvErrs := make(chan error, len(s.servers))

	for _, srv := range s.servers {
		srv.srv.Handler = s.createMux(srv.ctx)
		go func(srv *HTTPServer) {
			srv.ctx.Info("api listening")
			if err := srv.Serve(); err != nil {
				if !strings.Contains(
					err.Error(), "use of closed network connection") {
					srvErrs <- err
				}
			}
		}(srv)
	}

	go func() {
		s.ctx.Info("waiting for err or close signal")
		select {
		case err := <-srvErrs:
			errs <- err
			s.ctx.Error("received server error")
		case <-s.closeSignal:
			s.ctx.Debug("received close signal")
		}
		close(errs)
		s.ctx.Info("closed server error channel")
		s.closedSignal <- 1
	}()

	// Wait a second for all the configured endpoints to start. This isn't
	// pretty, but the underlying Go net/http package doesn't really provide
	// a better option.
	timeout := time.NewTimer(time.Second * 1)
	<-timeout.C

	s.ctx.Info("server started")
	if logger, ok := s.ctx.Value(context.LoggerKey).(*log.Logger); ok {
		s.PrintServerStartupFooter(logger.Out)
	} else {
		s.PrintServerStartupFooter(types.Stderr)
	}

	return s, errs, nil
}
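
// exampleServe is a hypothetical usage sketch, not part of the original
// file, showing how a caller might start the server with Serve and then
// block until it is shut down by draining the returned error channel,
// which Serve closes when the server stops.
func exampleServe(goCtx gocontext.Context, config gofig.Config) error {
	_, errs, err := Serve(goCtx, config)
	if err != nil {
		return err
	}
	// Block until the server shuts down; any runtime errors from the
	// HTTP endpoints arrive here before the channel is closed.
	for err := range errs {
		log.Error(err)
	}
	return nil
}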
// Name returns the name of the server.
func (s *server) Name() string {
	return s.name
}

// Addrs returns the server's configured endpoint addresses.
func (s *server) Addrs() []string {
	return s.addrs
}

// Close closes the server's endpoints so it stops accepting new requests.
func (s *server) Close() (err error) {
	s.closeOnce.Do(
		func() {
			err = s.close()
			s.closeSignal <- 1
			<-s.closedSignal
		})
	return
}
func (s *server) close() error {
	s.ctx.Info("shutting down server")

	for _, srv := range s.servers {
		srv.ctx.Info("shutting down endpoint")
		if err := srv.Close(); err != nil {
			srv.ctx.Error(err)
		}
		if srv.l.Addr().Network() == "unix" {
			laddr := srv.l.Addr().String()
			os.RemoveAll(laddr)
			srv.ctx.WithField(
				"path", laddr).Debug("removed unix socket")
		}
		srv.ctx.Debug("shutdown endpoint complete")
	}

	if s.stdOut != nil {
		if err := s.stdOut.Close(); err != nil {
			log.Error(err)
		}
	}

	if s.stdErr != nil {
		if err := s.stdErr.Close(); err != nil {
			log.Error(err)
		}
	}

	s.ctx.Debug("shutdown server complete")
	return nil
}
// CloseOnAbort is a helper function that can be called by programs such as
// tests, command-line tools, or service applications.
func CloseOnAbort() {
	// make sure all servers get closed even if the process is abruptly
	// aborted
	sigc := make(chan os.Signal, 1)
	signal.Notify(sigc,
		syscall.SIGKILL,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		<-sigc
		fmt.Println("received abort signal")
		for range Close() {
		}
		os.Exit(1)
	}()
}
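
// exampleCloseOnAbort is a hypothetical sketch, not part of the original
// file, of how a command-line or service program might register the abort
// handler before serving so that all servers are closed if the process is
// interrupted.
func exampleCloseOnAbort(goCtx gocontext.Context, config gofig.Config) {
	CloseOnAbort()
	if _, errs, err := Serve(goCtx, config); err == nil {
		// Wait for the server to stop, either because of an error or
		// because the signal handler above closed it.
		for range errs {
		}
	}
}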
// Close closes all servers. This function can be used when a calling program
// traps UNIX signals or when it exits gracefully.
func Close() <-chan error {
	errs := make(chan error)
	go func() {
		for _, server := range servers {
			if err := server.Close(); err != nil {
				errs <- err
			}
		}
		close(errs)
		log.Info("all servers closed")
	}()
	return errs
}
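
// exampleGracefulClose is a hypothetical sketch, not part of the original
// file, showing how a program that traps signals itself, or that is exiting
// gracefully, might drain the package-level Close channel and log any
// per-server shutdown errors.
func exampleGracefulClose() {
	for err := range Close() {
		log.Error(err)
	}
}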