This repository has been archived by the owner on Feb 27, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 18
/
worker.go
202 lines (179 loc) · 5.55 KB
/
worker.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
package worker
import (
"fmt"
"net"
"os"
"time"
"github.com/Sirupsen/logrus"
schematypes "github.com/taskcluster/go-schematypes"
tcclient "github.com/taskcluster/taskcluster-client-go"
"github.com/taskcluster/taskcluster-client-go/auth"
"github.com/taskcluster/taskcluster-worker/engines"
"github.com/taskcluster/taskcluster-worker/plugins"
"github.com/taskcluster/taskcluster-worker/runtime"
"github.com/taskcluster/taskcluster-worker/runtime/atomics"
"github.com/taskcluster/taskcluster-worker/runtime/gc"
"github.com/taskcluster/taskcluster-worker/runtime/webhookserver"
)
// Worker is the center of taskcluster-worker and is responsible for managing resources, tasks,
// and host level events.
type Worker struct {
	monitor runtime.Monitor            // monitor scoped with the "worker" prefix, used for worker life-cycle logging
	done chan struct{}                 // closed by ImmediateStop to unblock the select in Start
	tm *Manager                        // task manager: claims and executes tasks
	sm runtime.ShutdownManager         // emits host shutdown notifications consumed in Start
	env *runtime.Environment           // shared runtime environment handed to engine and plugins
	server *webhookserver.LocalServer  // webhook server started in Start, stopped when the worker loop exits
}
// New will create a worker given configuration matching the schema from
// ConfigSchema(). The log parameter is optional and if nil is given a default
// logrus logger will be used.
//
// New validates the configuration, prepares the temporary folder, constructs
// the webhook server, monitor, engine, plugin manager and task manager, and
// returns a Worker ready for Start(). Any initialization failure is returned
// as an error; no partial Worker is returned.
func New(config interface{}, log *logrus.Logger) (*Worker, error) {
	// Validate and map configuration to c
	var c configType
	if err := schematypes.MustMap(ConfigSchema(), config, &c); err != nil {
		return nil, fmt.Errorf("Invalid configuration: %s", err)
	}

	// Create temporary folder, wiping anything left over from a previous run
	err := os.RemoveAll(c.TemporaryFolder)
	if err != nil {
		return nil, fmt.Errorf("Failed to remove temporaryFolder: %s, error: %s",
			c.TemporaryFolder, err)
	}
	tempStorage, err := runtime.NewTemporaryStorage(c.TemporaryFolder)
	if err != nil {
		return nil, fmt.Errorf("Failed to create temporary folder, error: %s", err)
	}

	// Create logger
	if log == nil {
		log = logrus.New()
	}
	// Reject an invalid log level instead of silently ignoring the error:
	// logrus.ParseLevel returns level 0 (PanicLevel) on failure, which would
	// suppress almost all log output without any indication of why.
	level, err := logrus.ParseLevel(c.LogLevel)
	if err != nil {
		return nil, fmt.Errorf("Invalid configuration: logLevel: %s", err)
	}
	log.Level = level

	// Setup WebHookServer
	localServer, err := webhookserver.NewLocalServer(
		net.ParseIP(c.ServerIP), c.ServerPort,
		c.NetworkInterface, c.ExposedPort,
		c.DNSDomain,
		c.DNSSecret,
		c.TLSCertificate,
		c.TLSKey,
		time.Duration(c.MaxLifeCycle)*time.Second,
	)
	if err != nil {
		return nil, err
	}

	// Setup monitor; tags identify this worker instance in all reported metrics
	tags := map[string]string{
		"provisionerId": c.ProvisionerID,
		"workerType":    c.WorkerType,
		"workerGroup":   c.WorkerGroup,
		"workerId":      c.WorkerID,
	}
	var monitor runtime.Monitor
	if c.MonitorProject != "" {
		// A monitor project is configured: report to the remote monitoring
		// service using the worker's taskcluster credentials.
		a := auth.New(&tcclient.Credentials{
			ClientID:    c.Credentials.ClientID,
			AccessToken: c.Credentials.AccessToken,
			Certificate: c.Credentials.Certificate,
		})
		monitor = runtime.NewMonitor(c.MonitorProject, a, c.LogLevel, tags)
	} else {
		// No monitor project: fall back to local logging only.
		monitor = runtime.NewLoggingMonitor(c.LogLevel, tags)
	}

	// Create environment shared by engine, plugins and the task manager
	gc := gc.New(c.TemporaryFolder, c.MinimumDiskSpace, c.MinimumMemory)
	env := &runtime.Environment{
		GarbageCollector: gc,
		TemporaryStorage: tempStorage,
		WebHookServer:    localServer,
		Monitor:          monitor,
	}

	// Ensure that engine configuration was provided for the engine selected
	if _, ok := c.Engines[c.Engine]; !ok {
		return nil, fmt.Errorf("Invalid configuration: The key 'engines.%s' must "+
			"be specified when engine '%s' is selected", c.Engine, c.Engine)
	}

	// Find engine provider (schema should ensure it exists)
	provider := engines.Engines()[c.Engine]
	engine, err := provider.NewEngine(engines.EngineOptions{
		Environment: env,
		Monitor:     env.Monitor.WithPrefix("engine").WithTag("engine", c.Engine),
		Config:      c.Engines[c.Engine],
	})
	if err != nil {
		return nil, fmt.Errorf("Engine initialization failed, error: %s", err)
	}

	// Initialize plugin manager
	pm, err := plugins.NewPluginManager(plugins.PluginOptions{
		Environment: env,
		Engine:      engine,
		Monitor:     env.Monitor.WithPrefix("plugin"),
		Config:      c.Plugins,
	})
	if err != nil {
		return nil, fmt.Errorf("Plugin initialization failed, error: %s", err)
	}

	tm, err := newTaskManager(
		&c, engine, pm, env,
		env.Monitor.WithPrefix("task-manager"), gc,
	)
	if err != nil {
		return nil, err
	}

	return &Worker{
		monitor: env.Monitor.WithPrefix("worker"),
		tm:      tm,
		sm:      runtime.NewShutdownManager("local"),
		env:     env,
		server:  localServer,
		done:    make(chan struct{}),
	}, nil
}
// Start will begin the worker cycle of claiming and executing tasks. The worker
// will also begin to respond to host level events such as shutdown notifications
// and resource depletion events.
//
// Start blocks until the task manager finishes executing tasks, a shutdown
// notification arrives, or ImmediateStop is called; it then stops the task
// manager and the webhook server before returning.
func (w *Worker) Start() {
	w.monitor.Info("worker starting up")

	// Ensure that server is stopping gracefully: once this flag is set, a
	// ListenAndServe error is the expected result of Stop() below, not a
	// failure worth reporting.
	serverStopped := atomics.NewBool(false)
	go func() {
		err := w.server.ListenAndServe()
		if !serverStopped.Get() {
			w.monitor.Errorf("ListenAndServe failed for webhookserver, error: %s", err)
		}
	}()

	go w.tm.Start()

	// Block until one of the three stop conditions fires
	select {
	case <-w.tm.doneExecutingTasks:
	case <-w.sm.WaitForShutdown():
	case <-w.done:
	}
	w.tm.ImmediateStop()

	// Allow server to stop
	serverStopped.Set(true)
	w.server.Stop()
}
// ImmediateStop is a convenience method for stopping the worker loop. Usually
// the worker will not be stopped this way, but rather will listen for a
// shutdown event.
//
// NOTE: this closes w.done, so calling ImmediateStop more than once panics
// (close of a closed channel). Callers must ensure it is invoked at most once.
func (w *Worker) ImmediateStop() {
	close(w.done)
}
// GracefulStop will allow the worker to complete its running task, before stopping.
// It delegates directly to the task manager's GracefulStop.
func (w *Worker) GracefulStop() {
	w.tm.GracefulStop()
}
// PayloadSchema returns the payload schema for this worker.
//
// The schema is the merge of the engine's payload schema with the schema
// exposed by the plugin manager. Overlapping property definitions between
// engine and plugins is a programming error, hence the panic.
func (w *Worker) PayloadSchema() schematypes.Object {
	engineSchema := w.tm.engine.PayloadSchema()
	pluginSchema := w.tm.pluginManager.PayloadSchema()
	merged, mergeErr := schematypes.Merge(engineSchema, pluginSchema)
	if mergeErr != nil {
		panic(fmt.Sprintf(
			"Conflicting plugin and engine payload properties, error: %s", mergeErr,
		))
	}
	return merged
}