common.go

package exec

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"math"
	"os"
	"runtime"
	"strconv"
	"time"

	"github.com/cozy/cozy-stack/model/instance"
	"github.com/cozy/cozy-stack/model/job"
	"github.com/cozy/cozy-stack/pkg/logger"
	"github.com/cozy/cozy-stack/pkg/metrics"
	"github.com/cozy/cozy-stack/pkg/utils"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/spf13/afero"
)

var defaultTimeout = 300 * time.Second

func init() {
	job.AddWorker(&job.WorkerConfig{
		WorkerType: "konnector",
		WorkerStart: func(ctx *job.TaskContext) (*job.TaskContext, error) {
			return ctx.WithCookie(&konnectorWorker{}), nil
		},
		BeforeHook:   beforeHookKonnector,
		ErrorHook:    jobHookErrorCheckerKonnector,
		WorkerFunc:   worker,
		WorkerCommit: commit,
		Concurrency:  runtime.NumCPU() * 2,
		MaxExecCount: 2,
		Timeout:      defaultTimeout,
	})

	job.AddWorker(&job.WorkerConfig{
		WorkerType: "service",
		WorkerStart: func(ctx *job.TaskContext) (*job.TaskContext, error) {
			return ctx.WithCookie(&serviceWorker{}), nil
		},
		WorkerFunc:   worker,
		WorkerCommit: commit,
		Concurrency:  runtime.NumCPU() * 2,
		MaxExecCount: 2,
		Timeout:      defaultTimeout,
	})
}
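
// Both worker types share the same WorkerFunc; the cookie attached in
// WorkerStart is what lets worker() below recover the concrete
// implementation via ctx.Cookie().(execWorker). A hypothetical third
// exec-based worker would only need its own execWorker implementation plus
// a similar registration (illustrative sketch only; "my-worker" and
// myWorker are not part of this package):
//
//	job.AddWorker(&job.WorkerConfig{
//		WorkerType: "my-worker",
//		WorkerStart: func(ctx *job.TaskContext) (*job.TaskContext, error) {
//			return ctx.WithCookie(&myWorker{}), nil
//		},
//		WorkerFunc:   worker,
//		WorkerCommit: commit,
//		Concurrency:  runtime.NumCPU() * 2,
//		MaxExecCount: 2,
//		Timeout:      defaultTimeout,
//	})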

type execWorker interface {
	Slug() string
	PrepareWorkDir(ctx *job.TaskContext, i *instance.Instance) (workDir string, cleanDir func(), err error)
	PrepareCmdEnv(ctx *job.TaskContext, i *instance.Instance) (cmd string, env []string, err error)
	ScanOutput(ctx *job.TaskContext, i *instance.Instance, line []byte) error
	Error(i *instance.Instance, err error) error
	Logger(ctx *job.TaskContext) logger.Logger
	Commit(ctx *job.TaskContext, errjob error) error
}
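
// A minimal execWorker implementation could look like the following sketch.
// It is hypothetical (the concrete implementations in this package are
// konnectorWorker and serviceWorker, registered in init above), and it
// assumes pkg/logger exposes a WithNamespace constructor:
//
//	type echoWorker struct{}
//
//	func (w *echoWorker) Slug() string { return "echo" }
//
//	func (w *echoWorker) PrepareWorkDir(ctx *job.TaskContext, i *instance.Instance) (string, func(), error) {
//		dir, err := os.MkdirTemp("", "echo")
//		return dir, func() { _ = os.RemoveAll(dir) }, err
//	}
//
//	func (w *echoWorker) PrepareCmdEnv(ctx *job.TaskContext, i *instance.Instance) (string, []string, error) {
//		return "/bin/echo", os.Environ(), nil
//	}
//
//	func (w *echoWorker) ScanOutput(ctx *job.TaskContext, i *instance.Instance, line []byte) error {
//		w.Logger(ctx).Infof("stdout: %s", line)
//		return nil
//	}
//
//	func (w *echoWorker) Error(i *instance.Instance, err error) error { return err }
//
//	func (w *echoWorker) Logger(ctx *job.TaskContext) logger.Logger {
//		return logger.WithNamespace("echo")
//	}
//
//	func (w *echoWorker) Commit(ctx *job.TaskContext, errjob error) error { return errjob }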

func worker(ctx *job.TaskContext) (err error) {
	worker := ctx.Cookie().(execWorker)

	if ctx.Instance == nil {
		return instance.ErrNotFound
	}

	workDir, cleanDir, err := worker.PrepareWorkDir(ctx, ctx.Instance)
	defer cleanDir()
	if err != nil {
		worker.Logger(ctx).Errorf("PrepareWorkDir: %s", err)
		return err
	}

	cmdStr, env, err := worker.PrepareCmdEnv(ctx, ctx.Instance)
	if err != nil {
		worker.Logger(ctx).Errorf("PrepareCmdEnv: %s", err)
		return err
	}

	var stderrBuf bytes.Buffer
	cmd := CreateCmd(cmdStr, workDir)
	cmd.Env = env

	// Capture stderr in a bytes.Buffer capped at a total size of 256KB;
	// anything written beyond that limit is discarded.
	cmd.Stderr = utils.LimitWriterDiscard(&stderrBuf, 256*1024)

	// Log everything printed on stderr, whatever the result of the
	// konnector is.
	log := worker.Logger(ctx)
	defer func() {
		if stderrBuf.Len() > 0 {
			log.Errorf("Stderr: %s", stderrBuf.String())
		}
	}()

	cmdOut, err := cmd.StdoutPipe()
	if err != nil {
		return err
	}
	scanBuf := make([]byte, 16*1024)
	scanOut := bufio.NewScanner(cmdOut)
	scanOut.Buffer(scanBuf, 64*1024)

	timer := prometheus.NewTimer(prometheus.ObserverFunc(func(v float64) {
		var result string
		if err != nil {
			result = metrics.WorkerExecResultErrored
		} else {
			result = metrics.WorkerExecResultSuccess
		}
		metrics.WorkersKonnectorsExecDurations.
			WithLabelValues(worker.Slug(), result).
			Observe(v)
	}))
	defer timer.ObserveDuration()

	if err = cmd.Start(); err != nil {
		return wrapErr(ctx, err)
	}

	waitDone := make(chan error)
	go func() {
		for scanOut.Scan() {
			if errOut := worker.ScanOutput(ctx, ctx.Instance, scanOut.Bytes()); errOut != nil {
				log.Debug(errOut.Error())
			}
		}
		if errs := scanOut.Err(); errs != nil {
			log.Errorf("could not scan stdout: %s", errs)
		}
		waitDone <- cmd.Wait()
		close(waitDone)
	}()

	select {
	case err = <-waitDone:
	case <-ctx.Done():
		err = ctx.Err()
		_ = KillCmd(cmd)
		<-waitDone
	}

	return worker.Error(ctx.Instance, err)
}
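
// The pattern above (pipe stdout through a bufio.Scanner in a goroutine,
// then select between the process exiting and context cancellation) can be
// reproduced with the standard library alone. A minimal, self-contained
// sketch using os/exec, bufio, and fmt, instead of this package's
// CreateCmd/KillCmd helpers:
//
//	func runAndScan(ctx context.Context, name string, args ...string) error {
//		cmd := exec.Command(name, args...)
//		out, err := cmd.StdoutPipe()
//		if err != nil {
//			return err
//		}
//		if err := cmd.Start(); err != nil {
//			return err
//		}
//		done := make(chan error, 1)
//		go func() {
//			scanner := bufio.NewScanner(out)
//			for scanner.Scan() {
//				fmt.Println("line:", scanner.Text()) // stand-in for ScanOutput
//			}
//			done <- cmd.Wait()
//		}()
//		select {
//		case err := <-done:
//			return err
//		case <-ctx.Done():
//			_ = cmd.Process.Kill()
//			<-done // reap the process before returning
//			return ctx.Err()
//		}
//	}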

func commit(ctx *job.TaskContext, errjob error) error {
	return ctx.Cookie().(execWorker).Commit(ctx, errjob)
}

func ctxToTimeLimit(ctx *job.TaskContext) string {
	var limit float64
	if deadline, ok := ctx.Deadline(); ok {
		limit = time.Until(deadline).Seconds()
	}
	if limit <= 0 {
		limit = defaultTimeout.Seconds()
	}
	// Add a little gap of 5 seconds to prevent racing the two deadlines.
	return strconv.Itoa(int(math.Ceil(limit)) + 5)
}
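
// For example, with a deadline 42.3 seconds away this returns "48"
// (ceil(42.3) + 5), and with no deadline (or one already passed) it falls
// back to the default timeout and returns "305" (300 + 5).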

func wrapErr(ctx context.Context, err error) error {
	if ctx.Err() == context.DeadlineExceeded {
		return context.DeadlineExceeded
	}
	return err
}

// MaxPayloadSizeInEnvVar is the maximal size that the COZY_PAYLOAD env
// variable can be. If the payload is larger, we can't put it in the env
// variable, as the kernel has a size limit for it. Instead, we put the
// payload in a temporary file and only give the filename in the
// COZY_PAYLOAD variable.
const MaxPayloadSizeInEnvVar = 100000

const payloadFilename = "cozy_payload.json"

func preparePayload(ctx *job.TaskContext, workDir string) (string, error) {
	var payload string
	if p, err := ctx.UnmarshalPayload(); err == nil {
		marshaled, err := json.Marshal(p)
		if err != nil {
			return "", err
		}
		payload = string(marshaled)
	}
	if len(payload) > MaxPayloadSizeInEnvVar {
		workFS := afero.NewBasePathFs(afero.NewOsFs(), workDir)
		f, err := workFS.OpenFile(payloadFilename, os.O_CREATE|os.O_WRONLY, 0640)
		if err != nil {
			return "", err
		}
		_, err = f.WriteString(payload)
		errc := f.Close()
		if err != nil {
			return "", err
		}
		if errc != nil {
			return "", errc
		}
		payload = "@" + payloadFilename
	}
	return payload, nil
}
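
// On the receiving side, the child process presumably reads COZY_PAYLOAD
// and, when the value starts with "@", loads the named file from its
// working directory instead. A sketch of that convention in Go
// (hypothetical; the real consumers are the konnector and service scripts
// themselves):
//
//	func readPayload() ([]byte, error) {
//		payload := os.Getenv("COZY_PAYLOAD")
//		if strings.HasPrefix(payload, "@") {
//			return os.ReadFile(payload[1:]) // e.g. "cozy_payload.json" in the work dir
//		}
//		return []byte(payload), nil
//	}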