db.go
package model

import (
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strconv"
	"sync"
	"time"
)
// TaskSort is a way to sort tasks.
type TaskSort int

// Valid TaskSort.
const (
	TaskSortCreated TaskSort = iota
	TaskSortModified
	TaskSortCompleted
	TaskSortAbandoned
	TaskSortStarted
)

// TaskStateQuery filters on different kinds of tasks.
type TaskStateQuery int

// Valid TaskStateQuery.
const (
	TaskStateQueryPending TaskStateQuery = iota
	TaskStateQueryRunning
	TaskStateQueryPendingRunning
	TaskStateQueryCompleted
	TaskStateQueryCompletedSuccess
	TaskStateQueryCompletedFailure
	TaskStateQueryExpired
	TaskStateQueryTimedOut
	TaskStateQueryBotDied
	TaskStateQueryCanceled
	TaskStateQueryAll
	TaskStateQueryDeduped
	TaskStateQueryKilled
	TaskStateQueryNoResource
)
// Filter is the common set of query filters: a pagination cursor, a result
// limit and a time window.
type Filter struct {
	Cursor   string
	Limit    int
	Earliest time.Time
	Latest   time.Time
}
// Tables is the set of functions used to access the Swarming DB tables.
type Tables interface {
	TaskRequestGet(id int64, r *TaskRequest)
	// TaskRequestAdd adds a new TaskRequest. It is immutable, so it is an
	// error to add two TaskRequests with the same key.
	TaskRequestAdd(r *TaskRequest)
	TaskRequestCount() int64
	TaskRequestSlice(f Filter) ([]TaskRequest, string)
	TaskResultGet(id int64, r *TaskResult)
	TaskResultSet(r *TaskResult)
	TaskResultCount() int64
	TaskResultSlice(botid string, f Filter, state TaskStateQuery, sort TaskSort) ([]TaskResult, string)
	BotGet(id string, b *Bot)
	BotSet(b *Bot)
	BotCount(dims map[string]string) (total, quarantined, maintenance, dead, busy int64)
	BotGetSlice(cursor string, limit int) ([]Bot, string)
	BotEventAdd(e *BotEvent)
	BotEventGetSlice(botid string, f Filter) ([]BotEvent, string)
}
// DB is a database backend.
type DB interface {
	Tables
	io.Closer

	// Snapshot ensures there's a copy on disk in case of a crash.
	Snapshot() error
}
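
// The sketch below is illustrative only and is not part of the original file:
// it shows how a caller might page through TaskRequestSlice using the Filter
// cursor. It assumes an empty returned cursor means the listing is exhausted;
// the function name and the page size are made up.
func exampleListAllTaskRequests(t Tables) []TaskRequest {
	var all []TaskRequest
	f := Filter{Limit: 100}
	for {
		page, cursor := t.TaskRequestSlice(f)
		all = append(all, page...)
		if cursor == "" {
			return all
		}
		// Resume the listing where the previous page stopped.
		f.Cursor = cursor
	}
}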
// TaskOutputs is a good enough task outputs manager.
//
// It uses a file-backed store.
//
// TODO: Implement compression.
type TaskOutputs struct {
	root    string
	mu      sync.Mutex
	handles map[int64]*output
}

type output struct {
	mu   sync.Mutex
	f    *os.File
	buf  []byte
	err  error
	last time.Time
}
// NewTaskOutputs returns an initialized TaskOutputs.
func NewTaskOutputs(root string) (*TaskOutputs, error) {
	t := &TaskOutputs{
		root:    root,
		handles: map[int64]*output{},
	}
	if d, err := os.Stat(t.root); err == nil {
		if !d.IsDir() {
			return nil, fmt.Errorf("%s is not a directory", t.root)
		}
	} else if !os.IsNotExist(err) {
		return nil, err
	} else if err := os.Mkdir(t.root, 0o755); err != nil {
		return nil, err
	}
	return t, nil
}
// SetOutput sets the output for a task at the specified offset.
func (t *TaskOutputs) SetOutput(key, offset int64, content []byte) error {
	o := t.getLocked(key, offset, false)
	if o.err == nil {
		_, o.err = o.f.Write(content)
	}
	o.mu.Unlock()
	return o.err
}
// ReadOutput reads the task output from a file at the specified offset.
func (t *TaskOutputs) ReadOutput(key, offset int64, max int) ([]byte, error) {
	o := t.getLocked(key, offset, true)
	d := 0
	if o.err == nil {
		if len(o.buf) < max {
			o.buf = make([]byte, max)
		}
		// Cap the read at max bytes even when the reused buffer is larger.
		d, o.err = o.f.Read(o.buf[:max])
	}
	o.mu.Unlock()
	return o.buf[:d], o.err
}
// Loop should be run to lazily close file handles.
func (t *TaskOutputs) Loop(ctx context.Context, max int, cutoff time.Duration) {
	done := ctx.Done()
	for jitter := 0; ; jitter = (jitter + 1) % 6 {
		select {
		case now := <-time.After(time.Minute + time.Duration(jitter)*time.Second):
			old := now.Add(-cutoff)
			t.mu.Lock()
			for k, o := range t.handles {
				if old.After(o.last) {
					if o.f != nil {
						o.f.Close()
					}
					delete(t.handles, k)
				}
			}
			for len(t.handles) > max {
				// Close random files: map iteration order is random, so drop
				// one arbitrary handle per pass until back under max.
				for k, o := range t.handles {
					if o.f != nil {
						o.f.Close()
					}
					delete(t.handles, k)
					break
				}
			}
			t.mu.Unlock()
		case <-done:
			return
		}
	}
}
// getLocked returns the output for key with o.mu held and the backing file
// opened and seeked to offset. The caller must call o.mu.Unlock().
func (t *TaskOutputs) getLocked(key, offset int64, forRead bool) *output {
	t.mu.Lock()
	o := t.handles[key]
	if o == nil {
		o = &output{}
		t.handles[key] = o
	}
	o.mu.Lock()
	t.mu.Unlock()
	if o.err == nil && !os.IsNotExist(o.err) {
		if o.f == nil {
			p := filepath.Join(t.root, strconv.FormatInt(key, 10))
			flag := os.O_RDWR
			if !forRead {
				flag |= os.O_CREATE
			}
			o.f, o.err = os.OpenFile(p, flag, 0o644)
		}
		if o.err == nil {
			// TODO(maruel): I'm not sure if we can seek past the file's end.
			// Needs a unit test.
			if _, o.err = o.f.Seek(offset, io.SeekStart); o.err == nil {
				o.last = time.Now()
			}
		}
	}
	return o
}
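
// The sketch below is illustrative only and is not part of the original file:
// wiring TaskOutputs into a server lifecycle. The directory name, the handle
// limit and the cutoff are made up values.
func exampleTaskOutputsLifecycle(ctx context.Context) error {
	out, err := NewTaskOutputs("task-outputs")
	if err != nil {
		return err
	}
	// Loop blocks until ctx is canceled, so run it in the background. Here it
	// keeps at most 128 handles open and closes any idle for 10 minutes.
	go out.Loop(ctx, 128, 10*time.Minute)
	return out.SetOutput(1, 0, []byte("hello\n"))
}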