-
Notifications
You must be signed in to change notification settings - Fork 10
/
watcher.go
261 lines (219 loc) · 6.23 KB
/
watcher.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
package controller
import (
"context"
"fmt"
"sync"
"sync/atomic"
"time"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/common/blog"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/common/codec"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/common/http/httpclient"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/server/pkg/engine/distcc/controller/config"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/server/pkg/engine/distcc/controller/pkg/store"
"github.com/TencentBlueKing/bk-turbo/src/backend/booster/server/pkg/engine/distcc/controller/pkg/types"
)
// Watcher provide a manager to watch the distcc turbo task status.
type Watcher interface {
	// Run launches the watcher's background loop and returns immediately;
	// the loop stops when pCtx is cancelled.
	Run(pCtx context.Context) error
}
// NewWatcher builds a Watcher over the given controller config and store ops.
func NewWatcher(conf *config.DistCCControllerConfig, ops store.Ops) Watcher {
	return &watcher{
		conf: conf,
		ops:  ops,
	}
}
// watcher periodically lists the running tasks and maintains one taskWatch
// per recently-started running task.
type watcher struct {
	conf *config.DistCCControllerConfig
	ops store.Ops
	// watchPool maps taskID to its active taskWatch; guarded by watchPoolLock.
	watchPool map[string]*taskWatch
	watchPoolLock sync.RWMutex
	// ctx is the lifetime context handed to every taskWatch; set in start().
	ctx context.Context
}
// Run the watcher: starts the background watch loop in its own goroutine and
// returns immediately (always nil). The loop ends when pCtx is cancelled.
func (w *watcher) Run(pCtx context.Context) error {
	go w.start(pCtx)
	return nil
}
// start initializes the watch pool and then reconciles it on every tick of
// InspectRunningTaskTimeGap until the parent context is cancelled.
func (w *watcher) start(pCtx context.Context) {
	blog.Infof("watcher start")
	// init the pool
	w.watchPoolLock.Lock()
	w.watchPool = make(map[string]*taskWatch, 100)
	w.watchPoolLock.Unlock()
	// Store the parent context directly. The previous code derived a child
	// via context.WithCancel and discarded the cancel func (govet
	// lostcancel); that child could only ever be cancelled through the
	// parent anyway, so the wrapper added nothing.
	w.ctx = pCtx
	inspectTick := time.NewTicker(types.InspectRunningTaskTimeGap)
	defer inspectTick.Stop()
	for {
		select {
		case <-w.ctx.Done():
			blog.Infof("watcher shut down")
			return
		case <-inspectTick.C:
			w.checkTask()
		}
	}
}
// checkTask lists the currently running tasks and reconciles the watch pool:
// recently-started tasks get a new taskWatch goroutine, already-watched tasks
// have their liveness mark refreshed, and tasks no longer listed as running
// have their taskWatch stopped and removed.
func (w *watcher) checkTask() {
	opts := store.NewListOptions()
	opts.Limit(1000)
	opts.Equal(types.ListKeyStatus, "running")
	taskList, _, err := w.ops.ListTask(opts)
	if err != nil {
		blog.Errorf("watcher: list running task failed: %v", err)
		return
	}
	w.watchPoolLock.Lock()
	defer w.watchPoolLock.Unlock()
	timestamp := time.Now().Unix()
	for _, task := range taskList {
		// Refresh the liveness mark of a task that is already being watched.
		// (The original code shadowed the receiver by naming this *taskWatch
		// "w"; use "tw" to keep the receiver visible.)
		if tw, ok := w.watchPool[task.TaskID]; ok {
			tw.timestamp = timestamp
			continue
		}
		// if the task is already running for a long time, just ignore it,
		// we focus on the task which just begin to run.
		if timestamp-task.StatusChangeTime > types.IgnoreTaskAfterRunningTimeSecond {
			continue
		}
		// if the task's gcc_version is in blacklist, generally some specific version
		// with buggy distCC provided by others
		// we will not watch these tasks.
		if inList(task.GccVersion, w.conf.GccVersionBlackList) {
			continue
		}
		blog.Infof("watcher: task(%s) gcc_version(%s) new for watcher, will be watched during running",
			task.TaskID, task.GccVersion)
		tw := &taskWatch{ops: w.ops, task: task, timestamp: timestamp, startTime: time.Now()}
		go tw.watch(w.ctx)
		w.watchPool[task.TaskID] = tw
	}
	// if task is no longer running, then quit the taskWatch
	for taskID, tw := range w.watchPool {
		if tw.timestamp != timestamp {
			blog.Infof("watcher: task(%s) no longer running, taskWatch quit", taskID)
			tw.quit()
			delete(w.watchPool, taskID)
		}
	}
}
// taskWatch polls the distCC stats endpoints of one running task and tracks
// the peak number of concurrently running jobs observed (maxJobs).
type taskWatch struct {
	ops store.Ops
	// timestamp is refreshed by watcher.checkTask on every inspection round;
	// a stale value means the task is no longer listed as running.
	timestamp int64
	task *store.RecordTask
	// ctx/cancel control the watch loop; pCtx is the watcher's longer-lived
	// context, kept so writeBack can outlive the watch loop.
	ctx context.Context
	cancel context.CancelFunc
	pCtx context.Context
	client *httpclient.HTTPClient
	// targets are decoded from the task's Compilers JSON field.
	targets []distCCTarget
	// maxJobs is -1 until the first successful stats round.
	maxJobs int64
	startTime time.Time
}
// watch runs the per-task polling loop: it derives a cancellable context from
// pCtx, prepares an HTTP client with a short timeout, decodes the targets,
// then requests distCC stats on every tick until cancelled via quit().
func (s *taskWatch) watch(pCtx context.Context) {
	s.pCtx = pCtx
	s.ctx, s.cancel = context.WithCancel(pCtx)
	s.client = httpclient.NewHTTPClient()
	s.client.SetTimeOut(2 * time.Second)
	s.maxJobs = -1
	s.analyseTargets()
	tick := time.NewTicker(types.InspectDistCCStatTimeGap)
	defer tick.Stop()
	for {
		select {
		case <-s.ctx.Done():
			return
		case <-tick.C:
			s.request()
		}
	}
}
// analyseTargets decodes the task's Compilers JSON field into the list of
// distCC targets to poll. On decode failure the error is logged and targets
// stays empty, which makes request() a no-op.
func (s *taskWatch) analyseTargets() {
	if err := codec.DecJSON([]byte(s.task.Compilers), &s.targets); err != nil {
		blog.Errorf("watcher: task(%s) decode compilers failed: %v", s.task.TaskID, err)
	}
}
// quit stops the watch loop and, if any stats round ever succeeded, logs the
// watch duration and asynchronously writes the observed max jobs back.
func (s *taskWatch) quit() {
	if s.cancel != nil {
		s.cancel()
	}
	// maxJobs stays -1 when no stats request ever succeeded; only write back
	// a real observation.
	if s.maxJobs != -1 {
		// time.Since measures the true elapsed time; the previous
		// Unix-second subtraction could be off by up to a second in either
		// direction. Truncate keeps the log format in whole seconds.
		duration := time.Since(s.startTime).Truncate(time.Second)
		blog.Infof("watcher: task(%s) watching for %s, max jobs is %d",
			s.task.TaskID, duration.String(), s.maxJobs)
		go s.writeBack()
	}
}
// writeBack writes the max jobs back to task, after the task is released.
//
// It polls the store every 5 seconds until the task record shows
// "released"=true, then updates the record with max_jobs and observed=true.
func (s *taskWatch) writeBack() {
	opts := store.NewListOptions()
	opts.Limit(1)
	opts.Equal(types.ListKeyTaskID, s.task.TaskID)
	opts.Equal(types.ListKeyReleased, true)
	// Inherit the watcher's context, not the taskWatch's: the taskWatch is
	// released right after the task is done, while writeBack must keep
	// waiting for "released"=true. Select on s.pCtx directly — the previous
	// context.WithCancel wrapper discarded its cancel func (govet
	// lostcancel) and added nothing.
	for ; ; time.Sleep(5 * time.Second) {
		select {
		case <-s.pCtx.Done():
			return
		default:
			_, length, err := s.ops.ListTask(opts)
			if err != nil {
				blog.Errorf("watcher: writeBack try to get task(%s) failed: %v", s.task.TaskID, err)
				return
			}
			// not released yet; continue runs the post-statement sleep.
			if length <= 0 {
				continue
			}
			if err = s.ops.UpdateTask(s.task.TaskID, map[string]interface{}{
				"max_jobs": s.maxJobs,
				"observed": true,
			}); err != nil {
				blog.Errorf("watcher: writeBack try to write task(%s) with max jobs(%d) failed: %v",
					s.task.TaskID, s.maxJobs, err)
				return
			}
			blog.Infof("watcher: success to write task(%s) with max jobs(%d)", s.task.TaskID, s.maxJobs)
			return
		}
	}
}
// request polls every distCC target's stats endpoint concurrently, sums the
// currently running jobs across all targets, and records the peak sum seen so
// far in maxJobs.
func (s *taskWatch) request() {
	if len(s.targets) == 0 {
		return
	}
	var runningSum int64
	var wg sync.WaitGroup
	for _, item := range s.targets {
		wg.Add(1)
		go func(target distCCTarget) {
			defer wg.Done()
			url := fmt.Sprintf("http://%s:%d", target.IP, target.StatsPort)
			raw, err := s.client.GET(url, nil, nil)
			if err != nil {
				// best-effort: an unreachable target simply contributes zero.
				return
			}
			info := new(store.StatsInfo)
			info.ParseAll(raw)
			atomic.AddInt64(&runningSum, info.GetCurrentLoad().RunningJobs)
		}(item)
	}
	wg.Wait()
	if runningSum > s.maxJobs {
		s.maxJobs = runningSum
	}
}
// distCCTarget is one distCC worker entry as decoded from the task's
// Compilers JSON field; IP and StatsPort locate its stats HTTP endpoint.
type distCCTarget struct {
	CPU float64 `json:"CPU"`
	IP string `json:"IP"`
	Mem float64 `json:"Mem"`
	Message string `json:"Message"`
	Port int `json:"Port"`
	StatsPort int `json:"StatsPort"`
}
// inList reports whether base equals any element of list.
func inList(base string, list []string) bool {
	for i := range list {
		if list[i] == base {
			return true
		}
	}
	return false
}