-
Notifications
You must be signed in to change notification settings - Fork 403
/
naive_placer.go
64 lines (51 loc) · 1.39 KB
/
naive_placer.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
package runnerpool
import (
"context"
"sync/atomic"
"time"
"github.com/fnproject/fn/api/models"
"github.com/sirupsen/logrus"
)
// naivePlacer is a Placer that spreads calls across the pool's runners
// with a simple shared round-robin cursor, retrying with backoff until
// the call is placed or the placement tracker gives up.
type naivePlacer struct {
	cfg     PlacerConfig // placement configuration, copied from the caller at construction
	rrIndex uint64       // round-robin cursor; advanced atomically, read modulo len(runners)
}
// NewNaivePlacer returns a Placer that walks the runner list in
// round-robin order. The cursor starts at a pseudo-random offset taken
// from the wall clock so independent placers don't all hammer the same
// runner first.
func NewNaivePlacer(cfg *PlacerConfig) Placer {
	logrus.Infof("Creating new naive runnerpool placer with config=%+v", cfg)
	placer := &naivePlacer{cfg: *cfg}
	placer.rrIndex = uint64(time.Now().Nanosecond())
	return placer
}
// GetPlacerConfig returns a copy of the placer's configuration.
func (sp *naivePlacer) GetPlacerConfig() PlacerConfig {
	cfg := sp.cfg
	return cfg
}
// PlaceCall tries to dispatch call onto one of the pool's runners,
// walking the runner list round-robin from a shared atomic cursor. It
// keeps retrying with backoff until the call is placed, the tracker
// reports done (context/timeout), or backoff is exhausted. On failure it
// returns the last runner-pool error if there was one, otherwise
// models.ErrCallTimeoutServerBusy.
func (sp *naivePlacer) PlaceCall(ctx context.Context, rp RunnerPool, call RunnerCall) error {
	state := NewPlacerTracker(ctx, &sp.cfg, call)
	defer state.HandleDone()

	var runnerPoolErr error
	for {
		var runners []Runner
		runners, runnerPoolErr = rp.Runners(ctx, call)
		for j := 0; j < len(runners) && !state.IsDone(); j++ {
			// Advance the shared round-robin cursor atomically. Keep the
			// modulo in uint64: converting the counter to int first (as in
			// `int(i) % len(runners)`) goes negative once the counter's
			// high bit is set, and Go's % then yields a negative remainder,
			// which would panic with an out-of-range index.
			i := atomic.AddUint64(&sp.rrIndex, 1)
			r := runners[i%uint64(len(runners))]
			placed, err := state.TryRunner(r, call)
			if placed {
				// The attempt reached a runner; err is the call's outcome.
				return err
			}
		}
		if !state.RetryAllBackoff(len(runners)) {
			break
		}
	}
	if runnerPoolErr != nil {
		// If we haven't been able to place the function and we got an error
		// from the runner pool, return that error (since we don't have
		// enough runners to handle the current load and the runner pool is
		// having trouble).
		state.HandleFindRunnersFailure(runnerPoolErr)
		return runnerPoolErr
	}
	return models.ErrCallTimeoutServerBusy
}