-
Notifications
You must be signed in to change notification settings - Fork 111
/
run_autoscaler.go
117 lines (99 loc) · 3.59 KB
/
run_autoscaler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
package worker
import (
"context"
"time"
"github.com/rilldata/rill/admin/database"
"github.com/rilldata/rill/admin/metrics"
"go.uber.org/zap"
)
// legacyRecommendTime is the maximum age of a slots recommendation before it
// is considered stale and skipped by the autoscaler.
const legacyRecommendTime = 24 * time.Hour
// scaleThreshold is the relative deviation (10%) between a project's current
// slots and the recommended slots that must be exceeded before rescaling.
const scaleThreshold = 0.10
// runAutoscaler fetches the current autoscaler slot recommendations and
// applies them to their projects. A recommendation is skipped when it is
// stale (older than legacyRecommendTime), non-positive, or within
// scaleThreshold of the project's current slots. Errors on individual
// projects are logged and do not abort the run.
func (w *Worker) runAutoscaler(ctx context.Context) error {
	recs, ok, err := w.allRecommendations(ctx)
	if err != nil {
		w.logger.Error("failed to autoscale: unable to fetch recommended slots", zap.Error(err))
		return err
	}
	if !ok {
		w.logger.Debug("skipping autoscaler: no metrics project configured")
		return nil
	}
	for _, rec := range recs {
		// If UpdatedOn is too old, the recommendation is stale and may not be trusted.
		if time.Since(rec.UpdatedOn) >= legacyRecommendTime {
			w.logger.Debug("skipping autoscaler: the recommendation is stale", zap.String("project_id", rec.ProjectID), zap.Time("recommendation_updated_on", rec.UpdatedOn))
			continue
		}
		if rec.RecommendedSlots <= 0 {
			w.logger.Debug("skipping autoscaler: the recommend slot is <= 0", zap.String("project_id", rec.ProjectID), zap.Int("recommendation_slots", rec.RecommendedSlots))
			continue
		}
		targetProject, err := w.admin.DB.FindProject(ctx, rec.ProjectID)
		if err != nil {
			w.logger.Debug("failed to find project", zap.String("project_id", rec.ProjectID), zap.Error(err))
			continue
		}
		if !shouldScale(targetProject.ProdSlots, rec.RecommendedSlots) {
			w.logger.Debug("skipping autoscaler: target slots are within threshold of original slots",
				zap.Int("project_slots", targetProject.ProdSlots),
				zap.Int("recommend_slots", rec.RecommendedSlots),
				zap.Float64("scale_threshold_percentage", scaleThreshold),
				zap.String("project_id", targetProject.ID),
			)
			continue
		}
		// Copy every existing option verbatim and change only ProdSlots, so the
		// update does not clobber unrelated project settings.
		updatedProject, err := w.admin.UpdateProject(ctx, targetProject, &database.UpdateProjectOptions{
			Name:                 targetProject.Name,
			Description:          targetProject.Description,
			Public:               targetProject.Public,
			GithubURL:            targetProject.GithubURL,
			GithubInstallationID: targetProject.GithubInstallationID,
			ProdVersion:          targetProject.ProdVersion,
			ProdBranch:           targetProject.ProdBranch,
			ProdVariables:        targetProject.ProdVariables,
			ProdDeploymentID:     targetProject.ProdDeploymentID,
			ProdSlots:            rec.RecommendedSlots,
			ProdTTLSeconds:       targetProject.ProdTTLSeconds,
			Provisioner:          targetProject.Provisioner,
			Annotations:          targetProject.Annotations,
		})
		if err != nil {
			w.logger.Error("failed to autoscale", zap.String("project_id", rec.ProjectID), zap.Error(err))
			continue
		}
		// Fix: the "project_id" field previously carried the project *name*,
		// breaking log correlation by ID; log both fields with correct values.
		w.logger.Info("succeeded in autoscaling", zap.String("project_id", updatedProject.ID), zap.String("project_name", updatedProject.Name), zap.Int("project_slots", updatedProject.ProdSlots))
	}
	return nil
}
// allRecommendations returns every autoscaler slots recommendation from the
// metrics project, fetched in fixed-size pages. The boolean result is false
// when no metrics project is configured.
func (w *Worker) allRecommendations(ctx context.Context) ([]metrics.AutoscalerSlotsRecommendation, bool, error) {
	client, ok, err := w.admin.OpenMetricsProject(ctx)
	if err != nil {
		return nil, false, err
	}
	if !ok {
		return nil, false, nil
	}
	const pageSize = 1000
	var all []metrics.AutoscalerSlotsRecommendation
	for offset := 0; ; offset += pageSize {
		page, err := client.AutoscalerSlotsRecommendations(ctx, pageSize, offset)
		if err != nil {
			return nil, false, err
		}
		all = append(all, page...)
		// A short (or empty) page means there is nothing further to fetch.
		if len(page) < pageSize {
			break
		}
	}
	return all, true, nil
}
// shouldScale reports whether recommendSlots deviates from originSlots by
// more than scaleThreshold in either direction, i.e. whether the project
// should actually be rescaled.
func shouldScale(originSlots, recommendSlots int) bool {
	origin := float64(originSlots)
	rec := float64(recommendSlots)
	withinBand := rec >= origin*(1-scaleThreshold) && rec <= origin*(1+scaleThreshold)
	return !withinBand
}