entrypoint.go
package agent

import (
	"context"
	"fmt"

	"github.com/tychoish/fun/pubsub"
	"go.uber.org/zap"
	"k8s.io/client-go/kubernetes"

	vmclient "github.com/neondatabase/autoscaling/neonvm/client/clientset/versioned"
	"github.com/neondatabase/autoscaling/pkg/agent/billing"
	"github.com/neondatabase/autoscaling/pkg/agent/schedwatch"
	"github.com/neondatabase/autoscaling/pkg/util"
	"github.com/neondatabase/autoscaling/pkg/util/watch"
)
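
// MainRunner carries everything the agent needs to run: environment-derived
// arguments, the parsed config, and clients for both the core Kubernetes API
// and the NeonVM custom resource API.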
type MainRunner struct {
	EnvArgs    EnvArgs
	Config     *Config
	KubeClient *kubernetes.Clientset
	VMClient   *vmclient.Clientset
}
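
// Run starts the agent's watchers, billing collector, and metrics servers,
// then blocks handling VM events until ctx is canceled.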
func (r MainRunner) Run(logger *zap.Logger, ctx context.Context) error {
	vmEventQueue := pubsub.NewUnlimitedQueue[vmEvent]()
	defer vmEventQueue.Close()
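
	// pushToQueue is handed to the VM watcher; the queue is unbounded, so
	// adding an event never blocks the watch callbacks.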
	pushToQueue := func(ev vmEvent) {
		if err := vmEventQueue.Add(ev); err != nil {
			logger.Warn("Failed to add vmEvent to queue", zap.Object("event", ev), zap.Error(err))
		}
	}

	watchMetrics := watch.NewMetrics("autoscaling_agent_watchers")
	perVMMetrics, vmPromReg := makePerVMMetrics()

	logger.Info("Starting VM watcher")
	vmWatchStore, err := startVMWatcher(ctx, logger, r.Config, r.VMClient, watchMetrics, perVMMetrics, r.EnvArgs.K8sNodeName, pushToQueue)
	if err != nil {
		return fmt.Errorf("error starting VM watcher: %w", err)
	}
	defer vmWatchStore.Stop()
	logger.Info("VM watcher started")
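
	// Track the scheduler (by name) so the agent knows which scheduler pod is
	// currently running.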
	schedTracker, err := schedwatch.StartSchedulerWatcher(ctx, logger, r.KubeClient, watchMetrics, r.Config.Scheduler.SchedulerName)
	if err != nil {
		return fmt.Errorf("error starting scheduler watcher: %w", err)
	}
	defer schedTracker.Stop()
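
	// globalState handles the VM events consumed in the loop below; its
	// registry backs the "global" metrics server started further down.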
	globalState, globalPromReg := r.newAgentState(logger, r.EnvArgs.K8sPodIP, schedTracker)
	watchMetrics.MustRegister(globalPromReg)

	logger.Info("Starting billing metrics collector")
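	// Index the watch store by node so the collector only reports VMs running
	// on this agent's node.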
	storeForNode := watch.NewIndexedStore(vmWatchStore, billing.NewVMNodeIndex(r.EnvArgs.K8sNodeName))
	metrics := billing.NewPromMetrics()
	metrics.MustRegister(globalPromReg)
	err = billing.StartBillingMetricsCollector(ctx, logger, &r.Config.Billing, storeForNode, metrics)
	if err != nil {
		return fmt.Errorf("error starting billing metrics collector: %w", err)
	}
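
	// Expose the two registries on separate ports: agent-wide metrics on 9100,
	// per-VM metrics on 9101.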
	promLogger := logger.Named("prometheus")
	if err := util.StartPrometheusMetricsServer(ctx, promLogger.Named("global"), 9100, globalPromReg); err != nil {
		return fmt.Errorf("error starting global prometheus metrics server: %w", err)
	}
	if err := util.StartPrometheusMetricsServer(ctx, promLogger.Named("per-vm"), 9101, vmPromReg); err != nil {
		return fmt.Errorf("error starting per-VM prometheus metrics server: %w", err)
	}

	if r.Config.DumpState != nil {
		logger.Info("Starting 'dump state' server")
		if err := globalState.StartDumpStateServer(ctx, logger.Named("dump-state"), r.Config.DumpState); err != nil {
			return fmt.Errorf("error starting dump state server: %w", err)
		}
	}

	logger.Info("Entering main loop")
	for {
		event, err := vmEventQueue.Wait(ctx)
		if err != nil {
			if ctx.Err() != nil {
				// treat context canceled as a "normal" exit (because it is)
				return nil
			}
			logger.Error("vmEventQueue returned error", zap.Error(err))
			return err
		}
		globalState.handleEvent(ctx, logger, event)
	}
}