main.go
package main

import (
	"context"
	"flag"
	"fmt"
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/kubernetes"
	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
	"sigs.k8s.io/controller-runtime/pkg/manager"
	"sigs.k8s.io/controller-runtime/pkg/metrics/server"
	"sigs.k8s.io/controller-runtime/pkg/webhook"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
	_ "k8s.io/client-go/plugin/pkg/client/auth"

	"github.com/cloudogu/k8s-component-operator/pkg/api/ecosystem"
	k8sv1 "github.com/cloudogu/k8s-component-operator/pkg/api/v1"
	"github.com/cloudogu/k8s-component-operator/pkg/config"
	"github.com/cloudogu/k8s-component-operator/pkg/controllers"
	"github.com/cloudogu/k8s-component-operator/pkg/health"
	"github.com/cloudogu/k8s-component-operator/pkg/helm"
	"github.com/cloudogu/k8s-component-operator/pkg/logging"
	// +kubebuilder:scaffold:imports
)

var (
	scheme = runtime.NewScheme()

	// Set up a preliminary logger before the actual logger is configured;
	// it is replaced later on with a more sophisticated instance.
	operatorLog = ctrl.Log.WithName("component-operator")

	metricsAddr          string
	enableLeaderElection bool
	probeAddr            string
)

var (
	// Version of the application
	Version = "0.0.0"
)

func init() {
	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(k8sv1.AddToScheme(scheme))
	// +kubebuilder:scaffold:scheme
}
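
// main is the entry point of the operator: it delegates to startOperator and
// exits with a non-zero status code on failure.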
func main() {
	err := startOperator()
	if err != nil {
		operatorLog.Error(err, "failed to start operator")
		os.Exit(1)
	}
}
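
// startOperator configures logging, loads the operator configuration, creates
// the controller manager, and runs it until a shutdown signal arrives.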
func startOperator() error {
	err := logging.ConfigureLogger()
	if err != nil {
		return err
	}

	operatorConfig, err := config.NewOperatorConfig(Version)
	if err != nil {
		return fmt.Errorf("failed to create new operator configuration: %w", err)
	}

	options := getK8sManagerOptions(operatorConfig)
	k8sManager, err := ctrl.NewManager(ctrl.GetConfigOrDie(), options)
	if err != nil {
		return fmt.Errorf("failed to create manager: %w", err)
	}

	ctx := ctrl.SetupSignalHandler()
	err = configureManager(ctx, k8sManager, operatorConfig)
	if err != nil {
		return fmt.Errorf("failed to configure manager: %w", err)
	}

	return startK8sManager(ctx, k8sManager)
}
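
// configureManager wires the ecosystem client set, the reconcilers, the
// health runners, and the liveness/readiness checks into the manager.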
func configureManager(ctx context.Context, k8sManager manager.Manager, operatorConfig *config.OperatorConfig) error {
	clientSet, err := createEcosystemClientSet(k8sManager)
	if err != nil {
		return err
	}

	err = configureReconciler(ctx, k8sManager, clientSet, operatorConfig)
	if err != nil {
		return fmt.Errorf("failed to configure reconciler: %w", err)
	}

	err = addRunners(k8sManager, clientSet, operatorConfig)
	if err != nil {
		return err
	}
	// +kubebuilder:scaffold:builder

	err = addChecks(k8sManager)
	if err != nil {
		return fmt.Errorf("failed to add checks to the manager: %w", err)
	}

	return nil
}
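
// addRunners registers the health startup and shutdown handlers with the
// manager so that they run within the manager's lifecycle.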
func addRunners(k8sManager manager.Manager, clientSet ecosystem.ComponentEcosystemInterface, operatorConfig *config.OperatorConfig) error {
	healthStartupHandler := health.NewStartupHandler(operatorConfig.Namespace, clientSet)
	err := k8sManager.Add(healthStartupHandler)
	if err != nil {
		return fmt.Errorf("failed to add health startup handler to the manager: %w", err)
	}

	healthShutdownHandler := health.NewShutdownHandler(clientSet.ComponentV1Alpha1().Components(operatorConfig.Namespace))
	err = k8sManager.Add(healthShutdownHandler)
	if err != nil {
		return fmt.Errorf("failed to add health shutdown handler to the manager: %w", err)
	}

	return nil
}
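
// getK8sManagerOptions binds the operator's command-line flags and builds the
// manager options, restricting the cache for Component objects to the
// operator's namespace.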
func getK8sManagerOptions(operatorConfig *config.OperatorConfig) manager.Options {
	flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.")
	flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.")
	flag.BoolVar(&enableLeaderElection, "leader-elect", false,
		"Enable leader election for controller manager. "+
			"Enabling this will ensure there is only one active controller manager.")
	// Parse the registered flags; without this, command-line overrides of the
	// defaults above would never take effect.
	flag.Parse()

	options := ctrl.Options{
		Scheme:  scheme,
		Metrics: server.Options{BindAddress: metricsAddr},
		Cache: cache.Options{ByObject: map[client.Object]cache.ByObject{
			// Restrict the cache namespace for Components only, as we want to reconcile
			// Deployments, StatefulSets and DaemonSets across all namespaces.
			&k8sv1.Component{}: {Namespaces: map[string]cache.Config{
				operatorConfig.Namespace: {},
			}},
		}},
		WebhookServer:          webhook.NewServer(webhook.Options{Port: 9443}),
		HealthProbeBindAddress: probeAddr,
		LeaderElection:         enableLeaderElection,
		LeaderElectionID:       "951e217a.cloudogu.com",
	}

	return options
}
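
// startK8sManager runs the manager's main loop; it blocks until the given
// context is cancelled, e.g. by a termination signal.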
func startK8sManager(ctx context.Context, k8sManager manager.Manager) error {
	operatorLog.Info("starting manager")

	err := k8sManager.Start(ctx)
	if err != nil {
		return fmt.Errorf("failed to start manager: %w", err)
	}

	return nil
}
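
// configureReconciler loads the helm repository configuration, creates the
// helm client, and registers the component and health reconcilers with the
// manager.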
func configureReconciler(ctx context.Context, k8sManager manager.Manager, clientSet ecosystem.ComponentEcosystemInterface, operatorConfig *config.OperatorConfig) error {
	eventRecorder := k8sManager.GetEventRecorderFor("k8s-component-operator")

	helmRepoData, err := config.GetHelmRepositoryData(ctx, clientSet.CoreV1().ConfigMaps(operatorConfig.Namespace))
	if err != nil {
		return err
	}
	operatorConfig.HelmRepositoryData = helmRepoData

	debug := config.Stage == config.StageDevelopment
	helmClient, err := helm.NewClient(operatorConfig.Namespace, operatorConfig.HelmRepositoryData, debug, logging.FormattingLoggerWithName("helm-client", ctrl.Log.Info))
	if err != nil {
		return fmt.Errorf("failed to create helm client: %w", err)
	}

	componentReconciler := controllers.NewComponentReconciler(clientSet, helmClient, eventRecorder, operatorConfig.Namespace)
	err = componentReconciler.SetupWithManager(k8sManager)
	if err != nil {
		return fmt.Errorf("failed to set up reconciler with manager: %w", err)
	}

	healthReconcilers := health.NewController(operatorConfig.Namespace, clientSet)
	err = healthReconcilers.SetupWithManager(k8sManager)
	if err != nil {
		return fmt.Errorf("failed to set up health reconcilers with manager: %w", err)
	}

	return nil
}
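
// createEcosystemClientSet builds the ecosystem client set, which combines
// the standard Kubernetes clientset with a client for Component resources.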
func createEcosystemClientSet(k8sManager manager.Manager) (*ecosystem.EcosystemClientset, error) {
	clientSet, err := kubernetes.NewForConfig(k8sManager.GetConfig())
	if err != nil {
		return nil, fmt.Errorf("failed to create clientset: %w", err)
	}

	componentClientSet, err := ecosystem.NewComponentClientset(k8sManager.GetConfig(), clientSet)
	if err != nil {
		return nil, fmt.Errorf("failed to create component client set: %w", err)
	}

	return componentClientSet, nil
}
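
// addChecks registers the liveness (healthz) and readiness (readyz) probes
// with the manager; both use a simple ping check.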
func addChecks(mgr manager.Manager) error {
	err := mgr.AddHealthzCheck("healthz", healthz.Ping)
	if err != nil {
		return fmt.Errorf("failed to add healthz check: %w", err)
	}

	err = mgr.AddReadyzCheck("readyz", healthz.Ping)
	if err != nil {
		return fmt.Errorf("failed to add readyz check: %w", err)
	}

	return nil
}