This repository has been archived by the owner on Nov 30, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 7
/
create_restart_kubelets.go
98 lines (80 loc) · 2.83 KB
/
create_restart_kubelets.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
package masters
import (
"context"
"fmt"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute"
"github.com/giantswarm/microerror"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/giantswarm/azure-operator/service/controller/controllercontext"
"github.com/giantswarm/azure-operator/service/controller/internal/state"
"github.com/giantswarm/azure-operator/service/controller/key"
)
// restartKubeletOnWorkersTransition runs "systemctl restart k8s-kubelet" on
// every instance of the tenant cluster's legacy worker VMSS, once the tenant
// API server is reachable. While the API server is not reachable it returns
// currentState with a nil error so the state machine simply retries on the
// next reconciliation loop. On success it transitions to DeploymentCompleted.
func (r *Resource) restartKubeletOnWorkersTransition(ctx context.Context, obj interface{}, currentState state.State) (state.State, error) {
	// Check if API server is up or wait.
	r.logger.LogCtx(ctx, "level", "debug", "message", "Checking if API server is up")
	up, err := r.isApiServerUP(ctx)
	if err != nil {
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("API server is NOT up. Original error was %s", err.Error()))
		// Not an error condition for the state machine: stay put and retry later.
		return currentState, nil
	}
	if !up {
		r.logger.LogCtx(ctx, "level", "debug", "message", "API server is NOT up.")
		return currentState, nil
	}
	r.logger.LogCtx(ctx, "level", "debug", "message", "API server is up")

	cr, err := key.ToCustomResource(obj)
	if err != nil {
		// Return currentState (not the zero state) so the state machine does
		// not lose its position on transient errors.
		return currentState, microerror.Mask(err)
	}

	groupsClient, err := r.getGroupsClient(ctx)
	if err != nil {
		return currentState, microerror.Mask(err)
	}

	group, err := groupsClient.Get(ctx, key.ClusterID(cr))
	if err != nil {
		return currentState, microerror.Mask(err)
	}

	vmssVMsClient, err := r.getVMsClient(ctx)
	if err != nil {
		return currentState, microerror.Mask(err)
	}

	// RunShellScript is the Azure run-command ID for executing shell scripts
	// on Linux VMs.
	commandID := "RunShellScript"
	script := []string{
		"sudo systemctl restart k8s-kubelet",
	}
	runCommandInput := compute.RunCommandInput{
		CommandID: &commandID,
		Script:    &script,
	}

	allWorkerInstances, err := r.allInstances(ctx, cr, key.LegacyWorkerVMSSName)
	if err != nil {
		return currentState, microerror.Mask(err)
	}

	for _, instance := range allWorkerInstances {
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("Sending restart kubelet command to %s", *instance.Name))

		runCommandFuture, err := vmssVMsClient.RunCommand(ctx, *group.Name, key.LegacyWorkerVMSSName(cr), *instance.InstanceID, runCommandInput)
		if err != nil {
			return currentState, microerror.Mask(err)
		}
		// Block until the run-command operation completes on this instance.
		_, err = vmssVMsClient.RunCommandResponder(runCommandFuture.Response())
		if err != nil {
			return currentState, microerror.Mask(err)
		}

		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("Sent restart kubelet command to %s", *instance.Name))
	}

	return DeploymentCompleted, nil
}
// isApiServerUP reports whether the tenant cluster's Kubernetes API server is
// reachable. It probes reachability with a cheap Nodes list call using the
// tenant cluster client from the controller context. It returns
// clientNotFoundError when that client has not been set up yet, and a masked
// error when the probe request fails.
func (r *Resource) isApiServerUP(ctx context.Context) (bool, error) {
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return false, microerror.Mask(err)
	}

	k8sClient := cc.Client.TenantCluster.K8s
	if k8sClient == nil {
		return false, clientNotFoundError
	}

	// Any successful round trip proves the API server is answering requests.
	if _, err := k8sClient.CoreV1().Nodes().List(metav1.ListOptions{}); err != nil {
		return false, microerror.Mask(err)
	}

	return true, nil
}