This repository has been archived by the owner on Nov 30, 2023. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 7
/
resource.go
193 lines (157 loc) · 6.26 KB
/
resource.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
package volumebindingmigration
import (
"bytes"
"context"
providerv1alpha1 "github.com/giantswarm/apiextensions/v3/pkg/apis/provider/v1alpha1"
"github.com/giantswarm/errors/tenant"
"github.com/giantswarm/k8sclient/v5/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/giantswarm/tenantcluster/v3/pkg/tenantcluster"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/client-go/rest"
ctrl "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/yaml"
"github.com/giantswarm/azure-operator/v5/service/controller/key"
"github.com/giantswarm/azure-operator/v5/service/controller/templates/ignition"
)
const (
	// Name is the identifier of the resource.
	Name = "volumebindingmigration"
	// provisionerAzureFile identifies StorageClasses backed by the in-tree
	// azure-file provisioner; EnsureCreated skips those as unsupported.
	provisionerAzureFile = "kubernetes.io/azure-file"
)
// Config holds the dependencies required to construct a Resource.
type Config struct {
	// Logger is used for debug logging throughout the resource.
	Logger micrologger.Logger
	// TenantRestConfigProvider builds REST configs for reaching the tenant
	// cluster's Kubernetes API.
	TenantRestConfigProvider *tenantcluster.TenantCluster
}
// Resource ensures that existing StorageClasses use `WaitForFirstConsumer`
// bind mode.
type Resource struct {
	// logger is used for debug logging throughout the resource.
	logger micrologger.Logger
	// tenantRestConfigProvider builds REST configs for reaching the tenant
	// cluster's Kubernetes API.
	tenantRestConfigProvider *tenantcluster.TenantCluster
}
// New validates the given Config and constructs a Resource from it. It
// returns an invalidConfigError when a required dependency is missing.
func New(config Config) (*Resource, error) {
	if config.Logger == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config)
	}
	if config.TenantRestConfigProvider == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.TenantRestConfigProvider must not be empty", config)
	}

	return &Resource{
		logger:                   config.Logger,
		tenantRestConfigProvider: config.TenantRestConfigProvider,
	}, nil
}
// EnsureCreated ensures that existing StorageClasses use
// `WaitForFirstConsumer` bind mode.
//
// Because volumeBindingMode is an immutable StorageClass field, any present
// object whose bind mode differs from the desired one is deleted and then
// re-created. StorageClasses using the azure-file provisioner are skipped.
func (r *Resource) EnsureCreated(ctx context.Context, obj interface{}) error {
	cr, err := key.ToCustomResource(obj)
	if err != nil {
		return microerror.Mask(err)
	}
	var tenantClusterK8sClient ctrl.Client
	{
		tenantClusterK8sClient, err = r.getTenantClusterClient(ctx, &cr)
		if tenant.IsAPINotAvailable(err) || tenantcluster.IsTimeout(err) {
			// The kubernetes API is not reachable. This usually happens when a new cluster is being created.
			// This makes the whole controller to fail and stops next handlers from being executed even if they are
			// safe to run. We don't want that to happen so we just return and we'll try again during next loop.
			r.logger.Debugf(ctx, "tenant API not available yet")
			r.logger.Debugf(ctx, "canceling resource")
			return nil
		} else if err != nil {
			return microerror.Mask(err)
		}
	}
	r.logger.Debugf(ctx, "ensuring storageclasses use desired volumeBindingMode")
	defaultSCs, err := defaultStorageClasses()
	if err != nil {
		return microerror.Mask(err)
	}
	for i := range defaultSCs {
		desiredObj := defaultSCs[i]
		if desiredObj.Provisioner == provisionerAzureFile {
			r.logger.Debugf(ctx, "storage class %q uses provisioner %q; skipping as unsupported", desiredObj.Name, desiredObj.Provisioner)
			continue
		}
		r.logger.Debugf(ctx, "finding present storage class object %q", desiredObj.Name)
		var presentObj storagev1.StorageClass
		err := tenantClusterK8sClient.Get(ctx, ctrl.ObjectKey{Name: desiredObj.Name, Namespace: desiredObj.Namespace}, &presentObj)
		if apierrors.IsNotFound(err) {
			// All good. We'll create it.
			// Bug fix: log only the name; previously the whole struct was
			// passed to %q, producing unreadable output.
			r.logger.Debugf(ctx, "did not find present storage class object %q", desiredObj.Name)
		} else if err != nil {
			return microerror.Mask(err)
		} else {
			r.logger.Debugf(ctx, "finding if present storage class object %q has desired volumeBindingMode %q", desiredObj.Name, bindingModeString(desiredObj.VolumeBindingMode))
		}
		// If present object matches the desired one, continue to next one.
		if (desiredObj.VolumeBindingMode == nil && presentObj.VolumeBindingMode == nil) || (desiredObj.VolumeBindingMode != nil && presentObj.VolumeBindingMode != nil && *presentObj.VolumeBindingMode == *desiredObj.VolumeBindingMode) {
			r.logger.Debugf(ctx, "present storage class object %q has desired volumeBindingMode: %q", presentObj.Name, bindingModeString(desiredObj.VolumeBindingMode))
			continue
		}
		// Volume bind mode is immutable field so we must delete the present
		// object if it exists. A zero CreationTimestamp / empty
		// ResourceVersion means the Get above hit NotFound, so there is
		// nothing to delete.
		if !presentObj.CreationTimestamp.IsZero() && presentObj.ResourceVersion != "" {
			r.logger.Debugf(ctx, "present storage class object %q does not have desired volumeBindingMode but %q instead", presentObj.Name, bindingModeString(presentObj.VolumeBindingMode))
			r.logger.Debugf(ctx, "deleting present storage class object %q", presentObj.Name)
			err = tenantClusterK8sClient.Delete(ctx, &presentObj)
			if err != nil {
				return microerror.Mask(err)
			}
			r.logger.Debugf(ctx, "deleted present storage class object %q", presentObj.Name)
		}
		r.logger.Debugf(ctx, "creating desired storage class object %q", desiredObj.Name)
		// Finally create the desired object.
		err = tenantClusterK8sClient.Create(ctx, &desiredObj)
		if err != nil {
			return microerror.Mask(err)
		}
		r.logger.Debugf(ctx, "created desired storage class object %q", desiredObj.Name)
	}
	r.logger.Debugf(ctx, "ensured storageclasses use desired volumeBindingMode")
	return nil
}

// bindingModeString renders a possibly-nil *storagev1.VolumeBindingMode for
// logging. Passing the raw pointer to %q would print the pointer value
// instead of the mode name.
func bindingModeString(m *storagev1.VolumeBindingMode) string {
	if m == nil {
		return "<nil>"
	}
	return string(*m)
}
// EnsureDeleted is a no-op; this resource performs no cleanup on deletion.
func (r *Resource) EnsureDeleted(ctx context.Context, obj interface{}) error {
	return nil
}
// Name returns the identifier of this resource handler.
func (r *Resource) Name() string {
	return Name
}
// getTenantClusterClient builds a controller-runtime client for the tenant
// cluster described by the given AzureConfig. It derives a REST config from
// the tenant rest config provider and wraps it in the k8sclient client set.
func (r *Resource) getTenantClusterClient(ctx context.Context, azureConfig *providerv1alpha1.AzureConfig) (ctrl.Client, error) {
	restConfig, err := r.tenantRestConfigProvider.NewRestConfig(ctx, key.ClusterID(azureConfig), key.ClusterAPIEndpoint(*azureConfig))
	if err != nil {
		return nil, microerror.Mask(err)
	}

	// Copy the rest config so the shared one is never mutated downstream.
	clients, err := k8sclient.NewClients(k8sclient.ClientsConfig{
		Logger:     r.logger,
		RestConfig: rest.CopyConfig(restConfig),
	})
	if err != nil {
		return nil, microerror.Mask(err)
	}

	return clients.CtrlClient(), nil
}
// defaultStorageClasses parses the default StorageClass manifests shipped in
// the ignition templates. The manifest text may contain multiple YAML
// documents separated by "---"; documents whose Kind is not "StorageClass"
// (including empty chunks between separators) are skipped.
func defaultStorageClasses() ([]storagev1.StorageClass, error) {
	var result []storagev1.StorageClass
	for _, doc := range bytes.Split([]byte(ignition.DefaultStorageClass), []byte("---")) {
		var sc storagev1.StorageClass
		if err := yaml.Unmarshal(doc, &sc); err != nil {
			return nil, microerror.Mask(err)
		}
		if sc.Kind != "StorageClass" {
			continue
		}
		result = append(result, sc)
	}
	return result, nil
}