This repository has been archived by the owner on Jun 19, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 74
/
storage.go
396 lines (334 loc) · 14.6 KB
/
storage.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
/*
Copyright 2019 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"context"
"encoding/json"
"fmt"
"time"
"go.uber.org/zap"
gstatus "google.golang.org/grpc/status"
corev1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
"knative.dev/pkg/controller"
"knative.dev/pkg/logging"
"knative.dev/pkg/reconciler"
. "cloud.google.com/go/storage"
"github.com/google/knative-gcp/pkg/apis/events/v1alpha1"
listers "github.com/google/knative-gcp/pkg/client/listers/events/v1alpha1"
gstorage "github.com/google/knative-gcp/pkg/gclient/storage"
"github.com/google/knative-gcp/pkg/pubsub/adapter/converters"
"github.com/google/knative-gcp/pkg/reconciler/events/storage/resources"
"github.com/google/knative-gcp/pkg/reconciler/pubsub"
"github.com/google/knative-gcp/pkg/utils"
"google.golang.org/grpc/codes"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
const (
	// finalizerName is the finalizer added to (and removed from) CloudStorageSource
	// resources by this reconciler; it reuses the controller agent name declared
	// elsewhere in this package.
	finalizerName = controllerAgentName

	// resourceGroup identifies the CloudStorageSource API group/resource; it is
	// passed to ReconcilePubSub so the underlying PubSub resources are labeled
	// with their owning resource group.
	resourceGroup = "cloudstoragesources.events.cloud.google.com"
)
var (
	// storageEventTypes maps the CloudEvent type strings accepted in a
	// CloudStorageSource spec to the event type strings understood by the
	// GCS notifications API. Event types not present in this map translate
	// to the empty string (see toCloudStorageSourceEventTypes).
	storageEventTypes = map[string]string{
		v1alpha1.CloudStorageSourceFinalize:       "OBJECT_FINALIZE",
		v1alpha1.CloudStorageSourceArchive:        "OBJECT_ARCHIVE",
		v1alpha1.CloudStorageSourceDelete:         "OBJECT_DELETE",
		v1alpha1.CloudStorageSourceMetadataUpdate: "OBJECT_METADATA_UPDATE",
	}
)
// Reconciler is the controller implementation for Google Cloud Storage (GCS) event
// notifications. It embeds PubSubBase, which reconciles the Topic/PullSubscription
// pair backing each notification.
type Reconciler struct {
	*pubsub.PubSubBase

	// storageLister is the informer-backed cache used to read
	// CloudStorageSource resources.
	storageLister listers.CloudStorageSourceLister

	// createClientFn is the function used to create the Storage client that interacts with GCS.
	// This is needed so that we can inject a mock client for UTs purposes.
	createClientFn gstorage.CreateFn
}

// Check that we implement the controller.Reconciler interface.
var _ controller.Reconciler = (*Reconciler)(nil)
// Reconcile compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the CloudStorageSource
// resource with the current status of the resource.
//
// key is the usual "namespace/name" work-queue key. A nil return for an
// unparseable key or a missing resource intentionally avoids requeuing.
func (r *Reconciler) Reconcile(ctx context.Context, key string) error {
	// Convert the namespace/name string into a distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		// Attach the offending key and the split error so the failure can be
		// diagnosed from the logs; previously both were dropped.
		logging.FromContext(ctx).Desugar().Error("Invalid resource key", zap.String("key", key), zap.Error(err))
		return nil
	}
	// Get the CloudStorageSource resource with this namespace/name.
	original, err := r.storageLister.CloudStorageSources(namespace).Get(name)
	if apierrs.IsNotFound(err) {
		// The CloudStorageSource resource may no longer exist, in which case we stop processing.
		logging.FromContext(ctx).Desugar().Error("CloudStorageSource in work queue no longer exists")
		return nil
	} else if err != nil {
		return err
	}
	// Don't modify the informer's copy.
	storage := original.DeepCopy()

	reconcileErr := r.reconcile(ctx, storage)

	// If no error is returned, mark the observed generation.
	if reconcileErr == nil {
		storage.Status.ObservedGeneration = storage.Generation
	}

	if equality.Semantic.DeepEqual(original.Finalizers, storage.Finalizers) {
		// If we didn't change finalizers then don't call updateFinalizers.
	} else if _, updated, fErr := r.updateFinalizers(ctx, storage); fErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update CloudStorageSource finalizers", zap.Error(fErr))
		r.Recorder.Eventf(storage, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update finalizers for CloudStorageSource %q: %v", storage.Name, fErr)
		return fErr
	} else if updated {
		// There was a difference and updateFinalizers said it updated and did not return an error.
		r.Recorder.Eventf(storage, corev1.EventTypeNormal, "Updated", "Updated CloudStorageSource %q finalizers", storage.Name)
	}

	if equality.Semantic.DeepEqual(original.Status, storage.Status) {
		// If we didn't change anything then don't call updateStatus.
		// This is important because the copy we loaded from the informer's
		// cache may be stale and we don't want to overwrite a prior update
		// to status with this stale state.
	} else if uErr := r.updateStatus(ctx, original, storage); uErr != nil {
		logging.FromContext(ctx).Desugar().Warn("Failed to update CloudStorageSource status", zap.Error(uErr))
		r.Recorder.Eventf(storage, corev1.EventTypeWarning, "UpdateFailed",
			"Failed to update status for CloudStorageSource %q: %v", storage.Name, uErr)
		return uErr
	} else if reconcileErr == nil {
		// There was a difference and updateStatus did not return an error.
		r.Recorder.Eventf(storage, corev1.EventTypeNormal, "Updated", "Updated CloudStorageSource %q", storage.Name)
	}

	if reconcileErr != nil {
		r.Recorder.Event(storage, corev1.EventTypeWarning, "InternalError", reconcileErr.Error())
	}
	return reconcileErr
}
// reconcile drives the CloudStorageSource toward its desired state: on deletion
// it tears down the GCS notification and PubSub resources then removes the
// finalizer; otherwise it ensures the finalizer, the PubSub pipeline, and the
// GCS notification exist, updating Status conditions as it goes.
func (r *Reconciler) reconcile(ctx context.Context, storage *v1alpha1.CloudStorageSource) error {
	ctx = logging.WithLogger(ctx, r.Logger.With(zap.Any("storage", storage)))

	storage.Status.InitializeConditions()

	// See if the source has been deleted.
	if storage.DeletionTimestamp != nil {
		logging.FromContext(ctx).Desugar().Debug("Deleting CloudStorageSource notification")
		if err := r.deleteNotification(ctx, storage); err != nil {
			storage.Status.MarkNotificationNotReady("NotificationDeleteFailed", "Failed to delete CloudStorageSource notification: %s", err.Error())
			return err
		}
		storage.Status.MarkNotificationNotReady("NotificationDeleted", "Successfully deleted CloudStorageSource notification: %s", storage.Status.NotificationID)
		if err := r.PubSubBase.DeletePubSub(ctx, storage); err != nil {
			return err
		}
		// Only set the notificationID to empty after we successfully deleted the PubSub resources.
		// Otherwise, we may leak them.
		storage.Status.NotificationID = ""
		removeFinalizer(storage)
		return nil
	}

	// Ensure that there's finalizer there, since we're about to attempt to
	// change external state with the topic, so we need to clean it up.
	addFinalizer(storage)

	// Reconcile the Topic/PullSubscription pair the notification publishes to;
	// this must exist before the notification can be created against it.
	topic := resources.GenerateTopicName(storage)
	_, _, err := r.PubSubBase.ReconcilePubSub(ctx, storage, topic, resourceGroup)
	if err != nil {
		return err
	}

	notification, err := r.reconcileNotification(ctx, storage)
	if err != nil {
		storage.Status.MarkNotificationNotReady("NotificationReconcileFailed", "Failed to reconcile CloudStorageSource notification: %s", err.Error())
		return err
	}
	storage.Status.MarkNotificationReady(notification)
	return nil
}
// reconcileNotification ensures a GCS bucket notification exists for the
// source's bucket and returns its ID. It resolves the project ID into Status
// if not already set, reuses an existing notification matching
// Status.NotificationID, and otherwise creates a new one targeting the
// reconciled PubSub topic.
func (r *Reconciler) reconcileNotification(ctx context.Context, storage *v1alpha1.CloudStorageSource) (string, error) {
	if storage.Status.ProjectID == "" {
		projectID, err := utils.ProjectID(storage.Spec.Project)
		if err != nil {
			logging.FromContext(ctx).Desugar().Error("Failed to find project id", zap.Error(err))
			return "", err
		}
		// Set the projectID in the status.
		storage.Status.ProjectID = projectID
	}

	client, err := r.createClientFn(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create CloudStorageSource client", zap.Error(err))
		return "", err
	}
	defer client.Close()

	// Load the Bucket.
	bucket := client.Bucket(storage.Spec.Bucket)
	notifications, err := bucket.Notifications(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to fetch existing notifications", zap.Error(err))
		return "", err
	}

	// If the notification does exist, then return its ID.
	if existing, ok := notifications[storage.Status.NotificationID]; ok {
		return existing.ID, nil
	}

	// If the notification does not exist, then create it.

	// Add our own converter type as a customAttribute, so the receive adapter
	// knows how to convert the resulting PubSub messages.
	customAttributes := map[string]string{
		converters.KnativeGCPConverter: converters.CloudStorageConverter,
	}

	// Notification and JSONPayload come from the dot-imported
	// cloud.google.com/go/storage package.
	nc := &Notification{
		TopicProjectID:   storage.Status.ProjectID,
		TopicID:          storage.Status.TopicID,
		PayloadFormat:    JSONPayload,
		EventTypes:       r.toCloudStorageSourceEventTypes(storage.Spec.EventTypes),
		ObjectNamePrefix: storage.Spec.ObjectNamePrefix,
		CustomAttributes: customAttributes,
	}

	notification, err := bucket.AddNotification(ctx, nc)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create CloudStorageSource notification", zap.Error(err))
		return "", err
	}
	return notification.ID, nil
}
// toCloudStorageSourceEventTypes translates the CloudEvent type strings from
// the source spec into the GCS notification event type strings, preserving
// order. A type missing from storageEventTypes maps to the empty string.
func (r *Reconciler) toCloudStorageSourceEventTypes(eventTypes []string) []string {
	translated := make([]string, len(eventTypes))
	for i, eventType := range eventTypes {
		translated[i] = storageEventTypes[eventType]
	}
	return translated
}
// deleteNotification looks at the status.NotificationID and if non-empty,
// hence indicating that we have created a notification successfully
// in the CloudStorageSource, remove it.
//
// A missing notification (deleted out-of-band, or a NotFound from the delete
// call itself) is treated as success.
func (r *Reconciler) deleteNotification(ctx context.Context, storage *v1alpha1.CloudStorageSource) error {
	if storage.Status.NotificationID == "" {
		return nil
	}

	// At this point the project should have been populated.
	// Querying CloudStorageSource as the notification could have been deleted outside the cluster (e.g, through gcloud).
	client, err := r.createClientFn(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to create CloudStorageSource client", zap.Error(err))
		return err
	}
	defer client.Close()

	// Load the Bucket.
	bucket := client.Bucket(storage.Spec.Bucket)
	notifications, err := bucket.Notifications(ctx)
	if err != nil {
		logging.FromContext(ctx).Desugar().Error("Failed to fetch existing notifications", zap.Error(err))
		return err
	}

	// This is bit wonky because, we could always just try to delete, but figuring out
	// if an error returned is NotFound seems to not really work, so, we'll try
	// checking first the list and only then deleting.
	if existing, ok := notifications[storage.Status.NotificationID]; ok {
		logging.FromContext(ctx).Desugar().Debug("Found existing notification", zap.Any("notification", existing))
		err = bucket.DeleteNotification(ctx, storage.Status.NotificationID)
		if err == nil {
			logging.FromContext(ctx).Desugar().Debug("Deleted Notification", zap.String("notificationId", storage.Status.NotificationID))
			return nil
		}
		// Not a gRPC status error at all: surface it.
		if st, ok := gstatus.FromError(err); !ok {
			logging.FromContext(ctx).Desugar().Error("Failed from CloudStorageSource client while deleting CloudStorageSource notification", zap.String("notificationId", storage.Status.NotificationID), zap.Error(err))
			return err
		} else if st.Code() != codes.NotFound {
			// Any gRPC error other than NotFound is a real failure;
			// NotFound means someone else already deleted it, which is fine.
			logging.FromContext(ctx).Desugar().Error("Failed to delete CloudStorageSource notification", zap.String("notificationId", storage.Status.NotificationID), zap.Error(err))
			return err
		}
	}
	return nil
}
// addFinalizer ensures our finalizer is present on the source. Building a
// string set first deduplicates, and List() hands back a sorted slice.
func addFinalizer(s *v1alpha1.CloudStorageSource) {
	fin := sets.NewString(s.Finalizers...)
	fin.Insert(finalizerName)
	s.Finalizers = fin.List()
}
// removeFinalizer drops our finalizer from the source, if present. The
// set round-trip also deduplicates and sorts the remaining finalizers.
func removeFinalizer(s *v1alpha1.CloudStorageSource) {
	fin := sets.NewString(s.Finalizers...)
	fin.Delete(finalizerName)
	s.Finalizers = fin.List()
}
// updateStatus pushes desired.Status to the API server, retrying on update
// conflicts. It also emits a readiness event and a metric the first time the
// source transitions to Ready.
func (r *Reconciler) updateStatus(ctx context.Context, original *v1alpha1.CloudStorageSource, desired *v1alpha1.CloudStorageSource) error {
	existing := original.DeepCopy()
	return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
		// The first iteration tries to use the informer's state, subsequent attempts fetch the latest state via API.
		if attempts > 0 {
			existing, err = r.RunClientSet.EventsV1alpha1().CloudStorageSources(desired.Namespace).Get(desired.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
		}

		// Check if there is anything to update.
		if equality.Semantic.DeepEqual(existing.Status, desired.Status) {
			return nil
		}

		// Detect the not-ready -> ready transition before overwriting existing.Status.
		becomesReady := desired.Status.IsReady() && !existing.Status.IsReady()

		existing.Status = desired.Status
		_, err = r.RunClientSet.EventsV1alpha1().CloudStorageSources(desired.Namespace).UpdateStatus(existing)

		if err == nil && becomesReady {
			// TODO compute duration since last non-ready. See https://github.com/google/knative-gcp/issues/455.
			duration := time.Since(existing.ObjectMeta.CreationTimestamp.Time)
			logging.FromContext(ctx).Desugar().Info("CloudStorageSource became ready", zap.Any("after", duration))
			r.Recorder.Event(existing, corev1.EventTypeNormal, "ReadinessChanged", fmt.Sprintf("CloudStorageSource %q became ready", existing.Name))
			// Metric failure is logged but does not fail the status update.
			if metricErr := r.StatsReporter.ReportReady("CloudStorageSource", existing.Namespace, existing.Name, duration); metricErr != nil {
				logging.FromContext(ctx).Desugar().Error("Failed to record ready for CloudStorageSource", zap.Error(metricErr))
			}
		}

		return err
	})
}
// updateFinalizers is a generic method for future compatibility with a
// reconciler SDK.
//
// It diffs desired's finalizers against the informer's current copy and, when
// they differ on our finalizer, issues a JSON merge patch (pinned to the
// current resourceVersion so a concurrent update causes a conflict). Returns
// the patched resource, whether a patch was attempted, and any error.
func (r *Reconciler) updateFinalizers(ctx context.Context, desired *v1alpha1.CloudStorageSource) (*v1alpha1.CloudStorageSource, bool, error) {
	storage, err := r.storageLister.CloudStorageSources(desired.Namespace).Get(desired.Name)
	if err != nil {
		return nil, false, err
	}

	// Don't modify the informers copy.
	existing := storage.DeepCopy()

	var finalizers []string

	// If there's nothing to update, just return.
	existingFinalizers := sets.NewString(existing.Finalizers...)
	desiredFinalizers := sets.NewString(desired.Finalizers...)

	if desiredFinalizers.Has(finalizerName) {
		if existingFinalizers.Has(finalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Add the finalizer.
		finalizers = append(existing.Finalizers, finalizerName)
	} else {
		if !existingFinalizers.Has(finalizerName) {
			// Nothing to do.
			return desired, false, nil
		}
		// Remove the finalizer.
		existingFinalizers.Delete(finalizerName)
		finalizers = existingFinalizers.List()
	}

	// Including resourceVersion makes the merge patch conditional: it fails if
	// the object changed since we read it.
	mergePatch := map[string]interface{}{
		"metadata": map[string]interface{}{
			"finalizers":      finalizers,
			"resourceVersion": existing.ResourceVersion,
		},
	}

	patch, err := json.Marshal(mergePatch)
	if err != nil {
		return desired, false, err
	}

	update, err := r.RunClientSet.EventsV1alpha1().CloudStorageSources(existing.Namespace).Patch(existing.Name, types.MergePatchType, patch)
	return update, true, err
}