/
current.go
120 lines (97 loc) · 3.72 KB
/
current.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
package s3object
import (
"bytes"
"context"
"fmt"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/giantswarm/microerror"
"github.com/giantswarm/operatorkit/controller/context/resourcecanceledcontext"
"github.com/giantswarm/aws-operator/service/controller/legacy/v22patch1/controllercontext"
"github.com/giantswarm/aws-operator/service/controller/legacy/v22patch1/key"
)
// GetCurrentState collects the current S3 objects of the tenant cluster's
// bucket. It returns a map of object key to BucketObjectState, or nil when the
// bucket does not exist or when the resource is canceled during deletion.
func (r *Resource) GetCurrentState(ctx context.Context, obj interface{}) (interface{}, error) {
	customObject, err := key.ToCustomObject(obj)
	if err != nil {
		return nil, microerror.Mask(err)
	}
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return nil, microerror.Mask(err)
	}

	// During deletion, it might happen that the encryption key got already
	// deleted. In such a case we do not have to do anything here anymore. The
	// desired state computation usually requires the encryption key to come up
	// with the deletion state, but in case it is gone we do not have to do
	// anything here anymore. The current implementation relies on the bucket
	// deletion of the s3bucket resource, which deletes all S3 objects and the
	// bucket itself.
	if key.IsDeleted(customObject) && cc.Status.TenantCluster.EncryptionKey == "" {
		r.logger.LogCtx(ctx, "level", "debug", "message", "no encryption key in controller context")
		r.logger.LogCtx(ctx, "level", "debug", "message", "canceling resource")
		resourcecanceledcontext.SetCanceled(ctx)
		return nil, nil
	}

	bucketName := key.BucketName(customObject, cc.Status.TenantCluster.AWSAccountID)

	r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("finding the S3 bucket %#q", bucketName))

	listOutput, err := cc.AWSClient.S3.ListObjectsV2(&s3.ListObjectsV2Input{
		Bucket: aws.String(bucketName),
	})
	if IsBucketNotFound(err) {
		// A missing bucket is not an error here; there is simply no current
		// state to report.
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("did not find the S3 bucket %#q", bucketName))
		return nil, nil
	} else if err != nil {
		return nil, microerror.Mask(err)
	}
	objects := listOutput.Contents

	r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("found the S3 bucket %#q", bucketName))

	currentBucketState := make(map[string]BucketObjectState, len(objects))

	r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("finding the contents of %d S3 objects", len(objects)))

	for _, object := range objects {
		objectKey := *object.Key

		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("finding the content of the S3 object %#q", objectKey))

		body, err := r.getBucketObjectBody(ctx, bucketName, objectKey)
		if err != nil {
			return nil, microerror.Mask(err)
		}

		currentBucketState[objectKey] = BucketObjectState{
			Body:   body,
			Bucket: bucketName,
			Key:    objectKey,
		}

		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("found the content of the S3 object %#q", objectKey))
	}

	r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("found the contents of %d S3 objects", len(objects)))

	return currentBucketState, nil
}
// getBucketObjectBody fetches the body of the S3 object keyName in bucketName
// and returns it as a string. When the object or the bucket does not exist the
// method returns an empty string and no error, since a missing object simply
// means there is no current state for it.
func (r *Resource) getBucketObjectBody(ctx context.Context, bucketName string, keyName string) (string, error) {
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return "", microerror.Mask(err)
	}

	input := &s3.GetObjectInput{
		Bucket: aws.String(bucketName),
		Key:    aws.String(keyName),
	}
	result, err := cc.AWSClient.S3.GetObject(input)
	if IsObjectNotFound(err) || IsBucketNotFound(err) {
		r.logger.LogCtx(ctx, "level", "debug", "message", fmt.Sprintf("did not find S3 object %#q", keyName))
		return "", nil
	} else if err != nil {
		return "", microerror.Mask(err)
	}
	// The response body holds an open connection and must be closed so the
	// underlying HTTP transport can reuse it.
	defer result.Body.Close()

	buf := new(bytes.Buffer)
	// Do not ignore the read error; a partially read body must not be
	// reported as the object's full content.
	if _, err := buf.ReadFrom(result.Body); err != nil {
		return "", microerror.Mask(err)
	}

	return buf.String(), nil
}