/
resource.go
289 lines (260 loc) · 10.1 KB
/
resource.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
package bridgezone
import (
"context"
"reflect"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/route53"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"k8s.io/client-go/kubernetes"
clientaws "github.com/giantswarm/aws-operator/v16/client/aws"
"github.com/giantswarm/aws-operator/v16/service/controller/controllercontext"
"github.com/giantswarm/aws-operator/v16/service/internal/credential"
)
const (
name = "bridgezone"
)
// Config holds the dependencies and settings required to construct a new
// bridgezone Resource via New.
type Config struct {
	// HostAWSConfig is the base AWS client configuration for the control
	// plane (host) account; its RoleARN is overridden per request when
	// building the default tenant account client (see route53Clients).
	HostAWSConfig clientaws.Config
	// K8sClient is used to read the default credential secret in order to
	// resolve the default tenant account role ARN.
	K8sClient kubernetes.Interface
	// Logger is the logger used by the resource.
	Logger micrologger.Logger
	// Route53Enabled indicates whether Route53 DNS management is enabled
	// for this installation.
	Route53Enabled bool
}
// Resource is the bridgezone resource making sure we have fallback delegation
// in the old DNS structure.
//
// TODO: This is only for the migration period. When we delete the
// "intermediate" zone this resource becomes a noop and we do not need it
// anymore.
//
// Old structure looks like:
//
// installation.eu-central-1.aws.gigantic.io (control plane account)
// └── NS k8s.installation.eu-central-1.aws.gigantic.io (default control plane account)
//
// k8s.installation.eu-central-1.aws.gigantic.io (default control plane account)
// ├── A api.old_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io
// ├── A ingress.old_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io
// ├── A api.old_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.old_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io
//
// New structure looks like:
//
// installation.eu-central-1.aws.gigantic.io (control plane account)
// └── NS new_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
// └── NS new_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
//
// new_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
// ├── A api.new_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.new_cluster_a.k8s.installation.eu-central-1.aws.gigantic.io
//
// new_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
// ├── A api.new_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.new_cluster_b.k8s.installation.eu-central-1.aws.gigantic.io
//
// For the migration period for new clusters we need also to add delegation to
// k8s.installation.eu-central-1.aws.gigantic.io because of the AWS DNS caching issues.
//
// installation.eu-central-1.aws.gigantic.io (control plane account)
// ├── NS k8s.installation.eu-central-1.aws.gigantic.io (default tenant account)
// └── NS cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
//
// k8s.installation.eu-central-1.aws.gigantic.io (default tenant account)
// ├── NS cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
// ├── A api.old_cluster.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.old_cluster.k8s.installation.eu-central-1.aws.gigantic.io
//
// cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc tenant account)
// ├── A api.cluster_id.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.cluster_id.k8s.installation.eu-central-1.aws.gigantic.io
//
// NOTE: In the code below k8s.installation.eu-central-1.aws.gigantic.io zone is called
// "intermediate" and cluster_id.k8s.installation.eu-central-1.aws.gigantic.io zone is
// called "final". This resource *only* ensures we have delegation from the
// intermediate zone to the final zone, but only if the intermediate zone
// exists.
//
// After everything is fully migrated the DNS layout should look like:
//
// installation.eu-central-1.aws.gigantic.io (control plane account)
// ├── NS k8s.installation.eu-central-1.aws.gigantic.io (default guest account)
// └── NS cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc guest account)
//
// k8s.installation.eu-central-1.aws.gigantic.io (default guest account)
// └── NS cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc guest account)
//
// cluster_id.k8s.installation.eu-central-1.aws.gigantic.io (byoc guest account)
// ├── A api.cluster_id.k8s.installation.eu-central-1.aws.gigantic.io
// └── A ingress.cluster_id.k8s.installation.eu-central-1.aws.gigantic.io
//
// At this point we should be fine with removing
// k8s.installation.eu-central-1.aws.gigantic.io NS record from
// installation.eu-central-1.aws.gigantic.io zone. Then after a couple of days
// when delegation propagates and DNS caches are refreshed we can delete
// k8s.installation.eu-central-1.aws.gigantic.io zone from the default guest
// account.
//
// NOTE: To complete full migration we need to start reconciling "hostpost"
// CloudFormation stack. This stack is responsible for creating
// cluster_id.k8s.installation.eu-central-1.aws.gigantic.io delegation in the
// installation.eu-central-1.aws.gigantic.io. Till this happens this resource
// cannot be deleted.
//
// See https://github.com/giantswarm/aws-operator/pull/1373.
type Resource struct {
	// hostAWSConfig is the base AWS client config for the control plane
	// account; RoleARN is set per call in route53Clients.
	hostAWSConfig clientaws.Config
	// k8sClient is used to resolve the default tenant account role ARN
	// from the credential secret.
	k8sClient kubernetes.Interface
	logger    micrologger.Logger
	// route53Enabled gates whether the resource performs any Route53
	// operations at all.
	route53Enabled bool
}
// New validates the given configuration and returns an initialized bridgezone
// Resource. All fields of config except Route53Enabled are mandatory.
func New(config Config) (*Resource, error) {
	var zero clientaws.Config
	if reflect.DeepEqual(config.HostAWSConfig, zero) {
		return nil, microerror.Maskf(invalidConfigError, "%T.HostAWSConfig must not be empty", config)
	}
	if config.K8sClient == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.K8sClient must not be empty", config)
	}
	if config.Logger == nil {
		return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config)
	}

	return &Resource{
		hostAWSConfig:  config.HostAWSConfig,
		k8sClient:      config.K8sClient,
		logger:         config.Logger,
		route53Enabled: config.Route53Enabled,
	}, nil
}
// Name returns the resource name ("bridgezone") used to identify this
// resource within the operator framework.
func (r *Resource) Name() string {
	return name
}
// findHostedZoneID fetches Route53 hosted zone IDs based on a given name. The
// implementation fetches up to 100 matching results to find the right one. The
// bridgezone resource here is only concerned with the hosted zone ID of the
// hosted zone name provided. The desired ID will always be carried in the first
// Route53 response as the one we want to fetch is the most accurate and always
// listed as the first item in the response. This is because of the
// lexicographical order of the response items as the API documentation puts it.
// See also
// https://godoc.org/github.com/aws/aws-sdk-go/service/route53#Route53.ListHostedZonesByName.
//
// Retrieves a list of your hosted zones in lexicographic order.
//
// Here is an example to make it clearer. Let's consider the following hosted
// zone name.
//
// 9cvgo.k8s.ginger.eu-central-1.aws.gigantic.io
//
// Given this name, findHostedZoneID will receive a response from Route53
// similar to the following example, containing a single hosted zone carrying
// its ID.
//
// {
// ...
// HostedZones: [{
// ...
// Id: "/hostedzone/Z1A4QS1NDU6NW6",
// Name: "9cvgo.k8s.ginger.eu-central-1.aws.gigantic.io.",
// ...
// }],
// ...
// }
//
// The example above was about a very specific domain name, which list result
// could only find a single item in the response. Let's consider a less specific
// domain name as input for findHostedZoneID.
//
// k8s.ginger.eu-central-1.aws.gigantic.io
//
// The result from Route53 will again list all the children within the given
// domain name. In the example response below there were only two tenant
// clusters.
//
// {
// ...
// HostedZones: [{
// ...
// Id: "/hostedzone/Z1HJGG5VLG8GZH",
// Name: "k8s.ginger.eu-central-1.aws.gigantic.io.",
// ...
// },{
// ...
// Id: "/hostedzone/Z1KSFLSM1JEQYM",
// Name: "0tz6i.k8s.ginger.eu-central-1.aws.gigantic.io.",
// ...
// },{
// ...
// Id: "/hostedzone/Z1A4QS1NDU6NW6",
// Name: "9cvgo.k8s.ginger.eu-central-1.aws.gigantic.io.",
// ...
// }],
// ...
// }
// findHostedZoneID resolves the Route53 hosted zone ID for the given hosted
// zone name. It lists hosted zones starting lexicographically at name (see
// the doc block above for why the desired zone is always in the first page of
// results) and returns the ID of the zone whose name matches. A notFound
// error is returned when no hosted zone with the given name exists.
func (r *Resource) findHostedZoneID(ctx context.Context, client *route53.Route53, name string) (string, error) {
	in := &route53.ListHostedZonesByNameInput{
		DNSName: aws.String(name),
	}

	// Use the WithContext variant so cancellation of ctx is honoured,
	// consistent with getNameServersAndTTL below. The previous call ignored
	// ctx entirely.
	out, err := client.ListHostedZonesByNameWithContext(ctx, in)
	if err != nil {
		return "", microerror.Mask(err)
	}

	// Route53 returns fully qualified zone names carrying a trailing dot,
	// e.g. "9cvgo.k8s.ginger.eu-central-1.aws.gigantic.io." (see the example
	// responses above). Normalize both sides before comparing so the lookup
	// works whether or not the caller includes the trailing dot, matching
	// the normalization done in getNameServersAndTTL.
	want := strings.TrimSuffix(name, ".")
	for _, hostedZone := range out.HostedZones {
		if strings.TrimSuffix(*hostedZone.Name, ".") == want {
			return *hostedZone.Id, nil
		}
	}

	return "", microerror.Maskf(notFoundError, "hosted zone name %#q", name)
}
// getNameServersAndTTL fetches the NS record set named name from the hosted
// zone identified by zoneID and returns the name server values together with
// the record's TTL. A notFound error is returned when no matching NS record
// exists in the zone.
func (r *Resource) getNameServersAndTTL(ctx context.Context, client *route53.Route53, zoneID, name string) (nameServers []string, ttl int64, err error) {
	maxItems := "1"
	recordType := route53.RRTypeNs
	input := &route53.ListResourceRecordSetsInput{
		HostedZoneId:    &zoneID,
		MaxItems:        &maxItems,
		StartRecordName: &name,
		StartRecordType: &recordType,
	}

	output, err := client.ListResourceRecordSetsWithContext(ctx, input)
	if err != nil {
		return nil, 0, microerror.Mask(err)
	}

	switch len(output.ResourceRecordSets) {
	case 0:
		return nil, 0, microerror.Maskf(notFoundError, "NS record %q for HostedZone %q not found", name, zoneID)
	case 1:
		// Exactly one record set starting at the requested name, as expected.
	default:
		return nil, 0, microerror.Maskf(executionError, "expected single NS record %q for HostedZone %q, found %#v", name, zoneID, output.ResourceRecordSets)
	}

	record := *output.ResourceRecordSets[0]
	// Route53 returns fully qualified record names with a trailing dot;
	// strip it before comparing. Listing starts at the given name, so a
	// mismatch here means the requested record does not exist.
	if strings.TrimSuffix(*record.Name, ".") != name {
		return nil, 0, microerror.Maskf(notFoundError, "NS record %q for HostedZone %q not found", name, zoneID)
	}

	var servers []string
	for _, rr := range record.ResourceRecords {
		servers = append(servers, *rr.Value)
	}

	return servers, *record.TTL, nil
}
// route53Clients builds the two Route53 clients this resource operates with:
// guest is the client for the tenant cluster's AWS account, taken from the
// controller context, and defaultGuest is a client assuming the default
// tenant account role resolved from the credential secret.
func (r *Resource) route53Clients(ctx context.Context) (guest, defaultGuest *route53.Route53, err error) {
	cc, err := controllercontext.FromContext(ctx)
	if err != nil {
		return nil, nil, microerror.Mask(err)
	}
	guest = cc.Client.TenantCluster.AWS.Route53

	arn, err := credential.GetDefaultARN(ctx, r.k8sClient)
	if err != nil {
		return nil, nil, microerror.Mask(err)
	}

	// Copy the host config so the shared base configuration is not mutated.
	awsConfig := r.hostAWSConfig
	awsConfig.RoleARN = arn

	clients, err := clientaws.NewClients(awsConfig)
	if err != nil {
		return nil, nil, microerror.Mask(err)
	}
	defaultGuest = clients.Route53

	return guest, defaultGuest, nil
}