This repository has been archived by the owner on Oct 23, 2023. It is now read-only.
/
runner.go
138 lines (111 loc) · 3.1 KB
/
runner.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
package awsapicall
import (
"context"
infrastructurev1alpha2 "github.com/giantswarm/apiextensions/v2/pkg/apis/infrastructure/v1alpha2"
"github.com/giantswarm/k8sclient/v4/pkg/k8sclient"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/spf13/cobra"
k8sruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/giantswarm/awscnfm/v12/pkg/client"
"github.com/giantswarm/awscnfm/v12/pkg/env"
"github.com/giantswarm/awscnfm/v12/pkg/key"
"github.com/giantswarm/awscnfm/v12/pkg/label"
)
const (
	// kubeSystemNamespace is the namespace holding cluster system components.
	// It is not referenced in this file — presumably used by sibling files of
	// this package (e.g. the job/network-policy builders); verify before removing.
	kubeSystemNamespace = "kube-system"
)
// runner holds the dependencies for executing this command: the parsed
// command line flags and a logger.
type runner struct {
	// flag carries the validated command line flags (e.g. the tenant cluster ID).
	flag *flag
	// logger is used for structured logging throughout the command execution.
	logger micrologger.Logger
}
// Run is the cobra entry point for this command. It validates the configured
// flags and then hands off to run, which carries out the actual work.
func (r *runner) Run(cmd *cobra.Command, args []string) error {
	ctx := context.Background()

	// Fail fast on invalid flag configuration before doing any real work.
	if err := r.flag.Validate(); err != nil {
		return microerror.Mask(err)
	}

	if err := r.run(ctx, cmd, args); err != nil {
		return microerror.Mask(err)
	}

	return nil
}
// run executes the command: it builds clients for the control plane and the
// tenant cluster, resolves the tenant cluster's AWS region and the docker
// registry to pull images from, and finally spawns the in-cluster job that
// performs an AWS API call (verifying that kiam works as expected).
func (r *runner) run(ctx context.Context, cmd *cobra.Command, args []string) error {
	var err error

	var cpClients k8sclient.Interface
	{
		c := client.ControlPlaneConfig{
			Logger:     r.logger,
			KubeConfig: env.ControlPlaneKubeConfig(),
		}

		cpClients, err = client.NewControlPlane(c)
		if err != nil {
			return microerror.Mask(err)
		}
	}

	var tcClients k8sclient.Interface
	{
		c := client.TenantClusterConfig{
			ControlPlane:  cpClients,
			Logger:        r.logger,
			TenantCluster: r.flag.TenantCluster,
		}

		tcClients, err = client.NewTenantCluster(c)
		if err != nil {
			return microerror.Mask(err)
		}
	}

	// awsRegion is necessary to execute the aws-cli call.
	var awsRegion string
	{
		var cr infrastructurev1alpha2.AWSCluster
		{
			var list infrastructurev1alpha2.AWSClusterList
			// Fix: the previous code passed InNamespace(cr.GetNamespace())
			// built from the zero-value cr above, which is always the empty
			// namespace and in controller-runtime means "all namespaces"
			// anyway. The misleading option is dropped; selection happens
			// purely via the cluster label, across all namespaces.
			err = cpClients.CtrlClient().List(
				ctx,
				&list,
				k8sruntimeclient.MatchingLabels{label.Cluster: r.flag.TenantCluster},
			)
			if err != nil {
				return microerror.Mask(err)
			}
			// Exactly one AWSCluster CR must match the tenant cluster ID.
			if len(list.Items) == 0 {
				return microerror.Mask(notFoundError)
			}
			if len(list.Items) > 1 {
				return microerror.Mask(tooManyCRsError)
			}

			cr = list.Items[0]
		}

		awsRegion = cr.Spec.Provider.Region
	}

	// dockerRegistry is needed in order to spawn the pod with the proper
	// docker image that will execute the aws-cli call.
	var dockerRegistry string
	{
		dockerRegistry, err = key.FetchDockerRegistry(ctx, cpClients.CtrlClient())
		if err != nil {
			return microerror.Mask(err)
		}
	}

	err = r.createAWSApiCallJob(ctx, tcClients.CtrlClient(), awsRegion, dockerRegistry)
	if err != nil {
		return microerror.Mask(err)
	}

	return nil
}
// createAWSApiCallJob will spawn a job in k8s tenant cluster to test calling AWS API to ensure kiam works as expected
func (r *runner) createAWSApiCallJob(ctx context.Context, tcClient k8sruntimeclient.Client, awsRegion string, dockerRegistry string) error {
networkPolicy := jobNetworkPolicy()
err := tcClient.Create(ctx, networkPolicy)
if err != nil {
return microerror.Mask(err)
}
job := awsApiCallJob(dockerRegistry, awsRegion, r.flag.TenantCluster)
err = tcClient.Create(ctx, job)
if err != nil {
return microerror.Mask(err)
}
return nil
}