/
serviceCaller.go
304 lines (245 loc) · 9.44 KB
/
serviceCaller.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
package main
import (
"fmt"
"github.com/Berops/claudie/internal/envs"
"github.com/Berops/claudie/internal/utils"
"github.com/Berops/claudie/proto/pb"
ansibler "github.com/Berops/claudie/services/ansibler/client"
kubeEleven "github.com/Berops/claudie/services/kube-eleven/client"
kuber "github.com/Berops/claudie/services/kuber/client"
terraformer "github.com/Berops/claudie/services/terraformer/client"
"github.com/rs/zerolog/log"
)
// BuilderContext carries the current and desired state of a single K8s
// cluster (and its associated load balancers) through the build/destroy
// pipeline. Fields are mutated in place as each downstream service
// (terraformer, ansibler, kube-eleven, kuber) returns updated state.
type BuilderContext struct {
	// projectName is the name of the project the cluster belongs to.
	projectName string
	// cluster is the current (already built) state of the K8s cluster; may be nil.
	cluster *pb.K8Scluster
	// desiredCluster is the target state of the K8s cluster; may be nil.
	desiredCluster *pb.K8Scluster
	// loadbalancers are the current LB clusters attached to the K8s cluster.
	loadbalancers []*pb.LBcluster
	// desiredLoadbalancers are the target LB clusters.
	desiredLoadbalancers []*pb.LBcluster
	// deletedLoadBalancers are LB clusters scheduled for removal.
	deletedLoadBalancers []*pb.LBcluster
}
// GetClusterName returns the name of the cluster this context refers to.
// It prefers the desired cluster, then the current cluster, and finally
// falls back to the targeted cluster name recorded on any load balancer.
// Returns the empty string when no source of the name is available.
func (ctx *BuilderContext) GetClusterName() string {
	switch {
	case ctx.desiredCluster != nil:
		return ctx.desiredCluster.ClusterInfo.Name
	case ctx.cluster != nil:
		return ctx.cluster.ClusterInfo.Name
	case len(ctx.loadbalancers) > 0:
		// no cluster state present; derive the name from the LBs instead
		return ctx.loadbalancers[0].TargetedK8S
	case len(ctx.desiredLoadbalancers) > 0:
		return ctx.desiredLoadbalancers[0].TargetedK8S
	case len(ctx.deletedLoadBalancers) > 0:
		return ctx.deletedLoadBalancers[0].TargetedK8S
	default:
		return ""
	}
}
// buildCluster runs the full build pipeline for a single cluster:
// Terraformer (infrastructure), Ansibler (VPN/LB setup), KubeEleven
// (k8s bootstrap) and Kuber (post-install resources), in that order.
// The first failing stage aborts the pipeline and its error is wrapped
// with the stage name, cluster name and project name.
func buildCluster(ctx *BuilderContext) (*BuilderContext, error) {
	stages := []struct {
		name string
		call func(*BuilderContext) error
	}{
		{"Terraformer", callTerraformer},
		{"Ansibler", callAnsibler},
		{"KubeEleven", callKubeEleven},
		{"Kuber", callKuber},
	}
	for _, stage := range stages {
		if err := stage.call(ctx); err != nil {
			return nil, fmt.Errorf("error in %s for cluster %s project %s : %w", stage.name, ctx.GetClusterName(), ctx.projectName, err)
		}
	}
	return ctx, nil
}
// callTerraformer passes the config to the terraformer service to build
// the infrastructure. On success the context's current/desired cluster
// and LB state are replaced with the state returned by the service.
func callTerraformer(ctx *BuilderContext) error {
	conn, err := utils.GrpcDialWithInsecure("terraformer", envs.TerraformerURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(conn)

	client := pb.NewTerraformerServiceClient(conn)
	log.Info().Msgf("Calling BuildInfrastructure on terraformer for cluster %s project: %s", ctx.GetClusterName(), ctx.projectName)

	res, err := terraformer.BuildInfrastructure(client, &pb.BuildInfrastructureRequest{
		Current:     ctx.cluster,
		Desired:     ctx.desiredCluster,
		CurrentLbs:  ctx.loadbalancers,
		DesiredLbs:  ctx.desiredLoadbalancers,
		ProjectName: ctx.projectName,
	})
	if err != nil {
		return err
	}

	// adopt the state reported back by terraformer
	ctx.cluster = res.Current
	ctx.desiredCluster = res.Desired
	ctx.loadbalancers = res.CurrentLbs
	ctx.desiredLoadbalancers = res.DesiredLbs
	return nil
}
// callAnsibler passes config to ansibler to set up VPN.
// It performs four RPCs in a fixed order, each feeding its updated state
// into the next: TeardownLoadBalancers, InstallVPN,
// InstallNodeRequirements and SetUpLoadbalancers. The context is mutated
// after every successful call, so a mid-sequence failure leaves ctx
// reflecting the stages that completed.
func callAnsibler(ctx *BuilderContext) error {
	cc, err := utils.GrpcDialWithInsecure("ansibler", envs.AnsiblerURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(cc)
	c := pb.NewAnsiblerServiceClient(cc)

	// 1) Tear down load balancers that are no longer desired.
	log.Info().Msgf("Calling TearDownLoadbalancers on ansibler for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	teardownRes, err := ansibler.TeardownLoadBalancers(c, &pb.TeardownLBRequest{
		Desired:     ctx.desiredCluster,
		DesiredLbs:  ctx.desiredLoadbalancers,
		DeletedLbs:  ctx.deletedLoadBalancers,
		ProjectName: ctx.projectName,
	})
	if err != nil {
		return err
	}
	ctx.desiredCluster = teardownRes.Desired
	ctx.desiredLoadbalancers = teardownRes.DesiredLbs
	ctx.deletedLoadBalancers = teardownRes.DeletedLbs

	// 2) Install the VPN across the desired nodes.
	log.Info().Msgf("Calling InstallVPN on ansibler for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	installRes, err := ansibler.InstallVPN(c, &pb.InstallRequest{
		Desired:     ctx.desiredCluster,
		DesiredLbs:  ctx.desiredLoadbalancers,
		ProjectName: ctx.projectName,
	})
	if err != nil {
		return err
	}
	ctx.desiredCluster = installRes.Desired
	ctx.desiredLoadbalancers = installRes.DesiredLbs

	// 3) Install node-level requirements (installRes is reused here).
	log.Info().Msgf("Calling InstallNodeRequirements on ansibler for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	installRes, err = ansibler.InstallNodeRequirements(c, &pb.InstallRequest{
		Desired:     ctx.desiredCluster,
		DesiredLbs:  ctx.desiredLoadbalancers,
		ProjectName: ctx.projectName,
	})
	if err != nil {
		return err
	}
	ctx.desiredCluster = installRes.Desired
	ctx.desiredLoadbalancers = installRes.DesiredLbs

	// 4) Set up the desired load balancers. The previous API endpoint
	// captured during teardown is forwarded so ansibler can migrate it.
	log.Info().Msgf("Calling SetUpLoadbalancers on ansibler for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	setUpRes, err := ansibler.SetUpLoadbalancers(c, &pb.SetUpLBRequest{
		Desired:             ctx.desiredCluster,
		CurrentLbs:          ctx.loadbalancers,
		DesiredLbs:          ctx.desiredLoadbalancers,
		PreviousAPIEndpoint: teardownRes.PreviousAPIEndpoint,
		ProjectName:         ctx.projectName,
	})
	if err != nil {
		return err
	}
	ctx.desiredCluster = setUpRes.Desired
	ctx.loadbalancers = setUpRes.CurrentLbs
	ctx.desiredLoadbalancers = setUpRes.DesiredLbs
	return nil
}
// callKubeEleven passes the config to the kube-eleven service, which
// bootstraps the Kubernetes cluster on the provisioned nodes. The
// context's desired cluster and LB state are refreshed from the response.
func callKubeEleven(ctx *BuilderContext) error {
	conn, err := utils.GrpcDialWithInsecure("kubeEleven", envs.KubeElevenURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(conn)

	client := pb.NewKubeElevenServiceClient(conn)
	log.Info().Msgf("Calling BuildCluster on kube-eleven for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)

	req := &pb.BuildClusterRequest{
		Desired:     ctx.desiredCluster,
		DesiredLbs:  ctx.desiredLoadbalancers,
		ProjectName: ctx.projectName,
	}
	res, err := kubeEleven.BuildCluster(client, req)
	if err != nil {
		return err
	}

	ctx.desiredCluster = res.Desired
	ctx.desiredLoadbalancers = res.DesiredLbs
	return nil
}
// callKuber passes config to Kuber to apply any additional resources via
// kubectl: it sets up storage, stores the kubeconfig, and stores the
// cluster metadata for the desired cluster. The context's desired cluster
// is refreshed from the SetUpStorage response.
func callKuber(ctx *BuilderContext) error {
	cc, err := utils.GrpcDialWithInsecure("kuber", envs.KuberURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(cc)
	c := pb.NewKuberServiceClient(cc)

	log.Info().Msgf("Calling SetUpStorage on kuber for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	resStorage, err := kuber.SetUpStorage(c, &pb.SetUpStorageRequest{DesiredCluster: ctx.desiredCluster})
	if err != nil {
		return err
	}
	ctx.desiredCluster = resStorage.DesiredCluster

	log.Info().Msgf("Calling StoreKubeconfig on kuber for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	if _, err := kuber.StoreKubeconfig(c, &pb.StoreKubeconfigRequest{Cluster: ctx.desiredCluster}); err != nil {
		return err
	}

	// NOTE: log message fixed — this calls StoreClusterMetadata, not StoreNodeMetadata.
	log.Info().Msgf("Calling StoreClusterMetadata on kuber for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	if _, err := kuber.StoreClusterMetadata(c, &pb.StoreClusterMetadataRequest{Cluster: ctx.desiredCluster}); err != nil {
		return err
	}
	return nil
}
// destroyCluster destroys an existing cluster's infrastructure by calling
// Terraformer (tears down the infra) and Kuber (removes the kubeconfig
// and cluster metadata). Doc comment and error messages updated to match
// the function's actual scope: it operates on a cluster, not a whole config.
func destroyCluster(ctx *BuilderContext) error {
	if err := destroyConfigTerraformer(ctx); err != nil {
		return fmt.Errorf("error in destroy config terraformer for cluster %s project %s : %w", ctx.GetClusterName(), ctx.projectName, err)
	}
	if err := deleteClusterData(ctx); err != nil {
		return fmt.Errorf("error in delete cluster data for cluster %s project %s : %w", ctx.GetClusterName(), ctx.projectName, err)
	}
	return nil
}
// destroyConfigTerraformer calls terraformer's DestroyInfrastructure RPC
// to tear down the current cluster's infrastructure, including its
// current load balancers.
func destroyConfigTerraformer(ctx *BuilderContext) error {
	conn, err := utils.GrpcDialWithInsecure("terraformer", envs.TerraformerURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(conn)

	log.Info().Msgf("Calling DestroyInfrastructure on terraformer for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	req := &pb.DestroyInfrastructureRequest{
		ProjectName: ctx.projectName,
		Current:     ctx.cluster,
		CurrentLbs:  ctx.loadbalancers,
	}
	_, err = terraformer.DestroyInfrastructure(pb.NewTerraformerServiceClient(conn), req)
	return err
}
// deleteClusterData deletes the kubeconfig and cluster metadata for the
// current cluster via the kuber service. It is a no-op when there is no
// current cluster in the context.
func deleteClusterData(ctx *BuilderContext) error {
	if ctx.cluster == nil {
		// nothing was ever built for this context
		return nil
	}

	conn, err := utils.GrpcDialWithInsecure("kuber", envs.KuberURL)
	if err != nil {
		return err
	}
	defer utils.CloseClientConnection(conn)

	client := pb.NewKuberServiceClient(conn)
	log.Info().Msgf("Calling DeleteKubeconfig on kuber for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	if _, err := kuber.DeleteKubeconfig(client, &pb.DeleteKubeconfigRequest{Cluster: ctx.cluster}); err != nil {
		return err
	}

	log.Info().Msgf("Calling DeleteClusterMetadata on kuber for cluster %s project %s", ctx.GetClusterName(), ctx.projectName)
	_, err = kuber.DeleteClusterMetadata(client, &pb.DeleteClusterMetadataRequest{Cluster: ctx.cluster})
	return err
}
// callDeleteNodes calls Kuber.DeleteNodes which will safely delete the
// given master and worker nodes from the cluster. Returns the updated
// cluster state from the kuber service.
//
// A nil cluster now yields an explicit error instead of a nil-pointer
// panic when logging cluster.ClusterInfo.Name.
func callDeleteNodes(master, worker []string, cluster *pb.K8Scluster) (*pb.K8Scluster, error) {
	if cluster == nil {
		return nil, fmt.Errorf("cannot delete nodes from a nil cluster")
	}

	cc, err := utils.GrpcDialWithInsecure("kuber", envs.KuberURL)
	if err != nil {
		return nil, err
	}
	defer utils.CloseClientConnection(cc)

	// Creating the client
	c := pb.NewKuberServiceClient(cc)
	log.Info().Msgf("Calling DeleteNodes on kuber for cluster %s", cluster.ClusterInfo.Name)
	resDelete, err := kuber.DeleteNodes(c, &pb.DeleteNodesRequest{MasterNodes: master, WorkerNodes: worker, Cluster: cluster})
	if err != nil {
		return nil, err
	}
	return resDelete.Cluster, nil
}