driver.go
package clusterprovisioner

import (
	"reflect"

	"github.com/rancher/rancher/pkg/clusterprovisioninglogger"
	"github.com/rancher/rke/services"
	"github.com/rancher/types/apis/management.cattle.io/v3"
	"k8s.io/apimachinery/pkg/runtime"
)
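
// driverCreate provisions a new cluster through the cluster driver, logging
// progress against the Provisioned condition. It returns the API endpoint,
// service account token, and CA certificate reported by the driver.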
func (p *Provisioner) driverCreate(cluster *v3.Cluster, spec v3.ClusterSpec) (api string, token string, cert string, err error) {
	ctx, logger := clusterprovisioninglogger.NewLogger(p.Clusters, p.EventLogger, cluster, v3.ClusterConditionProvisioned)
	defer logger.Close()

	spec = cleanRKE(spec)

	// Best-effort persist of the cluster before provisioning; if the update
	// fails we keep working with the object we already have.
	if newCluster, err := p.Clusters.Update(cluster); err == nil {
		cluster = newCluster
	}

	return p.Driver.Create(ctx, cluster.Name, spec)
}
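
// driverUpdate reconciles an existing cluster with the desired spec, logging
// progress against the Updated condition. If the applied RKE config already
// matches the desired one and connection details are stored, it short-circuits
// and returns the stored endpoint, token, and CA certificate without calling
// the driver.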
func (p *Provisioner) driverUpdate(cluster *v3.Cluster, spec v3.ClusterSpec) (api string, token string, cert string, err error) {
	ctx, logger := clusterprovisioninglogger.NewLogger(p.Clusters, p.EventLogger, cluster, v3.ClusterConditionUpdated)
	defer logger.Close()

	spec = cleanRKE(spec)
	applied := cleanRKE(cluster.Status.AppliedSpec)

	// Nothing to do: the desired RKE config matches what is already applied
	// and the connection details are known.
	if spec.RancherKubernetesEngineConfig != nil && cluster.Status.APIEndpoint != "" && cluster.Status.ServiceAccountToken != "" &&
		reflect.DeepEqual(applied.RancherKubernetesEngineConfig, spec.RancherKubernetesEngineConfig) {
		return cluster.Status.APIEndpoint, cluster.Status.ServiceAccountToken, cluster.Status.CACert, nil
	}

	// Neither the desired nor the applied config sets the etcd snapshot flag,
	// so default it to disabled on the cluster spec.
	if spec.RancherKubernetesEngineConfig != nil && spec.RancherKubernetesEngineConfig.Services.Etcd.Snapshot == nil &&
		applied.RancherKubernetesEngineConfig != nil && applied.RancherKubernetesEngineConfig.Services.Etcd.Snapshot == nil {
		_false := false
		cluster.Spec.RancherKubernetesEngineConfig.Services.Etcd.Snapshot = &_false
	}

	// Best-effort persist before handing off to the driver.
	if newCluster, err := p.Clusters.Update(cluster); err == nil {
		cluster = newCluster
	}

	return p.Driver.Update(ctx, cluster.Name, spec)
}
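
// driverRemove tears the cluster down through the driver, persisting the
// cluster object under the Updated condition while the removal runs.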
func (p *Provisioner) driverRemove(cluster *v3.Cluster) error {
	ctx, logger := clusterprovisioninglogger.NewLogger(p.Clusters, p.EventLogger, cluster, v3.ClusterConditionProvisioned)
	defer logger.Close()

	spec := cleanRKE(cluster.Spec)

	_, err := v3.ClusterConditionUpdated.Do(cluster, func() (runtime.Object, error) {
		// Best-effort persist before asking the driver to remove the cluster.
		if newCluster, err := p.Clusters.Update(cluster); err == nil {
			cluster = newCluster
		}

		return cluster, p.Driver.Remove(ctx, cluster.Name, spec)
	})

	return err
}
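
// generateServiceAccount asks the driver for a fresh service account token for
// the cluster, using the cleaned spec.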
func (p *Provisioner) generateServiceAccount(cluster *v3.Cluster, spec v3.ClusterSpec) (string, error) {
	ctx, logger := clusterprovisioninglogger.NewLogger(p.Clusters, p.EventLogger, cluster, v3.ClusterConditionUpdated)
	defer logger.Close()

	spec = cleanRKE(spec)
	return p.Driver.GenerateServiceAccount(ctx, cluster.Name, spec)
}
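
// removeLegacyServiceAccount asks the driver to delete the legacy service
// account previously created for the cluster.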
func (p *Provisioner) removeLegacyServiceAccount(cluster *v3.Cluster, spec v3.ClusterSpec) error {
	ctx, logger := clusterprovisioninglogger.NewLogger(p.Clusters, p.EventLogger, cluster, v3.ClusterConditionUpdated)
	defer logger.Close()

	spec = cleanRKE(spec)
	return p.Driver.RemoveLegacyServiceAccount(ctx, cluster.Name, spec)
}
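
// cleanRKE returns a copy of the spec with worker-only nodes removed from the
// RKE config; nodes that also carry the controlplane or etcd role are kept.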
func cleanRKE(spec v3.ClusterSpec) v3.ClusterSpec {
	if spec.RancherKubernetesEngineConfig == nil {
		return spec
	}

	// Work on a deep copy so the caller's spec is left untouched.
	result := spec.DeepCopy()

	var filteredNodes []v3.RKEConfigNode
	for _, node := range spec.RancherKubernetesEngineConfig.Nodes {
		// Skip nodes whose only role is "worker".
		if len(node.Role) == 1 && node.Role[0] == services.WorkerRole {
			continue
		}
		filteredNodes = append(filteredNodes, node)
	}

	result.RancherKubernetesEngineConfig.Nodes = filteredNodes
	return *result
}
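
// Illustrative sketch (not from the original file): what cleanRKE does to a
// spec. The node addresses are hypothetical; ControlRole, ETCDRole, and
// WorkerRole are constants from the imported rancher/rke services package.
//
//	spec := v3.ClusterSpec{
//		RancherKubernetesEngineConfig: &v3.RancherKubernetesEngineConfig{
//			Nodes: []v3.RKEConfigNode{
//				{Address: "172.16.0.10", Role: []string{services.ControlRole, services.ETCDRole}},
//				{Address: "172.16.0.11", Role: []string{services.WorkerRole}},
//			},
//		},
//	}
//	cleaned := cleanRKE(spec)
//	// cleaned.RancherKubernetesEngineConfig.Nodes keeps only 172.16.0.10;
//	// the worker-only node at 172.16.0.11 is dropped.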