/
push_manifest_objects.go
181 lines (169 loc) · 6.96 KB
/
push_manifest_objects.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
// Copyright (c) 2022, 2023, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
package vmc
import (
"context"
"fmt"
clusterapi "github.com/verrazzano/verrazzano/cluster-operator/apis/clusters/v1alpha1"
internalcapi "github.com/verrazzano/verrazzano/cluster-operator/internal/capi"
constants2 "github.com/verrazzano/verrazzano/pkg/constants"
"github.com/verrazzano/verrazzano/pkg/k8sutil"
"github.com/verrazzano/verrazzano/pkg/rancherutil"
"github.com/verrazzano/verrazzano/platform-operator/constants"
"github.com/verrazzano/verrazzano/platform-operator/controllers/verrazzano/component/common"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)
// pushManifestObjects applies the Verrazzano manifest objects to the managed cluster.
// To access the managed cluster, we are taking advantage of the Rancher proxy or CAPI based access.
// It returns true when any manifest object was actually pushed to the managed cluster.
func (r *VerrazzanoManagedClusterReconciler) pushManifestObjects(ctx context.Context, rancherEnabled bool, vmc *clusterapi.VerrazzanoManagedCluster) (bool, error) {
	// Prefer direct CAPI-based access when the VMC references a CAPI cluster.
	if vmc.Status.ClusterRef != nil {
		pushed, err := r.pushViaCAPIClient(ctx, vmc, rancherEnabled)
		if err != nil {
			return pushed, err
		}
		if pushed {
			return true, nil
		}
	}
	// Fall back to the Rancher proxy when nothing was pushed via CAPI and Rancher is available.
	if rancherEnabled {
		return r.pushViaRancherProxy(vmc)
	}
	return false, nil
}
func (r *VerrazzanoManagedClusterReconciler) pushViaRancherProxy(vmc *clusterapi.VerrazzanoManagedCluster) (bool, error) {
clusterID := vmc.Status.RancherRegistration.ClusterID
if len(clusterID) == 0 {
r.log.Progressf("Waiting to push manifest objects, Rancher ClusterID not found in the VMC %s/%s status", vmc.GetNamespace(), vmc.GetName())
return false, nil
}
rc, err := rancherutil.NewVerrazzanoClusterRancherConfig(r.Client, r.RancherIngressHost, r.log)
if err != nil || rc == nil {
return false, err
}
// If the managed cluster is not active, we should not attempt to push resources
isActive, err := isManagedClusterActiveInRancher(rc, clusterID, r.log)
if err != nil {
return false, err
}
vsNamespaceCreated, _ := isNamespaceCreated(vmc, r, clusterID, constants.VerrazzanoSystemNamespace)
if isActive && vsNamespaceCreated {
// Create or Update the agent and registration secrets
agentSecret := corev1.Secret{}
agentSecret.Namespace = constants.VerrazzanoSystemNamespace
agentSecret.Name = constants.MCAgentSecret
regSecret := corev1.Secret{}
regSecret.Namespace = constants.VerrazzanoSystemNamespace
regSecret.Name = constants.MCRegistrationSecret
agentOperation, err := createOrUpdateSecretRancherProxy(&agentSecret, rc, clusterID, func() error {
existingAgentSec, err := r.getSecret(vmc.Namespace, GetAgentSecretName(vmc.Name), true)
if err != nil {
return err
}
agentSecret.Data = existingAgentSec.Data
return nil
}, r.log)
if err != nil {
return false, err
}
regOperation, err := createOrUpdateSecretRancherProxy(®Secret, rc, clusterID, func() error {
existingRegSecret, err := r.getSecret(vmc.Namespace, GetRegistrationSecretName(vmc.Name), true)
if err != nil {
return err
}
regSecret.Data = existingRegSecret.Data
return nil
}, r.log)
if err != nil {
return false, err
}
agentModified := agentOperation != controllerutil.OperationResultNone
regModified := regOperation != controllerutil.OperationResultNone
return agentModified || regModified, nil
}
return false, nil
}
// pushViaCAPIClient applies the registration manifest directly to a CAPI-provisioned
// workload cluster, using a client built for that cluster. Returns true when the
// manifest was applied, false when the push was skipped or failed.
//
// The push is skipped entirely once the VMC's Rancher registration status is
// RegistrationApplied: per the comment below, repeatedly re-applying the manifest
// can trigger connection issues on the managed cluster.
func (r *VerrazzanoManagedClusterReconciler) pushViaCAPIClient(ctx context.Context, vmc *clusterapi.VerrazzanoManagedCluster, rancherEnabled bool) (bool, error) {
	r.log.Debugf("Pushing via CAPI client, status: %s", vmc.Status.RancherRegistration.Status)
	if vmc.Status.RancherRegistration.Status != clusterapi.RegistrationApplied {
		// Fetch the CAPI Cluster resource referenced by the VMC status.
		// NOTE(review): a NotFound error is tolerated here, leaving cluster as an
		// empty unstructured object that flows into the calls below — confirm this
		// is intentional rather than an early return.
		cluster := &unstructured.Unstructured{}
		cluster.SetGroupVersionKind(internalcapi.GVKCAPICluster)
		err := r.Get(ctx, types.NamespacedName{Namespace: vmc.Status.ClusterRef.Namespace, Name: vmc.Status.ClusterRef.Name}, cluster)
		if err != nil && !errors.IsNotFound(err) {
			return false, err
		}
		// Retrieve the registration manifest YAML stored for this cluster.
		manifest, err := r.getClusterManifest(cluster)
		if err != nil {
			return false, err
		}
		// register the cluster if Verrazzano installed on workload cluster
		workloadClient, err := r.getWorkloadClusterClient(cluster)
		if err != nil {
			r.log.Errorf("Error getting workload cluster %s client: %v", cluster.GetName(), err)
			return false, err
		}
		// apply the manifest to workload cluster
		yamlApplier := k8sutil.NewYAMLApplier(workloadClient, "")
		err = yamlApplier.ApplyS(string(manifest))
		if err != nil {
			r.log.Errorf("Failed applying cluster manifest to workload cluster %s: %v", cluster.GetName(), err)
			return false, err
		}
		r.log.Infof("Registration manifest applied to cluster %s", cluster.GetName())
		// update the registration status if Rancher is enabled since repeated application of the manifest will
		// trigger connection issues
		if rancherEnabled && vmc.Status.RancherRegistration.Status == clusterapi.RegistrationCompleted {
			// Re-fetch the VMC to avoid a stale-object conflict on the status update.
			existingVMC := &clusterapi.VerrazzanoManagedCluster{}
			err = r.Get(ctx, types.NamespacedName{Namespace: vmc.Namespace, Name: vmc.Name}, existingVMC)
			if err != nil {
				return false, err
			}
			existingVMC.Status.RancherRegistration.Status = clusterapi.RegistrationApplied
			err = r.Status().Update(ctx, existingVMC)
			if err != nil {
				r.log.Errorf("Error updating VMC status for cluster %s: %v", cluster.GetName(), err)
				return false, err
			}
			r.log.Debugf("Registration status updated to Applied")
			// Continue working with the freshly updated object.
			vmc = existingVMC
			// get and label the cattle-system namespace
			ns := &corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: common.CattleSystem}}
			if _, err := ctrl.CreateOrUpdate(ctx, workloadClient, ns, func() error {
				if ns.Labels == nil {
					ns.Labels = make(map[string]string)
				}
				ns.Labels[constants2.LabelVerrazzanoNamespace] = common.CattleSystem
				return nil
			}); err != nil {
				return false, err
			}
		}
		return true, nil
	}
	return false, nil
}
// getClusterManifest retrieves the registration manifest for the workload cluster.
// The manifest YAML is read from the "yaml" key of the per-cluster manifest secret
// ("verrazzano-cluster-<name>-manifest") in the multi-cluster namespace.
func (r *VerrazzanoManagedClusterReconciler) getClusterManifest(cluster *unstructured.Unstructured) ([]byte, error) {
	// retrieve the manifest for the workload cluster
	resourceName := getClusterResourceName(cluster, r.Client)
	manifestSecret := &corev1.Secret{}
	err := r.Get(context.TODO(), types.NamespacedName{
		Name:      fmt.Sprintf("verrazzano-cluster-%s-manifest", resourceName),
		Namespace: constants.VerrazzanoMultiClusterNamespace},
		manifestSecret)
	if err != nil {
		return nil, err
	}
	manifest, ok := manifestSecret.Data["yaml"]
	if !ok {
		// Lowercase, unpunctuated error string per Go convention (staticcheck ST1005);
		// also name the actual cause so operators can fix the secret.
		return nil, fmt.Errorf("retrieving cluster manifest for %s: manifest secret is missing the %q key", cluster.GetName(), "yaml")
	}
	return manifest, nil
}