-
Notifications
You must be signed in to change notification settings - Fork 63
/
super_stream_controller.go
209 lines (180 loc) · 8.3 KB
/
super_stream_controller.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
/*
RabbitMQ Messaging Topology Kubernetes Operator
Copyright 2021 VMware, Inc.
This product is licensed to you under the Mozilla Public License 2.0 license (the "License"). You may not use this product except in compliance with the Mozilla 2.0 License.
This product may include a number of subcomponents with separate copyright notices and license terms. Your use of these subcomponents is subject to the terms and conditions of the subcomponent's license, as noted in the LICENSE file.
*/
package controllers
import (
"context"
"fmt"
"strconv"
"github.com/go-logr/logr"
rabbitmqv1beta1 "github.com/rabbitmq/cluster-operator/v2/api/v1beta1"
topologyv1alpha1 "github.com/rabbitmq/messaging-topology-operator/api/v1alpha1"
topology "github.com/rabbitmq/messaging-topology-operator/api/v1beta1"
"github.com/rabbitmq/messaging-topology-operator/internal/managedresource"
"github.com/rabbitmq/messaging-topology-operator/rabbitmqclient"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
clientretry "k8s.io/client-go/util/retry"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
// SuperStreamReconciler reconciles a RabbitMQ Super Stream, and any resources it comprises of
type SuperStreamReconciler struct {
	// Client is the controller-runtime client used for all reads/writes of
	// SuperStream objects and the child Exchange/Queue/Binding resources.
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme // used to set owner references on generated child resources
	// Recorder emits Kubernetes Events (e.g. FailedScaleDown) on SuperStream objects.
	Recorder record.EventRecorder
	// RabbitmqClientFactory builds clients for talking to RabbitMQ clusters.
	// NOTE(review): not referenced in the visible methods of this file — presumably
	// used by shared helpers (e.g. handleRMQReferenceParseError); confirm.
	RabbitmqClientFactory rabbitmqclient.Factory
	// KubernetesClusterDomain is the cluster DNS suffix (e.g. "cluster.local").
	KubernetesClusterDomain string
}
// +kubebuilder:rbac:groups=rabbitmq.com,resources=exchanges,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups=rabbitmq.com,resources=queues,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups=rabbitmq.com,resources=bindings,verbs=get;create;update;patch;delete
// +kubebuilder:rbac:groups=rabbitmq.com,resources=superstreams,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=rabbitmq.com,resources=superstreams/finalizers,verbs=update
// +kubebuilder:rbac:groups=rabbitmq.com,resources=superstreams/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=rabbitmq.com,resources=rabbitmqclusters,verbs=get;list;watch
// +kubebuilder:rbac:groups=rabbitmq.com,resources=rabbitmqclusters/status,verbs=get
// +kubebuilder:rbac:groups="",resources=services,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=get;create;patch
// Reconcile drives a SuperStream towards its desired state. For n partitions it
// creates/updates 1 exchange, n partition stream queues and n bindings (via the
// managedresource builders), then records the partition queue names and a Ready
// condition in the SuperStream's status. Scale-down (fewer partitions than are
// already recorded in status) is refused because it could lose data.
func (r *SuperStreamReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	logger := ctrl.LoggerFrom(ctx)
	superStream := &topologyv1alpha1.SuperStream{}
	// If the object is gone (deleted), there is nothing to do; any other read
	// error is returned so the request is retried.
	if err := r.Get(ctx, req.NamespacedName, superStream); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	// Resolve the referenced RabbitmqCluster (and validate namespace permissions).
	rmqClusterRef, err := r.getRabbitmqClusterReference(ctx, superStream.Spec.RabbitmqClusterReference, superStream.Namespace)
	if err != nil {
		return handleRMQReferenceParseError(ctx, r.Client, r.Recorder, superStream, &superStream.Status.Conditions, err)
	}
	logger.Info("Start reconciling")
	if superStream.Spec.Partitions < len(superStream.Status.Partitions) {
		// This would constitute a scale down, which may result in data loss.
		err := fmt.Errorf(
			"SuperStreams cannot be scaled down: an attempt was made to scale from %d partitions to %d",
			len(superStream.Status.Partitions),
			superStream.Spec.Partitions,
		)
		msg := fmt.Sprintf("SuperStream %s failed to reconcile", superStream.Name)
		logger.Error(err, msg)
		r.Recorder.Event(superStream, corev1.EventTypeWarning, "FailedScaleDown", err.Error())
		// NOTE(review): SetReconcileSuccess is also used to persist NotReady
		// conditions — the name is misleading; it is a generic status writer.
		if writerErr := r.SetReconcileSuccess(ctx, superStream, topology.NotReady(msg, superStream.Status.Conditions)); writerErr != nil {
			logger.Error(writerErr, failedStatusUpdate, "status", superStream.Status)
		}
		// Return nil so the request is NOT requeued: retrying cannot fix a
		// scale-down; the user must correct the spec.
		return reconcile.Result{}, nil
	}
	// Determine the routing keys: default to "0".."n-1" when unspecified,
	// otherwise require exactly one key per partition.
	var routingKeys []string
	if len(superStream.Spec.RoutingKeys) == 0 {
		routingKeys = r.generateRoutingKeys(superStream)
	} else if len(superStream.Spec.RoutingKeys) != superStream.Spec.Partitions {
		err := fmt.Errorf(
			"expected number of routing keys (%d) to match number of partitions (%d)",
			len(superStream.Spec.RoutingKeys),
			superStream.Spec.Partitions,
		)
		msg := fmt.Sprintf("SuperStream %s failed to reconcile", superStream.Name)
		logger.Error(err, msg)
		if writerErr := r.SetReconcileSuccess(ctx, superStream, topology.NotReady(msg, superStream.Status.Conditions)); writerErr != nil {
			logger.Error(writerErr, failedStatusUpdate, "status", superStream.Status)
		}
		return reconcile.Result{}, err
	} else {
		routingKeys = superStream.Spec.RoutingKeys
	}
	// Each SuperStream generates, for n partitions, 1 exchange, n streams and n bindings
	managedResourceBuilder := managedresource.Builder{
		ObjectOwner: superStream,
		Scheme:      r.Scheme,
	}
	builders := []managedresource.ResourceBuilder{managedResourceBuilder.SuperStreamExchange(superStream.Spec.Vhost, rmqClusterRef)}
	for index, routingKey := range routingKeys {
		builders = append(
			builders,
			managedResourceBuilder.SuperStreamPartition(index, routingKey, superStream.Spec.Vhost, rmqClusterRef),
			managedResourceBuilder.SuperStreamBinding(index, routingKey, superStream.Spec.Vhost, rmqClusterRef),
		)
	}
	var partitionQueueNames []string
	for _, builder := range builders {
		resource, err := builder.Build()
		if err != nil {
			return ctrl.Result{}, err
		}
		// Create or update the child resource, retrying on optimistic-lock
		// conflicts with other writers.
		err = clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error {
			var apiError error
			_, apiError = controllerutil.CreateOrUpdate(ctx, r.Client, resource, func() error {
				return builder.Update(resource)
			})
			return apiError
		})
		if err != nil {
			// Record a typed NotReady condition (e.g. "FailedReconcilePartition")
			// and return the error so the request is requeued.
			msg := fmt.Sprintf("FailedReconcile%s", builder.ResourceType())
			if writerErr := r.SetReconcileSuccess(ctx, superStream, topology.NotReady(msg, superStream.Status.Conditions)); writerErr != nil {
				logger.Error(writerErr, failedStatusUpdate, "status", superStream.Status)
			}
			return ctrl.Result{}, err
		}
		// Partition builders produce Queue resources; collect their names so the
		// status reflects the streams backing this SuperStream.
		if builder.ResourceType() == "Partition" {
			partition := resource.(*topology.Queue)
			partitionQueueNames = append(partitionQueueNames, partition.Spec.Name)
		}
	}
	superStream.Status.Partitions = partitionQueueNames
	// Status-write failures below are logged but do not fail the reconcile:
	// all child resources were applied successfully at this point.
	if err := clientretry.RetryOnConflict(clientretry.DefaultRetry, func() error {
		return r.Status().Update(ctx, superStream)
	}); err != nil {
		logger.Error(err, failedStatusUpdate)
	}
	if err := r.SetReconcileSuccess(ctx, superStream, topology.Ready(superStream.Status.Conditions)); err != nil {
		logger.Error(err, failedStatusUpdate)
	}
	logger.Info("Finished reconciling")
	return ctrl.Result{}, nil
}
// getRabbitmqClusterReference resolves the RabbitmqCluster a SuperStream points
// at, defaulting the namespace to the requesting object's namespace when the
// reference leaves it empty. It returns an error wrapping
// NoSuchRabbitmqClusterError if the cluster cannot be fetched, or
// ResourceNotAllowedError if the cluster does not permit topology objects from
// the requesting namespace.
func (r *SuperStreamReconciler) getRabbitmqClusterReference(ctx context.Context, rmq topology.RabbitmqClusterReference, requestNamespace string) (*topology.RabbitmqClusterReference, error) {
	namespace := rmq.Namespace
	if namespace == "" {
		namespace = requestNamespace
	}
	key := types.NamespacedName{Name: rmq.Name, Namespace: namespace}
	cluster := &rabbitmqv1beta1.RabbitmqCluster{}
	if err := r.Get(ctx, key, cluster); err != nil {
		return nil, fmt.Errorf("failed to get cluster from reference: %s Error: %w", err, rabbitmqclient.NoSuchRabbitmqClusterError)
	}
	if !rabbitmqclient.AllowedNamespace(rmq, requestNamespace, cluster) {
		return nil, rabbitmqclient.ResourceNotAllowedError
	}
	resolved := &topology.RabbitmqClusterReference{Name: rmq.Name, Namespace: namespace}
	return resolved, nil
}
// generateRoutingKeys produces the default routing keys for a SuperStream whose
// spec did not provide any: the decimal strings "0" through "Partitions-1", one
// per partition. Returns nil (matching the previous behavior) when Partitions
// is zero or negative.
func (r *SuperStreamReconciler) generateRoutingKeys(superStream *topologyv1alpha1.SuperStream) []string {
	n := superStream.Spec.Partitions
	if n <= 0 {
		return nil
	}
	// Preallocate: the final length is known, so avoid repeated append growth.
	routingKeys := make([]string, n)
	for i := range routingKeys {
		routingKeys[i] = strconv.Itoa(i)
	}
	return routingKeys
}
// SetReconcileSuccess replaces the SuperStream's status conditions with the
// single supplied condition, stamps the observed generation, and persists the
// status subresource, retrying on optimistic-concurrency conflicts. Despite the
// name, it is used for both Ready and NotReady conditions.
func (r *SuperStreamReconciler) SetReconcileSuccess(ctx context.Context, superStream *topologyv1alpha1.SuperStream, condition topology.Condition) error {
	status := &superStream.Status
	status.Conditions = []topology.Condition{condition}
	status.ObservedGeneration = superStream.GetGeneration()
	writeStatus := func() error {
		return r.Status().Update(ctx, superStream)
	}
	return clientretry.RetryOnConflict(clientretry.DefaultRetry, writeStatus)
}
// SetupWithManager registers this reconciler with the manager: it reconciles
// SuperStream objects and also watches the Exchange, Binding and Queue
// resources a SuperStream owns, so child changes trigger a reconcile.
func (r *SuperStreamReconciler) SetupWithManager(mgr ctrl.Manager) error {
	controllerBuilder := ctrl.NewControllerManagedBy(mgr).For(&topologyv1alpha1.SuperStream{})
	ownedTypes := []client.Object{
		&topology.Exchange{},
		&topology.Binding{},
		&topology.Queue{},
	}
	for _, owned := range ownedTypes {
		controllerBuilder = controllerBuilder.Owns(owned)
	}
	return controllerBuilder.Complete(r)
}