set ClusterOperator.Status #72

Merged: 2 commits, merged on Jan 28, 2019
pkg/controller/clusterconfig/clusterconfig_controller.go (13 additions, 4 deletions)

@@ -10,6 +10,7 @@ import (
"github.com/openshift/cluster-network-operator/pkg/apply"
"github.com/openshift/cluster-network-operator/pkg/names"
"github.com/openshift/cluster-network-operator/pkg/network"
"github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
@@ -22,14 +23,14 @@ import (
 )
 
 // and Start it when the Manager is Started.
-func Add(mgr manager.Manager) error {
-    return add(mgr, newReconciler(mgr))
+func Add(mgr manager.Manager, status *clusteroperator.StatusManager) error {
+    return add(mgr, newReconciler(mgr, status))
 }
 
 // newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+func newReconciler(mgr manager.Manager, status *clusteroperator.StatusManager) reconcile.Reconciler {
     configv1.Install(mgr.GetScheme())
-    return &ReconcileClusterConfig{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+    return &ReconcileClusterConfig{client: mgr.GetClient(), scheme: mgr.GetScheme(), status: status}
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
@@ -57,6 +58,7 @@ type ReconcileClusterConfig struct {
     // that reads objects from the cache and writes to the apiserver
     client client.Client
     scheme *runtime.Scheme
+    status *clusteroperator.StatusManager
 }
 
 // Reconcile propagates changes from the cluster config to the operator config.
@@ -83,27 +85,34 @@ func (r *ReconcileClusterConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
         }
         // Error reading the object - requeue the request.
         log.Println(err)
+        // FIXME: operator status?

> Contributor (review comment on the FIXME line above): Yeah, tough to know what to do. If it weren't a transient failure, then I suppose we would treat this the same as invalid input. This seems very unlikely though.

         return reconcile.Result{}, err
     }
 
     // Validate the cluster config
     if err := network.ValidateClusterConfig(clusterConfig.Spec); err != nil {
         err = errors.Wrapf(err, "failed to validate Network.Spec")
         log.Println(err)
+        r.status.SetConfigFailing("InvalidClusterConfig", err)
         return reconcile.Result{}, err
     }
 
     operatorConfig, err := r.UpdateOperatorConfig(context.TODO(), *clusterConfig)
     if err != nil {
         err = errors.Wrapf(err, "failed to generate NetworkConfig CRD")
         log.Println(err)
+        r.status.SetConfigFailing("UpdateOperatorConfig", err)
         return reconcile.Result{}, err
     }
 
+    // Clear any cluster config-related errors before applying operator config
+    r.status.SetConfigSuccess()
+
     if operatorConfig != nil {
         if err := apply.ApplyObject(context.TODO(), r.client, operatorConfig); err != nil {
             err = errors.Wrapf(err, "could not apply (%s) %s/%s", operatorConfig.GroupVersionKind(), operatorConfig.GetNamespace(), operatorConfig.GetName())
             log.Println(err)
+            r.status.SetConfigFailing("ApplyClusterConfig", err)
             return reconcile.Result{}, err
         }
     }
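The StatusManager type that these controllers call into lives in pkg/util/clusteroperator, which is not shown in this diff. As a rough sketch of the surface the call sites above depend on (NewStatusManager, SetConfigFailing, SetConfigSuccess), something like the following would fit; the field layout, the condition-upsert logic, and the use of the pre-4.1 "Failing" condition type are inferences from the call sites, not the actual implementation:

// Sketch only: inferred from the call sites in this PR; the real
// implementation in pkg/util/clusteroperator is not part of this diff.
package clusteroperator

import (
    "context"
    "log"

    configv1 "github.com/openshift/api/config/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "sigs.k8s.io/controller-runtime/pkg/client"
)

// StatusManager coalesces the controllers' view of the world into the
// operator's ClusterOperator.Status.
type StatusManager struct {
    client  client.Client
    name    string // ClusterOperator object name, e.g. "openshift-network-operator"
    version string
}

func NewStatusManager(c client.Client, name, version string) *StatusManager {
    return &StatusManager{client: c, name: name, version: version}
}

// SetConfigFailing reports a Failing condition with the given reason.
func (s *StatusManager) SetConfigFailing(reason string, err error) {
    s.setCondition(configv1.ClusterOperatorStatusCondition{
        Type:    "Failing", // the pre-4.1 condition type, later renamed to Degraded
        Status:  configv1.ConditionTrue,
        Reason:  reason,
        Message: err.Error(),
    })
}

// SetConfigSuccess clears any previously reported config failure.
func (s *StatusManager) SetConfigSuccess() {
    s.setCondition(configv1.ClusterOperatorStatusCondition{
        Type:   "Failing",
        Status: configv1.ConditionFalse,
    })
}

// setCondition fetches (or creates) the ClusterOperator and upserts one condition.
func (s *StatusManager) setCondition(cond configv1.ClusterOperatorStatusCondition) {
    co := &configv1.ClusterOperator{ObjectMeta: metav1.ObjectMeta{Name: s.name}}
    err := s.client.Get(context.TODO(), types.NamespacedName{Name: s.name}, co)
    if apierrors.IsNotFound(err) {
        err = s.client.Create(context.TODO(), co)
    }
    if err != nil {
        log.Printf("failed to get/create ClusterOperator %s: %v", s.name, err)
        return
    }
    cond.LastTransitionTime = metav1.Now()
    updated := false
    for i := range co.Status.Conditions {
        if co.Status.Conditions[i].Type == cond.Type {
            co.Status.Conditions[i] = cond
            updated = true
        }
    }
    if !updated {
        co.Status.Conditions = append(co.Status.Conditions, cond)
    }
    if err := s.client.Status().Update(context.TODO(), co); err != nil {
        log.Printf("failed to update ClusterOperator %s status: %v", s.name, err)
    }
}

Funneling every status write through one shared object like this keeps the two reconcilers in this PR from clobbering each other's conditions.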
pkg/controller/controller.go (7 additions, 2 deletions)

@@ -1,16 +1,21 @@
 package controller
 
 import (
+    "github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"
+    operatorversion "github.com/openshift/cluster-network-operator/version"
+
     "sigs.k8s.io/controller-runtime/pkg/manager"
 )
 
 // AddToManagerFuncs is a list of functions to add all Controllers to the Manager
-var AddToManagerFuncs []func(manager.Manager) error
+var AddToManagerFuncs []func(manager.Manager, *clusteroperator.StatusManager) error
 
 // AddToManager adds all Controllers to the Manager
 func AddToManager(m manager.Manager) error {
+    status := clusteroperator.NewStatusManager(m.GetClient(), "openshift-network-operator", operatorversion.Version)
+
     for _, f := range AddToManagerFuncs {
-        if err := f(m); err != nil {
+        if err := f(m, status); err != nil {
             return err
         }
     }
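For context: with operator-sdk scaffolding, each controller package appends its Add function to AddToManagerFuncs from a small add_*.go file. Those files are not in this diff, but after this change they register against the new two-argument signature; a hypothetical example:

// Hypothetical add_clusterconfig.go (operator-sdk scaffolding, not in this diff).
package controller

import (
    "github.com/openshift/cluster-network-operator/pkg/controller/clusterconfig"
)

func init() {
    // clusterconfig.Add now has the signature
    // func(manager.Manager, *clusteroperator.StatusManager) error,
    // matching the updated AddToManagerFuncs element type above.
    AddToManagerFuncs = append(AddToManagerFuncs, clusterconfig.Add)
}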
pkg/controller/networkconfig/daemonset_controller.go (48 additions, new file)

@@ -0,0 +1,48 @@
+package networkconfig
+
+import (
+    "log"
+
+    "github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"
+
+    "k8s.io/apimachinery/pkg/types"
+    "sigs.k8s.io/controller-runtime/pkg/reconcile"
+)
+
+// newDaemonSetReconciler returns a new reconcile.Reconciler
+func newDaemonSetReconciler(status *clusteroperator.StatusManager) *ReconcileDaemonSets {
+    return &ReconcileDaemonSets{status: status}
+}
+
+var _ reconcile.Reconciler = &ReconcileDaemonSets{}
+
+// ReconcileDaemonSets updates the ClusterOperator.Status according to the states of DaemonSet objects
+type ReconcileDaemonSets struct {
+    status *clusteroperator.StatusManager
+
+    daemonSets []types.NamespacedName
+}
+
+func (r *ReconcileDaemonSets) SetDaemonSets(daemonSets []types.NamespacedName) {
+    r.daemonSets = daemonSets
+}
+
+// Reconcile updates the ClusterOperator.Status to match the current state of the
+// watched DaemonSets
+func (r *ReconcileDaemonSets) Reconcile(request reconcile.Request) (reconcile.Result, error) {
+    found := false
+    for _, ds := range r.daemonSets {
+        if ds.Namespace == request.Namespace && ds.Name == request.Name {

> Contributor (review comment on the line above): Annoying that the operator-sdk doesn't support mixed-scope watches, AFAICT.

+            found = true
+            break
+        }
+    }
+    if !found {
+        return reconcile.Result{}, nil
+    }
+
+    log.Printf("Reconciling update to DaemonSet %s/%s\n", request.Namespace, request.Name)
+    r.status.SetFromDaemonSets(r.daemonSets)
+
+    return reconcile.Result{RequeueAfter: ResyncPeriod}, nil
+}
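SetFromDaemonSets is also part of the unshown clusteroperator package. Extending the hypothetical StatusManager sketch above (and adding appsv1 "k8s.io/api/apps/v1", fmt, and strings to its imports), a plausible shape is: report Progressing while any tracked DaemonSet still has unavailable pods, and Available once all are fully rolled out. The condition types and the "Deploying"/"NoDaemonSet" reasons are assumptions:

// Continues the hypothetical sketch; not the real implementation.
func (s *StatusManager) SetFromDaemonSets(daemonSets []types.NamespacedName) {
    progressing := []string{}
    for _, dsName := range daemonSets {
        ds := &appsv1.DaemonSet{}
        if err := s.client.Get(context.TODO(), dsName, ds); err != nil {
            s.SetConfigFailing("NoDaemonSet", fmt.Errorf("error getting DaemonSet %s/%s: %v", dsName.Namespace, dsName.Name, err))
            return
        }
        if ds.Status.NumberUnavailable > 0 {
            // Some pods for this DaemonSet are still missing or not ready.
            progressing = append(progressing, fmt.Sprintf("DaemonSet %s/%s is not yet available", dsName.Namespace, dsName.Name))
        }
    }
    if len(progressing) > 0 {
        s.setCondition(configv1.ClusterOperatorStatusCondition{
            Type:    configv1.OperatorProgressing,
            Status:  configv1.ConditionTrue,
            Reason:  "Deploying",
            Message: strings.Join(progressing, "\n"),
        })
    } else {
        s.setCondition(configv1.ClusterOperatorStatusCondition{
            Type:   configv1.OperatorAvailable,
            Status: configv1.ConditionTrue,
        })
    }
}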
pkg/controller/networkconfig/networkconfig_controller.go (56 additions, 9 deletions)

@@ -2,6 +2,7 @@ package networkconfig

 import (
     "context"
+    "fmt"
     "log"
     "time"
 
@@ -12,11 +13,14 @@ import (
"github.com/openshift/cluster-network-operator/pkg/apply"
"github.com/openshift/cluster-network-operator/pkg/names"
"github.com/openshift/cluster-network-operator/pkg/network"
"github.com/openshift/cluster-network-operator/pkg/util/clusteroperator"

appsv1 "k8s.io/api/apps/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -37,18 +41,24 @@ var ManifestPath = "./bindata"

 // Add creates a new NetworkConfig Controller and adds it to the Manager. The Manager will set fields on the Controller
 // and Start it when the Manager is Started.
-func Add(mgr manager.Manager) error {
-    return add(mgr, newReconciler(mgr))
+func Add(mgr manager.Manager, status *clusteroperator.StatusManager) error {
+    return add(mgr, newReconciler(mgr, status))
 }
 
 // newReconciler returns a new reconcile.Reconciler
-func newReconciler(mgr manager.Manager) reconcile.Reconciler {
+func newReconciler(mgr manager.Manager, status *clusteroperator.StatusManager) *ReconcileNetworkConfig {
     configv1.Install(mgr.GetScheme())
-    return &ReconcileNetworkConfig{client: mgr.GetClient(), scheme: mgr.GetScheme()}
+    return &ReconcileNetworkConfig{
+        client: mgr.GetClient(),
+        scheme: mgr.GetScheme(),
+        status: status,
+
+        daemonSetReconciler: newDaemonSetReconciler(status),
+    }
 }
 
 // add adds a new Controller to mgr with r as the reconcile.Reconciler
-func add(mgr manager.Manager, r reconcile.Reconciler) error {
+func add(mgr manager.Manager, r *ReconcileNetworkConfig) error {
     // Create a new controller
     c, err := controller.New("networkconfig-controller", mgr, controller.Options{Reconciler: r})
     if err != nil {
@@ -61,6 +71,16 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
         return err
     }
 
+    // Likewise for the DaemonSet reconciler
+    c, err = controller.New("daemonset-controller", mgr, controller.Options{Reconciler: r.daemonSetReconciler})
+    if err != nil {
+        return err
+    }
+    err = c.Watch(&source.Kind{Type: &appsv1.DaemonSet{}}, &handler.EnqueueRequestForObject{})
+    if err != nil {
+        return err
+    }
+
     return nil
 }

@@ -72,6 +92,9 @@ type ReconcileNetworkConfig struct {
     // that reads objects from the cache and writes to the apiserver
     client client.Client
     scheme *runtime.Scheme
+    status *clusteroperator.StatusManager
+
+    daemonSetReconciler *ReconcileDaemonSets
 }
 
 // Reconcile updates the state of the cluster to match that which is desired
@@ -90,6 +113,7 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
     err := r.client.Get(context.TODO(), request.NamespacedName, operConfig)
     if err != nil {
         if apierrors.IsNotFound(err) {
+            r.status.SetConfigFailing("NoOperatorConfig", fmt.Errorf("NetworkConfig was deleted"))
             // Request object not found, could have been deleted after reconcile request.
             // Owned objects are automatically garbage collected, since we set
             // the ownerReference (see https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/).
@@ -98,26 +122,30 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
         }
         // Error reading the object - requeue the request.
         log.Printf("Unable to retrieve NetworkConfig object: %v", err)
+        // FIXME: operator status?
         return reconcile.Result{}, err
     }
 
     // Merge in the cluster configuration, in case the administrator has updated some "downstream" fields
     // This will also commit the change back to the apiserver.
     if err := r.MergeClusterConfig(context.TODO(), operConfig); err != nil {
         log.Printf("Failed to merge the cluster configuration: %v", err)
+        r.status.SetConfigFailing("MergeClusterConfig", err)
         return reconcile.Result{}, err
     }
 
     // Validate the configuration
     if err := network.Validate(&operConfig.Spec); err != nil {
         log.Printf("Failed to validate NetworkConfig.Spec: %v", err)
+        r.status.SetConfigFailing("InvalidOperatorConfig", err)
         return reconcile.Result{}, err
     }
 
     // Retrieve the previously applied operator configuration
     prev, err := GetAppliedConfiguration(context.TODO(), r.client, operConfig.ObjectMeta.Name)
     if err != nil {
         log.Printf("Failed to retrieve previously applied configuration: %v", err)
+        // FIXME: operator status?
         return reconcile.Result{}, err
     }
 
@@ -132,39 +160,56 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
         err = network.IsChangeSafe(prev, &operConfig.Spec)
         if err != nil {
             log.Printf("Not applying unsafe change: %v", err)
-            return reconcile.Result{},
-                errors.Wrapf(err, "not applying unsafe change")
+            err = errors.Wrapf(err, "not applying unsafe change")
+            r.status.SetConfigFailing("InvalidOperatorConfig", err)
+            return reconcile.Result{}, err
         }
     }

     // Generate the objects
     objs, err := network.Render(&operConfig.Spec, ManifestPath)
     if err != nil {
         log.Printf("Failed to render: %v", err)
-        return reconcile.Result{}, errors.Wrapf(err, "failed to render")
+        err = errors.Wrapf(err, "failed to render")
+        r.status.SetConfigFailing("RenderError", err)
+        return reconcile.Result{}, err
     }
 
     // The first object we create should be the record of our applied configuration. The last object we create is config.openshift.io/v1/Network.Status
     app, err := AppliedConfiguration(operConfig)
     if err != nil {
         log.Printf("Failed to render applied: %v", err)
-        return reconcile.Result{}, errors.Wrapf(err, "failed to render applied")
+        err = errors.Wrapf(err, "failed to render applied")
+        r.status.SetConfigFailing("RenderError", err)
+        return reconcile.Result{}, err
     }
     objs = append([]*uns.Unstructured{app}, objs...)
 
+    // Set up the DaemonSet reconciler before we start creating the DaemonSets
+    r.status.SetConfigSuccess()
+    daemonSets := []types.NamespacedName{}
+    for _, obj := range objs {
+        if obj.GetAPIVersion() == "apps/v1" && obj.GetKind() == "DaemonSet" {
+            daemonSets = append(daemonSets, types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()})
+        }
+    }
+    r.daemonSetReconciler.SetDaemonSets(daemonSets)
+
     // Apply the objects to the cluster
     for _, obj := range objs {
         // Mark the object to be GC'd if the owner is deleted.
         if err := controllerutil.SetControllerReference(operConfig, obj, r.scheme); err != nil {
             err = errors.Wrapf(err, "could not set reference for (%s) %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
             log.Println(err)
+            r.status.SetConfigFailing("InternalError", err)
             return reconcile.Result{}, err
         }
 
         // Open question: should an error here indicate we will never retry?
         if err := apply.ApplyObject(context.TODO(), r.client, obj); err != nil {
             err = errors.Wrapf(err, "could not apply (%s) %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
             log.Println(err)
+            r.status.SetConfigFailing("ApplyOperatorConfig", err)
             return reconcile.Result{}, err
         }
     }
@@ -174,6 +219,7 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
     if err != nil {
         err = errors.Wrapf(err, "could not generate network status")
         log.Println(err)
+        r.status.SetConfigFailing("StatusError", err)
         return reconcile.Result{}, err
     }
     if status != nil {
@@ -182,6 +228,7 @@ func (r *ReconcileNetworkConfig) Reconcile(request reconcile.Request) (reconcile.Result, error) {
         if err := apply.ApplyObject(context.TODO(), r.client, status); err != nil {
             err = errors.Wrapf(err, "could not apply (%s) %s/%s", status.GroupVersionKind(), status.GetNamespace(), status.GetName())
             log.Println(err)
+            r.status.SetConfigFailing("StatusError", err)
             return reconcile.Result{}, err
         }
     }
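Note the RequeueAfter: ResyncPeriod result in the new daemonset controller: DaemonSet health is re-evaluated periodically even if no further watch events arrive. ResyncPeriod is defined elsewhere in package networkconfig and is not visible in this diff; a plausible declaration (the value is assumed):

// Assumed declaration; the real value lives elsewhere in package networkconfig.
var ResyncPeriod = 5 * time.Minute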