Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Rework reconcile loop for workspace controller #36

Merged
merged 12 commits into from
Apr 7, 2020
2 changes: 1 addition & 1 deletion .vscode/launch.json
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
"env": {
"WATCH_NAMESPACE": "che-workspace-controller",
"CONTROLLER_CONFIG_MAP_NAMESPACE": "che-workspace-controller"
},
}
},
{
"name": "Launch Controller",
Expand Down
18 changes: 13 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,13 @@ ADMIN_CTX ?= ""

all: help

# _print_vars: internal helper prepended to user-facing targets (docker,
# deploy, update_cfg, local) — echoes the main configuration knobs so the
# user can see exactly which values the invocation will use.
_print_vars:
	@echo "Current env vars:"
	@echo " IMG=$(IMG)"
	@echo " PULL_POLICY=$(PULL_POLICY)"
	@echo " WEBHOOK_ENABLED=$(WEBHOOK_ENABLED)"
	@echo " DEFAULT_ROUTING=$(DEFAULT_ROUTING)"

_set_ctx:
ifneq ($(ADMIN_CTX),"")
$(eval CURRENT_CTX := $(shell $(TOOL) config current-context))
Expand Down Expand Up @@ -95,17 +102,18 @@ endif
_do_uninstall:
# It's safer to delete all workspaces before deleting the controller; otherwise we could
# leave workspaces in a hanging state if we add finalizers.
ifneq ($(shell command -v kubectl),)
ifneq ($(shell command -v kubectl 2> /dev/null),)
kubectl delete workspaces.workspace.che.eclipse.org --all-namespaces --all
else
$(info WARN: kubectl is not installed: unable to delete all workspaces)
endif
$(TOOL) delete namespace $(NAMESPACE)
$(TOOL) delete customresourcedefinitions.apiextensions.k8s.io workspaceroutings.workspace.che.eclipse.org
$(TOOL) delete customresourcedefinitions.apiextensions.k8s.io components.workspace.che.eclipse.org
$(TOOL) delete customresourcedefinitions.apiextensions.k8s.io workspaces.workspace.che.eclipse.org

### docker: build and push docker image
docker:
docker: _print_vars
docker build -t $(IMG) -f ./build/Dockerfile .
docker push $(IMG)

Expand All @@ -120,7 +128,7 @@ else
endif

### deploy: deploy controller to cluster
deploy: _set_ctx _create_namespace _deploy_registry _update_yamls _update_crds webhook _apply_controller_cfg _reset_yamls _reset_ctx
deploy: _print_vars _set_ctx _create_namespace _deploy_registry _update_yamls _update_crds webhook _apply_controller_cfg _reset_yamls _reset_ctx

### restart: restart cluster controller deployment
restart: _set_ctx _do_restart _reset_ctx
Expand All @@ -129,7 +137,7 @@ restart: _set_ctx _do_restart _reset_ctx
rollout: docker restart

### update_cfg: configures already deployed controller according to set env variables
update_cfg: _set_ctx _update_yamls _apply_controller_cfg _reset_yamls _reset_ctx
update_cfg: _print_vars _set_ctx _update_yamls _apply_controller_cfg _reset_yamls _reset_ctx

### update_crds: update custom resource definitions on cluster
update_crds: _set_ctx _update_crds _reset_ctx
Expand All @@ -138,7 +146,7 @@ update_crds: _set_ctx _update_crds _reset_ctx
uninstall: _set_ctx _do_uninstall _reset_ctx

### local: set up cluster for local development
local: _set_ctx _create_namespace _deploy_registry _set_registry_url _update_yamls _update_crds _update_controller_configmap _reset_yamls _reset_ctx
local: _print_vars _set_ctx _create_namespace _deploy_registry _set_registry_url _update_yamls _update_crds _update_controller_configmap _reset_yamls _reset_ctx

### start_local: start local instance of controller using operator-sdk
start_local:
Expand Down
98 changes: 76 additions & 22 deletions cmd/manager/main.go
Original file line number Diff line number Diff line change
@@ -1,44 +1,59 @@
//
// Copyright (c) 2019-2020 Red Hat, Inc.
// This program and the accompanying materials are made
// available under the terms of the Eclipse Public License 2.0
// which is available at https://www.eclipse.org/legal/epl-2.0/
//
// SPDX-License-Identifier: EPL-2.0
//
// Contributors:
// Red Hat, Inc. - initial API and implementation
//

package main

import (
"context"
"flag"
"fmt"
"github.com/che-incubator/che-workspace-operator/pkg/controller/registry"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/rest"
"os"
"runtime"

// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/client-go/rest"

"github.com/che-incubator/che-workspace-operator/pkg/apis"
"github.com/che-incubator/che-workspace-operator/pkg/controller"
"github.com/che-incubator/che-workspace-operator/pkg/webhook"
"github.com/che-incubator/che-workspace-operator/version"

"github.com/operator-framework/operator-sdk/pkg/k8sutil"
kubemetrics "github.com/operator-framework/operator-sdk/pkg/kube-metrics"
"github.com/operator-framework/operator-sdk/pkg/leader"
"github.com/operator-framework/operator-sdk/pkg/log/zap"
"github.com/operator-framework/operator-sdk/pkg/metrics"
"github.com/operator-framework/operator-sdk/pkg/restmapper"
sdkVersion "github.com/operator-framework/operator-sdk/version"
"github.com/spf13/pflag"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"sigs.k8s.io/controller-runtime/pkg/client/config"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"

"sigs.k8s.io/controller-runtime/pkg/runtime/signals"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
)

// Change below variables to serve metrics on different host or port.
var (
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
metricsHost = "0.0.0.0"
metricsPort int32 = 8383
operatorMetricsPort int32 = 8686
)
var log = logf.Log.WithName("cmd")

func printVersion() {
log.Info(fmt.Sprintf("Operator Version: %s", version.Version))
log.Info(fmt.Sprintf("Go Version: %s", runtime.Version()))
log.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH))
log.Info(fmt.Sprintf("Version of operator-sdk: %v", sdkVersion.Version))
Expand Down Expand Up @@ -81,9 +96,8 @@ func main() {
}

ctx := context.TODO()

// Become the leader before proceeding
err = leader.Become(ctx, "che-workspace-controller-lock")
err = leader.Become(ctx, "che-workspace-operator-lock")
if err != nil {
log.Error(err, "")
os.Exit(1)
Expand All @@ -92,6 +106,7 @@ func main() {
// Create a new Cmd to provide shared dependencies and start components
mgr, err := manager.New(cfg, manager.Options{
Namespace: namespace,
MapperProvider: restmapper.NewDynamicRESTMapper,
MetricsBindAddress: fmt.Sprintf("%s:%d", metricsHost, metricsPort),
})
if err != nil {
Expand All @@ -107,14 +122,6 @@ func main() {
os.Exit(1)
}

log.Info("Expose Plugin Registry Port.")

// Create Service object to expose the embedded plugin registry.
_, err = registry.ExposeRegistryPort(ctx, 8080)
if err != nil {
log.Info(err.Error())
}

log.Info("Setting up Controllers.")

// Setup all Controllers
Expand All @@ -123,20 +130,43 @@ func main() {
os.Exit(1)
}

// Setup all webhooks
log.Info("Setting up webhooks")
if err := webhook.SetUpWebhooks(mgr, ctx); err != nil {
log.Error(err, "unable to register webhooks to the manager")
os.Exit(1)
}

// TODO: Required to filter GVK for metrics, since we add routes and templates to the scheme.
// TODO: see: https://github.com/operator-framework/operator-sdk/pull/2606
//if err = serveCRMetrics(cfg); err != nil {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure I understand why it's commented out, but does it make sense to create a separate issue to uncomment it and eventually make CR metrics work?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

It was initially commented to resolve a log spamming issue Josh found while porting webhooks to the forked repo, and I didn't have time to delve deeply enough to figure out the full problem. I agree that it should be removed and an issue created, though.

// log.Info("Could not generate and serve custom resource metrics", "error", err.Error())
//}

log.Info("Expose Metrics Port.")
// Add to the below struct any other metrics ports you want to expose.
servicePorts := []v1.ServicePort{
{Port: metricsPort, Name: metrics.OperatorPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: metricsPort}},
// TODO: See above re: CR metrics
//{Port: operatorMetricsPort, Name: metrics.CRPortName, Protocol: v1.ProtocolTCP, TargetPort: intstr.IntOrString{Type: intstr.Int, IntVal: operatorMetricsPort}},
}
// Create Service object to expose the metrics port(s).
service, err := metrics.CreateMetricsService(ctx, cfg, servicePorts)
if err != nil {
log.Info("Could not create metrics Service", "error", err.Error())
}
// Create Service object to expose the metrics port.
_, err = metrics.CreateMetricsService(ctx, &rest.Config{}, servicePorts)

// CreateServiceMonitors will automatically create the prometheus-operator ServiceMonitor resources
// necessary to configure Prometheus to scrape metrics from this operator.
services := []*v1.Service{service}
_, err = metrics.CreateServiceMonitors(cfg, namespace, services)
if err != nil {
log.Info(err.Error())
log.Info("Could not create ServiceMonitor object", "error", err.Error())
// If this operator is deployed to a cluster without the prometheus-operator running, it will return
// ErrServiceMonitorNotPresent, which can be used to safely skip ServiceMonitor creation.
if err == metrics.ErrServiceMonitorNotPresent {
log.Info("Install prometheus-operator in your cluster to create ServiceMonitor objects", "error", err.Error())
}
}

log.Info("Starting the Cmd.")
Expand All @@ -147,3 +177,27 @@ func main() {
os.Exit(1)
}
}

// serveCRMetrics generates and serves custom-resource metrics for the
// operator's own GVKs on "http://metricsHost:operatorMetricsPort".
// The GVK list is derived from apis.AddToScheme; metrics are generated
// only for the namespace the operator is currently deployed in.
func serveCRMetrics(cfg *rest.Config) error {
	// Filtered operator/CustomResource-specific GVKs; override this list
	// with custom logic if finer control is ever needed.
	gvks, err := k8sutil.GetGVKsFromAddToScheme(apis.AddToScheme)
	if err != nil {
		return err
	}
	// Namespace the operator is running in; add further namespaces to the
	// slice below to generate metrics for them as well.
	operatorNs, err := k8sutil.GetOperatorNamespace()
	if err != nil {
		return err
	}
	return kubemetrics.GenerateAndServeCRMetrics(cfg, []string{operatorNs}, gvks, metricsHost, operatorMetricsPort)
}
2 changes: 1 addition & 1 deletion deploy/controller_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,6 @@ data:
ingress.global.domain: 192.168.99.100.nip.io
plugin.registry.url: http://che-plugin-registry.192.168.99.100.nip.io/v3
che.workspace.plugin_broker.artifacts.image: quay.io/eclipse/che-plugin-artifacts-broker:v3.1.0
cherestapis.image.name: quay.io/dfestal/che-workspace-crd-rest-apis:newone
cherestapis.image.name: amisevsk/che-rest-apis:latest
che.webhooks.enabled: "false"
che.default_routing_class: "basic"
Loading