feat: add catalog and update bundle building #31

Merged: 4 commits, May 17, 2024
10 changes: 9 additions & 1 deletion .dockerignore
@@ -2,4 +2,12 @@
# Ignore build and test binaries.
bin/

bundle.Dockerfile
bundle.Dockerfile*
**/__debug_*

bundle*

kind-kubeconfig*

catalog.Dockerfile
catalog
11 changes: 10 additions & 1 deletion .gitignore
@@ -27,4 +27,13 @@ Dockerfile.cross


bundle.Dockerfile
bundle
bundle

**/__debug_*

bundle*

kind-kubeconfig*

catalog.Dockerfile
catalog
64 changes: 45 additions & 19 deletions Makefile
@@ -248,14 +248,36 @@ bundle: manifests kustomize operator-sdk ## Generate bundle manifests and metadata
	$(KUSTOMIZE) build config/manifests | $(OPERATOR_SDK) generate bundle $(BUNDLE_GEN_FLAGS)
	$(OPERATOR_SDK) bundle validate ./bundle

.PHONY: scorecard-test
scorecard-test: bundle ## Run the scorecard tests
	$(OPERATOR_SDK) scorecard bundle

.PHONY: bundle-build
bundle-build: ## Build the bundle image.
	docker build -f bundle.Dockerfile -t $(BUNDLE_IMG) .
	$(CONTAINER_TOOL) build -f bundle.Dockerfile -t $(BUNDLE_IMG) .

.PHONY: bundle-push
bundle-push: ## Push the bundle image.
	$(MAKE) docker-push IMG=$(BUNDLE_IMG)

.PHONY: bundle-buildx
bundle-buildx: ## Build the bundle image.
	# copy existing Dockerfile and insert --platform=${BUILDPLATFORM} into Dockerfile.cross, and preserve the original Dockerfile
	sed -e '1 s/\(^FROM\)/FROM --platform=\$$\{BUILDPLATFORM\}/; t' -e ' 1,// s//FROM --platform=\$$\{BUILDPLATFORM\}/' bundle.Dockerfile > bundle.Dockerfile.cross
	- $(CONTAINER_TOOL) buildx create --name project-v3-builder
	$(CONTAINER_TOOL) buildx use project-v3-builder
	$(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) --tag ${BUNDLE_IMG} -f bundle.Dockerfile.cross .
	$(CONTAINER_TOOL) buildx rm project-v3-builder
	rm bundle.Dockerfile.cross

.PHONY: bundle-run
bundle-run: ## Run the bundle image.
	$(OPERATOR_SDK) run bundle $(BUNDLE_IMG)

.PHONY: bundle-cleanup
bundle-cleanup: ## Clean up the bundle image.
	$(OPERATOR_SDK) cleanup $(PROJECT_NAME)

.PHONY: opm
OPM = ./bin/opm
opm: ## Download opm locally if necessary.
@@ -265,34 +287,38 @@ ifeq (,$(shell which opm 2>/dev/null))
	set -e ;\
	mkdir -p $(dir $(OPM)) ;\
	OS=$(shell go env GOOS) && ARCH=$(shell go env GOARCH) && \
	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.23.0/$${OS}-$${ARCH}-opm ;\
	curl -sSLo $(OPM) https://github.com/operator-framework/operator-registry/releases/download/v1.29.0/$${OS}-$${ARCH}-opm ;\
	chmod +x $(OPM) ;\
	}
else
OPM = $(shell which opm)
endif
endif

# A comma-separated list of bundle images (e.g. make catalog-build BUNDLE_IMGS=example.com/operator-bundle:v0.1.0,example.com/operator-bundle:v0.2.0).
# These images MUST exist in a registry and be pull-able.
BUNDLE_IMGS ?= $(BUNDLE_IMG)

# The image tag given to the resulting catalog image (e.g. make catalog-build CATALOG_IMG=example.com/operator-catalog:v0.2.0).
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:v$(VERSION)
CATALOG_IMG ?= $(IMAGE_TAG_BASE)-catalog:latest

# Set CATALOG_BASE_IMG to an existing catalog image tag to add $BUNDLE_IMGS to that image.
ifneq ($(origin CATALOG_BASE_IMG), undefined)
FROM_INDEX_OPT := --from-index $(CATALOG_BASE_IMG)
endif

# Build a catalog image by adding bundle images to an empty catalog using the operator package manager tool, 'opm'.
# This recipe invokes 'opm' in 'semver' bundle add mode. For more information on add modes, see:
# https://github.com/operator-framework/community-operators/blob/7f1438c/docs/packaging-operator.md#updating-your-existing-operator
.PHONY: catalog-build
catalog-build: opm ## Build a catalog image.
	$(OPM) index add --container-tool docker --mode semver --tag $(CATALOG_IMG) --bundles $(BUNDLE_IMGS) $(FROM_INDEX_OPT)
catalog-build: opm ## Build catalog manifests.
	mkdir -p catalog
	@if ! test -f ./catalog.Dockerfile; then \
		$(OPM) generate dockerfile catalog; \
	fi
	sed -E "s|(image: ).*-bundle:v$(VERSION)|\1$(BUNDLE_IMG)|g" catalog-template.yaml | \
		$(OPM) alpha render-template basic -o yaml > catalog/catalog.yaml

.PHONY: catalog-docker-build
catalog-docker-build: ## Build a catalog image.
	$(CONTAINER_TOOL) build -t ${CATALOG_IMG} -f catalog.Dockerfile .

# Push the catalog image.
.PHONY: catalog-push
catalog-push: ## Push a catalog image.
.PHONY: catalog-docker-push
catalog-docker-push: ## Push a catalog image.
	$(MAKE) docker-push IMG=$(CATALOG_IMG)

.PHONY: catalog-docker-buildx
catalog-docker-buildx: ## Build and push a catalog image for cross-platform support
	- $(CONTAINER_TOOL) buildx create --name project-v3-builder
	$(CONTAINER_TOOL) buildx use project-v3-builder
	- $(CONTAINER_TOOL) buildx build --push --platform=$(PLATFORMS) -f catalog.Dockerfile --tag ${CATALOG_IMG} .
	- $(CONTAINER_TOOL) buildx rm project-v3-builder
111 changes: 111 additions & 0 deletions catalog-template.yaml

Large diffs are not rendered by default.

47 changes: 47 additions & 0 deletions cmd/main.go
@@ -18,9 +18,12 @@ package main

import (
	"flag"
	"fmt"
	"github.com/zncdata-labs/zookeeper-operator/internal/clustercontroller"
	"github.com/zncdata-labs/zookeeper-operator/internal/znodecontroller"
	"os"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"strings"

	// Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.)
	// to ensure that exec-entrypoint and run can make use of them.
@@ -66,7 +69,23 @@ func main() {
	flag.Parse()

	ctrl.SetLogger(zap.New(zap.UseFlagOptions(&opts)))
	watchNamespaces, err := getWatchNamespaces()
	if err != nil {
		setupLog.Error(err, "unable to get WatchNamespace, "+
			"the manager will watch and manage resources in all namespaces")
	}

	cachedNamespaces := make(map[string]cache.Config)

	if len(watchNamespaces) > 0 {
		setupLog.Info("watchNamespaces", "namespaces", watchNamespaces)
		cachedNamespaces = make(map[string]cache.Config)
		for _, ns := range watchNamespaces {
			cachedNamespaces[ns] = cache.Config{}
		}
	} else {
		setupLog.Info("watchNamespaces", "namespaces", "all")
	}
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme: scheme,
		Metrics: server.Options{BindAddress: metricsAddr},
@@ -84,6 +103,7 @@ func main() {
		// if you are doing or is intended to do any operation such as perform cleanups
		// after the manager stops then its usage might be unsafe.
		// LeaderElectionReleaseOnCancel: true,
		Cache: cache.Options{DefaultNamespaces: cachedNamespaces},
	})
	if err != nil {
		setupLog.Error(err, "unable to start manager")
@@ -123,3 +143,30 @@ func main() {
		os.Exit(1)
	}
}

// getWatchNamespaces returns the Namespaces the operator should be watching for changes
func getWatchNamespaces() ([]string, error) {
	// WatchNamespacesEnvVar is the constant for env variable WATCH_NAMESPACES
	// which specifies the Namespaces to watch.
	// An empty value means the operator is running with cluster scope.
	var watchNamespacesEnvVar = "WATCH_NAMESPACES"

	ns, found := os.LookupEnv(watchNamespacesEnvVar)
	if !found {
		return nil, fmt.Errorf("%s must be set", watchNamespacesEnvVar)
	}
	return cleanNamespaceList(ns), nil
}

func cleanNamespaceList(namespaces string) (result []string) {
	unfilteredList := strings.Split(namespaces, ",")
	result = make([]string, 0, len(unfilteredList))

	for _, elem := range unfilteredList {
		elem = strings.TrimSpace(elem)
		if len(elem) != 0 {
			result = append(result, elem)
		}
	}
	return
}
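The main.go hunks above wire an optional WATCH_NAMESPACES environment variable into controller-runtime's cache: cleanNamespaceList splits the comma-separated value and drops empty entries, each surviving namespace becomes a key in cache.Options.DefaultNamespaces, and an unset or empty value leaves the map empty so the manager keeps watching every namespace. Below is a minimal standalone Go sketch (not part of this PR) of that parsing and mapping; it assumes controller-runtime v0.16+ on the module path, and the printed output is illustrative only.

// Standalone sketch: parse WATCH_NAMESPACES the way cleanNamespaceList does and
// build the per-namespace cache configuration that main.go now passes to the manager.
package main

import (
	"fmt"
	"os"
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/cache"
)

func main() {
	// e.g. WATCH_NAMESPACES="kubedatastack, team-a" (names are illustrative)
	raw := os.Getenv("WATCH_NAMESPACES")

	// Split on commas, trim whitespace, drop empty entries.
	namespaces := make([]string, 0)
	for _, ns := range strings.Split(raw, ",") {
		if ns = strings.TrimSpace(ns); ns != "" {
			namespaces = append(namespaces, ns)
		}
	}

	// An empty map means the cache stays cluster-scoped; otherwise only the
	// listed namespaces are watched and cached.
	defaultNamespaces := map[string]cache.Config{}
	for _, ns := range namespaces {
		defaultNamespaces[ns] = cache.Config{}
	}

	opts := cache.Options{DefaultNamespaces: defaultNamespaces}
	fmt.Printf("watching %d namespace(s): %v\n", len(namespaces), namespaces)
	_ = opts
}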
2 changes: 1 addition & 1 deletion config/manager/kustomization.yaml
@@ -4,5 +4,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
images:
- name: controller
  newName: quay.io/zncdata/zookeeper-operator
  newName: registry.cn-hangzhou.aliyuncs.com/luwei-dev/zookeeper-operator
  newTag: v0.0.1
13 changes: 13 additions & 0 deletions deploy/catalog.yaml
@@ -0,0 +1,13 @@
apiVersion: operators.coreos.com/v1alpha1
kind: CatalogSource
metadata:
  name: zookeeper-catalog
  # namespace: olm
spec:
  sourceType: grpc
  image: registry.cn-hangzhou.aliyuncs.com/luwei-dev/zookeeper-operator-catalog:latest
  displayName: zookeeper-catalog
  publisher: zncdata-labs
  updateStrategy:
    registryPoll:
      interval: 20m
46 changes: 46 additions & 0 deletions deploy/cluster.yaml
@@ -0,0 +1,46 @@
apiVersion: zookeeper.zncdata.dev/v1alpha1
kind: ZookeeperCluster
metadata:
  labels:
    app.kubernetes.io/name: zookeepercluster
    app.kubernetes.io/instance: zookeepercluster-sample
    app.kubernetes.io/part-of: zookeeper-operator
    app.kubernetes.io/managed-by: kustomize
    app.kubernetes.io/created-by: zookeeper-operator
  name: zookeepercluster-sample
spec:
  image:
    repository: docker.io/bitnami/zookeeper
    tag: 3.9.1-debian-12-r15
  clusterConfig:
    service:
      type: NodePort
      port: 2181
  server:
    config:
      resources:
        cpu:
          min: "1"
          max: "2"
        memory:
          limit: "2Gi"
    roleGroups:
      default:
        replicas: 3
        config:
          resources:
            cpu:
              min: "1"
              max: "2"
            memory:
              limit: "3Gi"
          logging:
            zookeeperCluster:
              loggers:
                test:
                  level: DEBUG
              console:
                level: WARN
              file:
                level: INFO

9 changes: 9 additions & 0 deletions deploy/namespace.yaml
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
  name: kubedatastack-operators
---
apiVersion: v1
kind: Namespace
metadata:
  name: kubedatastack
10 changes: 10 additions & 0 deletions deploy/operator-group.yaml
@@ -0,0 +1,10 @@
apiVersion: operators.coreos.com/v1
kind: OperatorGroup
metadata:
  name: zncdata-stack
  namespace: kubedatastack-operators
spec:
  # if not specified, the operator group will target all namespaces
  # so the operator will be able to watch and manage resources in all namespaces
  targetNamespaces:
    - kubedatastack
11 changes: 11 additions & 0 deletions deploy/subscription.yaml
@@ -0,0 +1,11 @@
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
  name: zookeeper-sub
  namespace: kubedatastack-operators
spec:
  channel: stable
  name: zookeeper-operator
  source: zookeeper-catalog
  sourceNamespace: olm
  installPlanApproval: Automated
10 changes: 5 additions & 5 deletions internal/clustercontroller/zookeepercluster_controller.go
@@ -38,11 +38,11 @@ type ZookeeperClusterReconciler struct {
//+kubebuilder:rbac:groups=zookeeper.zncdata.dev,resources=zookeeperclusters,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=zookeeper.zncdata.dev,resources=zookeeperclusters/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=zookeeper.zncdata.dev,resources=zookeeperclusters/finalizers,verbs=update
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch
//+kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch

// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.15.0/pkg/reconcile
func (r *ZookeeperClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
7 changes: 5 additions & 2 deletions internal/znodecontroller/handler.go
@@ -84,9 +84,12 @@ func (z *ZNodeReconciler) createZnodePath() string {
func (z *ZNodeReconciler) createZookeeperZnode(path string, cluster *zkv1alpha1.ZookeeperCluster) error {
	svcDns := z.getClusterSvcUrl(cluster)
	logger.Info("zookeeper cluster service client dns url", "dns", svcDns)
	zkCli := NewZkClient(svcDns)
	zkCli, err := NewZkClient(svcDns)
	if err != nil {
		return err
	}
	defer zkCli.Close()
	err := zkCli.Create(path, []byte{})
	err = zkCli.Create(path, []byte{})
	if err != nil {
		return err
	}
16 changes: 10 additions & 6 deletions internal/znodecontroller/zkclient.go
@@ -31,20 +31,24 @@ type ZkClient struct {
}

// NewZkClient new zk client
func NewZkClient(address string) *ZkClient {
	conn := GetConnect([]string{address})
func NewZkClient(address string) (*ZkClient, error) {
	conn, err := GetConnect([]string{address})
	if err != nil {
		return nil, err
	}
	return &ZkClient{
		Address: address,
		Client: conn,
	}
	}, nil
}

func GetConnect(zkList []string) (conn *zk.Conn) {
	conn, _, err := zk.Connect(zkList, 10*time.Second)
func GetConnect(zkList []string) (conn *zk.Conn, err error) {
	conn, _, err = zk.Connect(zkList, 10*time.Second)
	if err != nil {
		logger.Error(err, "failed to connect to zookeeper")
		return nil, err
	}
	return
	return conn, nil
}

func (z ZkClient) Create(path string, data []byte) error {
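With this change, NewZkClient and GetConnect propagate the connection error instead of handing back a client wrapped around a nil *zk.Conn, so createZookeeperZnode can return the failure and let the controller requeue. The standalone Go sketch below (not part of this PR) shows the same error-first pattern written directly against the ZooKeeper client; it assumes the github.com/go-zookeeper/zk package (the operator's actual zk dependency is not visible in this diff), and the ensemble address and znode path are hypothetical placeholders.

// Standalone sketch: connect to ZooKeeper and create an empty znode, surfacing
// errors to the caller the way the updated GetConnect and ZkClient.Create do.
package main

import (
	"fmt"
	"time"

	"github.com/go-zookeeper/zk"
)

func main() {
	// Hypothetical in-cluster service DNS for the sample ZookeeperCluster.
	servers := []string{"zookeepercluster-sample.default.svc.cluster.local:2181"}

	// Propagate the dial error instead of returning a half-initialized client,
	// mirroring the updated GetConnect.
	conn, _, err := zk.Connect(servers, 10*time.Second)
	if err != nil {
		fmt.Println("failed to connect to zookeeper:", err)
		return
	}
	defer conn.Close()

	// Equivalent of ZkClient.Create as used by createZookeeperZnode: create an
	// empty persistent znode and return any error to the caller.
	path, err := conn.Create("/demo-znode", []byte{}, 0, zk.WorldACL(zk.PermAll))
	if err != nil {
		fmt.Println("failed to create znode:", err)
		return
	}
	fmt.Println("created znode at", path)
}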