Deployments resources #771

Merged
8 commits, merged May 18, 2022
2 changes: 1 addition & 1 deletion .github/workflows/kind-conformance.yaml
@@ -24,7 +24,7 @@ jobs:

# Map between K8s and KinD versions.
# This is attempting to make it a bit clearer what's being tested.
# See: https://github.com/kubernetes-sigs/kind/releases/tag/v0.11.1
# See: https://github.com/kubernetes-sigs/kind/releases/tag/v0.12.0
Contributor commented:

We should probably update the Knative version used for conformance tests. I see e2e is getting updated, but the conformance tests are likely still using 1.1.0 (from the Makefile).

include:
- k8s-version: v1.22.1
kind-image-sha: sha256:100b3558428386d1372591f8d62add85b900538d94db8e455b66ebaf05a3ca3a
6 changes: 3 additions & 3 deletions .github/workflows/kind-e2e.yaml
@@ -18,12 +18,12 @@ jobs:
- v1.22.1
- v1.23.3
knative-version:
- 1.0.2
- 1.1.0
- 1.3.2
- 1.4.0

# Map between K8s and KinD versions.
# This is attempting to make it a bit clearer what's being tested.
# See: https://github.com/kubernetes-sigs/kind/releases/tag/v0.11.1
# See: https://github.com/kubernetes-sigs/kind/releases/tag/v0.12.0
include:
- k8s-version: v1.22.1
kind-image-sha: sha256:100b3558428386d1372591f8d62add85b900538d94db8e455b66ebaf05a3ca3a
14 changes: 7 additions & 7 deletions Makefile
@@ -77,7 +77,7 @@ releases-gcloud:
$(OPEN) https://cloud.google.com/sdk/docs/quickstart

KO_RELEASES := https://github.com/google/ko/releases
KO_VERSION := 0.9.3
KO_VERSION := 0.11.2
KO_BIN_DIR := $(LOCAL_BIN)/ko_$(KO_VERSION)_$(PLATFORM)_x86_64
KO_URL := $(KO_RELEASES)/download/v$(KO_VERSION)/$(notdir $(KO_BIN_DIR)).tar.gz
KO := $(KO_BIN_DIR)/ko
@@ -95,7 +95,7 @@ releases-ko:
$(OPEN) $(KO_RELEASES)

KIND_RELEASES := https://github.com/kubernetes-sigs/kind/releases
KIND_VERSION := 0.11.1
KIND_VERSION := 0.12.0
KIND_URL := $(KIND_RELEASES)/download/v$(KIND_VERSION)/kind-$(platform)-amd64
KIND := $(LOCAL_BIN)/kind_$(KIND_VERSION)_$(platform)_amd64
$(KIND): | $(CURL) $(LOCAL_BIN)
@@ -131,7 +131,7 @@ releases-envsubst:

KUBECTL_RELEASES := https://github.com/kubernetes/kubernetes/tags
# Keep this in sync with KIND_K8S_VERSION
KUBECTL_VERSION := 1.22.1
KUBECTL_VERSION := 1.24.0
KUBECTL_BIN := kubectl-$(KUBECTL_VERSION)-$(platform)-amd64
KUBECTL_URL := https://storage.googleapis.com/kubernetes-release/release/v$(KUBECTL_VERSION)/bin/$(platform)/amd64/kubectl
KUBECTL := $(LOCAL_BIN)/$(KUBECTL_BIN)
@@ -304,7 +304,7 @@ $(KUBECONFIG): | $(KUBECONFIG_DIR)
kubeconfig: $(KUBECONFIG)

# https://github.com/rabbitmq/cluster-operator/releases
RABBITMQ_CLUSTER_OPERATOR_VERSION ?= 1.12.1
RABBITMQ_CLUSTER_OPERATOR_VERSION ?= 1.13.0
.PHONY: install-rabbitmq-cluster-operator
install-rabbitmq-cluster-operator: | $(KUBECONFIG) $(KUBECTL) ## Install RabbitMQ Cluster Operator
$(KUBECTL) $(K_CMD) --filename \
@@ -313,21 +313,21 @@ install-rabbitmq-cluster-operator: | $(KUBECONFIG) $(KUBECTL) ## Install RabbitM
# https://github.com/jetstack/cert-manager/releases
# ⚠️ You may want to keep this in sync with RABBITMQ_TOPOLOGY_OPERATOR_VERSION
# In other words, don't upgrade cert-manager to a version that was released AFTER RABBITMQ_TOPOLOGY_OPERATOR_VERSION
CERT_MANAGER_VERSION ?= 1.7.0
CERT_MANAGER_VERSION ?= 1.8.0
.PHONY: install-cert-manager
install-cert-manager: | $(KUBECONFIG) $(KUBECTL) ## Install Cert Manager - dependency of RabbitMQ Topology Operator
$(KUBECTL) $(K_CMD) --filename \
https://github.com/jetstack/cert-manager/releases/download/v$(CERT_MANAGER_VERSION)/cert-manager.yaml
$(KUBECTL) wait --for=condition=available deploy/cert-manager-webhook --timeout=60s --namespace $(CERT_MANAGER_NAMESPACE)

# https://github.com/rabbitmq/messaging-topology-operator/releases
RABBITMQ_TOPOLOGY_OPERATOR_VERSION ?= 1.4.1
RABBITMQ_TOPOLOGY_OPERATOR_VERSION ?= 1.6.0
.PHONY: install-rabbitmq-topology-operator
install-rabbitmq-topology-operator: | install-cert-manager $(KUBECTL) ## Install RabbitMQ Topology Operator
$(KUBECTL) $(K_CMD) --filename \
https://github.com/rabbitmq/messaging-topology-operator/releases/download/v$(RABBITMQ_TOPOLOGY_OPERATOR_VERSION)/messaging-topology-operator-with-certmanager.yaml

KNATIVE_VERSION ?= 1.1.0
KNATIVE_VERSION ?= 1.4.0

# https://github.com/knative/serving/releases
.PHONY: install-knative-serving
10 changes: 10 additions & 0 deletions pkg/reconciler/broker/resources/dispatcher.go
@@ -21,6 +21,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/rickb777/date/period"
@@ -146,6 +147,15 @@ func MakeDispatcherDeployment(args *DispatcherArgs) *appsv1.Deployment {
Name: dispatcherContainerName,
Image: args.Image,
Env: envs,
// These resource requests and limits come from performance testing at 1500 msgs/s with a parallelism of 1000
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
}},
},
},
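
The same requests and limits block added here is repeated below for the trigger dispatcher and the source receive adapter. A minimal sketch of how that block could be factored into a shared helper, purely illustrative: the PR itself inlines the literal block in each deployment builder, and the package placement and function name are invented.

package resources

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// dispatcherResources returns the requests and limits derived from
// performance testing at 1500 msgs/s with a parallelism of 1000.
// Hypothetical helper, not part of this PR.
func dispatcherResources() corev1.ResourceRequirements {
	return corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("300m"),
			corev1.ResourceMemory: resource.MustParse("15Mi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("4000m"),
			corev1.ResourceMemory: resource.MustParse("400Mi"),
		},
	}
}
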
9 changes: 9 additions & 0 deletions pkg/reconciler/broker/resources/dispatcher_test.go
@@ -22,6 +22,7 @@ import (
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "knative.dev/eventing/pkg/apis/duck/v1"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
@@ -105,6 +106,14 @@ func TestMakeDispatcherDeployment(t *testing.T) {
Containers: []corev1.Container{{
Name: "dispatcher",
Image: image,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
Env: []corev1.EnvVar{{
Name: system.NamespaceEnvKey,
Value: system.Namespace(),
10 changes: 10 additions & 0 deletions pkg/reconciler/broker/resources/ingress.go
@@ -21,6 +21,7 @@ import (

appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"

@@ -120,6 +121,15 @@ func MakeIngressDeployment(args *IngressArgs) *appsv1.Deployment {
ContainerPort: 8080,
Name: "http",
}},
// These resource requests and limits come from performance testing at 1500 msgs/s with a parallelism of 1000
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("150m"),
corev1.ResourceMemory: resource.MustParse("10Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1000m"),
corev1.ResourceMemory: resource.MustParse("300Mi")},
},
}},
},
},
9 changes: 9 additions & 0 deletions pkg/reconciler/broker/resources/ingress_test.go
@@ -22,6 +22,7 @@ import (
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
@@ -82,6 +83,14 @@ func TestMakeIngressDeployment(t *testing.T) {
Containers: []corev1.Container{{
Image: image,
Name: "ingress",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("150m"),
corev1.ResourceMemory: resource.MustParse("10Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1000m"),
corev1.ResourceMemory: resource.MustParse("300Mi")},
},
Env: []corev1.EnvVar{{
Name: system.NamespaceEnvKey,
Value: system.Namespace(),
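
A note on the quantity strings used throughout these requests and limits: resource.MustParse accepts standard Kubernetes quantity notation, where "1000m" CPU is one full core and "Mi" suffixes are mebibytes, and it panics on malformed input. A small self-contained sketch, illustrative only and not part of this PR:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "1000m" (one thousand millicores) is the same quantity as "1" CPU.
	cpuLimit := resource.MustParse("1000m")
	oneCore := resource.MustParse("1")
	fmt.Println(cpuLimit.Cmp(oneCore) == 0) // true

	// "300Mi" is 300 mebibytes: 300 * 1024 * 1024 bytes.
	mem := resource.MustParse("300Mi")
	fmt.Println(mem.Value()) // 314572800
}
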
10 changes: 10 additions & 0 deletions pkg/reconciler/source/resources/receive_adapter.go
@@ -23,6 +23,7 @@ import (
"github.com/rickb777/date/period"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"knative.dev/eventing-rabbitmq/pkg/apis/sources/v1alpha1"
eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
@@ -189,6 +190,15 @@ func MakeReceiveAdapter(args *ReceiveAdapterArgs) *v1.Deployment {
Image: args.Image,
ImagePullPolicy: "IfNotPresent",
Env: env,
// These resource requests and limits come from performance testing at 1500 msgs/s with a parallelism of 1000
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
},
},
},
9 changes: 9 additions & 0 deletions pkg/reconciler/source/resources/receive_adapter_test.go
@@ -22,6 +22,7 @@ import (
"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1alpha12 "knative.dev/eventing-rabbitmq/pkg/apis/sources/v1alpha1"
eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
@@ -153,6 +154,14 @@ func TestMakeReceiveAdapter(t *testing.T) {
Name: "receive-adapter",
Image: "test-image",
ImagePullPolicy: "IfNotPresent",
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
Env: []corev1.EnvVar{
{
Name: "RABBITMQ_BROKERS",
10 changes: 10 additions & 0 deletions pkg/reconciler/trigger/resources/dispatcher.go
@@ -24,6 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/rickb777/date/period"
"k8s.io/apimachinery/pkg/api/resource"
eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
"knative.dev/eventing/pkg/apis/eventing"
eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
@@ -91,6 +92,15 @@ func MakeDispatcherDeployment(args *DispatcherArgs) *appsv1.Deployment {
Name: "BROKER_INGRESS_URL",
Value: args.BrokerIngressURL.String(),
}},
// These resource requests and limits come from performance testing at 1500 msgs/s with a parallelism of 1000
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
}
if args.Configs != nil {
dispatcher.Env = append(dispatcher.Env, args.Configs.ToEnvVars()...)
9 changes: 9 additions & 0 deletions pkg/reconciler/trigger/resources/dispatcher_test.go
@@ -22,6 +22,7 @@ import (
"github.com/google/go-cmp/cmp"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/utils/pointer"
eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
@@ -136,6 +137,14 @@ func deployment(opts ...func(*appsv1.Deployment)) *appsv1.Deployment {
Containers: []corev1.Container{{
Name: "dispatcher",
Image: image,
Resources: corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("300m"),
corev1.ResourceMemory: resource.MustParse("15Mi")},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("4000m"),
corev1.ResourceMemory: resource.MustParse("400Mi")},
},
Env: []corev1.EnvVar{{
Name: system.NamespaceEnvKey,
Value: system.Namespace(),