Skip to content

Commit

Permalink
upgrade to latest dependencies (#1416)
Browse files Browse the repository at this point in the history
bumping knative.dev/reconciler-test 03cc77c...785e0bd:
  > 785e0bd Improve error message when deleting resources (#618)
  > 7d36fe9 Copy pull secrets to SA for eventshub (#615)
  > e52650f upgrade to latest dependencies (#606)
bumping knative.dev/eventing 6a695cb...0dadfd9:
  > 0dadfd9 [release-1.11] Scheduler: fix reserved replicas handling, blocking autoscaler and overcommitted pods (#7374)
  > c1626f1 [release-1.11] Update dependencies (#7362)
  > 46cc775 [release-1.11] TLS certificate rotation tests (#7103) (#7346)
bumping knative.dev/pkg bd99f2f...56bfe0d:
  > 56bfe0d [release-1.11] [CVE-2023-44487] Disable http2 for webhooks (#2875)

Signed-off-by: Knative Automation <automation@knative.team>
  • Loading branch information
knative-automation committed Oct 24, 2023
1 parent 0df291c commit 6cdf2d0
Show file tree
Hide file tree
Showing 15 changed files with 175 additions and 42 deletions.
6 changes: 3 additions & 3 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ require (
k8s.io/apimachinery v0.26.5
k8s.io/client-go v0.26.5
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2
knative.dev/eventing v0.38.4
knative.dev/eventing v0.38.5
knative.dev/hack v0.0.0-20230712131415-ddae80293c43
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2
sigs.k8s.io/yaml v1.3.0
)

Expand Down
12 changes: 6 additions & 6 deletions go.sum
Original file line number Diff line number Diff line change
Expand Up @@ -967,14 +967,14 @@ k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+O
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
knative.dev/eventing v0.38.4 h1:eH059bfeLilj2xAN6V7XXOh3wqzz5ssoMS/CIJpJfmk=
knative.dev/eventing v0.38.4/go.mod h1:ct8t+v6nmp1kFCy6ngkDWIEvnjJDNDoKptrfnQVh+z8=
knative.dev/eventing v0.38.5 h1:NvSy3lek9IbLLWEot36NyAfNv7VkJNl38F1ItVL0D6s=
knative.dev/eventing v0.38.5/go.mod h1:g+iAS+KBRSKULEPqoVnseMkObDeq3SJhqefbuIu8zY8=
knative.dev/hack v0.0.0-20230712131415-ddae80293c43 h1:3SE06uNfSFGm/5XS+0trbyCUpgsOaBeyhPQU8FPNFz8=
knative.dev/hack v0.0.0-20230712131415-ddae80293c43/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7 h1:y3qbfYX1SuSr/1ysXvKfpV8q/kCwWLWieCUgAhBUHmQ=
knative.dev/pkg v0.0.0-20231011193800-bd99f2f98be7/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0=
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831 h1:rOisVvTe0yuJNImgOex1Z4vdqXRPP1FAg5xPxbLOSlU=
knative.dev/reconciler-test v0.0.0-20231010075208-03cc77c11831/go.mod h1:i+/PWK/n3HPgjXMoj5U7CA6WRW/C3c3EfHCQ0FmrhNM=
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626 h1:qFE+UDBRg6cpF5LbA0sv1XK4XZ36Z7aTRCa+HcuxnNQ=
knative.dev/pkg v0.0.0-20231023150739-56bfe0dd9626/go.mod h1:g+UCgSKQ2f15kHYu/V3CPtoKo5F1x/2Y1ot0NSK7gA0=
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2 h1:Lenj/sGhPYZoCdl4bvoeZzA4Y1VS4LNEIWH1/HTU+6I=
knative.dev/reconciler-test v0.0.0-20231023114057-785e0bd2d9a2/go.mod h1:HgugJUOhHZ3F6Tbhte92ecL0sBqJtCeJtd7K8jX+IJk=
pgregory.net/rapid v0.3.3 h1:jCjBsY4ln4Atz78QoBWxUEvAHaFyNDQg9+WU62aCn1U=
pgregory.net/rapid v0.3.3/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
Expand Down
2 changes: 1 addition & 1 deletion vendor/knative.dev/eventing/pkg/scheduler/state/state.go
Original file line number Diff line number Diff line change
Expand Up @@ -361,7 +361,7 @@ func (s *stateBuilder) updateFreeCapacity(free []int32, last int32, podName stri
// Assert the pod is not overcommitted
if free[ordinal] < 0 {
// This should not happen anymore. Log as an error but do not interrupt the current scheduling.
s.logger.Errorw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
s.logger.Warnw("pod is overcommitted", zap.String("podName", podName), zap.Int32("free", free[ordinal]))
}

if ordinal > last {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -106,7 +106,7 @@ func (a *autoscaler) Demote(b reconciler.Bucket) {

func newAutoscaler(ctx context.Context, cfg *Config, stateAccessor st.StateAccessor) *autoscaler {
return &autoscaler{
logger: logging.FromContext(ctx),
logger: logging.FromContext(ctx).With(zap.String("component", "autoscaler")),
statefulSetClient: kubeclient.Get(ctx).AppsV1().StatefulSets(cfg.StatefulSetNamespace),
statefulSetName: cfg.StatefulSetName,
vpodLister: cfg.VPodLister,
Expand All @@ -133,8 +133,10 @@ func (a *autoscaler) Start(ctx context.Context) {
case <-ctx.Done():
return
case <-time.After(a.refreshPeriod):
a.logger.Infow("Triggering scale down", zap.Bool("isLeader", a.isLeader.Load()))
attemptScaleDown = true
case <-a.trigger:
a.logger.Infow("Triggering scale up", zap.Bool("isLeader", a.isLeader.Load()))
attemptScaleDown = false
}

Expand All @@ -145,9 +147,14 @@ func (a *autoscaler) Start(ctx context.Context) {
}

func (a *autoscaler) Autoscale(ctx context.Context) {
select {
// We trigger the autoscaler asynchronously by using the channel so that the scale down refresh
// period is reset.
a.trigger <- struct{}{}
case a.trigger <- struct{}{}:
default:
// We don't want to block if the channel's buffer is full, it will be triggered eventually.

}
}

func (a *autoscaler) syncAutoscale(ctx context.Context, attemptScaleDown bool) error {
Expand Down
66 changes: 50 additions & 16 deletions vendor/knative.dev/eventing/pkg/scheduler/statefulset/scheduler.go
Original file line number Diff line number Diff line change
Expand Up @@ -228,15 +228,6 @@ func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Pla
s.reservedMu.Lock()
defer s.reservedMu.Unlock()

vpods, err := s.vpodLister()
if err != nil {
return nil, err
}
vpodFromLister := st.GetVPod(vpod.GetKey(), vpods)
if vpodFromLister != nil && vpod.GetResourceVersion() != vpodFromLister.GetResourceVersion() {
return nil, fmt.Errorf("vpod to schedule has resource version different from one in indexer")
}

placements, err := s.scheduleVPod(vpod)
if placements == nil {
return placements, err
Expand All @@ -253,7 +244,7 @@ func (s *StatefulSetScheduler) Schedule(vpod scheduler.VPod) ([]duckv1alpha1.Pla
}

func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1.Placement, error) {
logger := s.logger.With("key", vpod.GetKey())
logger := s.logger.With("key", vpod.GetKey(), zap.String("component", "scheduler"))
// Get the current placements state
// Quite an expensive operation but safe and simple.
state, err := s.stateAccessor.State(s.reserved)
Expand All @@ -262,18 +253,60 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
return nil, err
}

// Clean up reserved from removed resources that don't appear in the vpod list anymore and have
// no pending resources.
reserved := make(map[types.NamespacedName]map[string]int32)
for k, v := range s.reserved {
if pendings, ok := state.Pending[k]; ok {
if pendings == 0 {
reserved[k] = map[string]int32{}
} else {
reserved[k] = v
}
}
}
s.reserved = reserved

logger.Debugw("scheduling", zap.Any("state", state))

existingPlacements := vpod.GetPlacements()
var left int32

// Remove unschedulable pods from placements
// Remove unschedulable or adjust overcommitted pods from placements
var placements []duckv1alpha1.Placement
if len(existingPlacements) > 0 {
placements = make([]duckv1alpha1.Placement, 0, len(existingPlacements))
for _, p := range existingPlacements {
if state.IsSchedulablePod(st.OrdinalFromPodName(p.PodName)) {
placements = append(placements, *p.DeepCopy())
p := p.DeepCopy()
ordinal := st.OrdinalFromPodName(p.PodName)

if !state.IsSchedulablePod(ordinal) {
continue
}

// Handle overcommitted pods.
if state.FreeCap[ordinal] < 0 {
// vr > free => vr: 9, overcommit 4 -> free: 0, vr: 5, pending: +4
// vr = free => vr: 4, overcommit 4 -> free: 0, vr: 0, pending: +4
// vr < free => vr: 3, overcommit 4 -> free: -1, vr: 0, pending: +3

overcommit := -state.FreeCap[ordinal]

if p.VReplicas >= overcommit {
state.SetFree(ordinal, 0)
state.Pending[vpod.GetKey()] += overcommit

p.VReplicas = p.VReplicas - overcommit
} else {
state.SetFree(ordinal, p.VReplicas-overcommit)
state.Pending[vpod.GetKey()] += p.VReplicas

p.VReplicas = 0
}
}

if p.VReplicas > 0 {
placements = append(placements, *p)
}
}
}
Expand Down Expand Up @@ -312,7 +345,7 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
} else { //Predicates and priorities must be used for scheduling
// Need less => scale down
if tr > vpod.GetVReplicas() && state.DeschedPolicy != nil {
logger.Debugw("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
logger.Infow("scaling down", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
placements = s.removeReplicasWithPolicy(vpod, tr-vpod.GetVReplicas(), placements)

// Do not trigger the autoscaler to avoid unnecessary churn
Expand All @@ -325,17 +358,18 @@ func (s *StatefulSetScheduler) scheduleVPod(vpod scheduler.VPod) ([]duckv1alpha1
// Need more => scale up
// rebalancing needed for all vreps most likely since there are pending vreps from previous reconciliation
// can fall here when vreps scaled up or after eviction
logger.Debugw("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
logger.Infow("scaling up with a rebalance (if needed)", zap.Int32("vreplicas", tr), zap.Int32("new vreplicas", vpod.GetVReplicas()))
placements, left = s.rebalanceReplicasWithPolicy(vpod, vpod.GetVReplicas(), placements)
}
}

if left > 0 {
// Give time for the autoscaler to do its job
logger.Info("not enough pod replicas to schedule. Awaiting autoscaler", zap.Any("placement", placements), zap.Int32("left", left))
logger.Infow("not enough pod replicas to schedule")

// Trigger the autoscaler
if s.autoscaler != nil {
logger.Infow("Awaiting autoscaler", zap.Any("placement", placements), zap.Int32("left", left))
s.autoscaler.Autoscale(s.ctx)
}

Expand Down
12 changes: 9 additions & 3 deletions vendor/knative.dev/eventing/test/e2e-common.sh
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ readonly CONFIG_TRACING_CONFIG="test/config/config-tracing.yaml"
readonly KNATIVE_EVENTING_MONITORING_YAML="test/config/monitoring.yaml"

# The number of controlplane replicas to run.
readonly REPLICAS=3
readonly REPLICAS=${REPLICAS:-3}

# Should deploy a Knative Monitoring as well
readonly DEPLOY_KNATIVE_MONITORING="${DEPLOY_KNATIVE_MONITORING:-1}"
Expand All @@ -76,15 +76,15 @@ UNINSTALL_LIST=()

# Setup the Knative environment for running tests.
function knative_setup() {
install_cert_manager || fail_test "Could not install Cert Manager"

install_knative_eventing "HEAD"

install_mt_broker || fail_test "Could not install MT Channel Based Broker"

enable_sugar || fail_test "Could not enable Sugar Controller Injection"

unleash_duck || fail_test "Could not unleash the chaos duck"

install_cert_manager || fail_test "Could not install Cert Manager"
}

function scale_controlplane() {
Expand Down Expand Up @@ -147,6 +147,12 @@ function install_knative_eventing() {
-f "${EVENTING_CORE_NAME}" || return 1
UNINSTALL_LIST+=( "${EVENTING_CORE_NAME}" )

local EVENTING_TLS_NAME=${TMP_DIR}/${EVENTING_TLS_YAML##*/}
sed "s/namespace: ${KNATIVE_DEFAULT_NAMESPACE}/namespace: ${SYSTEM_NAMESPACE}/g" ${EVENTING_TLS_YAML} > ${EVENTING_TLS_NAME}
kubectl apply \
-f "${EVENTING_TLS_NAME}" || return 1
UNINSTALL_LIST+=( "${EVENTING_TLS_NAME}" )

kubectl patch horizontalpodautoscalers.autoscaling -n ${SYSTEM_NAMESPACE} eventing-webhook -p '{"spec": {"minReplicas": '${REPLICAS}'}}' || return 1

else
Expand Down
6 changes: 6 additions & 0 deletions vendor/knative.dev/eventing/test/e2e-rekt-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -38,4 +38,10 @@ echo "Running E2E Reconciler Tests"

go_test_e2e -timeout=1h ./test/rekt || fail_test

echo "Running E2E Reconciler Tests with strict transport encryption"

kubectl apply -Rf "$(dirname "$0")/config-transport-encryption"

go_test_e2e -timeout=1h ./test/rekt -run TLS || fail_test

success
3 changes: 1 addition & 2 deletions vendor/knative.dev/eventing/test/e2e-upgrade-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/e2e-common.sh"
# Overrides

function knative_setup {
# Nothing to do at setup
true
install_cert_manager || return $?
}

function install_test_resources {
Expand Down
18 changes: 18 additions & 0 deletions vendor/knative.dev/pkg/webhook/webhook.go
Original file line number Diff line number Diff line change
Expand Up @@ -74,6 +74,17 @@ type Options struct {
// ControllerOptions encapsulates options for creating a new controller,
// including throttling and stats behavior.
ControllerOptions *controller.ControllerOptions

// EnableHTTP2 enables HTTP2 for webhooks.
// Mitigate CVE-2023-44487 by disabling HTTP2 by default until the Go
// standard library and golang.org/x/net are fully fixed.
// Right now, it is possible for authenticated and unauthenticated users to
// hold open HTTP2 connections and consume huge amounts of memory.
// See:
// * https://github.com/kubernetes/kubernetes/pull/121120
// * https://github.com/kubernetes/kubernetes/issues/121197
// * https://github.com/golang/go/issues/63417#issuecomment-1758858612
EnableHTTP2 bool
}

// Operation is the verb being operated on
Expand Down Expand Up @@ -237,12 +248,19 @@ func (wh *Webhook) Run(stop <-chan struct{}) error {
QuietPeriod: wh.Options.GracePeriod,
}

// If TLSNextProto is not nil, HTTP/2 support is not enabled automatically.
nextProto := map[string]func(*http.Server, *tls.Conn, http.Handler){}
if wh.Options.EnableHTTP2 {
nextProto = nil
}

server := &http.Server{
ErrorLog: log.New(&zapWrapper{logger}, "", 0),
Handler: drainer,
Addr: fmt.Sprint(":", wh.Options.Port),
TLSConfig: wh.tlsConfig,
ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6
TLSNextProto: nextProto,
}

var serve = server.ListenAndServe
Expand Down
20 changes: 17 additions & 3 deletions vendor/knative.dev/reconciler-test/pkg/environment/namespace.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,12 +122,26 @@ func (mr *MagicEnvironment) CreateNamespaceIfNeeded() error {
return fmt.Errorf("error copying the image pull Secret: %s", err)
}

_, err = c.CoreV1().ServiceAccounts(mr.namespace).Patch(context.Background(), sa.Name, types.StrategicMergePatchType,
[]byte(`{"imagePullSecrets":[{"name":"`+mr.imagePullSecretName+`"}]}`), metav1.PatchOptions{})
for _, secret := range sa.ImagePullSecrets {
if secret.Name == mr.imagePullSecretName {
return nil
}
}

// Prevent overwriting existing imagePullSecrets
patch := `[{"op":"add","path":"/imagePullSecrets/-","value":{"name":"` + mr.imagePullSecretName + `"}}]`
if len(sa.ImagePullSecrets) == 0 {
patch = `[{"op":"add","path":"/imagePullSecrets","value":[{"name":"` + mr.imagePullSecretName + `"}]}]`
}

_, err = c.CoreV1().ServiceAccounts(mr.namespace).Patch(context.Background(), sa.Name, types.JSONPatchType,
[]byte(patch), metav1.PatchOptions{})
if err != nil {
return fmt.Errorf("patch failed on NS/SA (%s/%s): %s", mr.namespace, sa.Name, err)
return fmt.Errorf("patch failed on NS/SA (%s/%s): %w",
mr.namespace, sa.Name, err)
}
}

return nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -17,3 +17,9 @@ kind: ServiceAccount
metadata:
name: {{ .name }}
namespace: {{ .namespace }}
{{ if .withPullSecrets }}
imagePullSecrets:
{{ range $_, $value := .withPullSecrets.secrets }}
- name: {{ $value }}
{{ end }}
{{ end }}
27 changes: 26 additions & 1 deletion vendor/knative.dev/reconciler-test/pkg/eventshub/rbac/rbac.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,9 @@ import (
"embed"

apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/reconciler-test/pkg/environment"

"knative.dev/reconciler-test/pkg/feature"
"knative.dev/reconciler-test/pkg/manifest"
Expand All @@ -30,11 +33,33 @@ import (
var templates embed.FS

// Install creates the necessary ServiceAccount, Role, RoleBinding for the eventshub.
// The resources are named according to the current namespace defined in the environment.
func Install(cfg map[string]interface{}) feature.StepFn {
	return func(ctx context.Context, t feature.T) {
		// Propagate any image pull secrets from the namespace's default SA into
		// the template config before rendering the manifests.
		WithPullSecrets(ctx, t)(cfg)

		_, err := manifest.InstallYamlFS(ctx, templates, cfg)
		// Re-running against an existing install is fine; only other errors are fatal.
		if err != nil && !apierrors.IsAlreadyExists(err) {
			t.Fatal(err)
		}
	}
}

// WithPullSecrets returns a manifest config function that copies the image
// pull secrets of the "default" ServiceAccount in the environment's namespace
// into the template config under cfg["withPullSecrets"]["secrets"].
// It fails the test immediately if the default SA cannot be read.
func WithPullSecrets(ctx context.Context, t feature.T) manifest.CfgFn {
	namespace := environment.FromContext(ctx).Namespace()
	sa, err := kubeclient.Get(ctx).CoreV1().ServiceAccounts(namespace).Get(ctx, "default", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("Failed to read default SA in %s namespace: %v", namespace, err)
	}

	return func(cfg map[string]interface{}) {
		// Nothing to propagate when the SA carries no pull secrets.
		if len(sa.ImagePullSecrets) == 0 {
			return
		}

		// Ensure the nested map exists without clobbering caller-provided values.
		if _, present := cfg["withPullSecrets"]; !present {
			cfg["withPullSecrets"] = map[string]interface{}{}
		}

		names := make([]string, 0, len(sa.ImagePullSecrets))
		for _, ref := range sa.ImagePullSecrets {
			names = append(names, ref.Name)
		}
		cfg["withPullSecrets"].(map[string]interface{})["secrets"] = names
	}
}
Loading

0 comments on commit 6cdf2d0

Please sign in to comment.