Commit

Merge pull request #1479 from sanchezl/bump-1.26.1
Bump to k8s 1.26.1
openshift-merge-robot committed Mar 1, 2023
2 parents f854081 + 22d4846 commit 019663f
Showing 80 changed files with 1,832 additions and 65,311 deletions.
1 change: 1 addition & 0 deletions .go-version
@@ -0,0 +1 @@
1.19.5
3,134 changes: 0 additions & 3,134 deletions CHANGELOG/CHANGELOG-1.10.md

This file was deleted.

2,576 changes: 0 additions & 2,576 deletions CHANGELOG/CHANGELOG-1.11.md

This file was deleted.

2,342 changes: 0 additions & 2,342 deletions CHANGELOG/CHANGELOG-1.12.md

This file was deleted.

2,224 changes: 0 additions & 2,224 deletions CHANGELOG/CHANGELOG-1.13.md

This file was deleted.

2,225 changes: 0 additions & 2,225 deletions CHANGELOG/CHANGELOG-1.14.md

This file was deleted.

2,223 changes: 0 additions & 2,223 deletions CHANGELOG/CHANGELOG-1.15.md

This file was deleted.

3,027 changes: 0 additions & 3,027 deletions CHANGELOG/CHANGELOG-1.16.md

This file was deleted.

3,176 changes: 0 additions & 3,176 deletions CHANGELOG/CHANGELOG-1.17.md

This file was deleted.

3,421 changes: 0 additions & 3,421 deletions CHANGELOG/CHANGELOG-1.18.md

This file was deleted.

4,471 changes: 0 additions & 4,471 deletions CHANGELOG/CHANGELOG-1.19.md

This file was deleted.

580 changes: 0 additions & 580 deletions CHANGELOG/CHANGELOG-1.2.md

This file was deleted.

3,751 changes: 0 additions & 3,751 deletions CHANGELOG/CHANGELOG-1.20.md

This file was deleted.

3,258 changes: 0 additions & 3,258 deletions CHANGELOG/CHANGELOG-1.21.md

This file was deleted.

3,963 changes: 0 additions & 3,963 deletions CHANGELOG/CHANGELOG-1.22.md

This file was deleted.

3,617 changes: 0 additions & 3,617 deletions CHANGELOG/CHANGELOG-1.23.md

This file was deleted.

2,956 changes: 0 additions & 2,956 deletions CHANGELOG/CHANGELOG-1.24.md

This file was deleted.

2,280 changes: 0 additions & 2,280 deletions CHANGELOG/CHANGELOG-1.25.md

This file was deleted.

703 changes: 676 additions & 27 deletions CHANGELOG/CHANGELOG-1.26.md

Large diffs are not rendered by default.

964 changes: 0 additions & 964 deletions CHANGELOG/CHANGELOG-1.3.md

This file was deleted.

1,431 changes: 0 additions & 1,431 deletions CHANGELOG/CHANGELOG-1.4.md

This file was deleted.

1,327 changes: 0 additions & 1,327 deletions CHANGELOG/CHANGELOG-1.5.md

This file was deleted.

2,883 changes: 0 additions & 2,883 deletions CHANGELOG/CHANGELOG-1.6.md

This file was deleted.

3,292 changes: 0 additions & 3,292 deletions CHANGELOG/CHANGELOG-1.7.md

This file was deleted.

3,088 changes: 0 additions & 3,088 deletions CHANGELOG/CHANGELOG-1.8.md

This file was deleted.

2,642 changes: 0 additions & 2,642 deletions CHANGELOG/CHANGELOG-1.9.md

This file was deleted.

5 changes: 4 additions & 1 deletion api/openapi-spec/swagger.json

Some generated files are not rendered by default.

5 changes: 4 additions & 1 deletion api/openapi-spec/v3/api__v1_openapi.json
@@ -6249,7 +6249,10 @@
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map"
},
"limits": {
"additionalProperties": {
5 changes: 4 additions & 1 deletion api/openapi-spec/v3/apis__apps__v1_openapi.json
@@ -4057,7 +4057,10 @@
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map"
},
"limits": {
"additionalProperties": {
5 changes: 4 additions & 1 deletion api/openapi-spec/v3/apis__batch__v1_openapi.json
@@ -3231,7 +3231,10 @@
"default": {}
},
"type": "array",
"x-kubernetes-list-type": "set"
"x-kubernetes-list-map-keys": [
"name"
],
"x-kubernetes-list-type": "map"
},
"limits": {
"additionalProperties": {
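The three OpenAPI hunks above make the same change: the list published just before "limits" (which in v1.26 appears to be the new "claims" field of ResourceRequirements) switches from x-kubernetes-list-type: set to a map-type list keyed by "name". These extensions are generated from list-type markers on the Go API field; the sketch below is an editorial illustration of such a declaration, not code from this commit, and the type and field names are assumed.

package api

// ResourceClaim is a trimmed-down element type for the illustration below.
type ResourceClaim struct {
	// Name is the merge key for the list that holds these entries.
	Name string `json:"name"`
}

// ResourceRequirements is reduced to the one list whose extensions changed.
type ResourceRequirements struct {
	// With listType=map and listMapKey=name, server-side apply merges list
	// entries by their "name" field; with listType=set each whole element is
	// its own identity, which is the behavior the hunks above move away from.
	// +listType=map
	// +listMapKey=name
	Claims []ResourceClaim `json:"claims,omitempty"`
}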
2 changes: 1 addition & 1 deletion build/build-image/cross/VERSION
@@ -1 +1 @@
v1.26.0-go1.19.4-bullseye.1
v1.26.0-go1.19.5-bullseye.0
2 changes: 1 addition & 1 deletion build/common.sh
@@ -96,7 +96,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730

# These are the default versions (image tags) for their respective base images.
readonly __default_distroless_iptables_version=v0.1.2
readonly __default_go_runner_version=v2.3.1-go1.19.4-bullseye.0
readonly __default_go_runner_version=v2.3.1-go1.19.5-bullseye.0
readonly __default_setcap_version=bullseye-v1.3.0

# These are the base images for the Docker-wrapped binaries.
7 changes: 4 additions & 3 deletions build/dependencies.yaml
@@ -88,8 +88,9 @@ dependencies:

# Golang
- name: "golang: upstream version"
version: 1.19.4
version: 1.19.5
refPaths:
- path: .go-version
- path: build/build-image/cross/VERSION
- path: staging/publishing/rules.yaml
match: 'default-go-version\: \d+.\d+(alpha|beta|rc)?\.?(\d+)?'
@@ -109,7 +110,7 @@
match: minimum_go_version=go([0-9]+\.[0-9]+)

- name: "registry.k8s.io/kube-cross: dependents"
version: v1.26.0-go1.19.4-bullseye.1
version: v1.26.0-go1.19.5-bullseye.0
refPaths:
- path: build/build-image/cross/VERSION

@@ -139,7 +140,7 @@
match: configs\[DistrolessIptables\] = Config{list\.BuildImageRegistry, "distroless-iptables", "v([0-9]+)\.([0-9]+)\.([0-9]+)"}

- name: "registry.k8s.io/go-runner: dependents"
version: v2.3.1-go1.19.4-bullseye.0
version: v2.3.1-go1.19.5-bullseye.0
refPaths:
- path: build/common.sh
match: __default_go_runner_version=
@@ -27,7 +27,7 @@ spec:
nodeSelector:
kubernetes.io/os: linux
containers:
- image: registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.33
- image: registry.k8s.io/kas-network-proxy/proxy-agent:v0.0.35
name: konnectivity-agent
command: ["/proxy-agent"]
args: [
2 changes: 1 addition & 1 deletion cluster/gce/manifests/konnectivity-server.yaml
@@ -20,7 +20,7 @@ spec:
{{ disallow_privilege_escalation}}
{{ capabilities }}
{{ drop_capabilities }}
image: registry.k8s.io/kas-network-proxy/proxy-server:v0.0.33
image: registry.k8s.io/kas-network-proxy/proxy-server:v0.0.35
resources:
requests:
cpu: 25m
4 changes: 2 additions & 2 deletions hack/update-vendor-licenses.sh
@@ -201,8 +201,8 @@ for PACKAGE in ${modules}; do

# if there are no files vendored under this package...
if [[ -z "$(find "${DEPS_DIR}/${PACKAGE}" -mindepth 1 -maxdepth 1 -type f)" ]]; then
# and we have the same number of submodules as subdirectories...
if [[ "$(find "${DEPS_DIR}/${PACKAGE}/" -mindepth 1 -maxdepth 1 -type d | wc -l)" -eq "$(echo "${modules}" | grep -cE "^${PACKAGE}/")" ]]; then
# and we have at least the same number of submodules as subdirectories...
if [[ "$(find "${DEPS_DIR}/${PACKAGE}/" -mindepth 1 -maxdepth 1 -type d | wc -l)" -le "$(echo "${modules}" | grep -cE "^${PACKAGE}/")" ]]; then
echo "Only submodules of ${PACKAGE} are vendored, skipping" >&2
continue
fi
12 changes: 12 additions & 0 deletions hack/verify-vendor.sh
@@ -96,5 +96,17 @@ if [[ ${ret} -gt 0 ]]; then
exit ${ret}
fi

# Ensure we can tidy every repo using only its recorded versions
for repo in $(kube::util::list_staging_repos); do
pushd "${_kubetmp}/staging/src/k8s.io/${repo}" >/dev/null 2>&1
echo "Tidying k8s.io/${repo}..."
GODEBUG=gocacheverify=1 go mod tidy
popd >/dev/null 2>&1
done
pushd "${_kubetmp}" >/dev/null 2>&1
echo "Tidying k8s.io/kubernetes..."
GODEBUG=gocacheverify=1 go mod tidy
popd >/dev/null 2>&1

echo "Vendor Verified."
# ex: ts=2 sw=2 et filetype=sh
2 changes: 1 addition & 1 deletion openshift-hack/images/hyperkube/Dockerfile.rhel
@@ -13,4 +13,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
io.openshift.tags="openshift,hyperkube" \
io.openshift.build.versions="kubernetes=1.26.0"
io.openshift.build.versions="kubernetes=1.26.1"
60 changes: 39 additions & 21 deletions pkg/controller/daemon/daemon_controller.go
@@ -898,6 +898,32 @@ func (dsc *DaemonSetsController) podsShouldBeOnNode(
return nodesNeedingDaemonPods, podsToDelete
}

func (dsc *DaemonSetsController) updateDaemonSet(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash, key string, old []*apps.ControllerRevision) error {
err := dsc.manage(ctx, ds, nodeList, hash)
if err != nil {
return err
}

// Process rolling updates if we're ready.
if dsc.expectations.SatisfiedExpectations(key) {
switch ds.Spec.UpdateStrategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
case apps.RollingUpdateDaemonSetStrategyType:
err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
}
if err != nil {
return err
}
}

err = dsc.cleanupHistory(ctx, ds, old)
if err != nil {
return fmt.Errorf("failed to clean up revisions of DaemonSet: %w", err)
}

return nil
}

// manage manages the scheduling and running of Pods of ds on nodes.
// After figuring out which nodes should run a Pod of ds but not yet running one and
// which nodes should not run a Pod of ds but currently running one, it calls function
@@ -1147,7 +1173,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *

err = storeDaemonSetStatus(ctx, dsc.kubeClient.AppsV1().DaemonSets(ds.Namespace), ds, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady, updatedNumberScheduled, numberAvailable, numberUnavailable, updateObservedGen)
if err != nil {
return fmt.Errorf("error storing status for daemon set %#v: %v", ds, err)
return fmt.Errorf("error storing status for daemon set %#v: %w", ds, err)
}

// Resync the DaemonSet after MinReadySeconds as a last line of defense to guard against clock-skew.
@@ -1221,29 +1247,21 @@ func (dsc *DaemonSetsController) syncDaemonSet(ctx context.Context, key string)
return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, false)
}

err = dsc.manage(ctx, ds, nodeList, hash)
if err != nil {
err = dsc.updateDaemonSet(ctx, ds, nodeList, hash, dsKey, old)
statusErr := dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
switch {
case err != nil && statusErr != nil:
// If there was an error, and we failed to update status,
// log it and return the original error.
klog.ErrorS(statusErr, "Failed to update status", "daemonSet", klog.KObj(ds))
return err
case err != nil:
return err
case statusErr != nil:
return statusErr
}

// Process rolling updates if we're ready.
if dsc.expectations.SatisfiedExpectations(dsKey) {
switch ds.Spec.UpdateStrategy.Type {
case apps.OnDeleteDaemonSetStrategyType:
case apps.RollingUpdateDaemonSetStrategyType:
err = dsc.rollingUpdate(ctx, ds, nodeList, hash)
}
if err != nil {
return err
}
}

err = dsc.cleanupHistory(ctx, ds, old)
if err != nil {
return fmt.Errorf("failed to clean up revisions of DaemonSet: %v", err)
}

return dsc.updateDaemonSetStatus(ctx, ds, nodeList, hash, true)
return nil
}

// NodeShouldRunDaemonPod checks a set of preconditions against a (node,daemonset) and returns a
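The syncDaemonSet hunk above folds the manage, rolling-update, and history-cleanup steps into the new updateDaemonSet helper, then always attempts a status update and prefers the sync error over the status error when both fail. A standalone sketch of that error-precedence pattern follows; the helper names are hypothetical and this is not the controller's actual API.

package main

import (
	"errors"
	"fmt"
	"log"
)

// syncOnce runs the main sync, always attempts the status update, and returns
// the sync error first, logging (rather than dropping) a secondary status error.
func syncOnce(doSync, updateStatus func() error) error {
	syncErr := doSync()
	statusErr := updateStatus()
	switch {
	case syncErr != nil && statusErr != nil:
		log.Printf("failed to update status: %v", statusErr)
		return syncErr
	case syncErr != nil:
		return syncErr
	default:
		return statusErr // nil when both succeeded
	}
}

func main() {
	err := syncOnce(
		func() error { return errors.New("sync error") },
		func() error { return errors.New("status error") },
	)
	fmt.Println(err) // prints "sync error"
}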
107 changes: 104 additions & 3 deletions pkg/controller/daemon/daemon_controller_test.go
@@ -18,6 +18,7 @@ package daemon

import (
"context"
"errors"
"fmt"
"reflect"
"sort"
@@ -255,7 +256,7 @@ func (f *fakePodControl) CreatePods(ctx context.Context, namespace string, templ
f.Lock()
defer f.Unlock()
if err := f.FakePodControl.CreatePods(ctx, namespace, template, object, controllerRef); err != nil {
return fmt.Errorf("failed to create pod for DaemonSet")
return fmt.Errorf("failed to create pod for DaemonSet: %w", err)
}

pod := &v1.Pod{
@@ -387,14 +388,23 @@ func validateSyncDaemonSets(manager *daemonSetsController, fakePodControl *fakeP
}

func expectSyncDaemonSets(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int) {
t.Helper()
expectSyncDaemonSetsWithError(t, manager, ds, podControl, expectedCreates, expectedDeletes, expectedEvents, nil)
}

func expectSyncDaemonSetsWithError(t *testing.T, manager *daemonSetsController, ds *apps.DaemonSet, podControl *fakePodControl, expectedCreates, expectedDeletes int, expectedEvents int, expectedError error) {
t.Helper()
key, err := controller.KeyFunc(ds)
if err != nil {
t.Fatal("could not get key for daemon")
}

err = manager.syncHandler(context.TODO(), key)
if err != nil {
if expectedError != nil && !errors.Is(err, expectedError) {
t.Fatalf("Unexpected error returned from syncHandler: %v", err)
}

if expectedError == nil && err != nil {
t.Log(err)
}

@@ -771,7 +781,7 @@ func TestSimpleDaemonSetPodCreateErrors(t *testing.T) {
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
manager, podControl, _, err := newTestController(ds)
manager, podControl, clientset, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}
@@ -782,6 +792,17 @@
t.Fatal(err)
}

var updated *apps.DaemonSet
clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if action.GetSubresource() != "status" {
return false, nil, nil
}
if u, ok := action.(core.UpdateAction); ok {
updated = u.GetObject().(*apps.DaemonSet)
}
return false, nil, nil
})

expectSyncDaemonSets(t, manager, ds, podControl, podControl.FakePodControl.CreateLimit, 0, 0)

expectedLimit := 0
@@ -791,6 +812,18 @@
if podControl.FakePodControl.CreateCallCount > expectedLimit {
t.Errorf("Unexpected number of create calls. Expected <= %d, saw %d\n", podControl.FakePodControl.CreateLimit*2, podControl.FakePodControl.CreateCallCount)
}
if updated == nil {
t.Fatalf("Failed to get updated status")
}
if got, want := updated.Status.DesiredNumberScheduled, int32(podControl.FakePodControl.CreateLimit)*10; got != want {
t.Errorf("Status.DesiredNumberScheduled = %v, want %v", got, want)
}
if got, want := updated.Status.CurrentNumberScheduled, int32(podControl.FakePodControl.CreateLimit); got != want {
t.Errorf("Status.CurrentNumberScheduled = %v, want %v", got, want)
}
if got, want := updated.Status.UpdatedNumberScheduled, int32(podControl.FakePodControl.CreateLimit); got != want {
t.Errorf("Status.UpdatedNumberScheduled = %v, want %v", got, want)
}
}
}

@@ -856,6 +889,74 @@ func TestSimpleDaemonSetUpdatesStatusAfterLaunchingPods(t *testing.T) {
}
}

func TestSimpleDaemonSetUpdatesStatusError(t *testing.T) {
var (
syncErr = fmt.Errorf("sync error")
statusErr = fmt.Errorf("status error")
)

testCases := []struct {
desc string

hasSyncErr bool
hasStatusErr bool

expectedErr error
}{
{
desc: "sync error",
hasSyncErr: true,
hasStatusErr: false,
expectedErr: syncErr,
},
{
desc: "status error",
hasSyncErr: false,
hasStatusErr: true,
expectedErr: statusErr,
},
{
desc: "sync and status error",
hasSyncErr: true,
hasStatusErr: true,
expectedErr: syncErr,
},
}

for _, tc := range testCases {
t.Run(tc.desc, func(t *testing.T) {
for _, strategy := range updateStrategies() {
ds := newDaemonSet("foo")
ds.Spec.UpdateStrategy = *strategy
manager, podControl, clientset, err := newTestController(ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
}

if tc.hasSyncErr {
podControl.FakePodControl.Err = syncErr
}

clientset.PrependReactor("update", "daemonsets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
if action.GetSubresource() != "status" {
return false, nil, nil
}

if tc.hasStatusErr {
return true, nil, statusErr
} else {
return false, nil, nil
}
})

manager.dsStore.Add(ds)
addNodes(manager.nodeStore, 0, 1, nil)
expectSyncDaemonSetsWithError(t, manager, ds, podControl, 1, 0, 0, tc.expectedErr)
}
})
}
}

// DaemonSets should do nothing if there aren't any nodes
func TestNoNodesDoesNothing(t *testing.T) {
for _, strategy := range updateStrategies() {
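The new tests above drive the controller against client-go's fake clientset and use PrependReactor to observe or fail the DaemonSet status update. The snippet below is a rough, self-contained illustration of that reactor mechanism, separate from the controller tests; the object name and error text are made up.

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	k8stesting "k8s.io/client-go/testing"
)

func main() {
	ds := &appsv1.DaemonSet{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}}
	clientset := fake.NewSimpleClientset(ds)

	// Intercept only status-subresource updates; everything else falls through
	// to the fake clientset's default object tracker.
	clientset.PrependReactor("update", "daemonsets", func(action k8stesting.Action) (bool, runtime.Object, error) {
		if action.GetSubresource() != "status" {
			return false, nil, nil
		}
		return true, nil, fmt.Errorf("simulated status error")
	})

	_, err := clientset.AppsV1().DaemonSets("default").UpdateStatus(context.TODO(), ds, metav1.UpdateOptions{})
	fmt.Println(err) // prints "simulated status error"
}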
