Revert "Revert "OCPBUGS-16166: Update to Kubernetes 1.27.4""
This reverts commit 17d9911.
soltysh committed Aug 8, 2023
1 parent e123787 commit 276d06a
Showing 70 changed files with 11,478 additions and 569 deletions.
2 changes: 1 addition & 1 deletion .go-version
@@ -1 +1 @@
-1.20.5
+1.20.6
281 changes: 208 additions & 73 deletions CHANGELOG/CHANGELOG-1.27.md

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion build/build-image/cross/VERSION
@@ -1 +1 @@
-v1.27.0-go1.20.5-bullseye.0
+v1.27.0-go1.20.6-bullseye.0
2 changes: 1 addition & 1 deletion build/common.sh
@@ -96,7 +96,7 @@ readonly KUBE_CONTAINER_RSYNC_PORT=8730

# These are the default versions (image tags) for their respective base images.
readonly __default_distroless_iptables_version=v0.2.3
-readonly __default_go_runner_version=v2.3.1-go1.20.5-bullseye.0
+readonly __default_go_runner_version=v2.3.1-go1.20.6-bullseye.0
readonly __default_setcap_version=bullseye-v1.4.2

# These are the base images for the Docker-wrapped binaries.
6 changes: 3 additions & 3 deletions build/dependencies.yaml
@@ -95,7 +95,7 @@ dependencies:

# Golang
- name: "golang: upstream version"
-version: 1.20.5
+version: 1.20.6
refPaths:
- path: .go-version
- path: build/build-image/cross/VERSION
@@ -117,7 +117,7 @@
match: minimum_go_version=go([0-9]+\.[0-9]+)

- name: "registry.k8s.io/kube-cross: dependents"
-version: v1.27.0-go1.20.5-bullseye.0
+version: v1.27.0-go1.20.6-bullseye.0
refPaths:
- path: build/build-image/cross/VERSION

@@ -147,7 +147,7 @@
match: configs\[DistrolessIptables\] = Config{list\.BuildImageRegistry, "distroless-iptables", "v([0-9]+)\.([0-9]+)\.([0-9]+)"}

- name: "registry.k8s.io/go-runner: dependents"
-version: v2.3.1-go1.20.5-bullseye.0
+version: v2.3.1-go1.20.6-bullseye.0
refPaths:
- path: build/common.sh
match: __default_go_runner_version=
6 changes: 0 additions & 6 deletions cmd/kubeadm/app/cmd/phases/workflow/runner_test.go
@@ -401,14 +401,8 @@ func TestBindToCommandArgRequirements(t *testing.T) {
continue
}

-// Ensure it is the expected function
-if reflect.ValueOf(cCmd.Args).Pointer() != reflect.ValueOf(args.args).Pointer() {
-t.Error("The function pointers where not equal.")
-}
-
// Test passing argument set
err := cCmd.Args(cCmd, args.pass)
-
if err != nil {
t.Errorf("command %s should validate the args: %v\n %v", cCmd.Name(), args.pass, err)
}
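A note on the deleted assertion above: it compared two function values by their code pointers via reflect. The commit itself does not explain the removal, but as a hedged illustration of why such pointer comparisons are fragile (all names below are invented for the example): behaviorally identical function values need not share a code pointer, so the check can fail after harmless refactoring.

package main

import (
	"fmt"
	"reflect"
)

// validate is a stand-in for a cobra-style args validator.
func validate(n int) error { return nil }

func main() {
	direct := validate                                  // the same function value
	wrapped := func(n int) error { return validate(n) } // identical behavior, distinct value

	// Pointer equality happens to hold for the direct assignment...
	fmt.Println(reflect.ValueOf(direct).Pointer() == reflect.ValueOf(validate).Pointer()) // true

	// ...but not for the behaviorally equivalent wrapper.
	fmt.Println(reflect.ValueOf(wrapped).Pointer() == reflect.ValueOf(validate).Pointer()) // false
}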
4 changes: 3 additions & 1 deletion cmd/kubeadm/app/util/pkiutil/pki_helpers.go
@@ -631,10 +631,12 @@ func GeneratePrivateKey(keyType x509.PublicKeyAlgorithm) (crypto.Signer, error)

// NewSignedCert creates a signed certificate using the given CA certificate and key
func NewSignedCert(cfg *CertConfig, key crypto.Signer, caCert *x509.Certificate, caKey crypto.Signer, isCA bool) (*x509.Certificate, error) {
-serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64))
+// returns a uniform random value in [0, max-1), then add 1 to serial to make it a uniform random value in [1, max).
+serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
if err != nil {
return nil, err
}
+serial = new(big.Int).Add(serial, big.NewInt(1))
if len(cfg.CommonName) == 0 {
return nil, errors.New("must specify a CommonName")
}
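For reference, the new serial-number pattern in isolation: cryptorand.Int returns a uniform value in [0, max), so drawing from [0, MaxInt64-1) and adding 1 yields a uniform serial in [1, MaxInt64), ruling out the zero serial that the previous draw over [0, MaxInt64) could produce. A minimal, self-contained sketch (the newSerial helper is illustrative, not part of the patch):

package main

import (
	cryptorand "crypto/rand"
	"fmt"
	"math"
	"math/big"
)

// newSerial mirrors the hunk above: draw uniformly from [0, MaxInt64-1),
// then shift by 1 so the certificate serial is uniform in [1, MaxInt64).
func newSerial() (*big.Int, error) {
	serial, err := cryptorand.Int(cryptorand.Reader, new(big.Int).SetInt64(math.MaxInt64-1))
	if err != nil {
		return nil, err
	}
	return new(big.Int).Add(serial, big.NewInt(1)), nil
}

func main() {
	s, err := newSerial()
	if err != nil {
		panic(err)
	}
	fmt.Println(s.Sign() > 0) // always true: a zero serial can no longer occur
}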
2 changes: 1 addition & 1 deletion go.mod
@@ -38,7 +38,7 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da
github.com/golang/mock v1.6.0
github.com/golang/protobuf v1.5.3
-github.com/google/cadvisor v0.47.1
+github.com/google/cadvisor v0.47.2
github.com/google/cel-go v0.12.6
github.com/google/gnostic v0.5.7-v3refs
github.com/google/go-cmp v0.5.9
2 changes: 1 addition & 1 deletion openshift-hack/images/hyperkube/Dockerfile.rhel
@@ -13,4 +13,4 @@ COPY --from=builder /tmp/build/* /usr/bin/
LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \
io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." \
io.openshift.tags="openshift,hyperkube" \
io.openshift.build.versions="kubernetes=1.27.3"
io.openshift.build.versions="kubernetes=1.27.4"
7 changes: 7 additions & 0 deletions pkg/apis/core/validation/validation.go
@@ -4727,7 +4727,14 @@ func ValidatePodUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) fiel
// already effectively nil, no change needed
case mungedPodSpec.Affinity == nil && oldNodeAffinity != nil:
mungedPodSpec.Affinity = &core.Affinity{NodeAffinity: oldNodeAffinity} // +k8s:verify-mutation:reason=clone
+case mungedPodSpec.Affinity != nil && oldPod.Spec.Affinity == nil &&
+mungedPodSpec.Affinity.PodAntiAffinity == nil && mungedPodSpec.Affinity.PodAffinity == nil:
+// We ensure no other fields are being changed, but the NodeAffinity. If that's the case, and the
+// old pod's affinity is nil, we set the mungedPodSpec's affinity to nil.
+mungedPodSpec.Affinity = nil // +k8s:verify-mutation:reason=clone
default:
+// The node affinity is being updated and the old pod Affinity is not nil.
+// We set the mungedPodSpec's node affinity to the old pod's node affinity.
mungedPodSpec.Affinity.NodeAffinity = oldNodeAffinity // +k8s:verify-mutation:reason=clone
}
}
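ValidatePodUpdate compares the old spec against a "munged" copy of the new one in which permitted mutations have been reverted; the case added above reverts a node-affinity-only change on top of a previously nil affinity back to nil, so it no longer registers as a forbidden update. A hedged, self-contained sketch of that switch over simplified stand-in types (revertNodeAffinity and the trimmed structs are illustrative, not the real core API):

package main

import "fmt"

// Simplified stand-ins for the core affinity types used in the hunk above.
type NodeAffinity struct{ Rule string }
type PodAffinity struct{ Rule string }
type PodAntiAffinity struct{ Rule string }

type Affinity struct {
	NodeAffinity    *NodeAffinity
	PodAffinity     *PodAffinity
	PodAntiAffinity *PodAntiAffinity
}

// revertNodeAffinity rewrites the munged copy of the new affinity so that a
// permitted node-affinity change becomes invisible to the later old-vs-munged
// comparison, while any other affinity change still shows up as a diff.
func revertNodeAffinity(munged, old *Affinity) *Affinity {
	var oldNodeAffinity *NodeAffinity
	if old != nil {
		oldNodeAffinity = old.NodeAffinity
	}
	switch {
	case munged == nil && oldNodeAffinity == nil:
		// already effectively nil, no change needed
	case munged == nil && oldNodeAffinity != nil:
		munged = &Affinity{NodeAffinity: oldNodeAffinity}
	case munged != nil && old == nil &&
		munged.PodAffinity == nil && munged.PodAntiAffinity == nil:
		// only NodeAffinity was added on top of a nil affinity: revert to nil
		munged = nil
	default:
		munged.NodeAffinity = oldNodeAffinity
	}
	return munged
}

func main() {
	// A gated pod that only gained node affinity: the munged copy collapses
	// back to nil, so the update is not treated as a forbidden change.
	got := revertNodeAffinity(&Affinity{NodeAffinity: &NodeAffinity{Rule: "in:foo"}}, nil)
	fmt.Println(got == nil) // true
}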
111 changes: 111 additions & 0 deletions pkg/apis/core/validation/validation_test.go
@@ -13790,6 +13790,117 @@ func TestValidatePodUpdate(t *testing.T) {
},
err: "spec.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0]: Invalid value:",
test: "empty NodeSelectorTerm (selects nothing) cannot become populated (selects something)",
}, {
old: core.Pod{
Spec: core.PodSpec{
Affinity: nil,
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
new: core.Pod{
Spec: core.PodSpec{
Affinity: &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchExpressions: []core.NodeSelectorRequirement{{
Key: "expr",
Operator: core.NodeSelectorOpIn,
Values: []string{"foo"},
}},
}},
},
},
},
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
opts: PodValidationOptions{
AllowMutableNodeSelectorAndNodeAffinity: true,
},
test: "nil affinity can be mutated for gated pods",
},
{
old: core.Pod{
Spec: core.PodSpec{
Affinity: nil,
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
new: core.Pod{
Spec: core.PodSpec{
Affinity: &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchExpressions: []core.NodeSelectorRequirement{{
Key: "expr",
Operator: core.NodeSelectorOpIn,
Values: []string{"foo"},
}},
}},
},
},
PodAffinity: &core.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
{
TopologyKey: "foo",
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
},
},
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
opts: PodValidationOptions{
AllowMutableNodeSelectorAndNodeAffinity: true,
},
err: "pod updates may not change fields other than",
test: "the podAffinity cannot be updated on gated pods",
},
{
old: core.Pod{
Spec: core.PodSpec{
Affinity: nil,
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
new: core.Pod{
Spec: core.PodSpec{
Affinity: &core.Affinity{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{{
MatchExpressions: []core.NodeSelectorRequirement{{
Key: "expr",
Operator: core.NodeSelectorOpIn,
Values: []string{"foo"},
}},
}},
},
},
PodAntiAffinity: &core.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
{
TopologyKey: "foo",
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
},
},
SchedulingGates: []core.PodSchedulingGate{{Name: "baz"}},
},
},
opts: PodValidationOptions{
AllowMutableNodeSelectorAndNodeAffinity: true,
},
err: "pod updates may not change fields other than",
test: "the podAntiAffinity cannot be updated on gated pods",
},
}
for _, test := range tests {
78 changes: 50 additions & 28 deletions pkg/controller/cronjob/utils.go
@@ -72,10 +72,10 @@ func deleteFromActiveList(cj *batchv1.CronJob, uid types.UID) {
// mostRecentScheduleTime returns:
// - the last schedule time or CronJob's creation time,
// - the most recent time a Job should be created or nil, if that's after now,
-// - number of missed schedules
+// - boolean indicating an excessive number of missed schedules,
// - error in an edge case where the schedule specification is grammatically correct,
// but logically doesn't make sense (31st day for months with only 30 days, for example).
-func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, includeStartingDeadlineSeconds bool) (time.Time, *time.Time, int64, error) {
+func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, includeStartingDeadlineSeconds bool) (time.Time, *time.Time, bool, error) {
earliestTime := cj.ObjectMeta.CreationTimestamp.Time
if cj.Status.LastScheduleTime != nil {
earliestTime = cj.Status.LastScheduleTime.Time
@@ -93,24 +93,62 @@ func mostRecentScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Sc
t2 := schedule.Next(t1)

if now.Before(t1) {
-return earliestTime, nil, 0, nil
+return earliestTime, nil, false, nil
}
if now.Before(t2) {
-return earliestTime, &t1, 1, nil
+return earliestTime, &t1, false, nil
}

// It is possible for cron.ParseStandard("59 23 31 2 *") to return an invalid schedule
// minute - 59, hour - 23, dom - 31, month - 2, and dow is optional, clearly 31 is invalid
// In this case the timeBetweenTwoSchedules will be 0, and we error out the invalid schedule
timeBetweenTwoSchedules := int64(t2.Sub(t1).Round(time.Second).Seconds())
if timeBetweenTwoSchedules < 1 {
-return earliestTime, nil, 0, fmt.Errorf("time difference between two schedules is less than 1 second")
+return earliestTime, nil, false, fmt.Errorf("time difference between two schedules is less than 1 second")
}
+// this logic used for calculating number of missed schedules does a rough
+// approximation, by calculating a diff between two schedules (t1 and t2),
+// and counting how many of these will fit in between last schedule and now
timeElapsed := int64(now.Sub(t1).Seconds())
numberOfMissedSchedules := (timeElapsed / timeBetweenTwoSchedules) + 1
-mostRecentTime := time.Unix(t1.Unix()+((numberOfMissedSchedules-1)*timeBetweenTwoSchedules), 0).UTC()

-return earliestTime, &mostRecentTime, numberOfMissedSchedules, nil
+var mostRecentTime time.Time
+// to get the most recent time accurate for regular schedules and the ones
+// specified with @every form, we first need to calculate the potential earliest
+// time by multiplying the initial number of missed schedules by its interval,
+// this is critical to ensure @every starts at the correct time, this explains
+// the numberOfMissedSchedules-1, the additional -1 serves there to go back
+// in time one more time unit, and let the cron library calculate a proper
+// schedule, for case where the schedule is not consistent, for example
+// something like 30 6-16/4 * * 1-5
+potentialEarliest := t1.Add(time.Duration((numberOfMissedSchedules-1-1)*timeBetweenTwoSchedules) * time.Second)
+for t := schedule.Next(potentialEarliest); !t.After(now); t = schedule.Next(t) {
+mostRecentTime = t
+}
+
+// An object might miss several starts. For example, if
+// controller gets wedged on friday at 5:01pm when everyone has
+// gone home, and someone comes in on tuesday AM and discovers
+// the problem and restarts the controller, then all the hourly
+// jobs, more than 80 of them for one hourly cronJob, should
+// all start running with no further intervention (if the cronJob
+// allows concurrency and late starts).
+//
+// However, if there is a bug somewhere, or incorrect clock
+// on controller's server or apiservers (for setting creationTimestamp)
+// then there could be so many missed start times (it could be off
+// by decades or more), that it would eat up all the CPU and memory
+// of this controller. In that case, we want to not try to list
+// all the missed start times.
+//
+// I've somewhat arbitrarily picked 100, as more than 80,
+// but less than "lots".
+tooManyMissed := numberOfMissedSchedules > 100
+
+if mostRecentTime.IsZero() {
+return earliestTime, nil, tooManyMissed, nil
+}
+return earliestTime, &mostRecentTime, tooManyMissed, nil
}

// nextScheduleTimeDuration returns the time duration to requeue based on
@@ -136,33 +174,17 @@ func nextScheduleTimeDuration(cj *batchv1.CronJob, now time.Time, schedule cron.
// and before now, or nil if no unmet schedule times, and an error.
// If there are too many (>100) unstarted times, it will also record a warning.
func nextScheduleTime(logger klog.Logger, cj *batchv1.CronJob, now time.Time, schedule cron.Schedule, recorder record.EventRecorder) (*time.Time, error) {
-_, mostRecentTime, numberOfMissedSchedules, err := mostRecentScheduleTime(cj, now, schedule, true)
+_, mostRecentTime, tooManyMissed, err := mostRecentScheduleTime(cj, now, schedule, true)

if mostRecentTime == nil || mostRecentTime.After(now) {
return nil, err
}

-if numberOfMissedSchedules > 100 {
-// An object might miss several starts. For example, if
-// controller gets wedged on friday at 5:01pm when everyone has
-// gone home, and someone comes in on tuesday AM and discovers
-// the problem and restarts the controller, then all the hourly
-// jobs, more than 80 of them for one hourly cronJob, should
-// all start running with no further intervention (if the cronJob
-// allows concurrency and late starts).
-//
-// However, if there is a bug somewhere, or incorrect clock
-// on controller's server or apiservers (for setting creationTimestamp)
-// then there could be so many missed start times (it could be off
-// by decades or more), that it would eat up all the CPU and memory
-// of this controller. In that case, we want to not try to list
-// all the missed start times.
-//
-// I've somewhat arbitrarily picked 100, as more than 80,
-// but less than "lots".
-recorder.Eventf(cj, corev1.EventTypeWarning, "TooManyMissedTimes", "too many missed start times: %d. Set or decrease .spec.startingDeadlineSeconds or check clock skew", numberOfMissedSchedules)
-logger.Info("too many missed times", "cronjob", klog.KObj(cj), "missedTimes", numberOfMissedSchedules)
+if tooManyMissed {
+recorder.Eventf(cj, corev1.EventTypeWarning, "TooManyMissedTimes", "too many missed start times. Set or decrease .spec.startingDeadlineSeconds or check clock skew")
+logger.Info("too many missed times", "cronjob", klog.KObj(cj))
}

return mostRecentTime, err
}

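The behavioral change in this file, in miniature: instead of deriving the most recent schedule time purely from interval arithmetic, mostRecentScheduleTime now walks schedule.Next forward from a computed starting point, which keeps constant-delay specs such as @every accurate. A hedged sketch against the same robfig/cron library the controller uses (the mostRecent helper, the spec, and the times are illustrative):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron/v3"
)

// mostRecent performs the same walk as the patched mostRecentScheduleTime:
// step the schedule forward and keep the last activation not after now.
func mostRecent(schedule cron.Schedule, start, now time.Time) (t time.Time) {
	for next := schedule.Next(start); !next.After(now); next = schedule.Next(next) {
		t = next
	}
	return t // zero time if nothing fired in (start, now]
}

func main() {
	// "@every 1h" is a constant-delay schedule: each activation is Next of the
	// previous one, which pure interval arithmetic from t1 could misplace.
	sched, err := cron.ParseStandard("@every 1h")
	if err != nil {
		panic(err)
	}
	start := time.Date(2023, 8, 8, 9, 30, 0, 0, time.UTC)
	now := start.Add(3*time.Hour + 20*time.Minute)
	fmt.Println(mostRecent(sched, start, now)) // 2023-08-08 12:30:00 +0000 UTC
}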
