Merge pull request kubernetes#115462 from danielvegamyhre/automated-cherry-pick-of-#115349-upstream-release-1.26

Automated cherry pick of kubernetes#115349: update prev succeeded indexes for indexed jobs
k8s-ci-robot committed Feb 10, 2023
2 parents 41137e4 + a082cbd commit f7bcf49
Showing 3 changed files with 125 additions and 28 deletions.
13 changes: 6 additions & 7 deletions pkg/controller/job/indexed_job_utils.go
@@ -55,7 +55,7 @@ func calculateSucceededIndexes(job *batch.Job, pods []*v1.Pod) (orderedIntervals
 	var prevIntervals orderedIntervals
 	withFinalizers := hasJobTrackingAnnotation(job)
 	if withFinalizers {
-		prevIntervals = succeededIndexesFromJob(job)
+		prevIntervals = succeededIndexesFromString(job.Status.CompletedIndexes, int(*job.Spec.Completions))
 	}
 	newSucceeded := sets.NewInt()
 	for _, p := range pods {
@@ -152,20 +152,19 @@ func (oi orderedIntervals) has(ix int) bool {
 	return oi[hi].First <= ix
 }
 
-func succeededIndexesFromJob(job *batch.Job) orderedIntervals {
-	if job.Status.CompletedIndexes == "" {
+func succeededIndexesFromString(completedIndexes string, completions int) orderedIntervals {
+	if completedIndexes == "" {
 		return nil
 	}
 	var result orderedIntervals
 	var lastInterval *interval
-	completions := int(*job.Spec.Completions)
-	for _, intervalStr := range strings.Split(job.Status.CompletedIndexes, ",") {
+	for _, intervalStr := range strings.Split(completedIndexes, ",") {
 		limitsStr := strings.Split(intervalStr, "-")
 		var inter interval
 		var err error
 		inter.First, err = strconv.Atoi(limitsStr[0])
 		if err != nil {
-			klog.InfoS("Corrupted completed indexes interval, ignoring", "job", klog.KObj(job), "interval", intervalStr, "err", err)
+			klog.InfoS("Corrupted completed indexes interval, ignoring", "interval", intervalStr, "err", err)
 			continue
 		}
 		if inter.First >= completions {
@@ -174,7 +173,7 @@ func succeededIndexesFromJob(job *batch.Job) orderedIntervals {
 		if len(limitsStr) > 1 {
 			inter.Last, err = strconv.Atoi(limitsStr[1])
 			if err != nil {
-				klog.InfoS("Corrupted completed indexes interval, ignoring", "job", klog.KObj(job), "interval", intervalStr, "err", err)
+				klog.InfoS("Corrupted completed indexes interval, ignoring", "interval", intervalStr, "err", err)
 				continue
 			}
 			if inter.Last >= completions {
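For context on the refactor above: Job.Status.CompletedIndexes records succeeded indexes as a comma-separated list of single values and first-last ranges, for example "1,3-5,7". Passing the raw string plus a completions bound, instead of the whole Job object, lets the controller parse the previous status and the current status with the same helper. The standalone sketch below is a simplified stand-in (the parseTotal name and the skip-instead-of-log behavior are assumptions, not the controller's code) that illustrates the format and the effect of the bound:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseTotal counts how many completed indexes in the string fall
// below the completions bound, mirroring the shape of
// succeededIndexesFromString: corrupted entries are skipped and
// ranges are clamped to the current number of completions.
func parseTotal(completedIndexes string, completions int) int {
	if completedIndexes == "" {
		return 0
	}
	total := 0
	for _, intervalStr := range strings.Split(completedIndexes, ",") {
		limits := strings.Split(intervalStr, "-")
		first, err := strconv.Atoi(limits[0])
		if err != nil || first >= completions {
			continue // corrupted, or entirely out of range
		}
		last := first
		if len(limits) > 1 {
			if last, err = strconv.Atoi(limits[1]); err != nil {
				continue
			}
			if last >= completions {
				last = completions - 1 // clamp to the valid range
			}
		}
		total += last - first + 1
	}
	return total
}

func main() {
	fmt.Println(parseTotal("1,3-5,7", 8)) // 5: indexes 1, 3, 4, 5, 7
	fmt.Println(parseTotal("3", 2))       // 0: index 3 is out of range
}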
28 changes: 24 additions & 4 deletions pkg/controller/job/job_controller.go
@@ -1036,6 +1036,7 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
 			uidsWithFinalizer.Insert(uid)
 		}
 	}
+
 	// Shallow copy, as it will only be used to detect changes in the counters.
 	oldCounters := job.Status
 	if cleanUncountedPodsWithoutFinalizers(&job.Status, uidsWithFinalizer) {
@@ -1109,10 +1110,14 @@ func (jm *Controller) trackJobStatusAndRemoveFinalizers(ctx context.Context, job
 			break
 		}
 	}
-	if len(newSucceededIndexes) > 0 {
+	if isIndexed {
 		succeededIndexes = succeededIndexes.withOrderedIndexes(newSucceededIndexes)
+		succeededIndexesStr := succeededIndexes.String()
+		if succeededIndexesStr != job.Status.CompletedIndexes {
+			needsFlush = true
+		}
 		job.Status.Succeeded = int32(succeededIndexes.total())
-		job.Status.CompletedIndexes = succeededIndexes.String()
+		job.Status.CompletedIndexes = succeededIndexesStr
 	}
 	if feature.DefaultFeatureGate.Enabled(features.JobPodFailurePolicy) {
 		if finishedCond != nil && finishedCond.Type == batch.JobFailureTarget {
@@ -1778,9 +1783,24 @@ func findConditionByType(list []batch.JobCondition, cType batch.JobConditionType
 
 func recordJobPodFinished(job *batch.Job, oldCounters batch.JobStatus) {
 	completionMode := completionModeStr(job)
-	diff := job.Status.Succeeded - oldCounters.Succeeded
+	var diff int
+
+	// Updating succeeded metric must be handled differently
+	// for Indexed Jobs to handle the case where the job has
+	// been scaled down by reducing completions & parallelism
+	// in tandem, and now a previously completed index is
+	// now out of range (i.e. index >= spec.Completions).
+	if isIndexedJob(job) {
+		if job.Status.CompletedIndexes != oldCounters.CompletedIndexes {
+			diff = succeededIndexesFromString(job.Status.CompletedIndexes, int(*job.Spec.Completions)).total() - succeededIndexesFromString(oldCounters.CompletedIndexes, int(*job.Spec.Completions)).total()
+		}
+	} else {
+		diff = int(job.Status.Succeeded) - int(oldCounters.Succeeded)
+	}
 	metrics.JobPodsFinished.WithLabelValues(completionMode, metrics.Succeeded).Add(float64(diff))
-	diff = job.Status.Failed - oldCounters.Failed
+
+	// Update failed metric.
+	diff = int(job.Status.Failed - oldCounters.Failed)
 	metrics.JobPodsFinished.WithLabelValues(completionMode, metrics.Failed).Add(float64(diff))
 }
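The comment added to recordJobPodFinished above is the core of the fix: when an indexed job is scaled down (completions and parallelism reduced together), previously completed indexes can fall out of the valid range, so diffing the raw Succeeded counters misreports job_pods_finished_total. With the numbers from the new test cases (the job is scaled down to completions=2 after index 3 succeeded, then the pod at index 1 succeeds), the raw counter diff is 1 - 1 = 0, and would be 0 - 1 = -1 if no new pod had succeeded, while the in-range totals give 1 - 0 = 1, matching the one pod that actually finished. A minimal sketch of that arithmetic, where inRangeTotal is a hypothetical stand-in for succeededIndexesFromString(...).total():

package main

import "fmt"

// inRangeTotal counts completed indexes that are still below the
// completions bound; the int slices stand in for the parsed
// CompletedIndexes strings "3" (old) and "1" (new).
func inRangeTotal(indexes []int, completions int) int {
	n := 0
	for _, ix := range indexes {
		if ix < completions {
			n++
		}
	}
	return n
}

func main() {
	const completions = 2  // job scaled down after index 3 succeeded
	oldIndexes := []int{3} // old status: CompletedIndexes "3", Succeeded 1
	newIndexes := []int{1} // new status: pod at index 1 just succeeded

	// Old behavior: raw Succeeded counter diff, 1 - 1 = 0 finished
	// pods recorded (or -1 if no new pod had succeeded).
	fmt.Println(1 - 1) // 0

	// New behavior: diff of in-range totals, 1 - 0 = 1.
	fmt.Println(inRangeTotal(newIndexes, completions) - inRangeTotal(oldIndexes, completions)) // 1
}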
112 changes: 95 additions & 17 deletions pkg/controller/job/job_controller_test.go
@@ -1177,16 +1177,18 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 	indexedCompletion := batch.IndexedCompletion
 	mockErr := errors.New("mock error")
 	cases := map[string]struct {
-		job                  batch.Job
-		pods                 []*v1.Pod
-		finishedCond         *batch.JobCondition
-		expectedRmFinalizers sets.String
-		needsFlush           bool
-		statusUpdateErr      error
-		podControlErr        error
-		wantErr              error
-		wantRmFinalizers     int
-		wantStatusUpdates    []batch.JobStatus
+		job                     batch.Job
+		pods                    []*v1.Pod
+		finishedCond            *batch.JobCondition
+		expectedRmFinalizers    sets.String
+		needsFlush              bool
+		statusUpdateErr         error
+		podControlErr           error
+		wantErr                 error
+		wantRmFinalizers        int
+		wantStatusUpdates       []batch.JobStatus
+		wantSucceededPodsMetric int
+		wantFailedPodsMetric    int
 	}{
 		"no updates": {},
 		"new active": {
@@ -1227,6 +1229,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Failed:    3,
 				},
 			},
+			wantSucceededPodsMetric: 2,
+			wantFailedPodsMetric:    3,
 		},
 		"past and new finished pods": {
 			job: batch.Job{
@@ -1267,6 +1271,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Failed:    6,
 				},
 			},
+			wantSucceededPodsMetric: 3,
+			wantFailedPodsMetric:    3,
 		},
 		"expecting removed finalizers": {
 			job: batch.Job{
@@ -1306,6 +1312,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Failed:    6,
 				},
 			},
+			wantSucceededPodsMetric: 3,
+			wantFailedPodsMetric:    3,
 		},
 		"succeeding job": {
 			pods: []*v1.Pod{
@@ -1329,6 +1337,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					CompletionTime: &succeededCond.LastTransitionTime,
 				},
 			},
+			wantSucceededPodsMetric: 1,
+			wantFailedPodsMetric:    1,
 		},
 		"failing job": {
			pods: []*v1.Pod{
@@ -1353,6 +1363,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Conditions: []batch.JobCondition{*failedCond},
 				},
 			},
+			wantSucceededPodsMetric: 1,
+			wantFailedPodsMetric:    2,
 		},
 		"deleted job": {
 			job: batch.Job{
@@ -1385,6 +1397,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Failed:    1,
 				},
 			},
+			wantSucceededPodsMetric: 1,
+			wantFailedPodsMetric:    1,
 		},
 		"status update error": {
 			pods: []*v1.Pod{
@@ -1473,6 +1487,62 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				},
 			},
+			wantSucceededPodsMetric: 2,
 		},
+		"indexed job prev successful pods outside current completions index range with no new succeeded pods": {
+			job: batch.Job{
+				Spec: batch.JobSpec{
+					CompletionMode: &indexedCompletion,
+					Completions:    pointer.Int32(2),
+					Parallelism:    pointer.Int32(2),
+				},
+				Status: batch.JobStatus{
+					Active:           2,
+					Succeeded:        1,
+					CompletedIndexes: "3",
+				},
+			},
+			pods: []*v1.Pod{
+				buildPod().phase(v1.PodRunning).trackingFinalizer().index("0").Pod,
+				buildPod().phase(v1.PodRunning).trackingFinalizer().index("1").Pod,
+			},
+			wantRmFinalizers: 0,
+			wantStatusUpdates: []batch.JobStatus{
+				{
+					Active:                  2,
+					Succeeded:               0,
+					CompletedIndexes:        "",
+					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
+				},
+			},
+		},
+		"indexed job prev successful pods outside current completions index range with new succeeded pods in range": {
+			job: batch.Job{
+				Spec: batch.JobSpec{
+					CompletionMode: &indexedCompletion,
+					Completions:    pointer.Int32(2),
+					Parallelism:    pointer.Int32(2),
+				},
+				Status: batch.JobStatus{
+					Active:           2,
+					Succeeded:        1,
+					CompletedIndexes: "3",
+				},
+			},
+			pods: []*v1.Pod{
+				buildPod().phase(v1.PodRunning).trackingFinalizer().index("0").Pod,
+				buildPod().phase(v1.PodSucceeded).trackingFinalizer().index("1").Pod,
+			},
+			wantRmFinalizers: 1,
+			wantStatusUpdates: []batch.JobStatus{
+				{
+					Active:                  2,
+					Succeeded:               1,
+					CompletedIndexes:        "1",
+					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
+				},
+			},
+			wantSucceededPodsMetric: 1,
+		},
 		"indexed job new failed pods": {
 			job: batch.Job{
@@ -1505,6 +1575,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				},
 			},
+			wantFailedPodsMetric: 3,
 		},
 		"indexed job past and new pods": {
 			job: batch.Job{
@@ -1543,6 +1614,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					UncountedTerminatedPods: &batch.UncountedTerminatedPods{},
 				},
 			},
+			wantSucceededPodsMetric: 1,
+			wantFailedPodsMetric:    2,
 		},
 		"too many finished": {
 			job: batch.Job{
@@ -1583,6 +1656,8 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Failed:    1,
 				},
 			},
+			wantSucceededPodsMetric: 499,
+			wantFailedPodsMetric:    1,
 		},
 		"too many indexed finished": {
 			job: batch.Job{
@@ -1606,6 +1681,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Succeeded: 500,
 				},
 			},
+			wantSucceededPodsMetric: 500,
 		},
 		"pod flips from failed to succeeded": {
 			job: batch.Job{
@@ -1632,6 +1708,7 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 					Conditions: []batch.JobCondition{*failedCond},
 				},
 			},
+			wantFailedPodsMetric: 2,
 		},
 	}
 	for name, tc := range cases {
@@ -1651,7 +1728,10 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 				job.Status.UncountedTerminatedPods = &batch.UncountedTerminatedPods{}
 			}
 			uncounted := newUncountedTerminatedPods(*job.Status.UncountedTerminatedPods)
-			succeededIndexes := succeededIndexesFromJob(job)
+			var succeededIndexes orderedIntervals
+			if isIndexedJob(job) {
+				succeededIndexes = succeededIndexesFromString(job.Status.CompletedIndexes, int(*job.Spec.Completions))
+			}
 			err := manager.trackJobStatusAndRemoveFinalizers(context.TODO(), job, tc.pods, succeededIndexes, *uncounted, tc.expectedRmFinalizers, tc.finishedCond, tc.needsFlush)
 			if !errors.Is(err, tc.wantErr) {
 				t.Errorf("Got error %v, want %v", err, tc.wantErr)
@@ -1669,17 +1749,15 @@ func TestTrackJobStatusAndRemoveFinalizers(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Obtaining succeeded job_pods_finished_total: %v", err)
 			}
-			newSucceeded := job.Status.Succeeded - tc.job.Status.Succeeded
-			if float64(newSucceeded) != v {
-				t.Errorf("Metric reports %.0f succeeded pods, want %d", v, newSucceeded)
+			if float64(tc.wantSucceededPodsMetric) != v {
+				t.Errorf("Metric reports %.0f succeeded pods, want %d", v, tc.wantSucceededPodsMetric)
 			}
 			v, err = metricstestutil.GetCounterMetricValue(metrics.JobPodsFinished.WithLabelValues(completionMode, metrics.Failed))
 			if err != nil {
 				t.Fatalf("Obtaining failed job_pods_finished_total: %v", err)
 			}
-			newFailed := job.Status.Failed - tc.job.Status.Failed
-			if float64(newFailed) != v {
-				t.Errorf("Metric reports %.0f failed pods, want %d", v, newFailed)
+			if float64(tc.wantFailedPodsMetric) != v {
+				t.Errorf("Metric reports %.0f failed pods, want %d", v, tc.wantFailedPodsMetric)
 			}
 		}
 	})
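To exercise the updated assertions locally, the standard Go tooling should work from a kubernetes/kubernetes checkout, along the lines of:

go test ./pkg/controller/job/... -run TestTrackJobStatusAndRemoveFinalizers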
