-
Notifications
You must be signed in to change notification settings - Fork 38.6k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Fix deployment helper - no assumptions on only one new ReplicaSet #41851
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -612,9 +612,13 @@ func EqualIgnoreHash(template1, template2 v1.PodTemplateSpec) bool { | |
// FindNewReplicaSet returns the new RS this given deployment targets (the one with the same pod template). | ||
func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet) (*extensions.ReplicaSet, error) { | ||
newRSTemplate := GetNewReplicaSetTemplate(deployment) | ||
sort.Sort(controller.ReplicaSetsByCreationTimestamp(rsList)) | ||
for i := range rsList { | ||
if EqualIgnoreHash(rsList[i].Spec.Template, newRSTemplate) { | ||
// This is the new ReplicaSet. | ||
// In rare cases, such as after cluster upgrades, Deployment may end up with | ||
// having more than one new ReplicaSets that have the same template as its template, | ||
// see https://github.com/kubernetes/kubernetes/issues/40415 | ||
// We deterministically choose the oldest new ReplicaSet. | ||
return rsList[i], nil | ||
} | ||
} | ||
|
@@ -629,16 +633,21 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions. | |
// All pods and replica sets are labeled with pod-template-hash to prevent overlapping | ||
oldRSs := map[string]*extensions.ReplicaSet{} | ||
allOldRSs := map[string]*extensions.ReplicaSet{} | ||
newRSTemplate := GetNewReplicaSetTemplate(deployment) | ||
requiredRSs := []*extensions.ReplicaSet{} | ||
allRSs := []*extensions.ReplicaSet{} | ||
newRS, err := FindNewReplicaSet(deployment, rsList) | ||
if err != nil { | ||
return requiredRSs, allRSs, err | ||
} | ||
for _, pod := range podList.Items { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I have a PR that reworks |
||
podLabelsSelector := labels.Set(pod.ObjectMeta.Labels) | ||
for _, rs := range rsList { | ||
rsLabelsSelector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) | ||
if err != nil { | ||
return nil, nil, fmt.Errorf("invalid label selector: %v", err) | ||
} | ||
// Filter out replica set that has the same pod template spec as the deployment - that is the new replica set. | ||
if EqualIgnoreHash(rs.Spec.Template, newRSTemplate) { | ||
// Filter out new replica set | ||
if newRS != nil && rs.UID == newRS.UID { | ||
continue | ||
} | ||
allOldRSs[rs.ObjectMeta.Name] = rs | ||
|
@@ -647,12 +656,10 @@ func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions. | |
} | ||
} | ||
} | ||
requiredRSs := []*extensions.ReplicaSet{} | ||
for key := range oldRSs { | ||
value := oldRSs[key] | ||
requiredRSs = append(requiredRSs, value) | ||
} | ||
allRSs := []*extensions.ReplicaSet{} | ||
for key := range allOldRSs { | ||
value := allOldRSs[key] | ||
allRSs = append(allRSs, value) | ||
|
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@smarterclayton @mfojtik we are going to choose the oldest replica set out of two identical replica sets for now - seems like the sanest thing to do when we end up in such a state (seems possible after cluster upgrades: #40415).
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
@janetkuo can you add a comment explaining this here?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
done