Use consts as predicate key names in handlers #59952

Merged · 1 commit · Feb 22, 2018
6 changes: 3 additions & 3 deletions pkg/scheduler/algorithm/predicates/predicates.go
@@ -72,8 +72,8 @@ const (
PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
- // checkServiceAffinityPred defines the name of predicate checkServiceAffinity.
- checkServiceAffinityPred = "checkServiceAffinity"
+ // CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
+ CheckServiceAffinityPred = "CheckServiceAffinity"
Member:
Will that break backward compatibility?

Contributor Author:
Should be fine. According to https://github.com/kubernetes/kubernetes/blob/master/pkg/scheduler/factory/plugins.go#L207-L218, the name of this predicate is not part of the scheduler policy config file.
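To make the point concrete, here is a minimal, hypothetical sketch (the types below are illustrative stand-ins, not the scheduler's real API): a predicate built from a policy argument is registered under the user-chosen policy name, so the Go const never leaks into the policy file.

    package main

    import "fmt"

    // Hypothetical stand-ins for the scheduler's policy types; the real
    // definitions live under pkg/scheduler/api.
    type ServiceAffinityArg struct{ Labels []string }

    type PredicatePolicy struct {
        Name     string
        Argument *ServiceAffinityArg
    }

    // registry maps predicate names to (stubbed) fit predicates.
    var registry = map[string]func() bool{}

    // registerCustomFitPredicate mirrors the idea at plugins.go#L207-L218:
    // when an argument is present, the predicate is registered under the
    // name chosen in the policy file, not under a Go constant.
    func registerCustomFitPredicate(p PredicatePolicy) string {
        if p.Argument != nil {
            registry[p.Name] = func() bool { return true } // stub predicate
        }
        return p.Name
    }

    func main() {
        name := registerCustomFitPredicate(PredicatePolicy{
            Name:     "Region",
            Argument: &ServiceAffinityArg{Labels: []string{"region"}},
        })
        fmt.Println("registered as:", name) // prints: registered as: Region
    }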

Member:

But there could be existing policy config files that are already using the old name.

Contributor Author:

@bsalamat @k82cn AFAIK, the right way to use serviceAffinity in a policy file is as below:

    "apiVersion": "v1",
    "kind": "Policy",
    "predicates": [
        {
            "name": "NoVolumeZoneConflict"
        },
        {
            "name": "MaxEBSVolumeCount"
        },
        {
            "name": "MaxGCEPDVolumeCount"
        },
        {
            "name": "MatchInterPodAffinity"
        },
        {
            "name": "NoDiskConflict"
        },
        {
            "name": "GeneralPredicates"
        },
        {
            "name": "PodToleratesNodeTaints"
        },
        {
            "name": "CheckNodeMemoryPressure"
        },
        {
            "name": "CheckNodeDiskPressure"
        },
        {
            "argument": {
                "serviceAffinity": {
                    "labels": [
                        "region"
                    ]
                }
            },
            "name": "Region"
        }
    ],

That's why I claim:

    The name of this predicate is not part of the scheduler policy config file.

serviceAffinity is an argument, which is not touched by this PR.

Correct me if I misunderstood something :)
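FWIW, a policy file like the one above is passed to the scheduler via kube-scheduler's --policy-config-file flag (assuming the flag spelling in this release line), so only the names and arguments written in that file need to stay stable across releases.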

Member:

Ah, I see. Thanks for the clarification.

// MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
// MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
@@ -128,7 +128,7 @@ var (
GeneralPred, HostNamePred, PodFitsHostPortsPred,
MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
- checkServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
+ CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred,
MaxAzureDiskVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
CheckNodeMemoryPressurePred, CheckNodeDiskPressurePred, MatchInterPodAffinityPred}
)
40 changes: 20 additions & 20 deletions pkg/scheduler/factory/factory.go
@@ -70,11 +70,11 @@ const (
)

var (
- serviceAffinitySet = sets.NewString("ServiceAffinity")
- matchInterPodAffinitySet = sets.NewString("MatchInterPodAffinity")
- generalPredicatesSets = sets.NewString("GeneralPredicates")
- noDiskConflictSet = sets.NewString("NoDiskConflict")
- maxPDVolumeCountPredicateKeys = []string{"MaxGCEPDVolumeCount", "MaxAzureDiskVolumeCount", "MaxEBSVolumeCount"}
+ serviceAffinitySet = sets.NewString(predicates.CheckServiceAffinityPred)
+ matchInterPodAffinitySet = sets.NewString(predicates.MatchInterPodAffinityPred)
+ generalPredicatesSets = sets.NewString(predicates.GeneralPred)
+ noDiskConflictSet = sets.NewString(predicates.NoDiskConflictPred)
+ maxPDVolumeCountPredicateKeys = []string{predicates.MaxGCEPDVolumeCountPred, predicates.MaxAzureDiskVolumeCountPred, predicates.MaxEBSVolumeCountPred}
)

// configFactory is the default implementation of the scheduler.Configurator interface.
@@ -377,7 +377,7 @@ func (c *configFactory) invalidatePredicatesForPvUpdate(oldPV, newPV *v1.Persist
for k, v := range newPV.Labels {
// If PV update modifies the zone/region labels.
if isZoneRegionLabel(k) && !reflect.DeepEqual(v, oldPV.Labels[k]) {
invalidPredicates.Insert("NoVolumeZoneConflict")
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
break
}
}
@@ -434,19 +434,19 @@ func (c *configFactory) invalidatePredicatesForPv(pv *v1.PersistentVolume) {

// PV types which impact MaxPDVolumeCountPredicate
if pv.Spec.AWSElasticBlockStore != nil {
invalidPredicates.Insert("MaxEBSVolumeCount")
invalidPredicates.Insert(predicates.MaxEBSVolumeCountPred)
}
if pv.Spec.GCEPersistentDisk != nil {
invalidPredicates.Insert("MaxGCEPDVolumeCount")
invalidPredicates.Insert(predicates.MaxGCEPDVolumeCountPred)
}
if pv.Spec.AzureDisk != nil {
invalidPredicates.Insert("MaxAzureDiskVolumeCount")
invalidPredicates.Insert(predicates.MaxAzureDiskVolumeCountPred)
}

// If PV contains zone related label, it may impact cached NoVolumeZoneConflict
for k := range pv.Labels {
if isZoneRegionLabel(k) {
invalidPredicates.Insert("NoVolumeZoneConflict")
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
break
}
}
@@ -520,7 +520,7 @@ func (c *configFactory) invalidatePredicatesForPvc(pvc *v1.PersistentVolumeClaim
invalidPredicates := sets.NewString(maxPDVolumeCountPredicateKeys...)

// The bound volume's label may change
invalidPredicates.Insert("NoVolumeZoneConflict")
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)

if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
// Add/delete impacts the available PVs to choose from
@@ -779,19 +779,19 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
invalidPredicates := sets.NewString()

if !reflect.DeepEqual(oldNode.Status.Allocatable, newNode.Status.Allocatable) {
invalidPredicates.Insert("GeneralPredicates") // "PodFitsResources"
invalidPredicates.Insert(predicates.GeneralPred) // "PodFitsResources"
}
if !reflect.DeepEqual(oldNode.GetLabels(), newNode.GetLabels()) {
invalidPredicates.Insert("GeneralPredicates", "ServiceAffinity") // "PodSelectorMatches"
invalidPredicates.Insert(predicates.GeneralPred, predicates.CheckServiceAffinityPred) // "PodSelectorMatches"
for k, v := range oldNode.GetLabels() {
// any label can be topology key of pod, we have to invalidate in all cases
if v != newNode.GetLabels()[k] {
invalidPredicates.Insert("MatchInterPodAffinity")
invalidPredicates.Insert(predicates.MatchInterPodAffinityPred)
}
// NoVolumeZoneConflict will only be affected by zone related label change
if isZoneRegionLabel(k) {
if v != newNode.GetLabels()[k] {
invalidPredicates.Insert("NoVolumeZoneConflict")
invalidPredicates.Insert(predicates.NoVolumeZoneConflictPred)
}
}
}
@@ -807,7 +807,7 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
}
if !reflect.DeepEqual(oldTaints, newTaints) ||
!reflect.DeepEqual(oldNode.Spec.Taints, newNode.Spec.Taints) {
invalidPredicates.Insert("PodToleratesNodeTaints")
invalidPredicates.Insert(predicates.PodToleratesNodeTaintsPred)
}

if !reflect.DeepEqual(oldNode.Status.Conditions, newNode.Status.Conditions) {
@@ -820,19 +820,19 @@ func (c *configFactory) invalidateCachedPredicatesOnNodeUpdate(newNode *v1.Node,
newConditions[cond.Type] = cond.Status
}
if oldConditions[v1.NodeMemoryPressure] != newConditions[v1.NodeMemoryPressure] {
invalidPredicates.Insert("CheckNodeMemoryPressure")
invalidPredicates.Insert(predicates.CheckNodeMemoryPressurePred)
}
if oldConditions[v1.NodeDiskPressure] != newConditions[v1.NodeDiskPressure] {
invalidPredicates.Insert("CheckNodeDiskPressure")
invalidPredicates.Insert(predicates.CheckNodeDiskPressurePred)
}
if oldConditions[v1.NodeReady] != newConditions[v1.NodeReady] ||
oldConditions[v1.NodeOutOfDisk] != newConditions[v1.NodeOutOfDisk] ||
oldConditions[v1.NodeNetworkUnavailable] != newConditions[v1.NodeNetworkUnavailable] {
invalidPredicates.Insert("CheckNodeCondition")
invalidPredicates.Insert(predicates.CheckNodeConditionPred)
}
}
if newNode.Spec.Unschedulable != oldNode.Spec.Unschedulable {
invalidPredicates.Insert("CheckNodeCondition")
invalidPredicates.Insert(predicates.CheckNodeConditionPred)
}
c.equivalencePodCache.InvalidateCachedPredicateItem(newNode.GetName(), invalidPredicates)
}