move Get*(v1.Pod) into a new podinfo package #82212

Closed · wants to merge 2 commits
2 changes: 2 additions & 0 deletions pkg/api/pod/BUILD
@@ -12,8 +12,10 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/api/pod",
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/apis/scheduling:go_default_library",
"//pkg/features:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
],
13 changes: 13 additions & 0 deletions pkg/api/pod/util.go
@@ -19,9 +19,11 @@ package pod
import (
"strings"

"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/security/apparmor"
)
@@ -842,3 +844,14 @@ func multiplePodIPsInUse(podStatus *api.PodStatus) bool {
}
return false
}

// GetPodPriority returns priority of the given pod.
func GetPodPriority(pod *v1.Pod) int32 {
Review thread on this line:

Contributor: Why are we moving this function here?

Contributor: The podinfo package has exactly the same function.

Contributor (author): Should I keep it here or in podinfo?

@draveness (Contributor), Sep 3, 2019, replying to "Should I keep it here or in podinfo?":
We should keep it in podinfo if it works well in this PR.

Member: +1

Member: Wait, this is what the kubelet uses. But this should actually go to pkg/api/v1/pod. You could consider opening a separate PR just for moving this function, as it involves a different set of reviewers.

Contributor (author): Closing this PR and opening a separate PR.

if pod.Spec.Priority != nil {
return *pod.Spec.Priority
}
// When priority of a running pod is nil, it means it was created at a time
// that there was no global default priority class and the priority class
// name of the pod was empty. So, we resolve to the static default priority.
return scheduling.DefaultPriorityWhenNoDefaultClassExists
}
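
For reference, a minimal, hypothetical test for the function above (not part of this PR); it assumes only the signature shown in this diff and the scheduling.DefaultPriorityWhenNoDefaultClassExists constant the code already references:

// pod_priority_sketch_test.go — hypothetical, for illustration only.
package pod

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/apis/scheduling"
)

func TestGetPodPriority(t *testing.T) {
	// An explicitly set priority is returned unchanged.
	prio := int32(1000)
	withPriority := &v1.Pod{Spec: v1.PodSpec{Priority: &prio}}
	if got := GetPodPriority(withPriority); got != prio {
		t.Errorf("expected %d, got %d", prio, got)
	}

	// A nil priority falls back to the static default.
	withoutPriority := &v1.Pod{}
	if got := GetPodPriority(withoutPriority); got != scheduling.DefaultPriorityWhenNoDefaultClassExists {
		t.Errorf("expected the static default priority, got %d", got)
	}
}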
2 changes: 1 addition & 1 deletion pkg/kubelet/eviction/BUILD
@@ -48,6 +48,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/pkg/kubelet/eviction",
deps = [
"//pkg/api/pod:go_default_library",
"//pkg/api/v1/resource:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/apis/core/v1/helper/qos:go_default_library",
@@ -61,7 +62,6 @@ go_library(
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
8 changes: 4 additions & 4 deletions pkg/kubelet/eviction/helpers.go
@@ -23,14 +23,14 @@ import (
"strings"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
podutil "k8s.io/kubernetes/pkg/api/pod"
v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
schedulerutils "k8s.io/kubernetes/pkg/scheduler/util"
volumeutils "k8s.io/kubernetes/pkg/volume/util"
)

@@ -512,8 +512,8 @@ func (ms *multiSorter) Less(i, j int) bool {

// priority compares pods by Priority, if priority is enabled.
func priority(p1, p2 *v1.Pod) int {
priority1 := schedulerutils.GetPodPriority(p1)
priority2 := schedulerutils.GetPodPriority(p2)
priority1 := podutil.GetPodPriority(p1)
priority2 := podutil.GetPodPriority(p2)
if priority1 == priority2 {
return 0
}
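
For readers unfamiliar with the multiSorter pattern named in the hunk header above: eviction ranks pods by chaining comparators like priority, where later comparators only break ties left by earlier ones. The following is a minimal, self-contained illustration of that pattern, not the PR's code; the orderedBy helper and the sign convention (lower priority sorts first) are assumptions for the example:

package main

import (
	"fmt"
	"sort"

	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/pod"
)

// cmpFunc returns -1, 0, or 1, like the priority comparator above.
type cmpFunc func(p1, p2 *v1.Pod) int

// multiSorter applies comparators in order; later ones only break ties.
type multiSorter struct {
	pods []*v1.Pod
	cmp  []cmpFunc
}

func orderedBy(cmp ...cmpFunc) *multiSorter { return &multiSorter{cmp: cmp} }

func (ms *multiSorter) Sort(pods []*v1.Pod) {
	ms.pods = pods
	sort.Sort(ms)
}

func (ms *multiSorter) Len() int      { return len(ms.pods) }
func (ms *multiSorter) Swap(i, j int) { ms.pods[i], ms.pods[j] = ms.pods[j], ms.pods[i] }

func (ms *multiSorter) Less(i, j int) bool {
	p1, p2 := ms.pods[i], ms.pods[j]
	for _, cmp := range ms.cmp {
		if r := cmp(p1, p2); r != 0 {
			return r < 0
		}
	}
	return false
}

// priority compares two pods by effective priority via the moved helper;
// here lower-priority pods sort first (an assumed convention for this sketch).
func priority(p1, p2 *v1.Pod) int {
	pr1, pr2 := podutil.GetPodPriority(p1), podutil.GetPodPriority(p2)
	switch {
	case pr1 < pr2:
		return -1
	case pr1 > pr2:
		return 1
	}
	return 0
}

func main() {
	low, high := int32(0), int32(100)
	pods := []*v1.Pod{
		{Spec: v1.PodSpec{Priority: &high}},
		{Spec: v1.PodSpec{Priority: &low}},
	}
	orderedBy(priority).Sort(pods)
	fmt.Println(*pods[0].Spec.Priority, *pods[1].Spec.Priority) // prints: 0 100
}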
1 change: 1 addition & 0 deletions pkg/scheduler/BUILD
@@ -99,6 +99,7 @@ filegroup(
"//pkg/scheduler/factory:all-srcs",
"//pkg/scheduler/framework:all-srcs",
"//pkg/scheduler/internal/cache:all-srcs",
"//pkg/scheduler/internal/podinfo:all-srcs",
"//pkg/scheduler/internal/queue:all-srcs",
"//pkg/scheduler/metrics:all-srcs",
"//pkg/scheduler/nodeinfo:all-srcs",
1 change: 1 addition & 0 deletions pkg/scheduler/algorithm/predicates/BUILD
@@ -24,6 +24,7 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/internal/podinfo:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
20 changes: 10 additions & 10 deletions pkg/scheduler/algorithm/predicates/metadata.go
@@ -24,13 +24,14 @@ import (

"k8s.io/klog"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
podinfo "k8s.io/kubernetes/pkg/scheduler/internal/podinfo"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -228,7 +229,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
pod: pod,
podBestEffort: isPodBestEffort(pod),
podRequest: GetResourceRequest(pod),
podPorts: schedutil.GetContainerPorts(pod),
podPorts: podinfo.GetContainerPorts(pod),
topologyPairsPotentialAffinityPods: incomingPodAffinityMap,
topologyPairsPotentialAntiAffinityPods: incomingPodAntiAffinityMap,
topologyPairsAntiAffinityPodsMap: existingPodAntiAffinityMap,
@@ -253,7 +254,6 @@ func getExistingPodSpreadCache(pod *v1.Pod, nodeInfoMap map[string]*schedulernod
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
}

errCh := schedutil.NewErrorChannel()
var lock sync.Mutex

@@ -369,7 +369,7 @@ func newTopologyPairsMaps() *topologyPairsMaps {
}

func (m *topologyPairsMaps) addTopologyPair(pair topologyPair, pod *v1.Pod) {
podFullName := schedutil.GetPodFullName(pod)
podFullName := podinfo.GetPodFullName(pod)
if m.topologyPairToPods[pair] == nil {
m.topologyPairToPods[pair] = make(map[*v1.Pod]struct{})
}
@@ -381,7 +381,7 @@ func (m *topologyPairsMaps) addTopologyPair(pair topologyPair, pod *v1.Pod) {
}

func (m *topologyPairsMaps) removePod(deletedPod *v1.Pod) {
deletedPodFullName := schedutil.GetPodFullName(deletedPod)
deletedPodFullName := podinfo.GetPodFullName(deletedPod)
for pair := range m.podToTopologyPairs[deletedPodFullName] {
delete(m.topologyPairToPods[pair], deletedPod)
if len(m.topologyPairToPods[pair]) == 0 {
@@ -464,8 +464,8 @@ func (c *podSpreadCache) clone() *podSpreadCache {
// RemovePod changes predicateMetadata assuming that the given `deletedPod` is
// deleted from the system.
func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) error {
deletedPodFullName := schedutil.GetPodFullName(deletedPod)
if deletedPodFullName == schedutil.GetPodFullName(meta.pod) {
deletedPodFullName := podinfo.GetPodFullName(deletedPod)
if deletedPodFullName == podinfo.GetPodFullName(meta.pod) {
return fmt.Errorf("deletedPod and meta.pod must not be the same")
}
meta.topologyPairsAntiAffinityPodsMap.removePod(deletedPod)
@@ -481,7 +481,7 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) erro
len(meta.serviceAffinityMatchingPodList) > 0 &&
deletedPod.Namespace == meta.serviceAffinityMatchingPodList[0].Namespace {
for i, pod := range meta.serviceAffinityMatchingPodList {
if schedutil.GetPodFullName(pod) == deletedPodFullName {
if podinfo.GetPodFullName(pod) == deletedPodFullName {
meta.serviceAffinityMatchingPodList = append(
meta.serviceAffinityMatchingPodList[:i],
meta.serviceAffinityMatchingPodList[i+1:]...)
@@ -495,8 +495,8 @@ func (meta *predicateMetadata) RemovePod(deletedPod *v1.Pod, node *v1.Node) erro
// AddPod changes predicateMetadata assuming that `newPod` is added to the
// system.
func (meta *predicateMetadata) AddPod(addedPod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error {
addedPodFullName := schedutil.GetPodFullName(addedPod)
if addedPodFullName == schedutil.GetPodFullName(meta.pod) {
addedPodFullName := podinfo.GetPodFullName(addedPod)
if addedPodFullName == podinfo.GetPodFullName(meta.pod) {
return fmt.Errorf("addedPod and meta.pod must not be the same")
}
if nodeInfo.Node() == nil {
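
The new pkg/scheduler/internal/podinfo package itself does not appear in this diff. Inferred from the call sites above, a hedged sketch of the two helpers being referenced (GetPodFullName and GetContainerPorts), assuming they keep the behavior of their former pkg/scheduler/util counterparts:

// Sketch only; not verified against the new package's actual contents.
package podinfo

import (
	v1 "k8s.io/api/core/v1"
)

// GetPodFullName returns a name that uniquely identifies a pod across
// namespaces, e.g. "mypod_default". The underscore separator cannot appear
// in a DNS-1123 name, so it cannot collide with pod or namespace names.
func GetPodFullName(pod *v1.Pod) string {
	return pod.Name + "_" + pod.Namespace
}

// GetContainerPorts collects the ports declared by all containers of the
// given pods; PodFitsHostPorts checks these against the node's used ports.
func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
	var ports []*v1.ContainerPort
	for _, pod := range pods {
		for i := range pod.Spec.Containers {
			container := &pod.Spec.Containers[i]
			for j := range container.Ports {
				ports = append(ports, &container.Ports[j])
			}
		}
	}
	return ports
}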
4 changes: 2 additions & 2 deletions pkg/scheduler/algorithm/predicates/predicates.go
@@ -45,8 +45,8 @@ import (
"k8s.io/kubernetes/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
podinfo "k8s.io/kubernetes/pkg/scheduler/internal/podinfo"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -1156,7 +1156,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulerno
wantPorts = predicateMeta.podPorts
} else {
// We couldn't parse metadata - fallback to computing it.
wantPorts = schedutil.GetContainerPorts(pod)
wantPorts = podinfo.GetContainerPorts(pod)
}
if len(wantPorts) == 0 {
return true, nil, nil
6 changes: 4 additions & 2 deletions pkg/scheduler/core/BUILD
@@ -9,16 +9,17 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/core",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/pod:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/api:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/podinfo:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//pkg/scheduler/volumebinder:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@@ -43,6 +44,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/api/pod:go_default_library",
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
@@ -51,10 +53,10 @@ go_test(
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/podinfo:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
9 changes: 5 additions & 4 deletions pkg/scheduler/core/extender_test.go
@@ -27,16 +27,17 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
podutil "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
podinfo "k8s.io/kubernetes/pkg/scheduler/internal/podinfo"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/kubernetes/pkg/scheduler/util"
)

type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
@@ -200,7 +201,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(
// and get cached node info by given node name.
nodeInfoCopy := f.cachedNodeNameToInfo[node.GetName()].Clone()

potentialVictims := util.SortableList{CompFunc: util.MoreImportantPod}
potentialVictims := podinfo.SortableList{CompFunc: podinfo.MoreImportantPod}

removePod := func(rp *v1.Pod) {
nodeInfoCopy.RemovePod(rp)
@@ -210,9 +211,9 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(
}
// As the first step, remove all the lower priority pods from the node and
// check if the given pod can be scheduled.
podPriority := util.GetPodPriority(pod)
podPriority := podutil.GetPodPriority(pod)
for _, p := range nodeInfoCopy.Pods() {
if util.GetPodPriority(p) < podPriority {
if podutil.GetPodPriority(p) < podPriority {
potentialVictims.Items = append(potentialVictims.Items, p)
removePod(p)
}
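
SortableList and MoreImportantPod, used in the test above, are likewise not shown in this diff. As a rough, hedged sketch of the ordering they provided in the scheduler util package of this era (higher priority wins, and on a tie the earlier-started pod wins): this is an approximation for illustration, not the PR's code, and getPodStartTime is a stand-in for the real helper.

// Hedged sketch matching the usage in extender_test.go above.
package podinfo

import (
	"sort"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podutil "k8s.io/kubernetes/pkg/api/pod"
)

// SortableList sorts arbitrary items with a caller-supplied "less" function.
type SortableList struct {
	Items    []interface{}
	CompFunc func(item1, item2 interface{}) bool
}

func (l *SortableList) Len() int           { return len(l.Items) }
func (l *SortableList) Less(i, j int) bool { return l.CompFunc(l.Items[i], l.Items[j]) }
func (l *SortableList) Swap(i, j int)      { l.Items[i], l.Items[j] = l.Items[j], l.Items[i] }

// Sort orders the items using CompFunc.
func (l *SortableList) Sort() { sort.Sort(l) }

// getPodStartTime is a stand-in: started pods are ordered by Status.StartTime,
// unstarted ones by their creation timestamp.
func getPodStartTime(pod *v1.Pod) *metav1.Time {
	if pod.Status.StartTime != nil {
		return pod.Status.StartTime
	}
	return &pod.CreationTimestamp
}

// MoreImportantPod reports whether pod1 should outrank pod2: higher priority
// wins, and on a tie the pod that started earlier wins. It takes interface{}
// arguments so it can serve as SortableList.CompFunc.
func MoreImportantPod(pod1, pod2 interface{}) bool {
	p1 := podutil.GetPodPriority(pod1.(*v1.Pod))
	p2 := podutil.GetPodPriority(pod2.(*v1.Pod))
	if p1 != p2 {
		return p1 > p2
	}
	return getPodStartTime(pod1.(*v1.Pod)).Before(getPodStartTime(pod2.(*v1.Pod)))
}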