Rename scheduler/nodeinfo pkg to scheduler/types #89703

Merged (1 commit, Apr 2, 2020)
3 changes: 2 additions & 1 deletion pkg/scheduler/BUILD
@@ -81,9 +81,9 @@ go_test(
"//pkg/scheduler/internal/cache/fake:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/events/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -132,6 +132,7 @@ filegroup(
"//pkg/scheduler/nodeinfo:all-srcs",
"//pkg/scheduler/profile:all-srcs",
"//pkg/scheduler/testing:all-srcs",
"//pkg/scheduler/types:all-srcs",
"//pkg/scheduler/util:all-srcs",
],
tags = ["automanaged"],
4 changes: 2 additions & 2 deletions pkg/scheduler/core/BUILD
@@ -17,8 +17,8 @@ go_library(
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@@ -64,9 +64,9 @@ go_test(
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
4 changes: 2 additions & 2 deletions pkg/scheduler/core/extender.go
@@ -31,7 +31,7 @@ import (
extenderv1 "k8s.io/kube-scheduler/extender/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
Member:

We don't need this alias in a pkg/scheduler package. The same goes for the rest of the imports that don't have collisions and are within pkg/scheduler.

Member Author:

This is a significant refactor, which I tried to automate; it would be too much work to pick out the places where the name collides and where it doesn't, and the return is not obvious. The alias is mostly used to distinguish it from the apimachinery one. We also use schedulerlisters, so this is not diverging too much.
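For context, a minimal sketch (not part of this diff; the alias names here are illustrative) of the collision the alias guards against when a file also imports apimachinery's types package:

	package example

	import (
		apimachinerytypes "k8s.io/apimachinery/pkg/types"
		schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
	)

	// Both packages are named "types"; without an alias on at least one of
	// them, the two imports would clash.
	var (
		podUID   apimachinerytypes.UID    // apimachinery's UID type
		nodeInfo *schedulertypes.NodeInfo // the renamed scheduler package
	)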

)

const (
@@ -287,7 +287,7 @@ func (h *HTTPExtender) convertToNodeToVictims(
// and extender, i.e. when the pod is not found in nodeInfo.Pods.
func (h *HTTPExtender) convertPodUIDToPod(
metaPod *extenderv1.MetaPod,
nodeInfo *schedulernodeinfo.NodeInfo) (*v1.Pod, error) {
nodeInfo *schedulertypes.NodeInfo) (*v1.Pod, error) {
for _, pod := range nodeInfo.Pods() {
if string(pod.UID) == metaPod.UID {
return pod, nil
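The collapsed tail of convertPodUIDToPod presumably handles the not-found case described in the doc comment above; a sketch, with hypothetical error wording (fmt assumed imported):

		}
	}
	return nil, fmt.Errorf("extender: pod with UID %q not found on node %q",
		metaPod.UID, nodeInfo.Node().Name)
}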
4 changes: 2 additions & 2 deletions pkg/scheduler/core/extender_test.go
@@ -41,9 +41,9 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/scheduler/util"
)

@@ -143,7 +143,7 @@ type FakeExtender struct {
ignorable bool

// Cached node information for fake extender
cachedNodeNameToInfo map[string]*schedulernodeinfo.NodeInfo
cachedNodeNameToInfo map[string]*schedulertypes.NodeInfo
}

func (f *FakeExtender) Name() string {
14 changes: 7 additions & 7 deletions pkg/scheduler/core/generic_scheduler.go
@@ -43,8 +43,8 @@ import (
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/profile"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/scheduler/util"
utiltrace "k8s.io/utils/trace"
)
@@ -524,7 +524,7 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v
// addNominatedPods adds pods with equal or greater priority which are nominated
// to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState,
// 3) augmented nodeInfo.
func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulernodeinfo.NodeInfo) (bool, *framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulertypes.NodeInfo) (bool, *framework.CycleState, *schedulertypes.NodeInfo, error) {
if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests.
return false, state, nodeInfo, nil
@@ -564,7 +564,7 @@ func (g *genericScheduler) podPassesFiltersOnNode(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
info *schedulernodeinfo.NodeInfo,
info *schedulertypes.NodeInfo,
) (bool, *framework.Status, error) {
var status *framework.Status

@@ -856,7 +856,7 @@ func (g *genericScheduler) selectNodesForPreemption(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
potentialNodes []*schedulernodeinfo.NodeInfo,
potentialNodes []*schedulertypes.NodeInfo,
pdbs []*policy.PodDisruptionBudget,
) (map[*v1.Node]*extenderv1.Victims, error) {
nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
@@ -946,7 +946,7 @@ func (g *genericScheduler) selectVictimsOnNode(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
nodeInfo *schedulernodeinfo.NodeInfo,
nodeInfo *schedulertypes.NodeInfo,
pdbs []*policy.PodDisruptionBudget,
) ([]*v1.Pod, int, bool) {
var potentialVictims []*v1.Pod
@@ -1034,8 +1034,8 @@

// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
// that may be satisfied by removing pods from the node.
func nodesWherePreemptionMightHelp(nodes []*schedulernodeinfo.NodeInfo, fitErr *FitError) []*schedulernodeinfo.NodeInfo {
var potentialNodes []*schedulernodeinfo.NodeInfo
func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *FitError) []*schedulertypes.NodeInfo {
var potentialNodes []*schedulertypes.NodeInfo
for _, node := range nodes {
name := node.Node().Name
// We rely on the status reported by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
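The collapsed loop body presumably skips nodes whose status is UnschedulableAndUnresolvable and keeps the rest; a sketch, assuming FitError exposes per-node statuses under FilteredNodesStatuses (a field the test below does reference):

		if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
			continue
		}
		potentialNodes = append(potentialNodes, node)
	}
	return potentialNodes
}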
25 changes: 12 additions & 13 deletions pkg/scheduler/core/generic_scheduler_test.go
@@ -57,10 +57,9 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

@@ -78,7 +77,7 @@ func (pl *trueFilterPlugin) Name() string {
}

// Filter invoked at the filter extension point.
func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
return nil
}

@@ -95,7 +94,7 @@ func (pl *falseFilterPlugin) Name() string {
}

// Filter invoked at the filter extension point.
func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}

@@ -112,7 +111,7 @@ func (pl *matchFilterPlugin) Name() string {
}

// Filter invoked at the filter extension point.
func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
@@ -136,7 +135,7 @@ func (pl *noPodsFilterPlugin) Name() string {
}

// Filter invoked at the filter extension point.
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if len(nodeInfo.Pods()) == 0 {
return nil
}
@@ -161,7 +160,7 @@ func (pl *fakeFilterPlugin) Name() string {
}

// Filter invoked at the filter extension point.
func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
atomic.AddInt32(&pl.numFilterCalled, 1)

if returnCode, ok := pl.failedNodeReturnCodeMap[nodeInfo.Node().Name]; ok {
@@ -2029,9 +2028,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
fitErr := FitError{
FilteredNodesStatuses: test.nodesStatuses,
}
var nodeInfos []*schedulernodeinfo.NodeInfo
var nodeInfos []*schedulertypes.NodeInfo
for _, n := range makeNodeList(nodeNames) {
ni := schedulernodeinfo.NewNodeInfo()
ni := schedulertypes.NewNodeInfo()
ni.SetNode(n)
nodeInfos = append(nodeInfos, ni)
}
@@ -2372,7 +2371,7 @@ func TestPreempt(t *testing.T) {
for _, pod := range test.pods {
cache.AddPod(pod)
}
cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{}
cachedNodeInfoMap := map[string]*schedulertypes.NodeInfo{}
nodeNames := defaultNodeNames
if len(test.nodeNames) != 0 {
nodeNames = test.nodeNames
@@ -2392,7 +2391,7 @@
nodeNames[i] = node.Name

// Set nodeInfo to extenders to mock extenders' cache for preemption.
cachedNodeInfo := schedulernodeinfo.NewNodeInfo()
cachedNodeInfo := schedulertypes.NewNodeInfo()
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[node.Name] = cachedNodeInfo
}
@@ -2571,8 +2570,8 @@ func TestFairEvaluationForNodes(t *testing.T) {
}
}

func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
var nodeInfos []*schedulernodeinfo.NodeInfo
func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulertypes.NodeInfo, error) {
var nodeInfos []*schedulertypes.NodeInfo
for _, n := range nodes {
nodeInfo, err := snapshot.NodeInfos().Get(n.Name)
if err != nil {
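The collapsed tail presumably propagates the lookup error and collects the infos; a sketch:

			return nil, err
		}
		nodeInfos = append(nodeInfos, nodeInfo)
	}
	return nodeInfos, nil
}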
4 changes: 2 additions & 2 deletions pkg/scheduler/factory_test.go
@@ -54,8 +54,8 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/profile"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

const (
@@ -593,6 +593,6 @@ func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}

func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
return nil
}
@@ -8,7 +8,7 @@ go_library(
deps = [
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
utilnode "k8s.io/kubernetes/pkg/util/node"
)

@@ -196,7 +196,7 @@ func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin
}

// countMatchingPods counts pods based on namespace and matching all selectors
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulertypes.NodeInfo) int {
if len(nodeInfo.Pods()) == 0 || selector.Empty() {
return 0
}
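The rest of the function is collapsed in the diff; a plausible sketch of the counting loop, assuming the standard labels.Selector matching API:

	count := 0
	for _, pod := range nodeInfo.Pods() {
		// Count only pods in the requested namespace whose labels satisfy the selector.
		if pod.Namespace == namespace && selector.Matches(labels.Set(pod.Labels)) {
			count++
		}
	}
	return count
}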
2 changes: 1 addition & 1 deletion pkg/scheduler/framework/plugins/imagelocality/BUILD
@@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/util/parsers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -24,7 +24,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/util/parsers"
)

@@ -94,7 +94,7 @@ func calculatePriority(sumScores int64) int64 {
// sumImageScores returns the sum of image scores of all the containers that are already on the node.
// Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate
// the final score. Note that init containers are not considered, since it is rare for users to deploy huge init containers.
func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
var sum int64
imageStates := nodeInfo.ImageStates()

@@ -111,7 +111,7 @@ func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Contai
// The size of the image is used as the base score, scaled by a factor which considers how many nodes the image has "spread" to.
// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or
// a few nodes due to image locality.
func scaledImageScore(imageState *schedulernodeinfo.ImageStateSummary, totalNumNodes int) int64 {
func scaledImageScore(imageState *schedulertypes.ImageStateSummary, totalNumNodes int) int64 {
spread := float64(imageState.NumNodes) / float64(totalNumNodes)
return int64(float64(imageState.Size) * spread)
}
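To make the scaling concrete, a worked example with invented numbers (only the Size and NumNodes fields shown in the diff are used):

	// A 700MB image cached on 2 of 10 nodes: spread = 2/10 = 0.2,
	// so the raw size is scaled down to roughly 140MB worth of score.
	state := &schedulertypes.ImageStateSummary{Size: 700 * 1024 * 1024, NumNodes: 2}
	score := scaledImageScore(state, 10) // 734003200 * 0.2 = 146800640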
4 changes: 2 additions & 2 deletions pkg/scheduler/framework/plugins/interpodaffinity/BUILD
@@ -13,7 +13,7 @@ go_library(
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -36,7 +36,7 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",