New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Use NodeWrapper to directly initialize nodes with labels #92514
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,7 +19,8 @@ package testing | |
import ( | ||
"fmt" | ||
|
||
"k8s.io/api/core/v1" | ||
v1 "k8s.io/api/core/v1" | ||
"k8s.io/apimachinery/pkg/api/resource" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/types" | ||
) | ||
|
@@ -362,7 +363,8 @@ type NodeWrapper struct{ v1.Node } | |
|
||
// MakeNode creates a Node wrapper. | ||
func MakeNode() *NodeWrapper { | ||
return &NodeWrapper{v1.Node{}} | ||
w := &NodeWrapper{v1.Node{}} | ||
return w.Capacity(nil) | ||
} | ||
|
||
// Obj returns the inner Node. | ||
|
@@ -390,3 +392,28 @@ func (n *NodeWrapper) Label(k, v string) *NodeWrapper { | |
n.Labels[k] = v | ||
return n | ||
} | ||
|
||
// Capacity sets the capacity and the allocatable resources of the inner node. | ||
// Each entry in `resources` corresponds to a resource name and its quantity. | ||
// By default, the capacity and allocatable number of pods are set to 32. | ||
func (n *NodeWrapper) Capacity(resources map[v1.ResourceName]string) *NodeWrapper { | ||
res := v1.ResourceList{ | ||
v1.ResourcePods: resource.MustParse("32"), | ||
} | ||
for name, value := range resources { | ||
res[name] = resource.MustParse(value) | ||
} | ||
n.Status.Capacity, n.Status.Allocatable = res, res | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Would be good to ensure this. FYI: I put it in https://github.com/kubernetes/kubernetes/pull/92571/files?file-filters%5B%5D=.go#diff-1cea28cd0be3cdbab57f5dc287dc98c0R427-R438 |
||
return n | ||
} | ||
|
||
// Images sets the images of the inner node. Each entry in `images` corresponds | ||
// to an image name and its size in bytes. | ||
func (n *NodeWrapper) Images(images map[string]int64) *NodeWrapper { | ||
var containerImages []v1.ContainerImage | ||
for name, size := range images { | ||
containerImages = append(containerImages, v1.ContainerImage{Names: []string{name}, SizeBytes: size}) | ||
} | ||
n.Status.Images = containerImages | ||
return n | ||
} |
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -22,7 +22,7 @@ import ( | |
"testing" | ||
"time" | ||
|
||
"k8s.io/api/core/v1" | ||
v1 "k8s.io/api/core/v1" | ||
"k8s.io/apimachinery/pkg/api/resource" | ||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | ||
"k8s.io/apimachinery/pkg/runtime" | ||
|
@@ -33,6 +33,7 @@ import ( | |
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" | ||
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime" | ||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" | ||
st "k8s.io/kubernetes/pkg/scheduler/testing" | ||
testutils "k8s.io/kubernetes/test/integration/util" | ||
) | ||
|
||
|
@@ -1130,7 +1131,7 @@ func TestBindPlugin(t *testing.T) { | |
defer testutils.CleanupTest(t, testCtx) | ||
|
||
// Add a few nodes. | ||
_, err := createNodes(testCtx.ClientSet, "test-node", nil, 2) | ||
_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), 2) | ||
if err != nil { | ||
t.Fatalf("Cannot create nodes: %v", err) | ||
} | ||
|
@@ -1776,12 +1777,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) { | |
defer testutils.CleanupTest(t, testCtx) | ||
|
||
// Add one node. | ||
nodeRes := &v1.ResourceList{ | ||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI), | ||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), | ||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI), | ||
nodeRes := map[v1.ResourceName]string{ | ||
v1.ResourcePods: "32", | ||
v1.ResourceCPU: "500m", | ||
v1.ResourceMemory: "500", | ||
} | ||
_, err := createNodes(testCtx.ClientSet, "test-node", nodeRes, 1) | ||
_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode().Capacity(nodeRes), 1) | ||
if err != nil { | ||
t.Fatalf("Cannot create nodes: %v", err) | ||
} | ||
|
@@ -1841,7 +1842,7 @@ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestCont | |
go testCtx.Scheduler.Run(testCtx.Ctx) | ||
|
||
if nodeCount > 0 { | ||
_, err := createNodes(testCtx.ClientSet, "test-node", nil, nodeCount) | ||
_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), nodeCount) | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Shouldn't MakeNode call `Capacity`? There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Totally! My bad, let me fix it. |
||
if err != nil { | ||
t.Fatalf("Cannot create nodes: %v", err) | ||
} | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Indicate that it sets a 32-pod limit by default.