Added test file for cmd/armadactl/cmd/cancel_test.go #2418

Closed
wants to merge 9 commits
55 changes: 55 additions & 0 deletions cmd/armadactl/cmd/cancel_test.go
@@ -0,0 +1,55 @@
package cmd

import (
"io"
"testing"

"github.com/spf13/cobra"
"github.com/stretchr/testify/require"

"github.com/armadaproject/armada/internal/armadactl"
)

func TestCancel(t *testing.T) {
tests := map[string]struct {
Flags []flag
jobId string
queue string
jobSet string
}{
"default flags": {nil, "", "", ""},
"valid jobId": {[]flag{{"jobId", "jobId1"}}, "jobId1", "", ""},
"valid queue": {[]flag{{"queue", "queue1,jobSet1"}}, "", "queue1", "jobSet1"},
"valid jobSet": {[]flag{{"jobSet", "jobSet1"}}, "", "", "jobSet1"},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
a := armadactl.New()
cmd := cancelCmd()

cmd.PreRunE = func(cmd *cobra.Command, args []string) error {
a.Out = io.Discard

if len(test.jobId) > 0 {
jobIdFlag, err1 := cmd.Flags().GetString("jobId")
require.NoError(t, err1)
require.Equal(t, test.jobId, jobIdFlag)
}
if len(test.queue) > 0 {
queueFlag, err1 := cmd.Flags().GetString("queue")
jobSetFlag, err2 := cmd.Flags().GetString("jobSet")
require.NoError(t, err1)
require.NoError(t, err2)
require.Equal(t, test.queue, queueFlag)
require.Equal(t, test.jobSet, jobSetFlag)
}
if len(test.jobSet) > 0 {
jobSetFlag, err1 := cmd.Flags().GetString("jobSet")
require.NoError(t, err1)
require.Equal(t, test.jobSet, jobSetFlag)
}
return nil
}
})
}
}
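
Note that cobra only invokes PreRunE when the command is actually executed, so as the diff stands the assertions above never run and test.Flags is never consumed. A minimal sketch of the missing wiring, to go at the end of each t.Run closure; it assumes the package-level flag helper is a struct with name and value fields and that stubbing RunE is acceptable (neither is confirmed by this diff):

	cmd.RunE = func(cmd *cobra.Command, args []string) error { return nil } // stub so Execute never contacts a real server
	cmd.SetArgs([]string{}) // keep cobra from parsing the go test process arguments
	for _, f := range test.Flags {
		require.NoError(t, cmd.Flags().Set(f.name, f.value)) // f.name / f.value are assumed field names
	}
	require.NoError(t, cmd.Execute())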
71 changes: 46 additions & 25 deletions internal/executor/fake/context/context.go
@@ -49,32 +49,37 @@ var DefaultNodeSpec = []*NodeSpec{
},
}

+type nodeAllocation struct {
+	availableResource armadaresource.ComputeResources
+	allocatedPods     map[string]bool
+}

type FakeClusterContext struct {
-	clusterId             string
-	nodeIdLabel           string
-	pool                  string
-	podEventHandlers      []*cache.ResourceEventHandlerFuncs
-	clusterEventHandlers  []*cache.ResourceEventHandlerFuncs
-	rwLock                sync.RWMutex
-	pods                  map[string]*v1.Pod
-	events                map[string]*v1.Event
-	nodes                 []*v1.Node
-	nodesByNodeId         map[string]*v1.Node
-	nodeAvailableResource map[string]armadaresource.ComputeResources
+	clusterId            string
+	nodeIdLabel          string
+	pool                 string
+	podEventHandlers     []*cache.ResourceEventHandlerFuncs
+	clusterEventHandlers []*cache.ResourceEventHandlerFuncs
+	rwLock               sync.RWMutex
+	pods                 map[string]*v1.Pod
+	events               map[string]*v1.Event
+	nodes                []*v1.Node
+	nodesByNodeId        map[string]*v1.Node
+	nodeAllocation       map[string]nodeAllocation
}

func NewFakeClusterContext(appConfig configuration.ApplicationConfiguration, nodeIdLabel string, nodeSpecs []*NodeSpec) cluster_context.ClusterContext {
if nodeIdLabel == "" {
panic("nodeIdLabel must be set")
}
c := &FakeClusterContext{
-		clusterId:             appConfig.ClusterId,
-		nodeIdLabel:           nodeIdLabel,
-		pool:                  appConfig.Pool,
-		pods:                  map[string]*v1.Pod{},
-		nodes:                 []*v1.Node{},
-		nodesByNodeId:         map[string]*v1.Node{},
-		nodeAvailableResource: map[string]armadaresource.ComputeResources{},
+		clusterId:      appConfig.ClusterId,
+		nodeIdLabel:    nodeIdLabel,
+		pool:           appConfig.Pool,
+		pods:           map[string]*v1.Pod{},
+		nodes:          []*v1.Node{},
+		nodesByNodeId:  map[string]*v1.Node{},
+		nodeAllocation: map[string]nodeAllocation{},
}
if nodeSpecs == nil {
nodeSpecs = DefaultNodeSpec
@@ -296,6 +301,7 @@ func (c *FakeClusterContext) DeletePods(pods []*v1.Pod) {

for _, p := range pods {
delete(c.pods, p.Name)
+			c.deallocateNoLock(p)
}
}()
}
@@ -336,7 +342,10 @@ func (c *FakeClusterContext) addNodes(specs []*NodeSpec) {
}
c.nodes = append(c.nodes, node)
c.nodesByNodeId[name] = node
-			c.nodeAvailableResource[node.Name] = armadaresource.FromResourceList(s.Allocatable)
+			c.nodeAllocation[name] = nodeAllocation{
+				allocatedPods:     map[string]bool{},
+				availableResource: armadaresource.FromResourceList(s.Allocatable),
+			}
}
}
}
@@ -365,8 +374,8 @@ func (c *FakeClusterContext) trySchedule(pod *v1.Pod) (scheduled bool, removed bool) {
sort.Slice(nodes, func(i, j int) bool {
node1 := c.nodes[i]
node2 := c.nodes[j]
-		node1Resource := c.nodeAvailableResource[node1.Name]
-		node2Resource := c.nodeAvailableResource[node2.Name]
+		node1Resource := c.nodeAllocation[node1.Name].availableResource
+		node2Resource := c.nodeAllocation[node2.Name].availableResource

// returns true if node1 should be considered before node2
return node2Resource.Dominates(node1Resource)
@@ -375,7 +384,8 @@
for _, n := range nodes {
if c.isSchedulableOn(pod, n) {
resources := armadaresource.TotalPodResourceRequest(&pod.Spec)
-			c.nodeAvailableResource[n.Name].Sub(resources)
+			c.nodeAllocation[n.Name].availableResource.Sub(resources)
+			c.nodeAllocation[n.Name].allocatedPods[pod.Name] = true
pod.Spec.NodeName = n.Name
return true, false
}
@@ -387,13 +397,24 @@ func (c *FakeClusterContext) deallocate(pod *v1.Pod) {
c.rwLock.Lock()
defer c.rwLock.Unlock()

-	resources := armadaresource.TotalPodResourceRequest(&pod.Spec)
-	c.nodeAvailableResource[pod.Spec.NodeName].Add(resources)
+	c.deallocateNoLock(pod)
}

+func (c *FakeClusterContext) deallocateNoLock(pod *v1.Pod) {
+	if pod.Spec.NodeName == "" {
+		return
+	}
+
+	if c.nodeAllocation[pod.Spec.NodeName].allocatedPods[pod.Name] {
+		resources := armadaresource.TotalPodResourceRequest(&pod.Spec)
+		c.nodeAllocation[pod.Spec.NodeName].availableResource.Add(resources)
+		delete(c.nodeAllocation[pod.Spec.NodeName].allocatedPods, pod.Name)
+	}
+}

func (c *FakeClusterContext) isSchedulableOn(pod *v1.Pod, n *v1.Node) bool {
requiredResource := armadaresource.TotalPodResourceRequest(&pod.Spec)
-	availableResource := c.nodeAvailableResource[n.Name].DeepCopy()
+	availableResource := c.nodeAllocation[n.Name].availableResource.DeepCopy()
availableResource.Sub(requiredResource)

// resources
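
The core of the context.go change is that each node now tracks not only its remaining capacity but also which pods are currently charged against it, so deallocation becomes idempotent: DeletePods and deallocate can both run for the same pod without the node's available resources being credited twice. A self-contained sketch of that guard pattern (simplified types, not the executor's real API):

	package main

	import "fmt"

	// nodeAlloc mirrors the diff's nodeAllocation: remaining capacity plus the
	// set of pods currently allocated on the node.
	type nodeAlloc struct {
		availableMilliCPU int64
		allocatedPods     map[string]bool
	}

	func (n *nodeAlloc) allocate(pod string, milliCPU int64) {
		n.availableMilliCPU -= milliCPU
		n.allocatedPods[pod] = true
	}

	// deallocate only credits the node while the pod is still recorded, so a
	// second call for the same pod is a no-op rather than a double credit.
	func (n *nodeAlloc) deallocate(pod string, milliCPU int64) {
		if n.allocatedPods[pod] {
			n.availableMilliCPU += milliCPU
			delete(n.allocatedPods, pod)
		}
	}

	func main() {
		node := &nodeAlloc{availableMilliCPU: 4000, allocatedPods: map[string]bool{}}
		node.allocate("pod-a", 1000)
		node.deallocate("pod-a", 1000)
		node.deallocate("pod-a", 1000) // duplicate delete path: no effect
		fmt.Println(node.availableMilliCPU) // prints 4000, not 5000
	}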