
Commit

Add support for request
AnanyaKumar committed Aug 6, 2015
1 parent 9a3fb3c commit ef1e576
Showing 11 changed files with 153 additions and 23 deletions.
2 changes: 1 addition & 1 deletion api/swagger-spec/v1.json
@@ -11895,7 +11895,7 @@
 },
 "requests": {
 "type": "any",
-"description": "Minimum amount of resources requested; requests are honored only for persistent volumes as of now; see http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications"
+"description": "Minimum amount of resources requested; if Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value; see http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications"
 }
 }
 },
16 changes: 16 additions & 0 deletions pkg/api/testing/fuzzer.go
@@ -159,6 +159,22 @@ func FuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
 //q.Amount.SetScale(inf.Scale(-c.Intn(12)))
 q.Amount.SetUnscaled(c.Int63n(1000))
 },
+func(q *api.ResourceRequirements, c fuzz.Continue) {
+randomQuantity := func() resource.Quantity {
+return *resource.NewQuantity(c.Int63n(1000), resource.DecimalExponent)
+}
+q.Limits = make(api.ResourceList)
+q.Requests = make(api.ResourceList)
+cpuLimit := randomQuantity()
+q.Limits[api.ResourceCPU] = *cpuLimit.Copy()
+q.Requests[api.ResourceCPU] = *cpuLimit.Copy()
+memoryLimit := randomQuantity()
+q.Limits[api.ResourceMemory] = *memoryLimit.Copy()
+q.Requests[api.ResourceMemory] = *memoryLimit.Copy()
+storageLimit := randomQuantity()
+q.Limits[api.ResourceStorage] = *storageLimit.Copy()
+q.Requests[api.ResourceStorage] = *storageLimit.Copy()
+},
 func(p *api.PullPolicy, c fuzz.Continue) {
 policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
 *p = policies[c.Rand.Intn(len(policies))]
7 changes: 3 additions & 4 deletions pkg/api/types.go
@@ -682,12 +682,11 @@ type Capabilities struct {
 
 // ResourceRequirements describes the compute resource requirements.
 type ResourceRequirements struct {
-// Limits describes the maximum amount of compute resources required.
+// Limits describes the maximum amount of compute resources allowed.
 Limits ResourceList `json:"limits,omitempty"`
 // Requests describes the minimum amount of compute resources required.
-// Note: 'Requests' are honored only for Persistent Volumes as of now.
-// TODO: Update the scheduler to use 'Requests' in addition to 'Limits'. If Request is omitted for a container,
-// it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value
+// If Request is omitted for a container, it defaults to Limits if that is explicitly specified,
+// otherwise to an implementation-defined value
 Requests ResourceList `json:"requests,omitempty"`
 }
 
13 changes: 13 additions & 0 deletions pkg/api/v1/defaults.go
@@ -156,6 +156,19 @@ func addDefaultingFuncs() {
 obj.APIVersion = "v1"
 }
 },
+func(obj *ResourceRequirements) {
+// Set requests to limits if requests are not specified (but limits are).
+if obj.Limits != nil {
+if obj.Requests == nil {
+obj.Requests = make(ResourceList)
+}
+for key, value := range obj.Limits {
+if _, exists := obj.Requests[key]; !exists {
+obj.Requests[key] = *(value.Copy())
+}
+}
+}
+},
 )
 }
 
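To make the defaulting rule concrete, here is a minimal standalone sketch of the same copy-missing-keys behavior. It uses plain string maps instead of the real ResourceList and defaulting machinery, so the function name and types are illustrative only:

package main

import "fmt"

// defaultRequests illustrates the defaulting above: requests inherit any limit
// the user did not override, and explicit requests win.
func defaultRequests(limits, requests map[string]string) map[string]string {
    // Only act when limits are set, mirroring the obj.Limits != nil check above.
    if limits == nil {
        return requests
    }
    if requests == nil {
        requests = map[string]string{}
    }
    for key, value := range limits {
        if _, exists := requests[key]; !exists {
            requests[key] = value
        }
    }
    return requests
}

func main() {
    limits := map[string]string{"cpu": "500m", "memory": "128Mi"}
    requests := map[string]string{"cpu": "250m"} // memory request left unset
    fmt.Println(defaultRequests(limits, requests))
    // prints map[cpu:250m memory:128Mi]: the explicit cpu request is kept,
    // the missing memory request is copied from its limit
}

This defaulting only runs server-side for newly created v1 objects; the kubelet change further down repeats the CPU case for containers that predate it.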
9 changes: 4 additions & 5 deletions pkg/api/v1/types.go
@@ -656,13 +656,12 @@ type Capabilities struct {
 
 // ResourceRequirements describes the compute resource requirements.
 type ResourceRequirements struct {
-// Limits describes the maximum amount of compute resources required.
+// Limits describes the maximum amount of compute resources allowed.
 Limits ResourceList `json:"limits,omitempty" description:"Maximum amount of compute resources allowed; see http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications"`
 // Requests describes the minimum amount of compute resources required.
-// Note: 'Requests' are honored only for Persistent Volumes as of now.
-// TODO: Update the scheduler to use 'Requests' in addition to 'Limits'. If Request is omitted for a container,
-// it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value
-Requests ResourceList `json:"requests,omitempty" description:"Minimum amount of resources requested; requests are honored only for persistent volumes as of now; see http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications"`
+// If Requests is omitted for a container, it defaults to Limits if that is explicitly specified,
+// otherwise to an implementation-defined value.
+Requests ResourceList `json:"requests,omitempty" description:"Minimum amount of resources requested; if Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value; see http://releases.k8s.io/HEAD/docs/design/resources.md#resource-specifications"`
 }
 
 const (
25 changes: 19 additions & 6 deletions pkg/api/validation/validation.go
@@ -1490,19 +1490,32 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements) errs.V
 allErrs := errs.ValidationErrorList{}
 for resourceName, quantity := range requirements.Limits {
 // Validate resource name.
-errs := validateResourceName(resourceName.String(), fmt.Sprintf("resources.limits[%s]", resourceName))
+allErrs = append(allErrs, validateResourceName(resourceName.String(), fmt.Sprintf("resources.limits[%s]", resourceName))...)
 if api.IsStandardResourceName(resourceName.String()) {
-errs = append(errs, validateBasicResource(quantity).Prefix(fmt.Sprintf("Resource %s: ", resourceName))...)
+allErrs = append(allErrs, validateBasicResource(quantity).Prefix(fmt.Sprintf("Resource %s: ", resourceName))...)
 }
+// Check that request <= limit.
+requestQuantity, exists := requirements.Requests[resourceName]
+if exists {
+var requestValue, limitValue int64
+requestValue = requestQuantity.Value()
+limitValue = quantity.Value()
+// Do a more precise comparison if possible (if the value won't overflow).
+if requestValue <= resource.MaxMilliValue && limitValue <= resource.MaxMilliValue {
+requestValue = requestQuantity.MilliValue()
+limitValue = quantity.MilliValue()
+}
+if limitValue < requestValue {
+allErrs = append(allErrs, errs.NewFieldInvalid(fmt.Sprintf("resources.limits[%s]", resourceName), quantity.String(), "limit cannot be smaller than request"))
+}
+}
-allErrs = append(allErrs, errs...)
 }
 for resourceName, quantity := range requirements.Requests {
 // Validate resource name.
-errs := validateResourceName(resourceName.String(), fmt.Sprintf("resources.requests[%s]", resourceName))
+allErrs = append(allErrs, validateResourceName(resourceName.String(), fmt.Sprintf("resources.requests[%s]", resourceName))...)
 if api.IsStandardResourceName(resourceName.String()) {
-errs = append(errs, validateBasicResource(quantity).Prefix(fmt.Sprintf("Resource %s: ", resourceName))...)
+allErrs = append(allErrs, validateBasicResource(quantity).Prefix(fmt.Sprintf("Resource %s: ", resourceName))...)
 }
-allErrs = append(allErrs, errs...)
 }
 return allErrs
 }
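Why the two-step comparison above matters: Quantity.Value() reports whole units (rounding fractions up, to my understanding), so a sub-unit violation such as a 500m request against a 400m limit would compare as 1 vs 1 and slip through, while MilliValue() could overflow for very large quantities, hence the resource.MaxMilliValue guard. A small standalone sketch of the effect, with ceilUnits standing in for the assumed Value() behavior:

package main

import "fmt"

// ceilUnits stands in for Quantity.Value(), assumed to round up to whole units.
func ceilUnits(milli int64) int64 {
    return (milli + 999) / 1000
}

func main() {
    requestMilli := int64(500) // 500m requested
    limitMilli := int64(400)   // only 400m allowed

    // Whole-unit comparison: both round up to 1, so the violation is missed.
    fmt.Println(ceilUnits(limitMilli) < ceilUnits(requestMilli)) // false

    // Milli-precision comparison catches it, which is what the validation does when safe.
    fmt.Println(limitMilli < requestMilli) // true, i.e. "limit cannot be smaller than request"
}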
78 changes: 78 additions & 0 deletions pkg/api/validation/validation_test.go
@@ -844,6 +844,62 @@ func TestValidateContainers(t *testing.T) {
},
ImagePullPolicy: "IfNotPresent",
},
{
Name: "resources-request-limit-simple",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("8"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
},
},
ImagePullPolicy: "IfNotPresent",
},
{
Name: "resources-request-limit-edge",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
api.ResourceName("my.org/resource"): resource.MustParse("10m"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
api.ResourceName("my.org/resource"): resource.MustParse("10m"),
},
},
ImagePullPolicy: "IfNotPresent",
},
{
Name: "resources-request-limit-partials",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("9.5"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
Limits: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName("my.org/resource"): resource.MustParse("10m"),
},
},
ImagePullPolicy: "IfNotPresent",
},
{
Name: "resources-request",
Image: "image",
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("9.5"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
},
},
ImagePullPolicy: "IfNotPresent",
},
{
Name: "same-host-port-different-protocol",
Image: "image",
@@ -1011,6 +1067,28 @@
ImagePullPolicy: "IfNotPresent",
},
},
"Request limit simple invalid": {
{
Name: "abc-123",
Image: "image",
Resources: api.ResourceRequirements{
Limits: getResourceLimits("5", "3"),
Requests: getResourceLimits("6", "3"),
},
ImagePullPolicy: "IfNotPresent",
},
},
"Request limit multiple invalid": {
{
Name: "abc-123",
Image: "image",
Resources: api.ResourceRequirements{
Limits: getResourceLimits("5", "3"),
Requests: getResourceLimits("6", "4"),
},
ImagePullPolicy: "IfNotPresent",
},
},
}
for k, v := range errorCases {
if errs := validateContainers(v, volumes); len(errs) == 0 {
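The error cases above rely on a getResourceLimits helper defined elsewhere in this test file; presumably it builds a two-entry ResourceList from cpu and memory strings, roughly:

// Sketch of the getResourceLimits helper used by the error cases (not shown in
// this hunk); the exact implementation may differ.
func getResourceLimits(cpu, memory string) api.ResourceList {
    res := api.ResourceList{}
    res[api.ResourceCPU] = resource.MustParse(cpu)
    res[api.ResourceMemory] = resource.MustParse(memory)
    return res
}

Under that reading, the "Request limit simple invalid" case fails because its 6-CPU request exceeds the 5-CPU limit, and the "multiple invalid" case also requests more memory (4) than its limit (3).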
14 changes: 13 additions & 1 deletion pkg/kubelet/dockertools/manager.go
@@ -579,7 +579,19 @@ func (dm *DockerManager) runContainer(
 }
 }
 memoryLimit := container.Resources.Limits.Memory().Value()
-cpuShares := milliCPUToShares(container.Resources.Limits.Cpu().MilliValue())
+cpuRequest := container.Resources.Requests.Cpu()
+cpuLimit := container.Resources.Limits.Cpu()
+var cpuShares int64
+// If request is not specified, but limit is, we want request to default to limit.
+// API server does this for new containers, but we repeat this logic in Kubelet
+// for containers running on existing Kubernetes clusters.
+if cpuRequest.Amount == nil && cpuLimit.Amount != nil {
+cpuShares = milliCPUToShares(cpuLimit.MilliValue())
+} else {
+// if cpuRequest.Amount is nil, then milliCPUToShares will return the minimal number
+// of CPU shares.
+cpuShares = milliCPUToShares(cpuRequest.MilliValue())
+}
 dockerOpts := docker.CreateContainerOptions{
 Name: BuildDockerName(dockerName, container),
 Config: &docker.Config{
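For context on the comment about the minimal number of CPU shares: milliCPUToShares converts a milli-CPU value into Linux cgroup CPU shares and clamps the result to a small floor, so a nil request (whose MilliValue() is 0) still yields a usable value. A standalone sketch under the usual cgroup conventions, 1024 shares per CPU with a minimum of 2; the real helper may differ in detail:

package main

import "fmt"

const (
    minShares     = 2    // assumed floor so an unspecified request is not starved
    sharesPerCPU  = 1024 // assumed convention: one full CPU equals 1024 shares
    milliCPUToCPU = 1000
)

// milliCPUToShares mirrors the conversion the kubelet is assumed to perform above.
func milliCPUToShares(milliCPU int64) int64 {
    if milliCPU == 0 {
        // No request (or an explicit zero): hand out the minimum share count.
        return minShares
    }
    shares := (milliCPU * sharesPerCPU) / milliCPUToCPU
    if shares < minShares {
        return minShares
    }
    return shares
}

func main() {
    fmt.Println(milliCPUToShares(0))    // 2
    fmt.Println(milliCPUToShares(250))  // 256
    fmt.Println(milliCPUToShares(1000)) // 1024
}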
2 changes: 1 addition & 1 deletion pkg/kubelet/kubelet_test.go
@@ -2100,7 +2100,7 @@ func TestHandleMemExceeded(t *testing.T) {
 testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
 
 spec := api.PodSpec{Containers: []api.Container{{Resources: api.ResourceRequirements{
-Limits: api.ResourceList{
+Requests: api.ResourceList{
 "memory": resource.MustParse("90"),
 },
 }}}}
8 changes: 4 additions & 4 deletions plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -107,10 +107,10 @@ type resourceRequest struct {
 
 func getResourceRequest(pod *api.Pod) resourceRequest {
 result := resourceRequest{}
-for ix := range pod.Spec.Containers {
-limits := pod.Spec.Containers[ix].Resources.Limits
-result.memory += limits.Memory().Value()
-result.milliCPU += limits.Cpu().MilliValue()
+for _, container := range pod.Spec.Containers {
+requests := container.Resources.Requests
+result.memory += requests.Memory().Value()
+result.milliCPU += requests.Cpu().MilliValue()
 }
 return result
 }
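With this change the scheduler sizes a pod by the sum of its containers' requests rather than their limits. A standalone sketch of that accounting with plain types (the real code works on api.Pod and resource.Quantity):

package main

import "fmt"

type container struct {
    requestMilliCPU int64
    requestMemory   int64 // bytes
}

type pod struct {
    containers []container
}

// getResourceRequest sums per-container requests, mirroring the function above.
func getResourceRequest(p pod) (milliCPU, memory int64) {
    for _, c := range p.containers {
        milliCPU += c.requestMilliCPU
        memory += c.requestMemory
    }
    return
}

func main() {
    p := pod{containers: []container{
        {requestMilliCPU: 250, requestMemory: 64 << 20},  // 250m CPU, 64Mi
        {requestMilliCPU: 500, requestMemory: 128 << 20}, // 500m CPU, 128Mi
    }}
    cpu, mem := getResourceRequest(p)
    fmt.Println(cpu, mem) // 750 201326592
}

Together with the defaulting earlier in this commit, containers that only set limits are scheduled as if they had requested those same amounts.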
@@ -59,7 +59,7 @@ func newResourcePod(usage ...resourceRequest) *api.Pod {
 for _, req := range usage {
 containers = append(containers, api.Container{
 Resources: api.ResourceRequirements{
-Limits: api.ResourceList{
+Requests: api.ResourceList{
 api.ResourceCPU: *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
 api.ResourceMemory: *resource.NewQuantity(req.memory, resource.BinarySI),
 },
