use structs for processing resources (#99)
trial an approach of using a typed struct instead of `types.Object`.

at first glance this seems more ergonomic, but I feel like I remember
there being a reason why we don't already do this.
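For context, here is a minimal sketch (not code from this repo) of the two patterns the message refers to, assuming terraform-plugin-framework's reflection behavior: an opaque `types.Object` that each caller null-checks and decodes with `As()`, versus a `tfsdk`-tagged struct pointer that the framework populates directly and leaves nil when the nested block is null. The `memoryModel`/`resourceModel*` names and the `decodeMemory` helper are illustrative only; the `request`/`limit` field names match the schema in the diff below.

```go
package sketch

import (
	"context"

	"github.com/hashicorp/terraform-plugin-framework/diag"
	"github.com/hashicorp/terraform-plugin-framework/types"
	"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
)

// Typed view of the nested "memory" block; the tfsdk tags mirror the schema.
type memoryModel struct {
	Request types.String `tfsdk:"request"`
	Limit   types.String `tfsdk:"limit"`
}

// Before: the model holds the nested attribute as an opaque types.Object ...
type resourceModelBefore struct {
	Memory types.Object `tfsdk:"memory"`
}

// ... so every caller has to null-check and decode it by hand.
func decodeMemory(ctx context.Context, data resourceModelBefore) (memoryModel, diag.Diagnostics) {
	var mem memoryModel
	if data.Memory.IsNull() {
		return mem, nil
	}
	diags := data.Memory.As(ctx, &mem, basetypes.ObjectAsOptions{})
	return mem, diags
}

// After: the framework's reflection fills the typed struct straight from the
// plan/state; a nil pointer stands in for a null nested block.
type resourceModelAfter struct {
	Memory *memoryModel `tfsdk:"memory"`
}
```

The diff below applies the second pattern to `ContainerResources` and moves all quantity parsing into a single `ParseResources` helper.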
joshrwolf committed May 14, 2024
1 parent 072b9f7 commit 4b3c026
Showing 8 changed files with 95 additions and 115 deletions.
1 change: 1 addition & 0 deletions docs/resources/harness_docker.md
@@ -104,6 +104,7 @@ Optional:

Optional:

- `limit` (String) Unused.
- `request` (String) Quantity of CPUs requested for the harness container


5 changes: 3 additions & 2 deletions docs/resources/harness_k3s.md
@@ -104,7 +104,8 @@ Optional:

Optional:

- `request` (String) Quantity of CPUs requested for the harness container
- `limit` (String) Limit of CPUs the harness container can consume
- `request` (String) Quantity of CPUs requested for the harness container


<a id="nestedatt--resources--memory"></a>
@@ -113,7 +114,7 @@ Optional:
Optional:

- `limit` (String) Limit of memory the harness container can consume
- `request` (String) Amount of memory requested for the harness container
- `request` (String) Amount of memory requested for the harness container. The default is the bare minimum required by k3s. Anything lower should be used with caution.



1 change: 0 additions & 1 deletion internal/containers/provider/docker.go
@@ -179,7 +179,6 @@ func (p *DockerProvider) Start(ctx context.Context) error {
Resources: container.Resources{
MemoryReservation: p.req.Resources.MemoryRequest.Value(),
Memory: p.req.Resources.MemoryLimit.Value(),

// mirroring what's done in Docker CLI: https://github.com/docker/cli/blob/0ad1d55b02910f4b40462c0d01aac2934eb0f061/cli/command/container/update.go#L117
NanoCPUs: p.req.Resources.CpuRequest.Value(),
},
74 changes: 36 additions & 38 deletions internal/provider/harness.go
@@ -48,8 +48,8 @@ type RegistryResourceTlsModel struct {
}

type ContainerResources struct {
Memory types.Object `tfsdk:"memory"`
Cpu types.Object `tfsdk:"cpu"`
Memory *ContainerMemoryResources `tfsdk:"memory"`
Cpu *ContainerCpuResources `tfsdk:"cpu"`
}

type ContainerMemoryResources struct {
@@ -59,6 +59,7 @@ type ContainerMemoryResources struct {

type ContainerCpuResources struct {
Request types.String `tfsdk:"request"`
Limit types.String `tfsdk:"limit"`
}

func (r *HarnessResource) Configure(ctx context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
@@ -165,54 +166,51 @@ func (r *HarnessResource) ShouldSkip(ctx context.Context, req resource.CreateReq
return skip
}

// ParseMemoryResources parses memory resources for harnesses, returning a ContainerResourcesRequest object if parsing
// is successful or an error if values are not parseable.
func ParseMemoryResources(memoryResources ContainerMemoryResources, resourceRequests *provider.ContainerResourcesRequest) error {
var memoryRequest, memoryLimit string
if !memoryResources.Request.IsNull() {
memoryRequest = memoryResources.Request.ValueString()
// ParseResources parses the ContainerResources object into a provider.ContainerResourcesRequest object.
func ParseResources(resources *ContainerResources) (provider.ContainerResourcesRequest, error) {
req := provider.ContainerResourcesRequest{}

parsedMemoryRequest, err := kresource.ParseQuantity(memoryRequest)
if err != nil {
return fmt.Errorf("failed to parse memory request: %w", err)
}
resourceRequests.MemoryRequest = parsedMemoryRequest
if resources == nil {
return req, nil
}

if memoryResources.Limit.IsNull() {
if memoryRequest != "" {
memoryLimit = memoryRequest
if resources.Memory != nil {
if resources.Memory.Request.ValueString() != "" {
q, err := kresource.ParseQuantity(resources.Memory.Request.ValueString())
if err != nil {
return req, fmt.Errorf("failed to parse memory request: %w", err)
}
req.MemoryRequest = q
}
} else {
memoryLimit = memoryResources.Limit.ValueString()
}

if memoryLimit != "" {
parsedMemoryLimit, err := kresource.ParseQuantity(memoryLimit)
if err != nil {
return fmt.Errorf("failed to parse memory limit: %w", err)
if resources.Memory.Limit.ValueString() != "" {
q, err := kresource.ParseQuantity(resources.Memory.Limit.ValueString())
if err != nil {
return req, fmt.Errorf("failed to parse memory limit: %w", err)
}
req.MemoryLimit = q
}
resourceRequests.MemoryLimit = parsedMemoryLimit
}

return nil
}

// ParseCpuResources parses memory resources for harnesses, returning a ContainerResourcesRequest object if parsing
// is successful or an error if values are not parseable.
func ParseCpuResources(cpuResources ContainerCpuResources, resourceRequests *provider.ContainerResourcesRequest) error {
var cpuRequest string
if !cpuResources.Request.IsNull() {
cpuRequest = cpuResources.Request.ValueString()
if resources.Cpu != nil {
if resources.Cpu.Request.ValueString() != "" {
q, err := kresource.ParseQuantity(resources.Cpu.Request.ValueString())
if err != nil {
return req, fmt.Errorf("failed to parse cpu request: %w", err)
}
req.CpuRequest = q
}

parsedCpuRequest, err := kresource.ParseQuantity(cpuRequest)
if err != nil {
return fmt.Errorf("failed to parse cpu request: %w", err)
if resources.Cpu.Limit.ValueString() != "" {
q, err := kresource.ParseQuantity(resources.Cpu.Limit.ValueString())
if err != nil {
return req, fmt.Errorf("failed to parse cpu limit: %w", err)
}
req.CpuLimit = q
}
resourceRequests.CpuRequest = parsedCpuRequest
}

return nil
return req, nil
}

// AddHarnessSchemaAttributes adds common attributes to the given map. values
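As a usage illustration only (not part of this commit), a unit test for the new helper might look like the following. It assumes it sits in `internal/provider` next to `ParseResources`, and the quantities echo the ones used in the acceptance tests further down.

```go
package provider

import (
	"testing"

	"github.com/hashicorp/terraform-plugin-framework/types"
)

func TestParseResourcesSketch(t *testing.T) {
	// A nil *ContainerResources is valid and yields an empty request.
	if _, err := ParseResources(nil); err != nil {
		t.Fatalf("nil resources: %v", err)
	}

	res := &ContainerResources{
		Memory: &ContainerMemoryResources{
			Request: types.StringValue("524288000"), // 500Mi
			Limit:   types.StringValue("1Gi"),
		},
		Cpu: &ContainerCpuResources{
			Request: types.StringValue("1"),
		},
	}

	req, err := ParseResources(res)
	if err != nil {
		t.Fatal(err)
	}

	if req.MemoryRequest.Value() != 524288000 {
		t.Errorf("unexpected memory request: %s", req.MemoryRequest.String())
	}
	if req.CpuRequest.Value() != 1 {
		t.Errorf("unexpected cpu request: %s", req.CpuRequest.String())
	}
}
```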
46 changes: 11 additions & 35 deletions internal/provider/harness_docker_resource.go
@@ -20,7 +20,6 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
)

const (
@@ -58,7 +57,7 @@ type HarnessDockerResourceModel struct {
Mounts []ContainerResourceMountModel `tfsdk:"mounts"`
Networks map[string]ContainerResourceModelNetwork `tfsdk:"networks"`
Registries map[string]DockerRegistryResourceModel `tfsdk:"registries"`
Resources types.Object `tfsdk:"resources"`
Resources *ContainerResources `tfsdk:"resources"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
}

@@ -141,41 +140,14 @@ func (r *HarnessDockerResource) Create(ctx context.Context, req resource.CreateR
networks = data.Networks
}

if !data.Resources.IsNull() {
var resources ContainerResources
resp.Diagnostics.Append(data.Resources.As(ctx, &resources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
return
}

var memoryResources *ContainerMemoryResources
resp.Diagnostics.Append(resources.Memory.As(ctx, &memoryResources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
return
}

var cpuResources *ContainerCpuResources
resp.Diagnostics.Append(resources.Cpu.As(ctx, &cpuResources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
if res := data.Resources; res != nil {
rreq, err := ParseResources(res)
if err != nil {
resp.Diagnostics.AddError("failed to parse resources", err.Error())
return
}

var resourceRequests provider.ContainerResourcesRequest
if memoryResources != nil {
if err2 := ParseMemoryResources(*memoryResources, &resourceRequests); err2 != nil {
resp.Diagnostics.AddError("failed to parse resources", err2.Error())
return
}
}

if cpuResources != nil {
if err2 := ParseCpuResources(*cpuResources, &resourceRequests); err2 != nil {
resp.Diagnostics.AddError("failed to parse resources", err2.Error())
return
}
}

opts = append(opts, docker.WithContainerResources(resourceRequests))
log.Info(ctx, "Setting resources for docker harness", "cpu_limit", rreq.CpuLimit.String(), "cpu_request", rreq.CpuRequest.String(), "memory_limit", rreq.MemoryLimit.String(), "memory_request", rreq.MemoryRequest.String())
opts = append(opts, docker.WithContainerResources(rreq))
}

if r.store.providerResourceData.Harnesses != nil {
@@ -432,6 +404,10 @@ func addDockerResourceSchemaAttributes() map[string]schema.Attribute {
Optional: true,
Description: "Quantity of CPUs requested for the harness container",
},
"limit": schema.StringAttribute{
Optional: true,
Description: "Unused.",
},
},
},
},
14 changes: 13 additions & 1 deletion internal/provider/harness_docker_resource_test.go
@@ -220,7 +220,19 @@ resource "imagetest_harness_docker" "test" {
inventory = data.imagetest_inventory.this
resources = {
memory = { request = "2Gi" }
cpu = {
request = "1"
}
memory = {
request = "524288000" # 500Mi
}
}
provisioner "local-exec" {
command = <<EOF
docker inspect ${self.id} | jq '.[0].HostConfig.NanoCpus' | grep 1
docker inspect ${self.id} | jq '.[0].HostConfig.MemoryReservation' | grep 524288000
EOF
}
}
54 changes: 18 additions & 36 deletions internal/provider/harness_k3s_resource.go
@@ -19,6 +19,7 @@ import (
"github.com/hashicorp/terraform-plugin-framework/resource"
"github.com/hashicorp/terraform-plugin-framework/resource/schema"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault"
"github.com/hashicorp/terraform-plugin-framework/resource/schema/stringdefault"
"github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-framework/types/basetypes"
)
@@ -59,7 +60,7 @@ type HarnessK3sResourceModel struct {
Networks map[string]ContainerResourceModelNetwork `tfsdk:"networks"`
Sandbox types.Object `tfsdk:"sandbox"`
Timeouts timeouts.Value `tfsdk:"timeouts"`
Resources types.Object `tfsdk:"resources"`
Resources *ContainerResources `tfsdk:"resources"`
}

type RegistryResourceModel struct {
@@ -239,41 +240,14 @@ func (r *HarnessK3sResource) Create(ctx context.Context, req resource.CreateRequ

kopts = append(kopts, k3s.WithNetworks(networks...))

if !data.Resources.IsNull() {
var resources ContainerResources
resp.Diagnostics.Append(data.Resources.As(ctx, &resources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
return
}

var memoryResources *ContainerMemoryResources
resp.Diagnostics.Append(resources.Memory.As(ctx, &memoryResources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
return
}

var cpuResources *ContainerCpuResources
resp.Diagnostics.Append(resources.Cpu.As(ctx, &cpuResources, basetypes.ObjectAsOptions{})...)
if resp.Diagnostics.HasError() {
if res := data.Resources; res != nil {
rreq, err := ParseResources(res)
if err != nil {
resp.Diagnostics.AddError("failed to parse resources", err.Error())
return
}

var resourceRequests provider.ContainerResourcesRequest
if memoryResources != nil {
if err2 := ParseMemoryResources(*memoryResources, &resourceRequests); err2 != nil {
resp.Diagnostics.AddError("failed to parse resources", err2.Error())
return
}
}

if cpuResources != nil {
if err2 := ParseCpuResources(*cpuResources, &resourceRequests); err2 != nil {
resp.Diagnostics.AddError("failed to parse resources", err2.Error())
return
}
}

kopts = append(kopts, k3s.WithResources(resourceRequests))
log.Info(ctx, "Setting resources for k3s harness", "cpu_limit", rreq.CpuLimit.String(), "cpu_request", rreq.CpuRequest.String(), "memory_limit", rreq.MemoryLimit.String(), "memory_request", rreq.MemoryRequest.String())
kopts = append(kopts, k3s.WithResources(rreq))
}

id := data.Id.ValueString()
@@ -472,7 +446,9 @@ func defaultK3sHarnessResourceSchemaAttributes() map[string]schema.Attribute {
Attributes: map[string]schema.Attribute{
"request": schema.StringAttribute{
Optional: true,
Description: "Amount of memory requested for the harness container",
Description: "Amount of memory requested for the harness container. The default is the bare minimum required by k3s. Anything lower should be used with caution.",
Default: stringdefault.StaticString("2Gi"),
Computed: true,
},
"limit": schema.StringAttribute{
Optional: true,
@@ -485,7 +461,13 @@ func defaultK3sHarnessResourceSchemaAttributes() map[string]schema.Attribute {
Attributes: map[string]schema.Attribute{
"request": schema.StringAttribute{
Optional: true,
Description: "Quantity of CPUs requested for the harness container",
Description: "Amount of memory requested for the harness container",
Default: stringdefault.StaticString("1"),
Computed: true,
},
"limit": schema.StringAttribute{
Optional: true,
Description: "Limit of memory the harness container can consume",
},
},
},
15 changes: 13 additions & 2 deletions internal/provider/harness_k3s_resource_test.go
@@ -155,11 +155,22 @@ resource "imagetest_harness_k3s" "test" {
}
resources = {
cpu = {
request = "1"
}
memory = {
request = "256Mi"
limit = "1Gi"
request = "524288000" # 500Mi
limit = "524288001"
}
}
provisioner "local-exec" {
command = <<EOF
docker inspect ${self.id} | jq '.[0].HostConfig.NanoCpus' | grep 1
docker inspect ${self.id} | jq '.[0].HostConfig.MemoryReservation' | grep 524288000
docker inspect ${self.id} | jq '.[0].HostConfig.Memory' | grep 524288001
EOF
}
}
resource "imagetest_feature" "test" {
