Feature: AWS Batch Job Definition now creates a new version instead of ForceNew #34281

7 changes: 7 additions & 0 deletions .changelog/34281.txt
@@ -0,0 +1,7 @@
```release-note:enhancement
resource/aws_batch_job_definition: Changes to the resource now register a new revision and update `revision` in place instead of forcing recreation. Added new `scheduling_priority` and `arn_prefix` attributes.
```

```release-note:note
resource/aws_batch_job_definition: Deleting the resource now deregisters all active revisions of the job definition matching the job name, not only the revision tracked in state.
```
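
In API terms, the enhancement maps an in-place update to a register-then-deregister sequence against AWS Batch. A minimal aws-sdk-go sketch of that flow (the `registerNewRevision` helper, its arguments, and the package name are illustrative, not code from this PR):

```go
package batchexample

import (
	"context"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/batch"
)

// registerNewRevision registers an updated revision of a Batch job definition
// and then deregisters the previous revision, mirroring the behavior described
// in the release notes above. previousARN has the form
// "arn:aws:batch:<region>:<account>:job-definition/<name>:<revision>".
func registerNewRevision(ctx context.Context, conn *batch.Batch, input *batch.RegisterJobDefinitionInput, previousARN string) (string, error) {
	out, err := conn.RegisterJobDefinitionWithContext(ctx, input)
	if err != nil {
		return "", err
	}

	// The new revision is ACTIVE as soon as registration succeeds, so the
	// previous revision can be retired without a gap in job submission.
	if _, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
		JobDefinition: aws.String(previousARN),
	}); err != nil {
		return "", err
	}

	return aws.StringValue(out.JobDefinitionArn), nil
}
```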
188 changes: 159 additions & 29 deletions internal/service/batch/job_definition.go
@@ -30,6 +30,11 @@ import (
"github.com/hashicorp/terraform-provider-aws/names"
)

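// Job definition status values as reported by the Batch DescribeJobDefinitions API.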
const (
jobDefinitionStatusInactive = "INACTIVE"
jobDefinitionStatusActive = "ACTIVE"
)

// @SDKResource("aws_batch_job_definition", name="Job Definition")
// @Tags(identifierAttribute="arn")
func ResourceJobDefinition() *schema.Resource {
@@ -47,10 +52,16 @@ func ResourceJobDefinition() *schema.Resource {
Type: schema.TypeString,
Computed: true,
},
"container_properties": {

"arn_prefix": {
Type: schema.TypeString,
Optional: true,
ForceNew: true,
Computed: true,
},

"container_properties": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"node_properties"},
StateFunc: func(v interface{}) string {
json, _ := structure.NormalizeJsonString(v)
return json
@@ -62,16 +73,23 @@
},
ValidateFunc: validJobContainerProperties,
},

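// Terraform-only flag; never sent to the Batch API. When true, a successful update deregisters the previous job definition revision.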
"deregister_on_new_revision": {
Type: schema.TypeBool,
Default: true,
Optional: true,
},

"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validName,
},
"node_properties": {
Type: schema.TypeString,
Optional: true,
ConflictsWith: []string{"container_properties"},
StateFunc: func(v interface{}) string {
json, _ := structure.NormalizeJsonString(v)
return json
@@ -85,76 +103,75 @@
"parameters": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},

// If the job runs on Amazon EKS resources, then you must not specify platformCapabilities.
"platform_capabilities": {
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{
Type: schema.TypeString,
ValidateFunc: validation.StringInSlice(batch.PlatformCapability_Values(), false),
},
},

// If the job runs on Amazon EKS resources, then you must not specify propagateTags.
"propagate_tags": {
Type: schema.TypeBool,
Optional: true,
Default: false,
},

"retry_strategy": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attempts": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntBetween(1, 10),
},

"evaluate_on_exit": {
Type: schema.TypeList,
Optional: true,
MinItems: 0,
MaxItems: 5,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"action": {
Type: schema.TypeString,
Required: true,
StateFunc: func(v interface{}) string {
return strings.ToLower(v.(string))
},
ValidateFunc: validation.StringInSlice(batch.RetryAction_Values(), true),
},

"on_exit_code": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9]*\*?$`), "must contain only numbers, and can optionally end with an asterisk"),
),
},

"on_reason": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z.:\s]*\*?$`), "must contain letters, numbers, periods, colons, and white space, and can optionally end with an asterisk"),
),
},

"on_status_reason": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.All(
validation.StringLenBetween(1, 512),
validation.StringMatch(regexache.MustCompile(`^[0-9A-Za-z.:\s]*\*?$`), "must contain letters, numbers, periods, colons, and white space, and can optionally end with an asterisk"),
@@ -166,33 +183,39 @@ func ResourceJobDefinition() *schema.Resource {
},
},
},

"revision": {
Type: schema.TypeInt,
Computed: true,
},

"scheduling_priority": {
Type: schema.TypeInt,
Optional: true,
},

names.AttrTags: tftags.TagsSchema(),
names.AttrTagsAll: tftags.TagsSchemaComputed(),

"timeout": {
Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"attempt_duration_seconds": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(60),
},
},
},
},

"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice(batch.JobDefinitionType_Values(), true),
},
},

@@ -265,6 +288,10 @@ func resourceJobDefinitionCreate(ctx context.Context, d *schema.ResourceData, me
input.Timeout = expandJobTimeout(v.([]interface{})[0].(map[string]interface{}))
}

if v, ok := d.GetOk("scheduling_priority"); ok {
input.SchedulingPriority = aws.Int64(int64(v.(int)))
}

output, err := conn.RegisterJobDefinitionWithContext(ctx, input)

if err != nil {
@@ -293,6 +320,7 @@ func resourceJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta
}

d.Set("arn", jobDefinition.JobDefinitionArn)
d.Set("arn_prefix", strings.TrimSuffix(*jobDefinition.JobDefinitionArn, fmt.Sprintf(":%d", *jobDefinition.Revision)))

containerProperties, err := flattenContainerProperties(jobDefinition.ContainerProperties)

@@ -304,6 +332,8 @@ func resourceJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta
return sdkdiag.AppendErrorf(diags, "setting container_properties: %s", err)
}

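// deregister_on_new_revision exists only in configuration, so carry the configured value through rather than reading it from the API.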
d.Set("deregister_on_new_revision", d.Get("deregister_on_new_revision").(bool))

nodeProperties, err := flattenNodeProperties(jobDefinition.NodeProperties)

if err != nil {
@@ -339,14 +369,92 @@ func resourceJobDefinitionRead(ctx context.Context, d *schema.ResourceData, meta

d.Set("revision", jobDefinition.Revision)
d.Set("type", jobDefinition.Type)
d.Set("scheduling_priority", jobDefinition.SchedulingPriority)

return diags
}

func resourceJobDefinitionUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
var diags diag.Diagnostics
conn := meta.(*conns.AWSClient).BatchConn(ctx)

if d.HasChangesExcept("tags", "tags_all") {
name := d.Get("name").(string)
input := &batch.RegisterJobDefinitionInput{
JobDefinitionName: aws.String(name),
Type: aws.String(d.Get("type").(string)),
}

if v, ok := d.GetOk("container_properties"); ok {
props, err := expandJobContainerProperties(v.(string))
if err != nil {
return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
}

if aws.StringValue(input.Type) == batch.JobDefinitionTypeContainer {
removeEmptyEnvironmentVariables(&diags, props.Environment, cty.GetAttrPath("container_properties"))
input.ContainerProperties = props
}
}

if v, ok := d.GetOk("node_properties"); ok {
props, err := expandJobNodeProperties(v.(string))
if err != nil {
return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
}

for _, node := range props.NodeRangeProperties {
removeEmptyEnvironmentVariables(&diags, node.Container.Environment, cty.GetAttrPath("node_properties"))
}
input.NodeProperties = props
}

if v, ok := d.GetOk("propagate_tags"); ok {
input.PropagateTags = aws.Bool(v.(bool))
}

if v, ok := d.GetOk("parameters"); ok {
input.Parameters = expandJobDefinitionParameters(v.(map[string]interface{}))
}

if v, ok := d.GetOk("platform_capabilities"); ok && v.(*schema.Set).Len() > 0 {
input.PlatformCapabilities = flex.ExpandStringSet(v.(*schema.Set))
}

if v, ok := d.GetOk("scheduling_priority"); ok {
input.SchedulingPriority = aws.Int64(int64(v.(int)))
}

if v, ok := d.GetOk("retry_strategy"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
input.RetryStrategy = expandRetryStrategy(v.([]interface{})[0].(map[string]interface{}))
}

if v, ok := d.GetOk("timeout"); ok && len(v.([]interface{})) > 0 && v.([]interface{})[0] != nil {
input.Timeout = expandJobTimeout(v.([]interface{})[0].(map[string]interface{}))
}

jd, err := conn.RegisterJobDefinitionWithContext(ctx, input)

if err != nil {
return sdkdiag.AppendErrorf(diags, "updating Batch Job Definition (%s): %s", name, err)
}

// The ARN embeds the revision and serves as the resource ID for Read; capture the old ARN before overwriting it so the previous revision can be deregistered below.
currentARN := d.Get("arn").(string)
d.SetId(aws.StringValue(jd.JobDefinitionArn))
d.Set("revision", jd.Revision)

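// With the new revision registered and the ID updated, optionally retire the previous revision.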
if v := d.Get("deregister_on_new_revision"); v == true {
log.Printf("[DEBUG] Deleting Previous Batch Job Definition: %s", currentARN)
_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
JobDefinition: aws.String(currentARN),
})

if err != nil {
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", currentARN, err)
}
}
}

return append(diags, resourceJobDefinitionRead(ctx, d, meta)...)
}
@@ -355,22 +463,29 @@ func resourceJobDefinitionDelete(ctx context.Context, d *schema.ResourceData, me
var diags diag.Diagnostics
conn := meta.(*conns.AWSClient).BatchConn(ctx)

log.Printf("[DEBUG] Deleting Batch Job Definition: %s", d.Id())
_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
JobDefinition: aws.String(d.Id()),
})
name := d.Get("name").(string)
jds, err := ListActiveJobDefinitionByName(ctx, conn, name)

if err != nil {
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", d.Id(), err)
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definitions (%s): %s", name, err)
}

for i := range jds {
arn := aws.StringValue(jds[i].JobDefinitionArn)
log.Printf("[DEBUG] Deleting Batch Job Definition: %s", arn)
_, err := conn.DeregisterJobDefinitionWithContext(ctx, &batch.DeregisterJobDefinitionInput{
JobDefinition: aws.String(arn),
})

if err != nil {
return sdkdiag.AppendErrorf(diags, "deleting Batch Job Definition (%s): %s", arn, err)
}
}

return diags
}

func FindJobDefinitionByARN(ctx context.Context, conn *batch.Batch, arn string) (*batch.JobDefinition, error) {
input := &batch.DescribeJobDefinitionsInput{
JobDefinitions: aws.StringSlice([]string{arn}),
}
@@ -409,6 +524,21 @@ func findJobDefinition(ctx context.Context, conn *batch.Batch, input *batch.Desc
return output.JobDefinitions[0], nil
}

func ListActiveJobDefinitionByName(ctx context.Context, conn *batch.Batch, name string) ([]*batch.JobDefinition, error) {
input := &batch.DescribeJobDefinitionsInput{
JobDefinitionName: aws.String(name),
Status: aws.String(jobDefinitionStatusActive),
}

output, err := conn.DescribeJobDefinitionsWithContext(ctx, input)

if err != nil {
return nil, err
}

return output.JobDefinitions, nil
}
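
Note that DescribeJobDefinitions is a paginated API, and the helper above reads only the first page of results. A paginated variant within this file might look like the following sketch (the name `listActiveJobDefinitionsByName` is hypothetical):

```go
func listActiveJobDefinitionsByName(ctx context.Context, conn *batch.Batch, name string) ([]*batch.JobDefinition, error) {
	input := &batch.DescribeJobDefinitionsInput{
		JobDefinitionName: aws.String(name),
		Status:            aws.String(jobDefinitionStatusActive),
	}

	var jds []*batch.JobDefinition

	// Accumulate every page so active revisions beyond the first page are not missed.
	err := conn.DescribeJobDefinitionsPagesWithContext(ctx, input, func(page *batch.DescribeJobDefinitionsOutput, lastPage bool) bool {
		jds = append(jds, page.JobDefinitions...)
		return !lastPage
	})
	if err != nil {
		return nil, err
	}

	return jds, nil
}
```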

func validJobContainerProperties(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
_, err := expandJobContainerProperties(value)