cloudflare_logpush_job: Add output_options support #3171

Merged
3 changes: 3 additions & 0 deletions .changelog/3171.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/cloudflare_logpush_job: Add `output_options` parameter
```
19 changes: 19 additions & 0 deletions docs/resources/logpush_job.md
@@ -125,13 +125,32 @@ resource "cloudflare_logpush_job" "example_job" {
- `max_upload_interval_seconds` (Number) The maximum interval in seconds for log batches. Value must be between 30 and 300.
- `max_upload_records` (Number) The maximum number of log lines per batch. Value must be between 1000 and 1,000,000.
- `name` (String) The name of the logpush job to create.
- `output_options` (Block List, Max: 1) Structured replacement for `logpull_options`. When including this field, the `logpull_options` field will be ignored. (see [below for nested schema](#nestedblock--output_options))
- `ownership_challenge` (String) Ownership challenge token to prove destination ownership, required when destination is Amazon S3, Google Cloud Storage, Microsoft Azure or Sumo Logic. See [Developer documentation](https://developers.cloudflare.com/logs/logpush/logpush-configuration-api/understanding-logpush-api/#usage).
- `zone_id` (String) The zone identifier to target for the resource. Must provide only one of `account_id`, `zone_id`.

### Read-Only

- `id` (String) The ID of this resource.

<a id="nestedblock--output_options"></a>
### Nested Schema for `output_options`

Optional:

- `batch_prefix` (String) String to be prepended before each batch.
- `batch_suffix` (String) String to be appended after each batch.
- `cve20214428` (Boolean) Mitigation for CVE-2021-44228. If set to true, all occurrences of `${` in the generated files will be replaced with `x{`. Defaults to `false`.
- `field_delimiter` (String) String to join fields. This field will be ignored when `record_template` is set. Defaults to `,`.
- `field_names` (List of String) List of field names to be included in the Logpush output.
- `output_type` (String) Specifies the output type. Available values: `ndjson`, `csv`. Defaults to `ndjson`.
- `record_delimiter` (String) String to be inserted in-between the records as separator.
- `record_prefix` (String) String to be prepended before each record. Defaults to `{`.
- `record_suffix` (String) String to be appended after each record. Defaults to `}`.
- `record_template` (String) String to use as template for each record instead of the default comma-separated list.
- `sample_rate` (Number) Specifies the sampling rate. Defaults to `1`.
- `timestamp_format` (String) Specifies the format for timestamps. Available values: `unixnano`, `unix`, `rfc3339`. Defaults to `unixnano`.
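
An illustrative example of the new block follows; the zone ID, destination, and field names are placeholders, not values taken from this PR:

```hcl
resource "cloudflare_logpush_job" "example_job" {
  zone_id = "0da42c8d2132a9ddaf714f9e7c920711" # placeholder zone ID
  name    = "example-http-requests-job"
  dataset = "http_requests"
  enabled = true

  # Placeholder S3 destination; per the argument reference above, S3
  # destinations also require `ownership_challenge`.
  destination_conf = "s3://example-logpush-bucket/http_requests/{DATE}?region=us-east-1"

  output_options {
    output_type      = "csv"
    field_names      = ["ClientIP", "EdgeStartTimestamp", "RayID"]
    field_delimiter  = ","
    timestamp_format = "rfc3339"
    sample_rate      = 0.5
    cve20214428      = true
  }
}
```

With `output_type = "csv"`, `field_names` controls which fields are included in each record; because `output_options` is present, any `logpull_options` value would be ignored.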

## Import

Import is supported using the following syntax:
69 changes: 69 additions & 0 deletions internal/sdkv2provider/resource_cloudflare_logpush_job.go
@@ -91,10 +91,33 @@ func resourceCloudflareLogpushJobRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
filter = string(b)
}

outputOptions := make(map[string]interface{})
if job.OutputOptions != nil {
data, err := json.Marshal(&job.OutputOptions)
if err != nil {
return diag.FromErr(fmt.Errorf("failed to extract output options: %w", err))
}
err = json.Unmarshal(data, &outputOptions)
if err != nil {
return diag.FromErr(fmt.Errorf("failed to extract output options: %w", err))
}
// mapping from the API to the schema:
// "CVE-2021-44228" -> "cve20214428"
// Terraform does not allow schema keys to be upper case or to contain dashes
if job.OutputOptions.CVE202144228 != nil {
delete(outputOptions, "CVE-2021-44228")
outputOptions["cve20214428"] = job.OutputOptions.CVE202144228
}
}

d.Set("name", job.Name)
d.Set("kind", job.Kind)
d.Set("enabled", job.Enabled)
d.Set("logpull_options", job.LogpullOptions)
err = d.Set("output_options", []map[string]interface{}{outputOptions})
if err != nil {
return diag.FromErr(fmt.Errorf("failed to set output_options: %w", err))
}
d.Set("dataset", job.Dataset)
d.Set("destination_conf", job.DestinationConf)
d.Set("ownership_challenge", d.Get("ownership_challenge"))
@@ -107,6 +130,36 @@ func resourceCloudflareLogpushJobRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
return nil
}

// converts the output_options state to the Cloudflare API struct representation.
func toAPIOutputOptions(outputOptionsState interface{}) (*cloudflare.LogpushOutputOptions, error) {
var jobOutputOptions cloudflare.LogpushOutputOptions

outputOptionsList, ok := outputOptionsState.([]interface{})
if !ok {
return nil, fmt.Errorf("failed to convert to []interface{}")
}
if len(outputOptionsList) < 1 {
return nil, fmt.Errorf("failed to extract logpush output options")
}
outputOptions := outputOptionsList[0].(map[string]interface{})
data, err := json.Marshal(outputOptions)
if err != nil {
return nil, fmt.Errorf("failed to extract logpush output options")
}
err = json.Unmarshal(data, &jobOutputOptions)
if err != nil {
return nil, fmt.Errorf("failed to extract logpush output options")
}
// mapping from the schema to the API:
// "cve20214428" -> "CVE-2021-44228"
// Terraform does not allow schema keys to be upper case or to contain dashes
cve20214428, ok := outputOptions["cve20214428"].(bool)
if ok {
jobOutputOptions.CVE202144228 = &cve20214428
}
return &jobOutputOptions, nil
}

func resourceCloudflareLogpushJobCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
client := meta.(*cloudflare.API)

@@ -136,6 +189,14 @@ func resourceCloudflareLogpushJobCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
MaxUploadIntervalSeconds: d.Get("max_upload_interval_seconds").(int),
}

outputOptions, ok := d.GetOk("output_options")
if ok {
job.OutputOptions, err = toAPIOutputOptions(outputOptions)
if err != nil {
return diag.FromErr(err)
}
}

filter := d.Get("filter")
if filter != "" {
var jobFilter cloudflare.LogpushJobFilters
@@ -201,6 +262,14 @@ func resourceCloudflareLogpushJobUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
MaxUploadIntervalSeconds: d.Get("max_upload_interval_seconds").(int),
}

outputOptions, ok := d.GetOk("output_options")
if ok {
job.OutputOptions, err = toAPIOutputOptions(outputOptions)
if err != nil {
return diag.FromErr(err)
}
}

filter := d.Get("filter")
if filter != "" {
var jobFilter cloudflare.LogpushJobFilters
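
Both the read path and `toAPIOutputOptions` above rely on the same JSON round-trip trick: marshal one representation and unmarshal it into the other so the struct's `json` tags do the field mapping, then patch the one key that cannot survive the trip, since Terraform schema keys may not be upper case or contain dashes. Below is a self-contained sketch of that round trip; the `OutputOptions` struct here is a trimmed stand-in for `cloudflare.LogpushOutputOptions`, not the real type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// OutputOptions mirrors a few fields of cloudflare.LogpushOutputOptions.
// The real struct lives in github.com/cloudflare/cloudflare-go.
type OutputOptions struct {
	CVE202144228 *bool   `json:"CVE-2021-44228,omitempty"`
	OutputType   string  `json:"output_type,omitempty"`
	SampleRate   float64 `json:"sample_rate,omitempty"`
}

func main() {
	// Schema-side state, keyed the way Terraform allows it.
	state := map[string]interface{}{
		"cve20214428": true, // schema-safe spelling of the CVE key
		"output_type": "csv",
		"sample_rate": 0.5,
	}

	// Round trip: map -> JSON -> struct. Keys matching json tags are mapped
	// automatically; "cve20214428" matches no tag and is silently dropped.
	var opts OutputOptions
	data, err := json.Marshal(state)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, &opts); err != nil {
		panic(err)
	}

	// Patch the CVE flag by hand, as toAPIOutputOptions does.
	if v, ok := state["cve20214428"].(bool); ok {
		opts.CVE202144228 = &v
	}

	out, _ := json.Marshal(opts)
	fmt.Println(string(out))
	// Output: {"CVE-2021-44228":true,"output_type":"csv","sample_rate":0.5}
}
```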
69 changes: 69 additions & 0 deletions internal/sdkv2provider/resource_cloudflare_logpush_job_test.go
@@ -0,0 +1,69 @@
package sdkv2provider

import (
"encoding/json"
"testing"

"github.com/cloudflare/cloudflare-go"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

func TestToAPIOutputOptions(t *testing.T) {
cve202144228 := true
testData := cloudflare.LogpushOutputOptions{
CVE202144228: &cve202144228,
BatchPrefix: "a",
BatchSuffix: "b",
FieldDelimiter: ",",
FieldNames: []string{"a", "b", "c"},
OutputType: "csv",
RecordDelimiter: "a",
RecordPrefix: "b",
RecordSuffix: "c",
RecordTemplate: "d",
SampleRate: 0.5,
TimestampFormat: "unix",
}
resourceDataMap := map[string]interface{}{
"output_options": []interface{}{
map[string]interface{}{
"cve20214428": *testData.CVE202144228,
"batch_prefix": testData.BatchPrefix,
"batch_suffix": testData.BatchSuffix,
"field_delimiter": testData.FieldDelimiter,
"field_names": []interface{}{testData.FieldNames[0], testData.FieldNames[1], testData.FieldNames[2]},
"output_type": testData.OutputType,
"record_delimiter": testData.RecordDelimiter,
"record_prefix": testData.RecordPrefix,
"record_suffix": testData.RecordSuffix,
"record_template": testData.RecordTemplate,
"sample_rate": testData.SampleRate,
"timestamp_format": testData.TimestampFormat,
},
},
}
resourceData := schema.TestResourceDataRaw(t, resourceCloudflareLogpushJobSchema(), resourceDataMap)
if resourceData == nil {
t.Fatal("failed to create test ResourceData")
}
outputOptions, ok := resourceData.GetOk("output_options")
if !ok {
t.Fatal("output_options not found")
}
output, err := toAPIOutputOptions(outputOptions)
if err != nil {
t.Fatal(err)
}
// compare output to the testData
testJSON, err := json.Marshal(testData)
if err != nil {
t.Fatal(err)
}
outJSON, err := json.Marshal(output)
if err != nil {
t.Fatal(err)
}
if string(testJSON) != string(outJSON) {
t.Fatalf("output and testData are not equal: %s != %s", string(outJSON), string(testJSON))
}
}
155 changes: 113 additions & 42 deletions internal/sdkv2provider/schema_cloudflare_logpush_job.go
@@ -10,6 +10,28 @@ import (
)

func resourceCloudflareLogpushJobSchema() map[string]*schema.Schema {
kindAllowedValues := []string{"edge", "instant-logs", ""}
datasetAllowedValues := []string{
"access_requests",
"casb_findings",
"firewall_events",
"http_requests",
"spectrum_events",
"nel_reports",
"audit_logs",
"gateway_dns",
"gateway_http",
"gateway_network",
"dns_logs",
"network_analytics_logs",
"workers_trace_events",
"device_posture_results",
"zero_trust_network_sessions",
"magic_ids_detections",
}
frequencyAllowedValues := []string{"high", "low"}
outputTypeAllowedValues := []string{"ndjson", "csv"}
timestampFormatAllowedValues := []string{"unixnano", "unix", "rfc3339"}
return map[string]*schema.Schema{
consts.AccountIDSchemaKey: {
Description: consts.AccountIDSchemaDescription,
@@ -31,8 +53,8 @@ func resourceCloudflareLogpushJobSchema() map[string]*schema.Schema {
"kind": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"edge", "instant-logs", ""}, false),
Description: fmt.Sprintf("The kind of logpush job to create. %s", renderAvailableDocumentationValuesStringSlice([]string{"edge", "instant-logs", `""`})),
ValidateFunc: validation.StringInSlice(kindAllowedValues, false),
Description: fmt.Sprintf("The kind of logpush job to create. %s", renderAvailableDocumentationValuesStringSlice(kindAllowedValues)),
},
"name": {
Type: schema.TypeString,
@@ -41,46 +63,12 @@ func resourceCloudflareLogpushJobSchema() map[string]*schema.Schema {
Description: "The name of the logpush job to create.",
},
"dataset": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{
"access_requests",
"casb_findings",
"firewall_events",
"http_requests",
"spectrum_events",
"nel_reports",
"audit_logs",
"gateway_dns",
"gateway_http",
"gateway_network",
"dns_logs",
"network_analytics_logs",
"workers_trace_events",
"device_posture_results",
"zero_trust_network_sessions",
"magic_ids_detections",
}, false),
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice(datasetAllowedValues, false),
Description: fmt.Sprintf(
"The kind of the dataset to use with the logpush job. %s",
renderAvailableDocumentationValuesStringSlice([]string{
"access_requests",
"casb_findings",
"firewall_events",
"http_requests",
"spectrum_events",
"nel_reports",
"audit_logs",
"gateway_dns",
"gateway_http",
"gateway_network",
"dns_logs",
"network_analytics_logs",
"workers_trace_events",
"device_posture_results",
"zero_trust_network_sessions",
"magic_ids_detections",
}),
renderAvailableDocumentationValuesStringSlice(datasetAllowedValues),
),
},
"logpull_options": {
@@ -107,8 +95,8 @@ func resourceCloudflareLogpushJobSchema() map[string]*schema.Schema {
Type: schema.TypeString,
Optional: true,
Default: "high",
ValidateFunc: validation.StringInSlice([]string{"high", "low"}, false),
Description: fmt.Sprintf("A higher frequency will result in logs being pushed on faster with smaller files. `low` frequency will push logs less often with larger files. %s", renderAvailableDocumentationValuesStringSlice([]string{"high", "low"})),
ValidateFunc: validation.StringInSlice(frequencyAllowedValues, false),
Description: fmt.Sprintf("A higher frequency will result in logs being pushed on faster with smaller files. `low` frequency will push logs less often with larger files. %s", renderAvailableDocumentationValuesStringSlice(frequencyAllowedValues)),
},
"max_upload_bytes": {
Type: schema.TypeInt,
@@ -128,5 +116,88 @@ func resourceCloudflareLogpushJobSchema() map[string]*schema.Schema {
ValidateFunc: validation.IntBetween(30, 300),
Description: fmt.Sprint("The maximum interval in seconds for log batches. Value must be between 30 and 300."),
},
"output_options": {
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
Description: "Structured replacement for logpull_options. When including this field, the logpull_option field will be ignored",
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cve20214428": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "Mitigation for CVE-2021-44228. If set to true, will cause all occurrences of ${ in the generated files to be replaced with x{",
},
"batch_prefix": {
Type: schema.TypeString,
Optional: true,
Description: "String to be prepended before each batch",
},
"batch_suffix": {
Type: schema.TypeString,
Optional: true,
Description: "String to be appended after each batch",
},
"field_delimiter": {
Type: schema.TypeString,
Optional: true,
Default: ",",
Description: "String to join fields. This field be ignored when record_template is set",
},
"field_names": {
Type: schema.TypeList,
Elem: &schema.Schema{
Type: schema.TypeString,
},
Optional: true,
Description: "List of field names to be included in the Logpush output",
},
"output_type": {
Type: schema.TypeString,
Optional: true,
Default: "ndjson",
ValidateFunc: validation.StringInSlice(outputTypeAllowedValues, false),
Description: fmt.Sprintf("Specifies the output type. %s", renderAvailableDocumentationValuesStringSlice(outputTypeAllowedValues)),
},
"record_delimiter": {
Type: schema.TypeString,
Optional: true,
Description: "String to be inserted in-between the records as separator",
},
"record_prefix": {
Type: schema.TypeString,
Optional: true,
Default: "{",
Description: "String to be prepended before each record",
},
"record_suffix": {
Type: schema.TypeString,
Optional: true,
Default: "}",
Description: "String to be appended after each record",
},
"record_template": {
Type: schema.TypeString,
Optional: true,
Description: "String to use as template for each record instead of the default comma-separated list",
},
"sample_rate": {
Type: schema.TypeFloat,
Optional: true,
Default: 1.0,
ValidateFunc: validation.FloatBetween(0.0, 1.0),
Description: "Specifies the sampling rate",
},
"timestamp_format": {
Type: schema.TypeString,
Optional: true,
Default: "unixnano",
ValidateFunc: validation.StringInSlice(timestampFormatAllowedValues, false),
Description: fmt.Sprintf("Specifies the format for timestamps. %s", renderAvailableDocumentationValuesStringSlice(timestampFormatAllowedValues)),
},
},
},
},
}
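
The refactor above hoists each set of allowed values into a named slice so that the validator and the generated documentation share a single source of truth. A minimal sketch of the pattern, with `strings.Join` standing in for the provider's internal `renderAvailableDocumentationValuesStringSlice` helper:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// frequencySchema builds the "frequency" attribute from one slice of allowed
// values, so the ValidateFunc and the Description cannot drift apart.
func frequencySchema() *schema.Schema {
	frequencyAllowedValues := []string{"high", "low"}
	return &schema.Schema{
		Type:         schema.TypeString,
		Optional:     true,
		Default:      "high",
		ValidateFunc: validation.StringInSlice(frequencyAllowedValues, false),
		Description: fmt.Sprintf("Frequency of log pushes. Available values: `%s`.",
			strings.Join(frequencyAllowedValues, "`, `")),
	}
}

func main() {
	fmt.Println(frequencySchema().Description)
	// Frequency of log pushes. Available values: `high`, `low`.
}
```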
}