From b8ac984f37eb8648f643ed09e68c0b6ad362d916 Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Tue, 23 Jan 2018 14:25:54 -0800 Subject: [PATCH 1/3] resource/aws_kinesis_firehose: add splunk configuration: --- ...ce_aws_kinesis_firehose_delivery_stream.go | 181 +++++++++++++++++- ...s_kinesis_firehose_delivery_stream_test.go | 125 ++++++++++-- 2 files changed, 288 insertions(+), 18 deletions(-) diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go b/aws/resource_aws_kinesis_firehose_delivery_stream.go index ce4e74c13d4b..cfe67019a2b4 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -301,6 +301,22 @@ func flattenKinesisFirehoseDeliveryStream(d *schema.ResourceData, s *firehose.De elasticsearchConfList[0] = elasticsearchConfiguration d.Set("elasticsearch_configuration", elasticsearchConfList) d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.ElasticsearchDestinationDescription.S3DestinationDescription)) + } else if destination.SplunkDestinationDescription != nil { + d.Set("destination", "splunk") + + splunkConfiguration := map[string]interface{}{ + "hec_acknowledgment_timeout": *destination.SplunkDestinationDescription.HECAcknowledgmentTimeoutInSeconds, + "hec_endpoint": *destination.SplunkDestinationDescription.HECEndpoint, + "hec_endpoint_type": *destination.SplunkDestinationDescription.HECEndpointType, + "hec_token": *destination.SplunkDestinationDescription.HECToken, + "s3_backup_mode": *destination.SplunkDestinationDescription.S3BackupMode, + "retry_duration": *destination.SplunkDestinationDescription.RetryOptions.DurationInSeconds, + "cloudwatch_logging_options": flattenCloudwatchLoggingOptions(*destination.SplunkDestinationDescription.CloudWatchLoggingOptions), + } + splunkConfList := make([]map[string]interface{}, 1) + splunkConfList[0] = splunkConfiguration + d.Set("splunk_configuration", splunkConfList) + d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.SplunkDestinationDescription.S3DestinationDescription)) } else if d.Get("destination").(string) == "s3" { d.Set("destination", "s3") d.Set("s3_configuration", flattenFirehoseS3Configuration(*destination.S3DestinationDescription)) @@ -404,9 +420,9 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { }, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "s3" && value != "extended_s3" && value != "redshift" && value != "elasticsearch" { + if value != "s3" && value != "extended_s3" && value != "redshift" && value != "elasticsearch" && value != "splunk" { errors = append(errors, fmt.Errorf( - "%q must be one of 's3', 'extended_s3', 'redshift', 'elasticsearch'", k)) + "%q must be one of 's3', 'extended_s3', 'redshift', 'elasticsearch', 'splunk'", k)) } return }, @@ -653,6 +669,85 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { }, }, + "splunk_configuration": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "hec_acknowledgment_timeout": { + Type: schema.TypeInt, + Optional: true, + Default: 180, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 180 || value > 600 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 180 to 600 seconds.", k)) + } + return + }, + }, + + "hec_endpoint": { + Type: schema.TypeString, + Required: true, + }, + + 
"hec_endpoint_type": { + Type: schema.TypeString, + Optional: true, + Default: "Raw", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "Raw" && value != "Event" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'Raw', 'Event'", k)) + } + return + }, + }, + + "hec_token": { + Type: schema.TypeString, + Required: true, + }, + + "s3_backup_mode": { + Type: schema.TypeString, + Optional: true, + Default: "FailedEventsOnly", + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(string) + if value != "FailedEventsOnly" && value != "AllEvents" { + errors = append(errors, fmt.Errorf( + "%q must be one of 'FailedEventsOnly', 'AllEvents'", k)) + } + return + }, + }, + + "retry_duration": { + Type: schema.TypeInt, + Optional: true, + Default: 3600, + ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { + value := v.(int) + if value < 0 || value > 7200 { + errors = append(errors, fmt.Errorf( + "%q must be in the range from 0 to 7200 seconds.", k)) + } + return + }, + }, + + "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(), + + "processing_configuration": processingConfigurationSchema(), + }, + }, + }, + "arn": { Type: schema.TypeString, Optional: true, @@ -1052,6 +1147,62 @@ func updateElasticsearchConfig(d *schema.ResourceData, s3Update *firehose.S3Dest return update, nil } +func createSplunkConfig(d *schema.ResourceData, s3Config *firehose.S3DestinationConfiguration) (*firehose.SplunkDestinationConfiguration, error) { + splunkRaw, ok := d.GetOk("splunk_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Splunk Configuration for Kinesis Firehose: splunk_configuration not found") + } + sl := splunkRaw.([]interface{}) + + splunk := sl[0].(map[string]interface{}) + + configuration := &firehose.SplunkDestinationConfiguration{ + HECToken: aws.String(splunk["hec_token"].(string)), + HECEndpointType: aws.String(splunk["hec_endpoint_type"].(string)), + HECEndpoint: aws.String(splunk["hec_endpoint"].(string)), + HECAcknowledgmentTimeoutInSeconds: aws.Int64(int64(splunk["hec_acknowledgment_timeout"].(int))), + RetryOptions: extractSplunkRetryOptions(splunk), + S3Configuration: s3Config, + } + + if _, ok := splunk["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(splunk) + } + if s3BackupMode, ok := splunk["s3_backup_mode"]; ok { + configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + } + + return configuration, nil +} + +func updateSplunkConfig(d *schema.ResourceData, s3Update *firehose.S3DestinationUpdate) (*firehose.SplunkDestinationUpdate, error) { + splunkRaw, ok := d.GetOk("splunk_configuration") + if !ok { + return nil, fmt.Errorf("[ERR] Error loading Splunk Configuration for Kinesis Firehose: splunk_configuration not found") + } + sl := splunkRaw.([]interface{}) + + splunk := sl[0].(map[string]interface{}) + + configuration := &firehose.SplunkDestinationUpdate{ + HECToken: aws.String(splunk["hec_token"].(string)), + HECEndpointType: aws.String(splunk["hec_endpoint_type"].(string)), + HECEndpoint: aws.String(splunk["hec_endpoint"].(string)), + HECAcknowledgmentTimeoutInSeconds: aws.Int64(int64(splunk["hec_acknowledgment_timeout"].(int))), + RetryOptions: extractSplunkRetryOptions(splunk), + S3Update: s3Update, + } + + if _, ok := splunk["cloudwatch_logging_options"]; ok { + configuration.CloudWatchLoggingOptions = extractCloudWatchLoggingConfiguration(splunk) 
+ } + if s3BackupMode, ok := splunk["s3_backup_mode"]; ok { + configuration.S3BackupMode = aws.String(s3BackupMode.(string)) + } + + return configuration, nil +} + func extractBufferingHints(es map[string]interface{}) *firehose.ElasticsearchBufferingHints { bufferingHints := &firehose.ElasticsearchBufferingHints{} @@ -1085,6 +1236,16 @@ func extractRedshiftRetryOptions(redshift map[string]interface{}) *firehose.Reds return retryOptions } +func extractSplunkRetryOptions(splunk map[string]interface{}) *firehose.SplunkRetryOptions { + retryOptions := &firehose.SplunkRetryOptions{} + + if retryDuration, ok := splunk["retry_duration"].(int); ok { + retryOptions.DurationInSeconds = aws.Int64(int64(retryDuration)) + } + + return retryOptions +} + func extractCopyCommandConfiguration(redshift map[string]interface{}) *firehose.CopyCommand { cmd := &firehose.CopyCommand{ DataTableName: aws.String(redshift["data_table_name"].(string)), @@ -1136,12 +1297,18 @@ func resourceAwsKinesisFirehoseDeliveryStreamCreate(d *schema.ResourceData, meta return err } createInput.ElasticsearchDestinationConfiguration = esConfig - } else { + } else if d.Get("destination").(string) == "redshift" { rc, err := createRedshiftConfig(d, s3Config) if err != nil { return err } createInput.RedshiftDestinationConfiguration = rc + } else if d.Get("destination").(string) == "splunk" { + rc, err := createSplunkConfig(d, s3Config) + if err != nil { + return err + } + createInput.SplunkDestinationConfiguration = rc } } @@ -1258,12 +1425,18 @@ func resourceAwsKinesisFirehoseDeliveryStreamUpdate(d *schema.ResourceData, meta return err } updateInput.ElasticsearchDestinationUpdate = esUpdate - } else { + } else if d.Get("destination").(string) == "redshift" { rc, err := updateRedshiftConfig(d, s3Config) if err != nil { return err } updateInput.RedshiftDestinationUpdate = rc + } else if d.Get("destination").(string) == "splunk" { + rc, err := updateSplunkConfig(d, s3Config) + if err != nil { + return err + } + updateInput.SplunkDestinationUpdate = rc } } diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go index 4f1d80074a96..1abbac90a11e 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream_test.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream_test.go @@ -30,7 +30,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, }, @@ -52,7 +52,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3KinesisStreamSource(t *testing.T) Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, }, @@ -72,7 +72,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3WithCloudwatchLogging(t *testing. 
Config: testAccKinesisFirehoseDeliveryStreamConfig_s3WithCloudwatchLogging(ri), Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, }, @@ -104,7 +104,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, @@ -112,7 +112,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_s3ConfigUpdates(t *testing.T) { Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, updatedS3DestinationConfig, nil, nil, nil, nil), ), }, }, @@ -140,7 +140,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3basic(t *testing.T) { Config: config, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, }, @@ -241,14 +241,14 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ExtendedS3Updates(t *testing.T) { Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, { Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, updatedExtendedS3DestinationConfig, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, updatedExtendedS3DestinationConfig, nil, nil, nil), ), }, }, @@ -280,7 +280,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates(t *testing.T) Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, @@ -288,7 +288,46 @@ func TestAccAWSKinesisFirehoseDeliveryStream_RedshiftConfigUpdates(t *testing.T) Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, updatedRedshiftConfig, nil), + 
testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, updatedRedshiftConfig, nil, nil), + ), + }, + }, + }) +} + +func TestAccAWSKinesisFirehoseDeliveryStream_SplunkConfigUpdates(t *testing.T) { + var stream firehose.DeliveryStreamDescription + + ri := acctest.RandInt() + preConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_SplunkBasic, + ri, ri, ri, ri) + postConfig := fmt.Sprintf(testAccKinesisFirehoseDeliveryStreamConfig_SplunkUpdates, + ri, ri, ri, ri) + + updatedSplunkConfig := &firehose.SplunkDestinationDescription{ + HECEndpointType: aws.String("Event"), + HECAcknowledgmentTimeoutInSeconds: aws.Int64(600), + S3BackupMode: aws.String("FailedEventsOnly"), + } + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckKinesisFirehoseDeliveryStreamDestroy, + Steps: []resource.TestStep{ + { + Config: preConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), + ), + }, + + { + Config: postConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, updatedSplunkConfig), ), }, }, @@ -319,14 +358,14 @@ func TestAccAWSKinesisFirehoseDeliveryStream_ElasticsearchConfigUpdates(t *testi Config: preConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, { Config: postConfig, Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream_es", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticSearchConfig), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, updatedElasticSearchConfig, nil), ), }, }, @@ -347,7 +386,7 @@ func TestAccAWSKinesisFirehoseDeliveryStream_missingProcessingConfiguration(t *t Config: testAccKinesisFirehoseDeliveryStreamConfig_missingProcessingConfiguration(ri), Check: resource.ComposeTestCheckFunc( testAccCheckKinesisFirehoseDeliveryStreamExists("aws_kinesis_firehose_delivery_stream.test_stream", &stream), - testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil), + testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(&stream, nil, nil, nil, nil, nil), ), }, }, @@ -381,7 +420,7 @@ func testAccCheckKinesisFirehoseDeliveryStreamExists(n string, stream *firehose. 
} } -func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, extendedS3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}) resource.TestCheckFunc { +func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.DeliveryStreamDescription, s3config interface{}, extendedS3config interface{}, redshiftConfig interface{}, elasticsearchConfig interface{}, splunkConfig interface{}) resource.TestCheckFunc { return func(s *terraform.State) error { if !strings.HasPrefix(*stream.DeliveryStreamName, "terraform-kinesis-firehose") { return fmt.Errorf("Bad Stream name: %s", *stream.DeliveryStreamName) @@ -471,6 +510,28 @@ func testAccCheckAWSKinesisFirehoseDeliveryStreamAttributes(stream *firehose.Del return fmt.Errorf("Mismatch Elasticsearch Buffering Interval, expected: %s, got: %s", es, stream.Destinations) } } + + if splunkConfig != nil { + s := splunkConfig.(*firehose.SplunkDestinationDescription) + // Range over the Stream Destinations, looking for the matching Splunk destination + var matchHECEndpointType, matchHECAcknowledgmentTimeoutInSeconds, matchS3BackupMode bool + for _, d := range stream.Destinations { + if d.SplunkDestinationDescription != nil { + if *d.SplunkDestinationDescription.HECEndpointType == *s.HECEndpointType { + matchHECEndpointType = true + } + if *d.SplunkDestinationDescription.HECAcknowledgmentTimeoutInSeconds == *s.HECAcknowledgmentTimeoutInSeconds { + matchHECAcknowledgmentTimeoutInSeconds = true + } + if *d.SplunkDestinationDescription.S3BackupMode == *s.S3BackupMode { + matchS3BackupMode = true + } + } + } + if !matchHECEndpointType || !matchHECAcknowledgmentTimeoutInSeconds || !matchS3BackupMode { + return fmt.Errorf("Mismatch Splunk HECEndpointType or HECAcknowledgmentTimeoutInSeconds or S3BackupMode, expected: %s, got: %s", s, stream.Destinations) + } + } } return nil } @@ -996,6 +1057,42 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" { } }` +var testAccKinesisFirehoseDeliveryStreamConfig_SplunkBasic = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` +resource "aws_kinesis_firehose_delivery_stream" "test_stream" { + depends_on = ["aws_iam_role_policy.firehose"] + name = "terraform-kinesis-firehose-basicsplunktest-%d" + destination = "splunk" + s3_configuration { + role_arn = "${aws_iam_role.firehose.arn}" + bucket_arn = "${aws_s3_bucket.bucket.arn}" + } + splunk_configuration { + hec_endpoint = "https://input-test.com:443" + hec_token = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A" + } +}` + +var testAccKinesisFirehoseDeliveryStreamConfig_SplunkUpdates = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` +resource "aws_kinesis_firehose_delivery_stream" "test_stream" { + depends_on = ["aws_iam_role_policy.firehose"] + name = "terraform-kinesis-firehose-basicsplunktest-%d" + destination = "splunk" + s3_configuration { + role_arn = "${aws_iam_role.firehose.arn}" + bucket_arn = "${aws_s3_bucket.bucket.arn}" + buffer_size = 10 + buffer_interval = 400 + compression_format = "GZIP" + } + splunk_configuration { + hec_endpoint = "https://input-test.com:443" + hec_token = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A" + hec_acknowledgment_timeout = 600 + hec_endpoint_type = "Event" + s3_backup_mode = "FailedEventsOnly" + } +}` + var testAccKinesisFirehoseDeliveryStreamBaseElasticsearchConfig = testAccKinesisFirehoseDeliveryStreamBaseConfig + ` resource "aws_elasticsearch_domain" "test_cluster" { domain_name = "es-test-%d" From 
df1b12277a801f6d4dd69f0ee412d03aced37d4d Mon Sep 17 00:00:00 2001
From: Argishti Rostamian
Date: Tue, 23 Jan 2018 14:44:13 -0800
Subject: [PATCH 2/3] update docs

---
 ...sis_firehose_delivery_stream.html.markdown | 34 +++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown
index 738a828c374a..0b850d23490d 100644
--- a/website/docs/r/kinesis_firehose_delivery_stream.html.markdown
+++ b/website/docs/r/kinesis_firehose_delivery_stream.html.markdown
@@ -207,6 +207,30 @@ resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
 }
 ```
+
+### Splunk Destination
+
+```hcl
+resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
+  depends_on  = ["aws_iam_role_policy.firehose"]
+  name        = "terraform-kinesis-firehose-test-stream"
+  destination = "splunk"
+  s3_configuration {
+    role_arn           = "${aws_iam_role.firehose.arn}"
+    bucket_arn         = "${aws_s3_bucket.bucket.arn}"
+    buffer_size        = 10
+    buffer_interval    = 400
+    compression_format = "GZIP"
+  }
+  splunk_configuration {
+    hec_endpoint               = "https://http-inputs-mydomain.splunkcloud.com:443"
+    hec_token                  = "51D4DA16-C61B-4F5F-8EC7-ED4301342A4A"
+    hec_acknowledgment_timeout = 600
+    hec_endpoint_type          = "Event"
+    s3_backup_mode             = "FailedEventsOnly"
+  }
+}
+```
 ~> **NOTE:** Kinesis Firehose is currently only supported in us-east-1, us-west-2 and eu-west-1.

 ## Argument Reference
@@ -272,6 +296,16 @@ The `elasticsearch_configuration` object supports the following:
 * `type_name` - (Required) The Elasticsearch type name with maximum length of 100 characters.
 * `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below
+The `splunk_configuration` object supports the following:
+
+* `hec_acknowledgment_timeout` - (Optional) The amount of time, in seconds between 180 and 600, that Kinesis Firehose waits to receive an acknowledgment from Splunk after it sends data.
+* `hec_endpoint` - (Required) The HTTP Event Collector (HEC) endpoint to which Kinesis Firehose sends your data.
+* `hec_endpoint_type` - (Optional) The HEC endpoint type. Valid values are `Raw` or `Event`. The default value is `Raw`.
+* `hec_token` - (Required) The GUID that you obtain from your Splunk cluster when you create a new HEC endpoint.
+* `s3_backup_mode` - (Optional) Defines how data should be delivered to Amazon S3. Valid values are `FailedEventsOnly` and `AllEvents`. The default value is `FailedEventsOnly`.
+* `retry_duration` - (Optional) After an initial failure to deliver to Splunk, the total amount of time, in seconds between 0 and 7200, during which Kinesis Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed data is written to Amazon S3. The default value is 3600 seconds. There will be no retry if the value is 0.
+* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below.
+
 The `cloudwatch_logging_options` object supports the following:

 * `enabled` - (Optional) Enables or disables the logging. Defaults to `false`.
From 03a11412f4faa190e08c1754b322ceac4a464ad7 Mon Sep 17 00:00:00 2001 From: Argishti Rostamian Date: Tue, 6 Feb 2018 14:04:05 -0800 Subject: [PATCH 3/3] minor updates --- ...ce_aws_kinesis_firehose_delivery_stream.go | 38 ++++++------------- 1 file changed, 12 insertions(+), 26 deletions(-) diff --git a/aws/resource_aws_kinesis_firehose_delivery_stream.go b/aws/resource_aws_kinesis_firehose_delivery_stream.go index cfe67019a2b4..00403e850128 100644 --- a/aws/resource_aws_kinesis_firehose_delivery_stream.go +++ b/aws/resource_aws_kinesis_firehose_delivery_stream.go @@ -676,17 +676,10 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ "hec_acknowledgment_timeout": { - Type: schema.TypeInt, - Optional: true, - Default: 180, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 180 || value > 600 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 180 to 600 seconds.", k)) - } - return - }, + Type: schema.TypeInt, + Optional: true, + Default: 180, + ValidateFunc: validateIntegerInRange(180, 600), }, "hec_endpoint": { @@ -697,10 +690,10 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { "hec_endpoint_type": { Type: schema.TypeString, Optional: true, - Default: "Raw", + Default: firehose.HECEndpointTypeRaw, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "Raw" && value != "Event" { + if value != firehose.HECEndpointTypeRaw && value != firehose.HECEndpointTypeEvent { errors = append(errors, fmt.Errorf( "%q must be one of 'Raw', 'Event'", k)) } @@ -716,10 +709,10 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { "s3_backup_mode": { Type: schema.TypeString, Optional: true, - Default: "FailedEventsOnly", + Default: firehose.SplunkS3BackupModeFailedEventsOnly, ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { value := v.(string) - if value != "FailedEventsOnly" && value != "AllEvents" { + if value != firehose.SplunkS3BackupModeFailedEventsOnly && value != firehose.SplunkS3BackupModeAllEvents { errors = append(errors, fmt.Errorf( "%q must be one of 'FailedEventsOnly', 'AllEvents'", k)) } @@ -728,17 +721,10 @@ func resourceAwsKinesisFirehoseDeliveryStream() *schema.Resource { }, "retry_duration": { - Type: schema.TypeInt, - Optional: true, - Default: 3600, - ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) { - value := v.(int) - if value < 0 || value > 7200 { - errors = append(errors, fmt.Errorf( - "%q must be in the range from 0 to 7200 seconds.", k)) - } - return - }, + Type: schema.TypeInt, + Optional: true, + Default: 3600, + ValidateFunc: validateIntegerInRange(0, 7200), }, "cloudwatch_logging_options": cloudWatchLoggingOptionsSchema(),