From 2b3771573786b1a5771936e186fe6166ceaaef0d Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Sun, 14 Sep 2025 20:33:52 +1000 Subject: [PATCH 1/9] Update bundled Kibana client generation --- generated/kbapi/kibana.gen.go | 64 ++++++++++++++++------------- generated/kbapi/transform_schema.go | 23 +++++++++-- 2 files changed, 55 insertions(+), 32 deletions(-) diff --git a/generated/kbapi/kibana.gen.go b/generated/kbapi/kibana.gen.go index 97e146c28..cf1de5697 100644 --- a/generated/kbapi/kibana.gen.go +++ b/generated/kbapi/kibana.gen.go @@ -20779,9 +20779,9 @@ type NewOutputKafka struct { CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` ClientId *string `json:"client_id,omitempty"` Compression *NewOutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel interface{} `json:"compression_level"` + CompressionLevel *float32 `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` - ConnectionType interface{} `json:"connection_type"` + ConnectionType *string `json:"connection_type,omitempty"` Hash *struct { Hash *string `json:"hash,omitempty"` Random *bool `json:"random,omitempty"` @@ -20799,7 +20799,7 @@ type NewOutputKafka struct { Key *string `json:"key,omitempty"` Name string `json:"name"` Partition *NewOutputKafkaPartition `json:"partition,omitempty"` - Password interface{} `json:"password"` + Password *string `json:"password,omitempty"` ProxyId *string `json:"proxy_id,omitempty"` Random *struct { GroupEvents *float32 `json:"group_events,omitempty"` @@ -20822,7 +20822,7 @@ type NewOutputKafka struct { Timeout *float32 `json:"timeout,omitempty"` Topic *string `json:"topic,omitempty"` Type NewOutputKafkaType `json:"type"` - Username interface{} `json:"username"` + Username *string `json:"username,omitempty"` Version *string `json:"version,omitempty"` WriteToLogsStreams *bool `json:"write_to_logs_streams,omitempty"` } @@ -21082,9 +21082,9 @@ type OutputKafka struct { CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` ClientId *string `json:"client_id,omitempty"` Compression *OutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel interface{} `json:"compression_level"` + CompressionLevel *float32 `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` - ConnectionType interface{} `json:"connection_type"` + ConnectionType *string `json:"connection_type,omitempty"` Hash *OutputKafka_Hash `json:"hash,omitempty"` Headers *[]OutputKafka_Headers_Item `json:"headers,omitempty"` Hosts []string `json:"hosts"` @@ -21096,7 +21096,7 @@ type OutputKafka struct { Key *string `json:"key,omitempty"` Name string `json:"name"` Partition *OutputKafkaPartition `json:"partition,omitempty"` - Password interface{} `json:"password"` + Password *string `json:"password,omitempty"` ProxyId *string `json:"proxy_id,omitempty"` Random *OutputKafka_Random `json:"random,omitempty"` RequiredAcks *OutputKafkaRequiredAcks `json:"required_acks,omitempty"` @@ -21108,7 +21108,7 @@ type OutputKafka struct { Timeout *float32 `json:"timeout,omitempty"` Topic *string `json:"topic,omitempty"` Type OutputKafkaType `json:"type"` - Username interface{} `json:"username"` + Username *string `json:"username,omitempty"` Version *string `json:"version,omitempty"` WriteToLogsStreams *bool `json:"write_to_logs_streams,omitempty"` AdditionalProperties map[string]interface{} `json:"-"` @@ -23771,9 +23771,9 @@ type UpdateOutputKafka struct { CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` 
ClientId *string `json:"client_id,omitempty"` Compression *UpdateOutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel interface{} `json:"compression_level"` + CompressionLevel *float32 `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` - ConnectionType interface{} `json:"connection_type"` + ConnectionType *string `json:"connection_type,omitempty"` Hash *struct { Hash *string `json:"hash,omitempty"` Random *bool `json:"random,omitempty"` @@ -23790,7 +23790,7 @@ type UpdateOutputKafka struct { Key *string `json:"key,omitempty"` Name string `json:"name"` Partition *UpdateOutputKafkaPartition `json:"partition,omitempty"` - Password interface{} `json:"password"` + Password *string `json:"password,omitempty"` ProxyId *string `json:"proxy_id,omitempty"` Random *struct { GroupEvents *float32 `json:"group_events,omitempty"` @@ -23813,7 +23813,7 @@ type UpdateOutputKafka struct { Timeout *float32 `json:"timeout,omitempty"` Topic *string `json:"topic,omitempty"` Type *UpdateOutputKafkaType `json:"type,omitempty"` - Username interface{} `json:"username"` + Username *string `json:"username,omitempty"` Version *string `json:"version,omitempty"` WriteToLogsStreams *bool `json:"write_to_logs_streams,omitempty"` } @@ -41967,9 +41967,11 @@ func (a OutputKafka) MarshalJSON() ([]byte, error) { } } - object["compression_level"], err = json.Marshal(a.CompressionLevel) - if err != nil { - return nil, fmt.Errorf("error marshaling 'compression_level': %w", err) + if a.CompressionLevel != nil { + object["compression_level"], err = json.Marshal(a.CompressionLevel) + if err != nil { + return nil, fmt.Errorf("error marshaling 'compression_level': %w", err) + } } if a.ConfigYaml != nil { @@ -41979,9 +41981,11 @@ func (a OutputKafka) MarshalJSON() ([]byte, error) { } } - object["connection_type"], err = json.Marshal(a.ConnectionType) - if err != nil { - return nil, fmt.Errorf("error marshaling 'connection_type': %w", err) + if a.ConnectionType != nil { + object["connection_type"], err = json.Marshal(a.ConnectionType) + if err != nil { + return nil, fmt.Errorf("error marshaling 'connection_type': %w", err) + } } if a.Hash != nil { @@ -42057,9 +42061,11 @@ func (a OutputKafka) MarshalJSON() ([]byte, error) { } } - object["password"], err = json.Marshal(a.Password) - if err != nil { - return nil, fmt.Errorf("error marshaling 'password': %w", err) + if a.Password != nil { + object["password"], err = json.Marshal(a.Password) + if err != nil { + return nil, fmt.Errorf("error marshaling 'password': %w", err) + } } if a.ProxyId != nil { @@ -42137,9 +42143,11 @@ func (a OutputKafka) MarshalJSON() ([]byte, error) { return nil, fmt.Errorf("error marshaling 'type': %w", err) } - object["username"], err = json.Marshal(a.Username) - if err != nil { - return nil, fmt.Errorf("error marshaling 'username': %w", err) + if a.Username != nil { + object["username"], err = json.Marshal(a.Username) + if err != nil { + return nil, fmt.Errorf("error marshaling 'username': %w", err) + } } if a.Version != nil { @@ -51569,7 +51577,7 @@ func (t SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) AsSLO // FromSLOsTimesliceMetricBasicMetricWithField overwrites any union data inside the SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item as the provided SLOsTimesliceMetricBasicMetricWithField func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) FromSLOsTimesliceMetricBasicMetricWithField(v SLOsTimesliceMetricBasicMetricWithField) error { - 
v.Aggregation = "max" + v.Aggregation = "cardinality" b, err := json.Marshal(v) t.union = b return err @@ -51577,7 +51585,7 @@ func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) From // MergeSLOsTimesliceMetricBasicMetricWithField performs a merge with any union data inside the SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item, using the provided SLOsTimesliceMetricBasicMetricWithField func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) MergeSLOsTimesliceMetricBasicMetricWithField(v SLOsTimesliceMetricBasicMetricWithField) error { - v.Aggregation = "max" + v.Aggregation = "cardinality" b, err := json.Marshal(v) if err != nil { return err @@ -51658,10 +51666,10 @@ func (t SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) Value return nil, err } switch discriminator { + case "cardinality": + return t.AsSLOsTimesliceMetricBasicMetricWithField() case "doc_count": return t.AsSLOsTimesliceMetricDocCountMetric() - case "max": - return t.AsSLOsTimesliceMetricBasicMetricWithField() case "percentile": return t.AsSLOsTimesliceMetricPercentileMetric() default: diff --git a/generated/kbapi/transform_schema.go b/generated/kbapi/transform_schema.go index 20e5ea8e2..995e07369 100644 --- a/generated/kbapi/transform_schema.go +++ b/generated/kbapi/transform_schema.go @@ -926,10 +926,11 @@ func transformFleetPaths(schema *Schema) { for _, name := range []string{"output", "new_output", "update_output"} { // Ref each index in the anyOf union + kafkaComponent := fmt.Sprintf("%s_kafka", name) schema.Components.CreateRef(schema, fmt.Sprintf("%s_elasticsearch", name), fmt.Sprintf("schemas.%s_union.anyOf.0", name)) schema.Components.CreateRef(schema, fmt.Sprintf("%s_remote_elasticsearch", name), fmt.Sprintf("schemas.%s_union.anyOf.1", name)) schema.Components.CreateRef(schema, fmt.Sprintf("%s_logstash", name), fmt.Sprintf("schemas.%s_union.anyOf.2", name)) - schema.Components.CreateRef(schema, fmt.Sprintf("%s_kafka", name), fmt.Sprintf("schemas.%s_union.anyOf.3", name)) + schema.Components.CreateRef(schema, kafkaComponent, fmt.Sprintf("schemas.%s_union.anyOf.3", name)) // Extract child structs for _, typ := range []string{"elasticsearch", "remote_elasticsearch", "logstash", "kafka"} { @@ -954,10 +955,24 @@ func transformFleetPaths(schema *Schema) { - not: {} */ - props := schema.Components.MustGetMap(fmt.Sprintf("schemas.%s_kafka.properties", name)) - for _, key := range []string{"compression_level", "connection_type", "password", "username"} { - props.Set(key, Map{}) + // https://github.com/elastic/kibana/issues/197153 + kafkaRequiredName := fmt.Sprintf("schemas.%s.required", kafkaComponent) + props := schema.Components.MustGetMap(fmt.Sprintf("schemas.%s.properties", kafkaComponent)) + required := schema.Components.MustGetSlice(kafkaRequiredName) + for key, apiType := range map[string]string{"compression_level": "number", "connection_type": "string", "password": "string", "username": "string"} { + props.Set(key, Map{ + "type": apiType, + }) + required = slices.DeleteFunc(required, func(item any) bool { + itemStr, ok := item.(string) + if !ok { + return false + } + + return itemStr == key + }) } + schema.Components.Set(kafkaRequiredName, required) } // Add the missing discriminator to the response union From 183a2ccc2905f10a0765903058b9f441bccdfe26 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Sun, 14 Sep 2025 20:34:27 +1000 Subject: [PATCH 2/9] Add support for Kafka fleet output --- internal/fleet/output/acc_test.go | 553 
+++++++++++++++ internal/fleet/output/create.go | 2 +- internal/fleet/output/models.go | 653 +++++++++++++++++- internal/fleet/output/resource.go | 67 +- internal/fleet/output/resource_test.go | 449 ++++-------- internal/fleet/output/schema.go | 263 ++++++- internal/fleet/output/update.go | 2 +- internal/utils/validators/conditional.go | 229 ++++++ internal/utils/validators/conditional_test.go | 284 ++++++++ 9 files changed, 2153 insertions(+), 349 deletions(-) create mode 100644 internal/fleet/output/acc_test.go create mode 100644 internal/utils/validators/conditional.go create mode 100644 internal/utils/validators/conditional_test.go diff --git a/internal/fleet/output/acc_test.go b/internal/fleet/output/acc_test.go new file mode 100644 index 000000000..87fa18550 --- /dev/null +++ b/internal/fleet/output/acc_test.go @@ -0,0 +1,553 @@ +package output_test + +import ( + "context" + "fmt" + "testing" + + "github.com/elastic/terraform-provider-elasticstack/internal/acctest" + "github.com/elastic/terraform-provider-elasticstack/internal/clients" + "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet" + "github.com/elastic/terraform-provider-elasticstack/internal/fleet/output" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/elastic/terraform-provider-elasticstack/internal/versionutils" + "github.com/hashicorp/go-version" + sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" + "github.com/hashicorp/terraform-plugin-testing/helper/resource" + "github.com/hashicorp/terraform-plugin-testing/terraform" +) + +var minVersionOutput = version.Must(version.NewVersion("8.6.0")) + +func TestAccResourceOutputElasticsearchFromSDK(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "elasticstack": { + Source: "elastic/elasticstack", + VersionConstraint: "0.11.7", + }, + }, + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateElasticsearch(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), + ), + }, + { + ProtoV6ProviderFactories: acctest.Providers, + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateElasticsearch(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", 
fmt.Sprintf("%s-elasticsearch-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), + ), + }, + }, + }) +} + +func TestAccResourceOutputElasticsearch(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + ProtoV6ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateElasticsearch(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), + ), + }, + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputUpdateElasticsearch(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Updated Elasticsearch Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), + ), + }, + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputUpdateElasticsearch(policyName), + ResourceName: "elasticstack_fleet_output.test_output", + ImportState: true, + ImportStateVerify: true, + }, + }, + }) +} + +func TestAccResourceOutputLogstashFromSDK(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: 
map[string]resource.ExternalProvider{ + "elasticstack": { + Source: "elastic/elasticstack", + VersionConstraint: "0.11.7", + }, + }, + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateLogstash(policyName, true), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate_authorities.0", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.key", "placeholder"), + ), + }, + { + ProtoV6ProviderFactories: acctest.Providers, + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateLogstash(policyName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate_authorities.0", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.key", "placeholder"), + ), + }, + }, + }) +} + +func TestAccResourceOutputLogstash(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + ProtoV6ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputCreateLogstash(policyName, false), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", 
fmt.Sprintf("%s-logstash-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate_authorities.0", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.key", "placeholder"), + ), + }, + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), + Config: testAccResourceOutputUpdateLogstash(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Updated Logstash Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate_authorities.0", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.certificate", "placeholder"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.key", "placeholder"), + ), + }, + }, + }) +} + +func TestAccResourceOutputKafka(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + ProtoV6ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(output.MinVersionOutputKafka), + Config: testAccResourceOutputCreateKafka(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Kafka Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-kafka-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "kafka"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "kafka:9092"), + 
resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.auth_type", "none"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.topic", "beats"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.partition", "hash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.compression", "gzip"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.compression_level", "6"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.connection_type", "plaintext"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.required_acks", "1"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.0.key", "environment"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.0.value", "test"), + ), + }, + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(output.MinVersionOutputKafka), + Config: testAccResourceOutputUpdateKafka(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Updated Kafka Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-kafka-output", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "kafka"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "kafka:9092"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.auth_type", "none"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.topic", "logs"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.partition", "round_robin"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.compression", "snappy"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.connection_type", "encryption"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.required_acks", "-1"), + ), + }, + }, + }) +} + +func TestAccResourceOutputKafkaComplex(t *testing.T) { + policyName := sdkacctest.RandString(22) + + resource.Test(t, resource.TestCase{ + PreCheck: func() { acctest.PreCheck(t) }, + CheckDestroy: checkResourceOutputDestroy, + ProtoV6ProviderFactories: acctest.Providers, + Steps: []resource.TestStep{ + { + SkipFunc: versionutils.CheckIfVersionIsUnsupported(output.MinVersionOutputKafka), + Config: testAccResourceOutputCreateKafkaComplex(policyName), + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Complex Kafka Output %s", policyName)), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "kafka"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.auth_type", "none"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.topic", "complex-topic"), + 
resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.partition", "hash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.compression", "lz4"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.required_acks", "0"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.broker_timeout", "10"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.timeout", "30"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.version", "2.6.0"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.0.key", "datacenter"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.0.value", "us-west-1"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.1.key", "service"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.1.value", "beats"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.hash.0.hash", "event.hash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.hash.0.random", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.sasl.0.mechanism", "SCRAM-SHA-256"), + ), + }, + }, + }) +} + +func testAccResourceOutputCreateElasticsearch(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Elasticsearch Output %s" + output_id = "%s-elasticsearch-output" + type = "elasticsearch" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "https://elasticsearch:9200" + ] +} +`, id, id) +} + +func testAccResourceOutputUpdateElasticsearch(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Updated Elasticsearch Output %s" + output_id = "%s-elasticsearch-output" + type = "elasticsearch" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "https://elasticsearch:9200" + ] +} +`, id, id) +} + +func testAccResourceOutputCreateLogstash(id string, forSDK bool) string { + sslInfix := "" + if !forSDK { + sslInfix = "=" + } + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Logstash Output %s" + type = "logstash" + output_id = "%s-logstash-output" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "logstash:5044" + ] + ssl %s { + certificate_authorities = ["placeholder"] + certificate = "placeholder" + key = "placeholder" + } +} +`, id, id, sslInfix) +} + +func testAccResourceOutputUpdateLogstash(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Updated Logstash Output %s" + output_id = "%s-logstash-output" + type = "logstash" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "logstash:5044" + ] + ssl = { + 
certificate_authorities = ["placeholder"] + certificate = "placeholder" + key = "placeholder" + } +} +`, id, id) +} + +func testAccResourceOutputCreateKafka(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Kafka Output %s" + output_id = "%s-kafka-output" + type = "kafka" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "kafka:9092" + ] + + # Kafka-specific configuration + kafka = { + auth_type = "none" + topic = "beats" + partition = "hash" + compression = "gzip" + compression_level = 6 + connection_type = "plaintext" + required_acks = 1 + + headers = [{ + key = "environment" + value = "test" + }] + } +} +`, id, id) +} + +func testAccResourceOutputUpdateKafka(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Updated Kafka Output %s" + output_id = "%s-kafka-output" + type = "kafka" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "kafka:9092" + ] + + # Updated Kafka-specific configuration + kafka = { + auth_type = "none" + topic = "logs" + partition = "round_robin" + compression = "snappy" + connection_type = "encryption" + required_acks = -1 + } +} +`, id, id) +} + +func testAccResourceOutputCreateKafkaComplex(id string) string { + return fmt.Sprintf(` +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +resource "elasticstack_fleet_output" "test_output" { + name = "Complex Kafka Output %s" + output_id = "%s-kafka-complex-output" + type = "kafka" + config_yaml = yamlencode({ + "ssl.verification_mode" : "none" + }) + default_integrations = false + default_monitoring = false + hosts = [ + "kafka1:9092", + "kafka2:9092", + "kafka3:9092" + ] + + # Complex Kafka configuration showcasing all options + kafka = { + auth_type = "none" + topic = "complex-topic" + partition = "hash" + compression = "lz4" + connection_type = "encryption" + required_acks = 0 + broker_timeout = 10 + timeout = 30 + version = "2.6.0" + + headers = [ + { + key = "datacenter" + value = "us-west-1" + }, + { + key = "service" + value = "beats" + } + ] + + hash = [{ + hash = "event.hash" + random = false + }] + + sasl = [{ + mechanism = "SCRAM-SHA-256" + }] + } +} +`, id, id) +} + +func checkResourceOutputDestroy(s *terraform.State) error { + client, err := clients.NewAcceptanceTestingClient() + if err != nil { + return err + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "elasticstack_fleet_output" { + continue + } + + fleetClient, err := client.GetFleetClient() + if err != nil { + return err + } + output, diags := fleet.GetOutput(context.Background(), fleetClient, rs.Primary.ID) + if diags.HasError() { + return utils.FwDiagsAsError(diags) + } + if output != nil { + return fmt.Errorf("output id=%v still exists, but it should have been removed", rs.Primary.ID) + } + } + return nil +} diff --git a/internal/fleet/output/create.go b/internal/fleet/output/create.go index b4f0c7858..19e59b145 100644 --- a/internal/fleet/output/create.go +++ b/internal/fleet/output/create.go @@ -22,7 +22,7 @@ func (r *outputResource) Create(ctx context.Context, req resource.CreateRequest, return } - body, diags := planModel.toAPICreateModel(ctx) + body, diags := planModel.toAPICreateModel(ctx, 
r.client) resp.Diagnostics.Append(diags...) if resp.Diagnostics.HasError() { return diff --git a/internal/fleet/output/models.go b/internal/fleet/output/models.go index 2bece8c9f..465be9037 100644 --- a/internal/fleet/output/models.go +++ b/internal/fleet/output/models.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/clients" "github.com/elastic/terraform-provider-elasticstack/internal/utils" "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" @@ -21,8 +22,31 @@ type outputModel struct { CaTrustedFingerprint types.String `tfsdk:"ca_trusted_fingerprint"` DefaultIntegrations types.Bool `tfsdk:"default_integrations"` DefaultMonitoring types.Bool `tfsdk:"default_monitoring"` - Ssl types.List `tfsdk:"ssl"` //> outputSslModel ConfigYaml types.String `tfsdk:"config_yaml"` + Ssl types.Object `tfsdk:"ssl"` //> outputSslModel + Kafka types.Object `tfsdk:"kafka"` //> outputKafkaModel +} + +type outputKafkaModel struct { + AuthType types.String `tfsdk:"auth_type"` + BrokerTimeout types.Float64 `tfsdk:"broker_timeout"` + ClientId types.String `tfsdk:"client_id"` + Compression types.String `tfsdk:"compression"` + CompressionLevel types.Float64 `tfsdk:"compression_level"` + ConnectionType types.String `tfsdk:"connection_type"` + Topic types.String `tfsdk:"topic"` + Partition types.String `tfsdk:"partition"` + RequiredAcks types.Int64 `tfsdk:"required_acks"` + Timeout types.Float64 `tfsdk:"timeout"` + Version types.String `tfsdk:"version"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Key types.String `tfsdk:"key"` + Headers types.List `tfsdk:"headers"` //> outputHeadersModel + Hash types.List `tfsdk:"hash"` //> outputHashModel + Random types.List `tfsdk:"random"` //> outputRandomModel + RoundRobin types.List `tfsdk:"round_robin"` //> outputRoundRobinModel + Sasl types.List `tfsdk:"sasl"` //> outputSaslModel } type outputSslModel struct { @@ -31,24 +55,46 @@ type outputSslModel struct { Key types.String `tfsdk:"key"` } +type outputHeadersModel struct { + Key types.String `tfsdk:"key"` + Value types.String `tfsdk:"value"` +} + +type outputHashModel struct { + Hash types.String `tfsdk:"hash"` + Random types.Bool `tfsdk:"random"` +} + +type outputRandomModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputRoundRobinModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputSaslModel struct { + Mechanism types.String `tfsdk:"mechanism"` +} + func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.OutputUnion) (diags diag.Diagnostics) { if union == nil { return } - doSsl := func(ssl *kbapi.OutputSsl) types.List { + doSsl := func(ssl *kbapi.OutputSsl) types.Object { if ssl != nil { p := path.Root("ssl") - sslModels := []outputSslModel{{ + sslModel := outputSslModel{ CertificateAuthorities: utils.SliceToListType_String(ctx, utils.Deref(ssl.CertificateAuthorities), p.AtName("certificate_authorities"), &diags), Certificate: types.StringPointerValue(ssl.Certificate), Key: types.StringPointerValue(ssl.Key), - }} - list, nd := types.ListValueFrom(ctx, getSslAttrTypes(), sslModels) + } + obj, nd := types.ObjectValueFrom(ctx, getSslAttrTypes(), sslModel) diags.Append(nd...) 
- return list + return obj } else { - return types.ListNull(getSslAttrTypes()) + return types.ObjectNull(getSslAttrTypes()) } } @@ -97,6 +143,165 @@ func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.Outp model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) model.Ssl = doSsl(data.Ssl) + case "kafka": + data, err := union.AsOutputKafka() + if err != nil { + diags.AddError(err.Error(), "") + return + } + + model.ID = types.StringPointerValue(data.Id) + model.OutputID = types.StringPointerValue(data.Id) + model.Name = types.StringValue(data.Name) + model.Type = types.StringValue(string(data.Type)) + model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) + model.CaSha256 = types.StringPointerValue(data.CaSha256) + model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) + model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) + model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) + model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) + model.Ssl = doSsl(data.Ssl) + + // Kafka-specific fields - initialize kafka nested object + kafkaModel := outputKafkaModel{} + kafkaModel.AuthType = types.StringValue(string(data.AuthType)) + if data.BrokerTimeout != nil { + kafkaModel.BrokerTimeout = types.Float64Value(float64(*data.BrokerTimeout)) + } else { + kafkaModel.BrokerTimeout = types.Float64Null() + } + kafkaModel.ClientId = types.StringPointerValue(data.ClientId) + if data.Compression != nil { + kafkaModel.Compression = types.StringValue(string(*data.Compression)) + } else { + kafkaModel.Compression = types.StringNull() + } + // Handle CompressionLevel + if data.CompressionLevel != nil { + kafkaModel.CompressionLevel = types.Float64Value(float64(*data.CompressionLevel)) + } else { + kafkaModel.CompressionLevel = types.Float64Null() + } + // Handle ConnectionType + if data.ConnectionType != nil { + kafkaModel.ConnectionType = types.StringValue(*data.ConnectionType) + } else { + kafkaModel.ConnectionType = types.StringNull() + } + kafkaModel.Topic = types.StringPointerValue(data.Topic) + if data.Partition != nil { + kafkaModel.Partition = types.StringValue(string(*data.Partition)) + } else { + kafkaModel.Partition = types.StringNull() + } + if data.RequiredAcks != nil { + kafkaModel.RequiredAcks = types.Int64Value(int64(*data.RequiredAcks)) + } else { + kafkaModel.RequiredAcks = types.Int64Null() + } + if data.Timeout != nil { + kafkaModel.Timeout = types.Float64Value(float64(*data.Timeout)) + } else { + kafkaModel.Timeout = types.Float64Null() + } + kafkaModel.Version = types.StringPointerValue(data.Version) + if data.Username != nil { + kafkaModel.Username = types.StringValue(*data.Username) + } else { + kafkaModel.Username = types.StringNull() + } + if data.Password != nil { + kafkaModel.Password = types.StringValue(*data.Password) + } else { + kafkaModel.Password = types.StringNull() + } + kafkaModel.Key = types.StringPointerValue(data.Key) + + // Handle headers + if data.Headers != nil { + headerModels := make([]outputHeadersModel, len(*data.Headers)) + for i, header := range *data.Headers { + headerModels[i] = outputHeadersModel{ + Key: types.StringValue(header.Key), + Value: types.StringValue(header.Value), + } + } + list, nd := types.ListValueFrom(ctx, getHeadersAttrTypes(), headerModels) + diags.Append(nd...) 
+ kafkaModel.Headers = list + } else { + kafkaModel.Headers = types.ListNull(getHeadersAttrTypes()) + } + + // Handle hash + if data.Hash != nil { + hashModels := []outputHashModel{{ + Hash: types.StringPointerValue(data.Hash.Hash), + Random: types.BoolPointerValue(data.Hash.Random), + }} + list, nd := types.ListValueFrom(ctx, getHashAttrTypes(), hashModels) + diags.Append(nd...) + kafkaModel.Hash = list + } else { + kafkaModel.Hash = types.ListNull(getHashAttrTypes()) + } + + // Handle random + if data.Random != nil { + randomModels := []outputRandomModel{{ + GroupEvents: func() types.Float64 { + if data.Random.GroupEvents != nil { + return types.Float64Value(float64(*data.Random.GroupEvents)) + } + return types.Float64Null() + }(), + }} + list, nd := types.ListValueFrom(ctx, getRandomAttrTypes(), randomModels) + diags.Append(nd...) + kafkaModel.Random = list + } else { + kafkaModel.Random = types.ListNull(getRandomAttrTypes()) + } + + // Handle round_robin + if data.RoundRobin != nil { + roundRobinModels := []outputRoundRobinModel{{ + GroupEvents: func() types.Float64 { + if data.RoundRobin.GroupEvents != nil { + return types.Float64Value(float64(*data.RoundRobin.GroupEvents)) + } + return types.Float64Null() + }(), + }} + list, nd := types.ListValueFrom(ctx, getRoundRobinAttrTypes(), roundRobinModels) + diags.Append(nd...) + kafkaModel.RoundRobin = list + } else { + kafkaModel.RoundRobin = types.ListNull(getRoundRobinAttrTypes()) + } + + // Handle sasl + if data.Sasl != nil { + saslModels := []outputSaslModel{{ + Mechanism: func() types.String { + if data.Sasl.Mechanism != nil { + return types.StringValue(string(*data.Sasl.Mechanism)) + } + return types.StringNull() + }(), + }} + list, nd := types.ListValueFrom(ctx, getSaslAttrTypes(), saslModels) + diags.Append(nd...) + kafkaModel.Sasl = list + } else { + kafkaModel.Sasl = types.ListNull(getSaslAttrTypes()) + } + + // Set the kafka nested object on the main model + kafkaObj, nd := types.ObjectValueFrom(ctx, getKafkaAttrTypes(), kafkaModel) + diags.Append(nd...) 
+ model.Kafka = kafkaObj + default: diags.AddError(fmt.Sprintf("unhandled output type: %s", discriminator), "") } @@ -104,15 +309,15 @@ func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.Outp return } -func (model outputModel) toAPICreateModel(ctx context.Context) (union kbapi.NewOutputUnion, diags diag.Diagnostics) { +func (model outputModel) toAPICreateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.NewOutputUnion, diags diag.Diagnostics) { doSsl := func() *kbapi.NewOutputSsl { if utils.IsKnown(model.Ssl) { - sslModels := utils.ListTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) - if len(sslModels) > 0 { + sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) + if sslModel != nil { return &kbapi.NewOutputSsl{ - Certificate: sslModels[0].Certificate.ValueStringPointer(), - CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModels[0].CertificateAuthorities, path.Root("certificate_authorities"), &diags)), - Key: sslModels[0].Key.ValueStringPointer(), + Certificate: sslModel.Certificate.ValueStringPointer(), + CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), + Key: sslModel.Key.ValueStringPointer(), } } } @@ -161,6 +366,211 @@ func (model outputModel) toAPICreateModel(ctx context.Context) (union kbapi.NewO return } + case "kafka": + // Check minimum version requirement for Kafka output type + if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { + diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) + return + } else if !supported { + diags.AddError("Unsupported version for Kafka output", + fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) + return + } + + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } + + // Helper functions for Kafka-specific complex types + doHeaders := func() *[]struct { + Key string `json:"key"` + Value string `json:"value"` + } { + if utils.IsKnown(kafkaModel.Headers) { + headerModels := utils.ListTypeAs[outputHeadersModel](ctx, kafkaModel.Headers, path.Root("kafka").AtName("headers"), &diags) + if len(headerModels) > 0 { + headers := make([]struct { + Key string `json:"key"` + Value string `json:"value"` + }, len(headerModels)) + for i, h := range headerModels { + headers[i] = struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + Key: h.Key.ValueString(), + Value: h.Value.ValueString(), + } + } + return &headers + } + } + return nil + } + + doHash := func() *struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + } { + if utils.IsKnown(kafkaModel.Hash) { + hashModels := utils.ListTypeAs[outputHashModel](ctx, kafkaModel.Hash, path.Root("kafka").AtName("hash"), &diags) + if len(hashModels) > 0 { + return &struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + }{ + Hash: hashModels[0].Hash.ValueStringPointer(), + Random: hashModels[0].Random.ValueBoolPointer(), + } + } + } + return nil + } + + doRandom := func() *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } { + if utils.IsKnown(kafkaModel.Random) { + randomModels := 
utils.ListTypeAs[outputRandomModel](ctx, kafkaModel.Random, path.Root("kafka").AtName("random"), &diags) + if len(randomModels) > 0 { + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !randomModels[0].GroupEvents.IsNull() { + val := float32(randomModels[0].GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + } + } + } + return nil + } + + doRoundRobin := func() *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } { + if utils.IsKnown(kafkaModel.RoundRobin) { + roundRobinModels := utils.ListTypeAs[outputRoundRobinModel](ctx, kafkaModel.RoundRobin, path.Root("kafka").AtName("round_robin"), &diags) + if len(roundRobinModels) > 0 { + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !roundRobinModels[0].GroupEvents.IsNull() { + val := float32(roundRobinModels[0].GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + } + } + } + return nil + } + + doSasl := func() *struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + } { + if utils.IsKnown(kafkaModel.Sasl) { + saslModels := utils.ListTypeAs[outputSaslModel](ctx, kafkaModel.Sasl, path.Root("kafka").AtName("sasl"), &diags) + if len(saslModels) > 0 && !saslModels[0].Mechanism.IsNull() { + mechanism := kbapi.NewOutputKafkaSaslMechanism(saslModels[0].Mechanism.ValueString()) + return &struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + Mechanism: &mechanism, + } + } + } + return nil + } + + body := kbapi.NewOutputKafka{ + Type: kbapi.NewOutputKafkaTypeKafka, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: doSsl(), + // Kafka-specific fields + AuthType: func() kbapi.NewOutputKafkaAuthType { + if !kafkaModel.AuthType.IsNull() { + return kbapi.NewOutputKafkaAuthType(kafkaModel.AuthType.ValueString()) + } + return kbapi.NewOutputKafkaAuthTypeNone + }(), + BrokerTimeout: func() *float32 { + if !kafkaModel.BrokerTimeout.IsNull() { + val := float32(kafkaModel.BrokerTimeout.ValueFloat64()) + return &val + } + return nil + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.NewOutputKafkaCompression { + if !kafkaModel.Compression.IsNull() { + comp := kbapi.NewOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + } + return nil + }(), + CompressionLevel: func() *float32 { + if !kafkaModel.CompressionLevel.IsNull() && !kafkaModel.Compression.IsNull() && kafkaModel.Compression.ValueString() == "gzip" { + val := float32(kafkaModel.CompressionLevel.ValueFloat64()) + return &val + } + return nil + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.NewOutputKafkaPartition { + if !kafkaModel.Partition.IsNull() { + part := kbapi.NewOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + } + return nil + }(), + RequiredAcks: func() *kbapi.NewOutputKafkaRequiredAcks { + if !kafkaModel.RequiredAcks.IsNull() { + acks := 
kbapi.NewOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &acks + } + return nil + }(), + Timeout: func() *float32 { + if !kafkaModel.Timeout.IsNull() { + val := float32(kafkaModel.Timeout.ValueFloat64()) + return &val + } + return nil + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: doHeaders(), + Hash: doHash(), + Random: doRandom(), + RoundRobin: doRoundRobin(), + Sasl: doSasl(), + } + + err := union.FromNewOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return + } + default: diags.AddError(fmt.Sprintf("unhandled output type: %s", outputType), "") } @@ -168,15 +578,15 @@ func (model outputModel) toAPICreateModel(ctx context.Context) (union kbapi.NewO return } -func (model outputModel) toAPIUpdateModel(ctx context.Context) (union kbapi.UpdateOutputUnion, diags diag.Diagnostics) { +func (model outputModel) toAPIUpdateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.UpdateOutputUnion, diags diag.Diagnostics) { doSsl := func() *kbapi.UpdateOutputSsl { if utils.IsKnown(model.Ssl) { - sslModels := utils.ListTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) - if len(sslModels) > 0 { + sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) + if sslModel != nil { return &kbapi.UpdateOutputSsl{ - Certificate: sslModels[0].Certificate.ValueStringPointer(), - CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModels[0].CertificateAuthorities, path.Root("certificate_authorities"), &diags)), - Key: sslModels[0].Key.ValueStringPointer(), + Certificate: sslModel.Certificate.ValueStringPointer(), + CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), + Key: sslModel.Key.ValueStringPointer(), } } } @@ -223,6 +633,211 @@ func (model outputModel) toAPIUpdateModel(ctx context.Context) (union kbapi.Upda return } + case "kafka": + // Check minimum version requirement for Kafka output type + if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { + diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) 
+ return + } else if !supported { + diags.AddError("Unsupported version for Kafka output", + fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) + return + } + + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } + + // Helper functions for Kafka-specific complex types (Update version) + doHeaders := func() *[]struct { + Key string `json:"key"` + Value string `json:"value"` + } { + if utils.IsKnown(kafkaModel.Headers) { + headerModels := utils.ListTypeAs[outputHeadersModel](ctx, kafkaModel.Headers, path.Root("kafka").AtName("headers"), &diags) + if len(headerModels) > 0 { + headers := make([]struct { + Key string `json:"key"` + Value string `json:"value"` + }, len(headerModels)) + for i, h := range headerModels { + headers[i] = struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + Key: h.Key.ValueString(), + Value: h.Value.ValueString(), + } + } + return &headers + } + } + return nil + } + + doHash := func() *struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + } { + if utils.IsKnown(kafkaModel.Hash) { + hashModels := utils.ListTypeAs[outputHashModel](ctx, kafkaModel.Hash, path.Root("kafka").AtName("hash"), &diags) + if len(hashModels) > 0 { + return &struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + }{ + Hash: hashModels[0].Hash.ValueStringPointer(), + Random: hashModels[0].Random.ValueBoolPointer(), + } + } + } + return nil + } + + doRandom := func() *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } { + if utils.IsKnown(kafkaModel.Random) { + randomModels := utils.ListTypeAs[outputRandomModel](ctx, kafkaModel.Random, path.Root("kafka").AtName("random"), &diags) + if len(randomModels) > 0 { + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !randomModels[0].GroupEvents.IsNull() { + val := float32(randomModels[0].GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + } + } + } + return nil + } + + doRoundRobin := func() *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } { + if utils.IsKnown(kafkaModel.RoundRobin) { + roundRobinModels := utils.ListTypeAs[outputRoundRobinModel](ctx, kafkaModel.RoundRobin, path.Root("kafka").AtName("round_robin"), &diags) + if len(roundRobinModels) > 0 { + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !roundRobinModels[0].GroupEvents.IsNull() { + val := float32(roundRobinModels[0].GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + } + } + } + return nil + } + + doSasl := func() *struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + } { + if utils.IsKnown(kafkaModel.Sasl) { + saslModels := utils.ListTypeAs[outputSaslModel](ctx, kafkaModel.Sasl, path.Root("kafka").AtName("sasl"), &diags) + if len(saslModels) > 0 && !saslModels[0].Mechanism.IsNull() { + mechanism := kbapi.UpdateOutputKafkaSaslMechanism(saslModels[0].Mechanism.ValueString()) + return &struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + Mechanism: &mechanism, + } + } + } + return nil + } + + body := kbapi.UpdateOutputKafka{ + Type: utils.Pointer(kbapi.Kafka), + CaSha256: 
model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: doSsl(), + // Kafka-specific fields + AuthType: func() *kbapi.UpdateOutputKafkaAuthType { + if !kafkaModel.AuthType.IsNull() { + authType := kbapi.UpdateOutputKafkaAuthType(kafkaModel.AuthType.ValueString()) + return &authType + } + return nil + }(), + BrokerTimeout: func() *float32 { + if !kafkaModel.BrokerTimeout.IsNull() { + val := float32(kafkaModel.BrokerTimeout.ValueFloat64()) + return &val + } + return nil + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.UpdateOutputKafkaCompression { + if !kafkaModel.Compression.IsNull() { + comp := kbapi.UpdateOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + } + return nil + }(), + CompressionLevel: func() *float32 { + if !kafkaModel.CompressionLevel.IsNull() && !kafkaModel.Compression.IsNull() && kafkaModel.Compression.ValueString() == "gzip" { + val := float32(kafkaModel.CompressionLevel.ValueFloat64()) + return &val + } + return nil + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.UpdateOutputKafkaPartition { + if !kafkaModel.Partition.IsNull() { + part := kbapi.UpdateOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + } + return nil + }(), + RequiredAcks: func() *kbapi.UpdateOutputKafkaRequiredAcks { + if !kafkaModel.RequiredAcks.IsNull() { + acks := kbapi.UpdateOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &acks + } + return nil + }(), + Timeout: func() *float32 { + if !kafkaModel.Timeout.IsNull() { + val := float32(kafkaModel.Timeout.ValueFloat64()) + return &val + } + return nil + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: doHeaders(), + Hash: doHash(), + Random: doRandom(), + RoundRobin: doRoundRobin(), + Sasl: doSasl(), + } + + err := union.FromUpdateOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return + } + default: diags.AddError(fmt.Sprintf("unhandled output type: %s", outputType), "") } diff --git a/internal/fleet/output/resource.go b/internal/fleet/output/resource.go index 79918917e..5f6a90ea7 100644 --- a/internal/fleet/output/resource.go +++ b/internal/fleet/output/resource.go @@ -2,19 +2,25 @@ package output import ( "context" + "encoding/json" "fmt" "github.com/elastic/terraform-provider-elasticstack/internal/clients" + "github.com/hashicorp/go-version" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" ) var ( - _ resource.Resource = &outputResource{} - _ resource.ResourceWithConfigure = &outputResource{} - _ resource.ResourceWithImportState = &outputResource{} + _ resource.Resource = &outputResource{} + _ resource.ResourceWithConfigure = &outputResource{} + _ resource.ResourceWithImportState = &outputResource{} + _ resource.ResourceWithUpgradeState = &outputResource{} ) +var 
MinVersionOutputKafka = version.Must(version.NewVersion("8.13.0")) + // NewResource is a helper function to simplify the provider implementation. func NewResource() resource.Resource { return &outputResource{} @@ -37,3 +43,58 @@ func (r *outputResource) Metadata(ctx context.Context, req resource.MetadataRequ func (r *outputResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { resource.ImportStatePassthroughID(ctx, path.Root("output_id"), req, resp) } + +func (r *outputResource) UpgradeState(context.Context) map[int64]resource.StateUpgrader { + return map[int64]resource.StateUpgrader{ + 0: { + // Legacy provider versions used a block for the `ssl` attribute which means it was stored as a list. + // This upgrader migrates the list into a single object if available within the raw state + StateUpgrader: func(ctx context.Context, req resource.UpgradeStateRequest, resp *resource.UpgradeStateResponse) { + if req.RawState == nil || req.RawState.JSON == nil { + resp.Diagnostics.AddError("Invalid raw state", "Raw state or JSON is nil") + return + } + + // Default to returning the original state if no changes are needed + resp.DynamicValue = &tfprotov6.DynamicValue{ + JSON: req.RawState.JSON, + } + + var stateMap map[string]interface{} + err := json.Unmarshal(req.RawState.JSON, &stateMap) + if err != nil { + resp.Diagnostics.AddError("Failed to unmarshal raw state", err.Error()) + return + } + + sslInterface, ok := stateMap["ssl"] + if !ok { + return + } + + sslList, ok := sslInterface.([]any) + if !ok { + resp.Diagnostics.AddAttributeError(path.Root("ssl"), + "Unexpected type for legacy ssl attribute", + fmt.Sprintf("Expected []any, got %T", sslInterface), + ) + return + } + + if len(sslList) > 0 { + stateMap["ssl"] = sslList[0] + } else { + delete(stateMap, "ssl") + } + + stateJSON, err := json.Marshal(stateMap) + if err != nil { + resp.Diagnostics.AddError("Failed to marshal raw state", err.Error()) + return + } + + resp.DynamicValue.JSON = stateJSON + }, + }, + } +} diff --git a/internal/fleet/output/resource_test.go b/internal/fleet/output/resource_test.go index 47d324567..aa525f0c2 100644 --- a/internal/fleet/output/resource_test.go +++ b/internal/fleet/output/resource_test.go @@ -1,330 +1,171 @@ -package output_test +package output import ( "context" - "fmt" + "encoding/json" "testing" - "github.com/elastic/terraform-provider-elasticstack/internal/acctest" - "github.com/elastic/terraform-provider-elasticstack/internal/clients" - "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet" - "github.com/elastic/terraform-provider-elasticstack/internal/utils" - "github.com/elastic/terraform-provider-elasticstack/internal/versionutils" - "github.com/hashicorp/go-version" - sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest" - "github.com/hashicorp/terraform-plugin-testing/helper/resource" - "github.com/hashicorp/terraform-plugin-testing/terraform" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -var minVersionOutput = version.Must(version.NewVersion("8.6.0")) - -func TestAccResourceOutputElasticsearchFromSDK(t *testing.T) { - policyName := sdkacctest.RandString(22) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - CheckDestroy: checkResourceOutputDestroy, - Steps: []resource.TestStep{ - { - ExternalProviders: 
map[string]resource.ExternalProvider{ - "elasticstack": { - Source: "elastic/elasticstack", - VersionConstraint: "0.11.7", +func TestOutputResourceUpgradeState(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + rawState map[string]interface{} + expectedState map[string]interface{} + expectError bool + errorContains string + }{ + { + name: "successful upgrade - ssl list to object", + rawState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": []interface{}{ + map[string]interface{}{ + "certificate": "cert-content", + "key": "key-content", + "certificate_authorities": []interface{}{"ca1", "ca2"}, }, }, - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputCreateElasticsearch(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), - ), + "hosts": []interface{}{"https://localhost:9200"}, }, - { - ProtoV6ProviderFactories: acctest.Providers, - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputCreateElasticsearch(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), - ), + expectedState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": map[string]interface{}{ + "certificate": "cert-content", + "key": "key-content", + "certificate_authorities": []interface{}{"ca1", "ca2"}, + }, + "hosts": []interface{}{"https://localhost:9200"}, }, + expectError: false, }, - }) -} - -func TestAccResourceOutputElasticsearch(t *testing.T) { - policyName := sdkacctest.RandString(22) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - CheckDestroy: checkResourceOutputDestroy, - ProtoV6ProviderFactories: acctest.Providers, - Steps: []resource.TestStep{ - { - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - 
Config: testAccResourceOutputCreateElasticsearch(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Elasticsearch Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), - ), + { + name: "no ssl field - no changes", + rawState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "hosts": []interface{}{"https://localhost:9200"}, }, - { - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputUpdateElasticsearch(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Updated Elasticsearch Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-elasticsearch-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "elasticsearch"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "https://elasticsearch:9200"), - ), - }, - { - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputUpdateElasticsearch(policyName), - ResourceName: "elasticstack_fleet_output.test_output", - ImportState: true, - ImportStateVerify: true, + expectedState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "hosts": []interface{}{"https://localhost:9200"}, }, + expectError: false, }, - }) -} - -func TestAccResourceOutputLogstashFromSDK(t *testing.T) { - policyName := sdkacctest.RandString(22) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - CheckDestroy: checkResourceOutputDestroy, - Steps: []resource.TestStep{ - { - ExternalProviders: map[string]resource.ExternalProvider{ - "elasticstack": { - Source: "elastic/elasticstack", - VersionConstraint: "0.11.7", - }, - }, - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputCreateLogstash(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", 
"logstash"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate_authorities.0", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.key", "placeholder"), - ), + { + name: "empty ssl list - removes ssl field", + rawState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": []interface{}{}, + "hosts": []interface{}{"https://localhost:9200"}, }, - { - ProtoV6ProviderFactories: acctest.Providers, - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputCreateLogstash(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate_authorities.0", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.key", "placeholder"), - ), + expectedState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "hosts": []interface{}{"https://localhost:9200"}, }, + expectError: false, }, - }) -} - -func TestAccResourceOutputLogstash(t *testing.T) { - policyName := sdkacctest.RandString(22) - - resource.Test(t, resource.TestCase{ - PreCheck: func() { acctest.PreCheck(t) }, - CheckDestroy: checkResourceOutputDestroy, - ProtoV6ProviderFactories: acctest.Providers, - Steps: []resource.TestStep{ - { - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputCreateLogstash(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Logstash Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", 
"\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate_authorities.0", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.key", "placeholder"), - ), + { + name: "ssl not an array - returns error", + rawState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": "invalid-type", + "hosts": []interface{}{"https://localhost:9200"}, + }, + expectedState: nil, + expectError: true, + errorContains: "Unexpected type for legacy ssl attribute", + }, + { + name: "multiple ssl items - takes first item", + rawState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": []interface{}{ + map[string]interface{}{"certificate": "cert1"}, + map[string]interface{}{"certificate": "cert2"}, + }, + "hosts": []interface{}{"https://localhost:9200"}, }, - { - SkipFunc: versionutils.CheckIfVersionIsUnsupported(minVersionOutput), - Config: testAccResourceOutputUpdateLogstash(policyName), - Check: resource.ComposeTestCheckFunc( - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "name", fmt.Sprintf("Updated Logstash Output %s", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "id", fmt.Sprintf("%s-logstash-output", policyName)), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "type", "logstash"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "config_yaml", "\"ssl.verification_mode\": \"none\"\n"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_integrations", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "default_monitoring", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "hosts.0", "logstash:5044"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate_authorities.0", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.certificate", "placeholder"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "ssl.0.key", "placeholder"), - ), + expectedState: map[string]interface{}{ + "id": "test-output", + "name": "Test Output", + "type": "elasticsearch", + "ssl": map[string]interface{}{"certificate": "cert1"}, + "hosts": []interface{}{"https://localhost:9200"}, }, + expectError: false, }, - }) -} - -func testAccResourceOutputCreateElasticsearch(id string) string { - return fmt.Sprintf(` -provider "elasticstack" { - elasticsearch {} - kibana {} -} - -resource "elasticstack_fleet_output" "test_output" { - name = "Elasticsearch Output %s" - output_id = "%s-elasticsearch-output" - type = "elasticsearch" - config_yaml = yamlencode({ - "ssl.verification_mode" : "none" - }) - default_integrations = false - default_monitoring = false - hosts = [ - "https://elasticsearch:9200" - ] -} -`, id, id) -} - -func testAccResourceOutputUpdateElasticsearch(id string) 
string { - return fmt.Sprintf(` -provider "elasticstack" { - elasticsearch {} - kibana {} -} - -resource "elasticstack_fleet_output" "test_output" { - name = "Updated Elasticsearch Output %s" - output_id = "%s-elasticsearch-output" - type = "elasticsearch" - config_yaml = yamlencode({ - "ssl.verification_mode" : "none" - }) - default_integrations = false - default_monitoring = false - hosts = [ - "https://elasticsearch:9200" - ] -} -`, id, id) -} - -func testAccResourceOutputCreateLogstash(id string) string { - return fmt.Sprintf(` -provider "elasticstack" { - elasticsearch {} - kibana {} -} - -resource "elasticstack_fleet_output" "test_output" { - name = "Logstash Output %s" - type = "logstash" - output_id = "%s-logstash-output" - config_yaml = yamlencode({ - "ssl.verification_mode" : "none" - }) - default_integrations = false - default_monitoring = false - hosts = [ - "logstash:5044" - ] - ssl { - certificate_authorities = ["placeholder"] - certificate = "placeholder" - key = "placeholder" - } -} -`, id, id) -} - -func testAccResourceOutputUpdateLogstash(id string) string { - return fmt.Sprintf(` -provider "elasticstack" { - elasticsearch {} - kibana {} -} - -resource "elasticstack_fleet_output" "test_output" { - name = "Updated Logstash Output %s" - output_id = "%s-logstash-output" - type = "logstash" - config_yaml = yamlencode({ - "ssl.verification_mode" : "none" - }) - default_integrations = false - default_monitoring = false - hosts = [ - "logstash:5044" - ] - ssl { - certificate_authorities = ["placeholder"] - certificate = "placeholder" - key = "placeholder" - } -} -`, id, id) -} - -func checkResourceOutputDestroy(s *terraform.State) error { - client, err := clients.NewAcceptanceTestingClient() - if err != nil { - return err } - for _, rs := range s.RootModule().Resources { - if rs.Type != "elasticstack_fleet_output" { - continue - } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() - fleetClient, err := client.GetFleetClient() - if err != nil { - return err - } - output, diags := fleet.GetOutput(context.Background(), fleetClient, rs.Primary.ID) - if diags.HasError() { - return utils.FwDiagsAsError(diags) - } - if output != nil { - return fmt.Errorf("output id=%v still exists, but it should have been removed", rs.Primary.ID) - } + // Marshal the raw state to JSON + rawStateJSON, err := json.Marshal(tt.rawState) + require.NoError(t, err) + + // Create the upgrade request + req := resource.UpgradeStateRequest{ + RawState: &tfprotov6.RawState{ + JSON: rawStateJSON, + }, + } + + // Create a response + resp := &resource.UpgradeStateResponse{} + + // Create the resource and call UpgradeState + r := &outputResource{} + upgraders := r.UpgradeState(context.Background()) + upgrader := upgraders[0] + upgrader.StateUpgrader(context.Background(), req, resp) + + if tt.expectError { + require.True(t, resp.Diagnostics.HasError(), "Expected error but got none") + if tt.errorContains != "" { + errorSummary := "" + for _, diag := range resp.Diagnostics.Errors() { + errorSummary += diag.Summary() + " " + diag.Detail() + } + assert.Contains(t, errorSummary, tt.errorContains) + } + return + } + + // Check no errors occurred + require.False(t, resp.Diagnostics.HasError(), "Unexpected error: %v", resp.Diagnostics.Errors()) + + // Check that a DynamicValue is always returned + require.NotNil(t, resp.DynamicValue, "DynamicValue should always be returned") + + // Unmarshal the upgraded state to compare + var actualState map[string]interface{} + err = 
json.Unmarshal(resp.DynamicValue.JSON, &actualState) + require.NoError(t, err) + + assert.Equal(t, tt.expectedState, actualState) + }) } - return nil } diff --git a/internal/fleet/output/schema.go b/internal/fleet/output/schema.go index 74919729c..ea51bc3ff 100644 --- a/internal/fleet/output/schema.go +++ b/internal/fleet/output/schema.go @@ -3,12 +3,17 @@ package output import ( "context" + "github.com/elastic/terraform-provider-elasticstack/internal/utils/validators" + "github.com/hashicorp/terraform-plugin-framework-validators/int64validator" "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" "github.com/hashicorp/terraform-plugin-framework/schema/validator" @@ -21,6 +26,7 @@ func (r *outputResource) Schema(ctx context.Context, req resource.SchemaRequest, func getSchema() schema.Schema { return schema.Schema{ + Version: 1, Description: "Creates a new Fleet Output.", Attributes: map[string]schema.Attribute{ "id": schema.StringAttribute{ @@ -47,7 +53,7 @@ func getSchema() schema.Schema { Description: "The output type.", Required: true, Validators: []validator.String{ - stringvalidator.OneOf("elasticsearch", "logstash"), + stringvalidator.OneOf("elasticsearch", "logstash", "kafka"), }, }, "hosts": schema.ListAttribute{ @@ -83,28 +89,219 @@ func getSchema() schema.Schema { Optional: true, Sensitive: true, }, - }, - Blocks: map[string]schema.Block{ - "ssl": schema.ListNestedBlock{ + "ssl": schema.SingleNestedAttribute{ Description: "SSL configuration.", - Validators: []validator.List{ - listvalidator.SizeAtMost(1), + Optional: true, + Attributes: map[string]schema.Attribute{ + "certificate_authorities": schema.ListAttribute{ + Description: "Server SSL certificate authorities.", + Optional: true, + ElementType: types.StringType, + }, + "certificate": schema.StringAttribute{ + Description: "Client SSL certificate.", + Required: true, + }, + "key": schema.StringAttribute{ + Description: "Client SSL certificate key.", + Required: true, + Sensitive: true, + }, }, - NestedObject: schema.NestedBlockObject{ - Attributes: map[string]schema.Attribute{ - "certificate_authorities": schema.ListAttribute{ - Description: "Server SSL certificate authorities.", - Optional: true, - ElementType: types.StringType, + }, + "kafka": schema.SingleNestedAttribute{ + Description: "Kafka-specific configuration.", + Optional: true, + Attributes: map[string]schema.Attribute{ + "auth_type": schema.StringAttribute{ + Description: "Authentication type for Kafka output.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("none", "user_pass", "ssl", "kerberos"), + }, + }, + "broker_timeout": schema.Float64Attribute{ + Description: "Kafka broker timeout.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Float64{ + 
float64planmodifier.UseStateForUnknown(), + }, + }, + "client_id": schema.StringAttribute{ + Description: "Kafka client ID.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), + }, + }, + "compression": schema.StringAttribute{ + Description: "Compression type for Kafka output.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("gzip", "snappy", "lz4", "none"), + }, + }, + "compression_level": schema.Float64Attribute{ + Description: "Compression level for Kafka output.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + Validators: []validator.Float64{ + validators.Float64ConditionalRequirement( + path.Root("kafka").AtName("compression"), + []string{"gzip"}, + "compression_level can only be set when compression is 'gzip'", + ), + }, + }, + "connection_type": schema.StringAttribute{ + Description: "Connection type for Kafka output.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("plaintext", "encryption"), + validators.StringConditionalRequirementSingle( + path.Root("kafka").AtName("auth_type"), + "none", + "connection_type can only be set when auth_type is 'none'", + ), + }, + }, + "topic": schema.StringAttribute{ + Description: "Kafka topic.", + Optional: true, + }, + "partition": schema.StringAttribute{ + Description: "Partition strategy for Kafka output.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("random", "round_robin", "hash"), + }, + }, + "required_acks": schema.Int64Attribute{ + Description: "Number of acknowledgments required for Kafka output.", + Optional: true, + Validators: []validator.Int64{ + int64validator.OneOf(-1, 0, 1), + }, + }, + "timeout": schema.Float64Attribute{ + Description: "Timeout for Kafka output.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.Float64{ + float64planmodifier.UseStateForUnknown(), + }, + }, + "version": schema.StringAttribute{ + Description: "Kafka version.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.String{ + stringplanmodifier.UseStateForUnknown(), }, - "certificate": schema.StringAttribute{ - Description: "Client SSL certificate.", - Required: true, + }, + "username": schema.StringAttribute{ + Description: "Username for Kafka authentication.", + Optional: true, + }, + "password": schema.StringAttribute{ + Description: "Password for Kafka authentication.", + Optional: true, + Sensitive: true, + }, + "key": schema.StringAttribute{ + Description: "Key field for Kafka messages.", + Optional: true, + }, + "headers": schema.ListNestedAttribute{ + Description: "Headers for Kafka messages.", + Optional: true, + Computed: true, + PlanModifiers: []planmodifier.List{ + listplanmodifier.UseStateForUnknown(), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "key": schema.StringAttribute{ + Description: "Header key.", + Required: true, + }, + "value": schema.StringAttribute{ + Description: "Header value.", + Required: true, + }, + }, + }, + }, + "hash": schema.ListNestedAttribute{ + Description: "Hash configuration for Kafka partition.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "hash": schema.StringAttribute{ + Description: "Hash field.", + Optional: true, + }, + "random": schema.BoolAttribute{ + 
Description: "Use random hash.", + Optional: true, + }, + }, + }, + }, + "random": schema.ListNestedAttribute{ + Description: "Random configuration for Kafka partition.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "group_events": schema.Float64Attribute{ + Description: "Number of events to group.", + Optional: true, + }, + }, + }, + }, + "round_robin": schema.ListNestedAttribute{ + Description: "Round robin configuration for Kafka partition.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "group_events": schema.Float64Attribute{ + Description: "Number of events to group.", + Optional: true, + }, + }, }, - "key": schema.StringAttribute{ - Description: "Client SSL certificate key.", - Required: true, - Sensitive: true, + }, + "sasl": schema.ListNestedAttribute{ + Description: "SASL configuration for Kafka authentication.", + Optional: true, + Validators: []validator.List{ + listvalidator.SizeAtMost(1), + }, + NestedObject: schema.NestedAttributeObject{ + Attributes: map[string]schema.Attribute{ + "mechanism": schema.StringAttribute{ + Description: "SASL mechanism.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("PLAIN", "SCRAM-SHA-256", "SCRAM-SHA-512"), + }, + }, + }, }, }, }, @@ -113,6 +310,30 @@ func getSchema() schema.Schema { } } -func getSslAttrTypes() attr.Type { - return getSchema().Blocks["ssl"].Type().(attr.TypeWithElementType).ElementType() +func getSslAttrTypes() map[string]attr.Type { + return getSchema().Attributes["ssl"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes() +} + +func getHeadersAttrTypes() attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["headers"].GetType().(attr.TypeWithElementType).ElementType() +} + +func getHashAttrTypes() attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["hash"].GetType().(attr.TypeWithElementType).ElementType() +} + +func getRandomAttrTypes() attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["random"].GetType().(attr.TypeWithElementType).ElementType() +} + +func getRoundRobinAttrTypes() attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["round_robin"].GetType().(attr.TypeWithElementType).ElementType() +} + +func getSaslAttrTypes() attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["sasl"].GetType().(attr.TypeWithElementType).ElementType() +} + +func getKafkaAttrTypes() map[string]attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).GetType().(attr.TypeWithAttributeTypes).AttributeTypes() } diff --git a/internal/fleet/output/update.go b/internal/fleet/output/update.go index ca95177a9..46688d5b9 100644 --- a/internal/fleet/output/update.go +++ b/internal/fleet/output/update.go @@ -22,7 +22,7 @@ func (r *outputResource) Update(ctx context.Context, req resource.UpdateRequest, return } - body, diags := planModel.toAPIUpdateModel(ctx) + body, diags := planModel.toAPIUpdateModel(ctx, r.client) resp.Diagnostics.Append(diags...) 
if resp.Diagnostics.HasError() { return diff --git a/internal/utils/validators/conditional.go b/internal/utils/validators/conditional.go new file mode 100644 index 000000000..e5edad69b --- /dev/null +++ b/internal/utils/validators/conditional.go @@ -0,0 +1,229 @@ +package validators + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// conditionalRequirement represents a validator which ensures that an attribute +// can only be set if another attribute at a specified path equals one of the specified values. +// This is a shared implementation that can be used for both string and float64 validators. +type conditionalRequirement struct { + dependentPath path.Path + allowedValues []string + failureMessage string +} + +// Description describes the validation in plain text formatting. +func (v conditionalRequirement) Description(_ context.Context) string { + if len(v.allowedValues) == 1 { + return fmt.Sprintf("value can only be set when %s equals %q", v.dependentPath, v.allowedValues[0]) + } + return fmt.Sprintf("value can only be set when %s is one of %v", v.dependentPath, v.allowedValues) +} + +// MarkdownDescription describes the validation in Markdown formatting. +func (v conditionalRequirement) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// validateConditionalRequirement was an attempt at shared logic but is not used +// The validation logic is implemented directly in ValidateString and ValidateFloat64 methods + +// ValidateString performs the validation for string attributes. +func (v conditionalRequirement) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + // If the current attribute is null or unknown, no validation needed + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + // Get the value at the dependent path + var dependentValue types.String + diags := request.Config.GetAttribute(ctx, v.dependentPath, &dependentValue) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + // If dependent value is null, unknown, or doesn't match any allowed values, + // then the current attribute should not be set + dependentValueStr := dependentValue.ValueString() + isAllowed := false + + if !dependentValue.IsNull() && !dependentValue.IsUnknown() { + for _, allowedValue := range v.allowedValues { + if dependentValueStr == allowedValue { + isAllowed = true + break + } + } + } + + if !isAllowed { + if v.failureMessage != "" { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + v.failureMessage, + ) + } else { + if len(v.allowedValues) == 1 { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", + request.Path, + v.dependentPath, + v.allowedValues[0], + v.dependentPath, + dependentValueStr, + ), + ) + } else { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s is one of %v, but %s is %q", + request.Path, + v.dependentPath, + v.allowedValues, + v.dependentPath, + dependentValueStr, + ), + ) + } + } + } +} + +// ValidateFloat64 performs the validation for float64 attributes. 
+func (v conditionalRequirement) ValidateFloat64(ctx context.Context, request validator.Float64Request, response *validator.Float64Response) { + // If the current attribute is null or unknown, no validation needed + if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { + return + } + + // Get the value at the dependent path + var dependentValue types.String + diags := request.Config.GetAttribute(ctx, v.dependentPath, &dependentValue) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + // If dependent value is null, unknown, or doesn't match any allowed values, + // then the current attribute should not be set + dependentValueStr := dependentValue.ValueString() + isAllowed := false + + if !dependentValue.IsNull() && !dependentValue.IsUnknown() { + for _, allowedValue := range v.allowedValues { + if dependentValueStr == allowedValue { + isAllowed = true + break + } + } + } + + if !isAllowed { + if v.failureMessage != "" { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + v.failureMessage, + ) + } else { + if len(v.allowedValues) == 1 { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", + request.Path, + v.dependentPath, + v.allowedValues[0], + v.dependentPath, + dependentValueStr, + ), + ) + } else { + response.Diagnostics.AddAttributeError( + request.Path, + "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s is one of %v, but %s is %q", + request.Path, + v.dependentPath, + v.allowedValues, + v.dependentPath, + dependentValueStr, + ), + ) + } + } + } +} + +// StringConditionalRequirement returns a validator which ensures that a string attribute +// can only be set if another attribute at the specified path equals one of the specified values. +// +// The dependentPath parameter should use path.Root() to specify the attribute path. +// For example: path.Root("auth_type") +// +// Example usage: +// +// "connection_type": schema.StringAttribute{ +// Optional: true, +// Validators: []validator.String{ +// validators.StringConditionalRequirement( +// path.Root("auth_type"), +// []string{"none"}, +// "connection_type can only be set when auth_type is 'none'", +// ), +// }, +// }, +func StringConditionalRequirement(dependentPath path.Path, allowedValues []string, failureMessage string) validator.String { + return conditionalRequirement{ + dependentPath: dependentPath, + allowedValues: allowedValues, + failureMessage: failureMessage, + } +} + +// StringConditionalRequirementSingle is a convenience function for when there's only one allowed value. +func StringConditionalRequirementSingle(dependentPath path.Path, requiredValue string, failureMessage string) validator.String { + return StringConditionalRequirement(dependentPath, []string{requiredValue}, failureMessage) +} + +// Float64ConditionalRequirement returns a validator which ensures that a float64 attribute +// can only be set if another attribute at the specified path equals one of the specified values. +// +// The dependentPath parameter should use path.Root() to specify the attribute path. 
+// For example: path.Root("compression") +// +// Example usage: +// +// "compression_level": schema.Float64Attribute{ +// Optional: true, +// Validators: []validator.Float64{ +// validators.Float64ConditionalRequirement( +// path.Root("compression"), +// []string{"gzip"}, +// "compression_level can only be set when compression is 'gzip'", +// ), +// }, +// }, +func Float64ConditionalRequirement(dependentPath path.Path, allowedValues []string, failureMessage string) validator.Float64 { + return conditionalRequirement{ + dependentPath: dependentPath, + allowedValues: allowedValues, + failureMessage: failureMessage, + } +} + +// Float64ConditionalRequirementSingle is a convenience function for when there's only one allowed value. +func Float64ConditionalRequirementSingle(dependentPath path.Path, requiredValue string, failureMessage string) validator.Float64 { + return Float64ConditionalRequirement(dependentPath, []string{requiredValue}, failureMessage) +} diff --git a/internal/utils/validators/conditional_test.go b/internal/utils/validators/conditional_test.go new file mode 100644 index 000000000..77309cc08 --- /dev/null +++ b/internal/utils/validators/conditional_test.go @@ -0,0 +1,284 @@ +package validators + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource/schema" + "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" +) + +func TestStringConditionalRequirement(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + currentValue types.String + dependentValue types.String + expectedError bool + } + + testCases := []testCase{ + { + name: "valid - current null, dependent any value", + currentValue: types.StringNull(), + dependentValue: types.StringValue("user_pass"), + expectedError: false, + }, + { + name: "valid - current unknown, dependent any value", + currentValue: types.StringUnknown(), + dependentValue: types.StringValue("user_pass"), + expectedError: false, + }, + { + name: "valid - current set, dependent matches required value", + currentValue: types.StringValue("plaintext"), + dependentValue: types.StringValue("none"), + expectedError: false, + }, + { + name: "invalid - current set, dependent doesn't match required value", + currentValue: types.StringValue("plaintext"), + dependentValue: types.StringValue("user_pass"), + expectedError: true, + }, + { + name: "invalid - current set, dependent is null", + currentValue: types.StringValue("plaintext"), + dependentValue: types.StringNull(), + expectedError: true, + }, + { + name: "invalid - current set, dependent is unknown", + currentValue: types.StringValue("plaintext"), + dependentValue: types.StringUnknown(), + expectedError: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // Create a simple schema for testing + testSchema := schema.Schema{ + Attributes: map[string]schema.Attribute{ + "connection_type": schema.StringAttribute{ + Optional: true, + }, + "auth_type": schema.StringAttribute{ + Optional: true, + }, + }, + } + + // Create raw config values + currentTfValue, err := testCase.currentValue.ToTerraformValue(context.Background()) + if err != nil { + t.Fatalf("Error converting current value: %v", err) + } + dependentTfValue, err := 
testCase.dependentValue.ToTerraformValue(context.Background()) + if err != nil { + t.Fatalf("Error converting dependent value: %v", err) + } + + rawConfigValues := map[string]tftypes.Value{ + "connection_type": currentTfValue, + "auth_type": dependentTfValue, + } + + rawConfig := tftypes.NewValue( + tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "connection_type": tftypes.String, + "auth_type": tftypes.String, + }, + }, + rawConfigValues, + ) + + config := tfsdk.Config{ + Raw: rawConfig, + Schema: testSchema, + } + + // Create validator + v := StringConditionalRequirement( + path.Root("auth_type"), + []string{"none"}, + "connection_type can only be set when auth_type is 'none'", + ) + + // Create validation request + request := validator.StringRequest{ + Path: path.Root("connection_type"), + ConfigValue: testCase.currentValue, + Config: config, + } + + // Run validation + response := &validator.StringResponse{} + v.ValidateString(context.Background(), request, response) + + // Check result + if testCase.expectedError { + if !response.Diagnostics.HasError() { + t.Errorf("Expected validation error but got none") + } + } else { + if response.Diagnostics.HasError() { + t.Errorf("Expected no validation error but got: %v", response.Diagnostics.Errors()) + } + } + }) + } +} + +func TestStringConditionalRequirement_Description(t *testing.T) { + v := StringConditionalRequirement( + path.Root("auth_type"), + []string{"none"}, + "connection_type can only be set when auth_type is 'none'", + ) + + description := v.Description(context.Background()) + expected := "value can only be set when auth_type equals \"none\"" + + if description != expected { + t.Errorf("Expected description %q, got %q", expected, description) + } +} + +func TestFloat64ConditionalRequirement(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + currentValue types.Float64 + dependentValue types.String + expectedError bool + } + + testCases := []testCase{ + { + name: "valid - current null, dependent any value", + currentValue: types.Float64Null(), + dependentValue: types.StringValue("none"), + expectedError: false, + }, + { + name: "valid - current unknown, dependent any value", + currentValue: types.Float64Unknown(), + dependentValue: types.StringValue("none"), + expectedError: false, + }, + { + name: "valid - current set, dependent matches required value", + currentValue: types.Float64Value(6.0), + dependentValue: types.StringValue("gzip"), + expectedError: false, + }, + { + name: "invalid - current set, dependent doesn't match required value", + currentValue: types.Float64Value(6.0), + dependentValue: types.StringValue("none"), + expectedError: true, + }, + { + name: "invalid - current set, dependent is null", + currentValue: types.Float64Value(6.0), + dependentValue: types.StringNull(), + expectedError: true, + }, + { + name: "invalid - current set, dependent is unknown", + currentValue: types.Float64Value(6.0), + dependentValue: types.StringUnknown(), + expectedError: true, + }, + } + + for _, testCase := range testCases { + testCase := testCase + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + + // Create a simple schema for testing + testSchema := schema.Schema{ + Attributes: map[string]schema.Attribute{ + "compression_level": schema.Float64Attribute{ + Optional: true, + }, + "compression": schema.StringAttribute{ + Optional: true, + }, + }, + } + + // Create raw config values + currentTfValue, err := testCase.currentValue.ToTerraformValue(context.Background()) + if err != nil { + 
t.Fatalf("Error converting current value: %v", err) + } + dependentTfValue, err := testCase.dependentValue.ToTerraformValue(context.Background()) + if err != nil { + t.Fatalf("Error converting dependent value: %v", err) + } + + rawConfigValues := map[string]tftypes.Value{ + "compression_level": currentTfValue, + "compression": dependentTfValue, + } + + rawConfig := tftypes.NewValue( + tftypes.Object{ + AttributeTypes: map[string]tftypes.Type{ + "compression_level": tftypes.Number, + "compression": tftypes.String, + }, + }, + rawConfigValues, + ) + + config := tfsdk.Config{ + Raw: rawConfig, + Schema: testSchema, + } + + // Create validator + v := Float64ConditionalRequirement( + path.Root("compression"), + []string{"gzip"}, + "compression_level can only be set when compression is 'gzip'", + ) + + // Create validation request + request := validator.Float64Request{ + Path: path.Root("compression_level"), + ConfigValue: testCase.currentValue, + Config: config, + } + + // Run validation + response := &validator.Float64Response{} + v.ValidateFloat64(context.Background(), request, response) + + // Check result + if testCase.expectedError { + if !response.Diagnostics.HasError() { + t.Errorf("Expected validation error but got none") + } + } else { + if response.Diagnostics.HasError() { + t.Errorf("Expected no validation error but got: %v", response.Diagnostics.Errors()) + } + } + }) + } +} From 87dd7e93b39f390d0cd8fc6378eb3e341a5aa9ca Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Sun, 14 Sep 2025 20:34:37 +1000 Subject: [PATCH 3/9] Docs --- docs/resources/fleet_output.md | 241 +++++++++++++++++- .../kafka_advanced.tf | 112 ++++++++ .../elasticstack_fleet_output/kafka_basic.tf | 44 ++++ templates/resources/fleet_output.md.tmpl | 10 + 4 files changed, 405 insertions(+), 2 deletions(-) create mode 100644 examples/resources/elasticstack_fleet_output/kafka_advanced.tf create mode 100644 examples/resources/elasticstack_fleet_output/kafka_basic.tf diff --git a/docs/resources/fleet_output.md b/docs/resources/fleet_output.md index 800c06540..8f1410041 100644 --- a/docs/resources/fleet_output.md +++ b/docs/resources/fleet_output.md @@ -12,6 +12,8 @@ Creates or updates a Fleet Output. 
## Example Usage +### Basic output + ```terraform provider "elasticstack" { kibana {} @@ -31,6 +33,172 @@ resource "elasticstack_fleet_output" "test_output" { } ``` +### Basic Kafka output + +```terraform +terraform { + required_providers { + elasticstack = { + source = "elastic/elasticstack" + version = "~> 0.11" + } + } +} + +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +# Basic Kafka Fleet Output +resource "elasticstack_fleet_output" "kafka_basic" { + name = "Basic Kafka Output" + output_id = "kafka-basic-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = [ + "kafka:9092" + ] + + # Basic Kafka configuration + kafka = { + auth_type = "user_pass" + username = "kafka_user" + password = "kafka_password" + topic = "elastic-beats" + partition = "hash" + compression = "gzip" + required_acks = 1 + + headers = [ + { + key = "environment" + value = "production" + } + ] + } +} +``` + +### Advanced Kafka output + +```terraform +terraform { + required_providers { + elasticstack = { + source = "elastic/elasticstack" + version = "~> 0.11" + } + } +} + +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +# Advanced Kafka Fleet Output with SSL authentication +resource "elasticstack_fleet_output" "kafka_advanced" { + name = "Advanced Kafka Output" + output_id = "kafka-advanced-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = [ + "kafka1:9092", + "kafka2:9092", + "kafka3:9092" + ] + + # Advanced Kafka configuration + kafka = { + auth_type = "ssl" + topic = "elastic-logs" + partition = "round_robin" + compression = "snappy" + required_acks = -1 + broker_timeout = 10 + timeout = 30 + version = "2.6.0" + client_id = "elastic-beats-client" + + # Custom headers for message metadata + headers = [ + { + key = "datacenter" + value = "us-west-1" + }, + { + key = "service" + value = "beats" + }, + { + key = "environment" + value = "production" + } + ] + + # Hash-based partitioning + hash = [ + { + hash = "host.name" + random = false + } + ] + + # SASL configuration + sasl = [ + { + mechanism = "SCRAM-SHA-256" + } + ] + } + + # SSL configuration (reusing common SSL block) + ssl = { + certificate_authorities = [ + file("${path.module}/ca.crt") + ] + certificate = file("${path.module}/client.crt") + key = file("${path.module}/client.key") + } + + # Additional YAML configuration for advanced settings + config_yaml = yamlencode({ + "ssl.verification_mode" = "full" + "ssl.supported_protocols" = ["TLSv1.2", "TLSv1.3"] + "max.message.bytes" = 1000000 + }) +} + +# Example showing round-robin partitioning with event grouping +resource "elasticstack_fleet_output" "kafka_round_robin" { + name = "Kafka Round Robin Output" + output_id = "kafka-round-robin-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = ["kafka:9092"] + + kafka = { + auth_type = "none" + topic = "elastic-metrics" + partition = "round_robin" + compression = "lz4" + + round_robin = [ + { + group_events = 100 + } + ] + } +} +``` + ## Schema @@ -47,14 +215,83 @@ resource "elasticstack_fleet_output" "test_output" { - `default_integrations` (Boolean) Make this output the default for agent integrations. - `default_monitoring` (Boolean) Make this output the default for agent monitoring. - `hosts` (List of String) A list of hosts. +- `kafka` (Attributes) Kafka-specific configuration. (see [below for nested schema](#nestedatt--kafka)) - `output_id` (String) Unique identifier of the output. 
-- `ssl` (Block List) SSL configuration. (see [below for nested schema](#nestedblock--ssl)) +- `ssl` (Attributes) SSL configuration. (see [below for nested schema](#nestedatt--ssl)) ### Read-Only - `id` (String) The ID of this resource. - + +### Nested Schema for `kafka` + +Optional: + +- `auth_type` (String) Authentication type for Kafka output. +- `broker_timeout` (Number) Kafka broker timeout. +- `client_id` (String) Kafka client ID. +- `compression` (String) Compression type for Kafka output. +- `compression_level` (Number) Compression level for Kafka output. +- `connection_type` (String) Connection type for Kafka output. +- `hash` (Attributes List) Hash configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--hash)) +- `headers` (Attributes List) Headers for Kafka messages. (see [below for nested schema](#nestedatt--kafka--headers)) +- `key` (String) Key field for Kafka messages. +- `partition` (String) Partition strategy for Kafka output. +- `password` (String, Sensitive) Password for Kafka authentication. +- `random` (Attributes List) Random configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--random)) +- `required_acks` (Number) Number of acknowledgments required for Kafka output. +- `round_robin` (Attributes List) Round robin configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--round_robin)) +- `sasl` (Attributes List) SASL configuration for Kafka authentication. (see [below for nested schema](#nestedatt--kafka--sasl)) +- `timeout` (Number) Timeout for Kafka output. +- `topic` (String) Kafka topic. +- `username` (String) Username for Kafka authentication. +- `version` (String) Kafka version. + + +### Nested Schema for `kafka.hash` + +Optional: + +- `hash` (String) Hash field. +- `random` (Boolean) Use random hash. + + + +### Nested Schema for `kafka.headers` + +Required: + +- `key` (String) Header key. +- `value` (String) Header value. + + + +### Nested Schema for `kafka.random` + +Optional: + +- `group_events` (Number) Number of events to group. + + + +### Nested Schema for `kafka.round_robin` + +Optional: + +- `group_events` (Number) Number of events to group. + + + +### Nested Schema for `kafka.sasl` + +Optional: + +- `mechanism` (String) SASL mechanism. 
+ + + + ### Nested Schema for `ssl` Required: diff --git a/examples/resources/elasticstack_fleet_output/kafka_advanced.tf b/examples/resources/elasticstack_fleet_output/kafka_advanced.tf new file mode 100644 index 000000000..497ba563a --- /dev/null +++ b/examples/resources/elasticstack_fleet_output/kafka_advanced.tf @@ -0,0 +1,112 @@ +terraform { + required_providers { + elasticstack = { + source = "elastic/elasticstack" + version = "~> 0.11" + } + } +} + +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +# Advanced Kafka Fleet Output with SSL authentication +resource "elasticstack_fleet_output" "kafka_advanced" { + name = "Advanced Kafka Output" + output_id = "kafka-advanced-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = [ + "kafka1:9092", + "kafka2:9092", + "kafka3:9092" + ] + + # Advanced Kafka configuration + kafka = { + auth_type = "ssl" + topic = "elastic-logs" + partition = "round_robin" + compression = "snappy" + required_acks = -1 + broker_timeout = 10 + timeout = 30 + version = "2.6.0" + client_id = "elastic-beats-client" + + # Custom headers for message metadata + headers = [ + { + key = "datacenter" + value = "us-west-1" + }, + { + key = "service" + value = "beats" + }, + { + key = "environment" + value = "production" + } + ] + + # Hash-based partitioning + hash = [ + { + hash = "host.name" + random = false + } + ] + + # SASL configuration + sasl = [ + { + mechanism = "SCRAM-SHA-256" + } + ] + } + + # SSL configuration (reusing common SSL block) + ssl = { + certificate_authorities = [ + file("${path.module}/ca.crt") + ] + certificate = file("${path.module}/client.crt") + key = file("${path.module}/client.key") + } + + # Additional YAML configuration for advanced settings + config_yaml = yamlencode({ + "ssl.verification_mode" = "full" + "ssl.supported_protocols" = ["TLSv1.2", "TLSv1.3"] + "max.message.bytes" = 1000000 + }) +} + +# Example showing round-robin partitioning with event grouping +resource "elasticstack_fleet_output" "kafka_round_robin" { + name = "Kafka Round Robin Output" + output_id = "kafka-round-robin-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = ["kafka:9092"] + + kafka = { + auth_type = "none" + topic = "elastic-metrics" + partition = "round_robin" + compression = "lz4" + + round_robin = [ + { + group_events = 100 + } + ] + } +} diff --git a/examples/resources/elasticstack_fleet_output/kafka_basic.tf b/examples/resources/elasticstack_fleet_output/kafka_basic.tf new file mode 100644 index 000000000..b1efba34e --- /dev/null +++ b/examples/resources/elasticstack_fleet_output/kafka_basic.tf @@ -0,0 +1,44 @@ +terraform { + required_providers { + elasticstack = { + source = "elastic/elasticstack" + version = "~> 0.11" + } + } +} + +provider "elasticstack" { + elasticsearch {} + kibana {} +} + +# Basic Kafka Fleet Output +resource "elasticstack_fleet_output" "kafka_basic" { + name = "Basic Kafka Output" + output_id = "kafka-basic-output" + type = "kafka" + default_integrations = false + default_monitoring = false + + hosts = [ + "kafka:9092" + ] + + # Basic Kafka configuration + kafka = { + auth_type = "user_pass" + username = "kafka_user" + password = "kafka_password" + topic = "elastic-beats" + partition = "hash" + compression = "gzip" + required_acks = 1 + + headers = [ + { + key = "environment" + value = "production" + } + ] + } +} diff --git a/templates/resources/fleet_output.md.tmpl b/templates/resources/fleet_output.md.tmpl index 
d709042e6..9d3621ac8 100644 --- a/templates/resources/fleet_output.md.tmpl +++ b/templates/resources/fleet_output.md.tmpl @@ -12,8 +12,18 @@ Creates or updates a Fleet Output. ## Example Usage +### Basic output + {{ tffile "examples/resources/elasticstack_fleet_output/resource.tf" }} +### Basic Kafka output + +{{ tffile "examples/resources/elasticstack_fleet_output/kafka_basic.tf" }} + +### Advanced Kafka output + +{{ tffile "examples/resources/elasticstack_fleet_output/kafka_advanced.tf" }} + {{ .SchemaMarkdown | trimspace }} ## Import From d8dd90f57793054d940b69d0f7f1c85fe52628a5 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Mon, 15 Sep 2025 07:52:56 +1000 Subject: [PATCH 4/9] Changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9bfc460cd..b24b0aef2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ - Migrate `elasticstack_kibana_action_connector` to the Terraform plugin framework ([#1269](https://github.com/elastic/terraform-provider-elasticstack/pull/1269)) - Migrate `elasticstack_elasticsearch_security_role_mapping` resource and data source to Terraform Plugin Framework ([#1279](https://github.com/elastic/terraform-provider-elasticstack/pull/1279)) - Add support for `inactivity_timeout` in `elasticstack_fleet_agent_policy` ([#641](https://github.com/elastic/terraform-provider-elasticstack/issues/641)) +- Add support for `kafka` output types in `elasticstack_fleet_output` ([#1302](https://github.com/elastic/terraform-provider-elasticstack/pull/1302)) ## [0.11.17] - 2025-07-21 From 52ee2ad3be028730961c29e501982b0c773621c4 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Thu, 18 Sep 2025 15:35:10 +1000 Subject: [PATCH 5/9] Extract common validation from the conditional validator --- internal/utils/validators/conditional.go | 119 ++++++----------------- 1 file changed, 30 insertions(+), 89 deletions(-) diff --git a/internal/utils/validators/conditional.go b/internal/utils/validators/conditional.go index e5edad69b..25d4aa503 100644 --- a/internal/utils/validators/conditional.go +++ b/internal/utils/validators/conditional.go @@ -4,8 +4,11 @@ import ( "context" "fmt" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/schema/validator" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/hashicorp/terraform-plugin-framework/types" ) @@ -31,22 +34,16 @@ func (v conditionalRequirement) MarkdownDescription(ctx context.Context) string return v.Description(ctx) } -// validateConditionalRequirement was an attempt at shared logic but is not used -// The validation logic is implemented directly in ValidateString and ValidateFloat64 methods - -// ValidateString performs the validation for string attributes. 
-func (v conditionalRequirement) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { - // If the current attribute is null or unknown, no validation needed - if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { - return +func (v conditionalRequirement) validate(ctx context.Context, config tfsdk.Config, val attr.Value, p path.Path) diag.Diagnostics { + if val.IsNull() || val.IsUnknown() { + return nil } // Get the value at the dependent path var dependentValue types.String - diags := request.Config.GetAttribute(ctx, v.dependentPath, &dependentValue) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return + diags := config.GetAttribute(ctx, v.dependentPath, &dependentValue) + if diags.HasError() { + return diags } // If dependent value is null, unknown, or doesn't match any allowed values, @@ -65,105 +62,49 @@ func (v conditionalRequirement) ValidateString(ctx context.Context, request vali if !isAllowed { if v.failureMessage != "" { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", - v.failureMessage, - ) + diags.AddAttributeError(p, "Invalid Configuration", v.failureMessage) + return diags } else { if len(v.allowedValues) == 1 { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", + diags.AddAttributeError(p, "Invalid Configuration", fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", - request.Path, + p, v.dependentPath, v.allowedValues[0], v.dependentPath, dependentValueStr, ), ) + return diags } else { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", - fmt.Sprintf("Attribute %s can only be set when %s is one of %v, but %s is %q", - request.Path, + diags.AddAttributeError(p, "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", + p, v.dependentPath, - v.allowedValues, + v.allowedValues[0], v.dependentPath, dependentValueStr, ), ) + return diags } } } -} -// ValidateFloat64 performs the validation for float64 attributes. -func (v conditionalRequirement) ValidateFloat64(ctx context.Context, request validator.Float64Request, response *validator.Float64Response) { - // If the current attribute is null or unknown, no validation needed - if request.ConfigValue.IsNull() || request.ConfigValue.IsUnknown() { - return - } - - // Get the value at the dependent path - var dependentValue types.String - diags := request.Config.GetAttribute(ctx, v.dependentPath, &dependentValue) - response.Diagnostics.Append(diags...) - if response.Diagnostics.HasError() { - return - } + return nil +} - // If dependent value is null, unknown, or doesn't match any allowed values, - // then the current attribute should not be set - dependentValueStr := dependentValue.ValueString() - isAllowed := false +// validateConditionalRequirement was an attempt at shared logic but is not used +// The validation logic is implemented directly in ValidateString and ValidateFloat64 methods - if !dependentValue.IsNull() && !dependentValue.IsUnknown() { - for _, allowedValue := range v.allowedValues { - if dependentValueStr == allowedValue { - isAllowed = true - break - } - } - } +// ValidateString performs the validation for string attributes. 
+func (v conditionalRequirement) ValidateString(ctx context.Context, request validator.StringRequest, response *validator.StringResponse) { + response.Diagnostics.Append(v.validate(ctx, request.Config, request.ConfigValue, request.Path)...) +} - if !isAllowed { - if v.failureMessage != "" { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", - v.failureMessage, - ) - } else { - if len(v.allowedValues) == 1 { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", - fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", - request.Path, - v.dependentPath, - v.allowedValues[0], - v.dependentPath, - dependentValueStr, - ), - ) - } else { - response.Diagnostics.AddAttributeError( - request.Path, - "Invalid Configuration", - fmt.Sprintf("Attribute %s can only be set when %s is one of %v, but %s is %q", - request.Path, - v.dependentPath, - v.allowedValues, - v.dependentPath, - dependentValueStr, - ), - ) - } - } - } +// ValidateFloat64 performs the validation for float64 attributes. +func (v conditionalRequirement) ValidateFloat64(ctx context.Context, request validator.Float64Request, response *validator.Float64Response) { + response.Diagnostics.Append(v.validate(ctx, request.Config, request.ConfigValue, request.Path)...) } // StringConditionalRequirement returns a validator which ensures that a string attribute From 67c82f88e74f33be1dbc30666c6553482eae0b46 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Thu, 18 Sep 2025 16:14:50 +1000 Subject: [PATCH 6/9] Remove failureMessage from the conditional validator --- internal/fleet/output/schema.go | 2 - internal/utils/validators/conditional.go | 72 +++++++++---------- internal/utils/validators/conditional_test.go | 3 - 3 files changed, 32 insertions(+), 45 deletions(-) diff --git a/internal/fleet/output/schema.go b/internal/fleet/output/schema.go index ea51bc3ff..66cc91040 100644 --- a/internal/fleet/output/schema.go +++ b/internal/fleet/output/schema.go @@ -154,7 +154,6 @@ func getSchema() schema.Schema { validators.Float64ConditionalRequirement( path.Root("kafka").AtName("compression"), []string{"gzip"}, - "compression_level can only be set when compression is 'gzip'", ), }, }, @@ -166,7 +165,6 @@ func getSchema() schema.Schema { validators.StringConditionalRequirementSingle( path.Root("kafka").AtName("auth_type"), "none", - "connection_type can only be set when auth_type is 'none'", ), }, }, diff --git a/internal/utils/validators/conditional.go b/internal/utils/validators/conditional.go index 25d4aa503..b540801af 100644 --- a/internal/utils/validators/conditional.go +++ b/internal/utils/validators/conditional.go @@ -16,9 +16,8 @@ import ( // can only be set if another attribute at a specified path equals one of the specified values. // This is a shared implementation that can be used for both string and float64 validators. type conditionalRequirement struct { - dependentPath path.Path - allowedValues []string - failureMessage string + dependentPath path.Path + allowedValues []string } // Description describes the validation in plain text formatting. 
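The exported entry points now simply delegate to the shared `validate` helper, so supporting further attribute types is mechanical. A minimal sketch of an `Int64` variant, assuming the post-refactor `conditionalRequirement` struct and the imports already present in `conditional.go` (this variant is not part of the patch itself):

```go
// Sketch only, not part of this patch: an int64 flavour of the conditional
// validator that reuses the shared validate helper.

// ValidateInt64 performs the validation for int64 attributes.
func (v conditionalRequirement) ValidateInt64(ctx context.Context, request validator.Int64Request, response *validator.Int64Response) {
	response.Diagnostics.Append(v.validate(ctx, request.Config, request.ConfigValue, request.Path)...)
}

// Int64ConditionalRequirement mirrors StringConditionalRequirement for int64 attributes.
func Int64ConditionalRequirement(dependentPath path.Path, allowedValues []string) validator.Int64 {
	return conditionalRequirement{
		dependentPath: dependentPath,
		allowedValues: allowedValues,
	}
}
```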
@@ -61,33 +60,28 @@ func (v conditionalRequirement) validate(ctx context.Context, config tfsdk.Confi } if !isAllowed { - if v.failureMessage != "" { - diags.AddAttributeError(p, "Invalid Configuration", v.failureMessage) + if len(v.allowedValues) == 1 { + diags.AddAttributeError(p, "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", + p, + v.dependentPath, + v.allowedValues[0], + v.dependentPath, + dependentValueStr, + ), + ) return diags } else { - if len(v.allowedValues) == 1 { - diags.AddAttributeError(p, "Invalid Configuration", - fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", - p, - v.dependentPath, - v.allowedValues[0], - v.dependentPath, - dependentValueStr, - ), - ) - return diags - } else { - diags.AddAttributeError(p, "Invalid Configuration", - fmt.Sprintf("Attribute %s can only be set when %s equals %q, but %s is %q", - p, - v.dependentPath, - v.allowedValues[0], - v.dependentPath, - dependentValueStr, - ), - ) - return diags - } + diags.AddAttributeError(p, "Invalid Configuration", + fmt.Sprintf("Attribute %s can only be set when %s is one of %v, but %s is %q", + p, + v.dependentPath, + v.allowedValues, + v.dependentPath, + dependentValueStr, + ), + ) + return diags } } @@ -125,17 +119,16 @@ func (v conditionalRequirement) ValidateFloat64(ctx context.Context, request val // ), // }, // }, -func StringConditionalRequirement(dependentPath path.Path, allowedValues []string, failureMessage string) validator.String { +func StringConditionalRequirement(dependentPath path.Path, allowedValues []string) validator.String { return conditionalRequirement{ - dependentPath: dependentPath, - allowedValues: allowedValues, - failureMessage: failureMessage, + dependentPath: dependentPath, + allowedValues: allowedValues, } } // StringConditionalRequirementSingle is a convenience function for when there's only one allowed value. -func StringConditionalRequirementSingle(dependentPath path.Path, requiredValue string, failureMessage string) validator.String { - return StringConditionalRequirement(dependentPath, []string{requiredValue}, failureMessage) +func StringConditionalRequirementSingle(dependentPath path.Path, requiredValue string) validator.String { + return StringConditionalRequirement(dependentPath, []string{requiredValue}) } // Float64ConditionalRequirement returns a validator which ensures that a float64 attribute @@ -156,15 +149,14 @@ func StringConditionalRequirementSingle(dependentPath path.Path, requiredValue s // ), // }, // }, -func Float64ConditionalRequirement(dependentPath path.Path, allowedValues []string, failureMessage string) validator.Float64 { +func Float64ConditionalRequirement(dependentPath path.Path, allowedValues []string) validator.Float64 { return conditionalRequirement{ - dependentPath: dependentPath, - allowedValues: allowedValues, - failureMessage: failureMessage, + dependentPath: dependentPath, + allowedValues: allowedValues, } } // Float64ConditionalRequirementSingle is a convenience function for when there's only one allowed value. 
-func Float64ConditionalRequirementSingle(dependentPath path.Path, requiredValue string, failureMessage string) validator.Float64 { - return Float64ConditionalRequirement(dependentPath, []string{requiredValue}, failureMessage) +func Float64ConditionalRequirementSingle(dependentPath path.Path, requiredValue string) validator.Float64 { + return Float64ConditionalRequirement(dependentPath, []string{requiredValue}) } diff --git a/internal/utils/validators/conditional_test.go b/internal/utils/validators/conditional_test.go index 77309cc08..b4715b3ec 100644 --- a/internal/utils/validators/conditional_test.go +++ b/internal/utils/validators/conditional_test.go @@ -112,7 +112,6 @@ func TestStringConditionalRequirement(t *testing.T) { v := StringConditionalRequirement( path.Root("auth_type"), []string{"none"}, - "connection_type can only be set when auth_type is 'none'", ) // Create validation request @@ -144,7 +143,6 @@ func TestStringConditionalRequirement_Description(t *testing.T) { v := StringConditionalRequirement( path.Root("auth_type"), []string{"none"}, - "connection_type can only be set when auth_type is 'none'", ) description := v.Description(context.Background()) @@ -255,7 +253,6 @@ func TestFloat64ConditionalRequirement(t *testing.T) { v := Float64ConditionalRequirement( path.Root("compression"), []string{"gzip"}, - "compression_level can only be set when compression is 'gzip'", ) // Create validation request From 969b4b4a7794560edffea53b24fc28e50ccbb19b Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Thu, 18 Sep 2025 21:23:25 +1000 Subject: [PATCH 7/9] Use better schema types, and split up model code Single entry attributes should be objects, not lists --- generated/kbapi/kibana.gen.go | 12 +- generated/kbapi/transform_schema.go | 2 +- internal/fleet/output/acc_test.go | 14 +- internal/fleet/output/models.go | 983 +++++++----------- internal/fleet/output/output_kafka_model.go | 216 ++++ .../fleet/output/output_kafka_model_test.go | 393 +++++++ internal/fleet/output/schema.go | 113 +- internal/utils/validators/conditional.go | 30 +- internal/utils/validators/conditional_test.go | 24 +- 9 files changed, 1085 insertions(+), 702 deletions(-) create mode 100644 internal/fleet/output/output_kafka_model.go create mode 100644 internal/fleet/output/output_kafka_model_test.go diff --git a/generated/kbapi/kibana.gen.go b/generated/kbapi/kibana.gen.go index cf1de5697..3c947f747 100644 --- a/generated/kbapi/kibana.gen.go +++ b/generated/kbapi/kibana.gen.go @@ -20779,7 +20779,7 @@ type NewOutputKafka struct { CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` ClientId *string `json:"client_id,omitempty"` Compression *NewOutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel *float32 `json:"compression_level,omitempty"` + CompressionLevel *int `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` ConnectionType *string `json:"connection_type,omitempty"` Hash *struct { @@ -21082,7 +21082,7 @@ type OutputKafka struct { CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` ClientId *string `json:"client_id,omitempty"` Compression *OutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel *float32 `json:"compression_level,omitempty"` + CompressionLevel *int `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` ConnectionType *string `json:"connection_type,omitempty"` Hash *OutputKafka_Hash `json:"hash,omitempty"` @@ -23771,7 +23771,7 @@ type UpdateOutputKafka struct { 
CaTrustedFingerprint *string `json:"ca_trusted_fingerprint,omitempty"` ClientId *string `json:"client_id,omitempty"` Compression *UpdateOutputKafkaCompression `json:"compression,omitempty"` - CompressionLevel *float32 `json:"compression_level,omitempty"` + CompressionLevel *int `json:"compression_level,omitempty"` ConfigYaml *string `json:"config_yaml,omitempty"` ConnectionType *string `json:"connection_type,omitempty"` Hash *struct { @@ -51577,7 +51577,7 @@ func (t SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) AsSLO // FromSLOsTimesliceMetricBasicMetricWithField overwrites any union data inside the SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item as the provided SLOsTimesliceMetricBasicMetricWithField func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) FromSLOsTimesliceMetricBasicMetricWithField(v SLOsTimesliceMetricBasicMetricWithField) error { - v.Aggregation = "cardinality" + v.Aggregation = "avg" b, err := json.Marshal(v) t.union = b return err @@ -51585,7 +51585,7 @@ func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) From // MergeSLOsTimesliceMetricBasicMetricWithField performs a merge with any union data inside the SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item, using the provided SLOsTimesliceMetricBasicMetricWithField func (t *SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) MergeSLOsTimesliceMetricBasicMetricWithField(v SLOsTimesliceMetricBasicMetricWithField) error { - v.Aggregation = "cardinality" + v.Aggregation = "avg" b, err := json.Marshal(v) if err != nil { return err @@ -51666,7 +51666,7 @@ func (t SLOsIndicatorPropertiesTimesliceMetric_Params_Metric_Metrics_Item) Value return nil, err } switch discriminator { - case "cardinality": + case "avg": return t.AsSLOsTimesliceMetricBasicMetricWithField() case "doc_count": return t.AsSLOsTimesliceMetricDocCountMetric() diff --git a/generated/kbapi/transform_schema.go b/generated/kbapi/transform_schema.go index 995e07369..009fcb3e8 100644 --- a/generated/kbapi/transform_schema.go +++ b/generated/kbapi/transform_schema.go @@ -959,7 +959,7 @@ func transformFleetPaths(schema *Schema) { kafkaRequiredName := fmt.Sprintf("schemas.%s.required", kafkaComponent) props := schema.Components.MustGetMap(fmt.Sprintf("schemas.%s.properties", kafkaComponent)) required := schema.Components.MustGetSlice(kafkaRequiredName) - for key, apiType := range map[string]string{"compression_level": "number", "connection_type": "string", "password": "string", "username": "string"} { + for key, apiType := range map[string]string{"compression_level": "integer", "connection_type": "string", "password": "string", "username": "string"} { props.Set(key, Map{ "type": apiType, }) diff --git a/internal/fleet/output/acc_test.go b/internal/fleet/output/acc_test.go index 87fa18550..cd87c6b6d 100644 --- a/internal/fleet/output/acc_test.go +++ b/internal/fleet/output/acc_test.go @@ -281,9 +281,9 @@ func TestAccResourceOutputKafkaComplex(t *testing.T) { resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.0.value", "us-west-1"), resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.1.key", "service"), resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.headers.1.value", "beats"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.hash.0.hash", "event.hash"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", 
"kafka.hash.0.random", "false"), - resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.sasl.0.mechanism", "SCRAM-SHA-256"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.hash.hash", "event.hash"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.hash.random", "false"), + resource.TestCheckResourceAttr("elasticstack_fleet_output.test_output", "kafka.sasl.mechanism", "SCRAM-SHA-256"), ), }, }, @@ -513,14 +513,14 @@ resource "elasticstack_fleet_output" "test_output" { } ] - hash = [{ + hash = { hash = "event.hash" random = false - }] + } - sasl = [{ + sasl = { mechanism = "SCRAM-SHA-256" - }] + } } } `, id, id) diff --git a/internal/fleet/output/models.go b/internal/fleet/output/models.go index 465be9037..c715651a5 100644 --- a/internal/fleet/output/models.go +++ b/internal/fleet/output/models.go @@ -27,56 +27,12 @@ type outputModel struct { Kafka types.Object `tfsdk:"kafka"` //> outputKafkaModel } -type outputKafkaModel struct { - AuthType types.String `tfsdk:"auth_type"` - BrokerTimeout types.Float64 `tfsdk:"broker_timeout"` - ClientId types.String `tfsdk:"client_id"` - Compression types.String `tfsdk:"compression"` - CompressionLevel types.Float64 `tfsdk:"compression_level"` - ConnectionType types.String `tfsdk:"connection_type"` - Topic types.String `tfsdk:"topic"` - Partition types.String `tfsdk:"partition"` - RequiredAcks types.Int64 `tfsdk:"required_acks"` - Timeout types.Float64 `tfsdk:"timeout"` - Version types.String `tfsdk:"version"` - Username types.String `tfsdk:"username"` - Password types.String `tfsdk:"password"` - Key types.String `tfsdk:"key"` - Headers types.List `tfsdk:"headers"` //> outputHeadersModel - Hash types.List `tfsdk:"hash"` //> outputHashModel - Random types.List `tfsdk:"random"` //> outputRandomModel - RoundRobin types.List `tfsdk:"round_robin"` //> outputRoundRobinModel - Sasl types.List `tfsdk:"sasl"` //> outputSaslModel -} - type outputSslModel struct { CertificateAuthorities types.List `tfsdk:"certificate_authorities"` //> string Certificate types.String `tfsdk:"certificate"` Key types.String `tfsdk:"key"` } -type outputHeadersModel struct { - Key types.String `tfsdk:"key"` - Value types.String `tfsdk:"value"` -} - -type outputHashModel struct { - Hash types.String `tfsdk:"hash"` - Random types.Bool `tfsdk:"random"` -} - -type outputRandomModel struct { - GroupEvents types.Float64 `tfsdk:"group_events"` -} - -type outputRoundRobinModel struct { - GroupEvents types.Float64 `tfsdk:"group_events"` -} - -type outputSaslModel struct { - Mechanism types.String `tfsdk:"mechanism"` -} - func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.OutputUnion) (diags diag.Diagnostics) { if union == nil { return @@ -165,56 +121,29 @@ func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.Outp // Kafka-specific fields - initialize kafka nested object kafkaModel := outputKafkaModel{} kafkaModel.AuthType = types.StringValue(string(data.AuthType)) - if data.BrokerTimeout != nil { - kafkaModel.BrokerTimeout = types.Float64Value(float64(*data.BrokerTimeout)) - } else { - kafkaModel.BrokerTimeout = types.Float64Null() - } + kafkaModel.BrokerTimeout = types.Float32PointerValue(data.BrokerTimeout) kafkaModel.ClientId = types.StringPointerValue(data.ClientId) - if data.Compression != nil { - kafkaModel.Compression = types.StringValue(string(*data.Compression)) - } else { - kafkaModel.Compression = types.StringNull() - } + kafkaModel.Compression = 
types.StringPointerValue((*string)(data.Compression)) // Handle CompressionLevel if data.CompressionLevel != nil { - kafkaModel.CompressionLevel = types.Float64Value(float64(*data.CompressionLevel)) + kafkaModel.CompressionLevel = types.Int64Value(int64(*data.CompressionLevel)) } else { - kafkaModel.CompressionLevel = types.Float64Null() + kafkaModel.CompressionLevel = types.Int64Null() } // Handle ConnectionType - if data.ConnectionType != nil { - kafkaModel.ConnectionType = types.StringValue(*data.ConnectionType) - } else { - kafkaModel.ConnectionType = types.StringNull() - } + kafkaModel.ConnectionType = types.StringPointerValue(data.ConnectionType) kafkaModel.Topic = types.StringPointerValue(data.Topic) - if data.Partition != nil { - kafkaModel.Partition = types.StringValue(string(*data.Partition)) - } else { - kafkaModel.Partition = types.StringNull() - } + kafkaModel.Partition = types.StringPointerValue((*string)(data.Partition)) if data.RequiredAcks != nil { kafkaModel.RequiredAcks = types.Int64Value(int64(*data.RequiredAcks)) } else { kafkaModel.RequiredAcks = types.Int64Null() } - if data.Timeout != nil { - kafkaModel.Timeout = types.Float64Value(float64(*data.Timeout)) - } else { - kafkaModel.Timeout = types.Float64Null() - } + + kafkaModel.Timeout = types.Float32PointerValue(data.Timeout) kafkaModel.Version = types.StringPointerValue(data.Version) - if data.Username != nil { - kafkaModel.Username = types.StringValue(*data.Username) - } else { - kafkaModel.Username = types.StringNull() - } - if data.Password != nil { - kafkaModel.Password = types.StringValue(*data.Password) - } else { - kafkaModel.Password = types.StringNull() - } + kafkaModel.Username = types.StringPointerValue(data.Username) + kafkaModel.Password = types.StringPointerValue(data.Password) kafkaModel.Key = types.StringPointerValue(data.Key) // Handle headers @@ -235,66 +164,66 @@ func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.Outp // Handle hash if data.Hash != nil { - hashModels := []outputHashModel{{ + hashModel := outputHashModel{ Hash: types.StringPointerValue(data.Hash.Hash), Random: types.BoolPointerValue(data.Hash.Random), - }} - list, nd := types.ListValueFrom(ctx, getHashAttrTypes(), hashModels) + } + obj, nd := types.ObjectValueFrom(ctx, getHashAttrTypes(), hashModel) diags.Append(nd...) - kafkaModel.Hash = list + kafkaModel.Hash = obj } else { - kafkaModel.Hash = types.ListNull(getHashAttrTypes()) + kafkaModel.Hash = types.ObjectNull(getHashAttrTypes()) } // Handle random if data.Random != nil { - randomModels := []outputRandomModel{{ + randomModel := outputRandomModel{ GroupEvents: func() types.Float64 { if data.Random.GroupEvents != nil { return types.Float64Value(float64(*data.Random.GroupEvents)) } return types.Float64Null() }(), - }} - list, nd := types.ListValueFrom(ctx, getRandomAttrTypes(), randomModels) + } + obj, nd := types.ObjectValueFrom(ctx, getRandomAttrTypes(), randomModel) diags.Append(nd...) 
- kafkaModel.Random = list + kafkaModel.Random = obj } else { - kafkaModel.Random = types.ListNull(getRandomAttrTypes()) + kafkaModel.Random = types.ObjectNull(getRandomAttrTypes()) } // Handle round_robin if data.RoundRobin != nil { - roundRobinModels := []outputRoundRobinModel{{ + roundRobinModel := outputRoundRobinModel{ GroupEvents: func() types.Float64 { if data.RoundRobin.GroupEvents != nil { return types.Float64Value(float64(*data.RoundRobin.GroupEvents)) } return types.Float64Null() }(), - }} - list, nd := types.ListValueFrom(ctx, getRoundRobinAttrTypes(), roundRobinModels) + } + obj, nd := types.ObjectValueFrom(ctx, getRoundRobinAttrTypes(), roundRobinModel) diags.Append(nd...) - kafkaModel.RoundRobin = list + kafkaModel.RoundRobin = obj } else { - kafkaModel.RoundRobin = types.ListNull(getRoundRobinAttrTypes()) + kafkaModel.RoundRobin = types.ObjectNull(getRoundRobinAttrTypes()) } // Handle sasl if data.Sasl != nil { - saslModels := []outputSaslModel{{ + saslModel := outputSaslModel{ Mechanism: func() types.String { if data.Sasl.Mechanism != nil { return types.StringValue(string(*data.Sasl.Mechanism)) } return types.StringNull() }(), - }} - list, nd := types.ListValueFrom(ctx, getSaslAttrTypes(), saslModels) + } + obj, nd := types.ObjectValueFrom(ctx, getSaslAttrTypes(), saslModel) diags.Append(nd...) - kafkaModel.Sasl = list + kafkaModel.Sasl = obj } else { - kafkaModel.Sasl = types.ListNull(getSaslAttrTypes()) + kafkaModel.Sasl = types.ObjectNull(getSaslAttrTypes()) } // Set the kafka nested object on the main model @@ -309,538 +238,420 @@ func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.Outp return } -func (model outputModel) toAPICreateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.NewOutputUnion, diags diag.Diagnostics) { - doSsl := func() *kbapi.NewOutputSsl { - if utils.IsKnown(model.Ssl) { - sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) - if sslModel != nil { - return &kbapi.NewOutputSsl{ - Certificate: sslModel.Certificate.ValueStringPointer(), - CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), - Key: sslModel.Key.ValueStringPointer(), - } - } - } - return nil - } - +func (model outputModel) toAPICreateModel(ctx context.Context, client *clients.ApiClient) (kbapi.NewOutputUnion, diag.Diagnostics) { outputType := model.Type.ValueString() + switch outputType { case "elasticsearch": - body := kbapi.NewOutputElasticsearch{ - Type: kbapi.NewOutputElasticsearchTypeElasticsearch, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: doSsl(), + return model.toAPICreateElasticsearchModel(ctx) + case "logstash": + return model.toAPICreateLogstashModel(ctx) + case "kafka": + if diags := assertKafkaSupport(ctx, client); diags.HasError() { + return kbapi.NewOutputUnion{}, diags } - err := union.FromNewOutputElasticsearch(body) - if err != nil { - diags.AddError(err.Error(), "") - return + return model.toAPICreateKafkaModel(ctx) + default: + return kbapi.NewOutputUnion{}, 
diag.Diagnostics{ + diag.NewErrorDiagnostic(fmt.Sprintf("unhandled output type: %s", outputType), ""), } + } +} - case "logstash": - body := kbapi.NewOutputLogstash{ - Type: kbapi.NewOutputLogstashTypeLogstash, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: doSsl(), - } +func assertKafkaSupport(ctx context.Context, client *clients.ApiClient) diag.Diagnostics { + var diags diag.Diagnostics + + // Check minimum version requirement for Kafka output type + if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { + diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) + return diags + } else if !supported { + diags.AddError("Unsupported version for Kafka output", + fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) + return diags + } - err := union.FromNewOutputLogstash(body) - if err != nil { - diags.AddError(err.Error(), "") - return - } + return nil +} +func (model outputModel) toAPIUpdateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.UpdateOutputUnion, diags diag.Diagnostics) { + outputType := model.Type.ValueString() + + switch outputType { + case "elasticsearch": + return model.toAPIUpdateElasticsearchModel(ctx) + case "logstash": + return model.toAPIUpdateLogstashModel(ctx) case "kafka": - // Check minimum version requirement for Kafka output type - if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { - diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) 
- return - } else if !supported { - diags.AddError("Unsupported version for Kafka output", - fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) - return + if diags := assertKafkaSupport(ctx, client); diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags } - // Extract kafka model from nested structure - var kafkaModel outputKafkaModel - if !model.Kafka.IsNull() { - kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) - kafkaModel = *kafkaObj - } + return model.toAPIUpdateKafkaModel(ctx) + default: + diags.AddError(fmt.Sprintf("unhandled output type: %s", outputType), "") + } - // Helper functions for Kafka-specific complex types - doHeaders := func() *[]struct { - Key string `json:"key"` - Value string `json:"value"` - } { - if utils.IsKnown(kafkaModel.Headers) { - headerModels := utils.ListTypeAs[outputHeadersModel](ctx, kafkaModel.Headers, path.Root("kafka").AtName("headers"), &diags) - if len(headerModels) > 0 { - headers := make([]struct { - Key string `json:"key"` - Value string `json:"value"` - }, len(headerModels)) - for i, h := range headerModels { - headers[i] = struct { - Key string `json:"key"` - Value string `json:"value"` - }{ - Key: h.Key.ValueString(), - Value: h.Value.ValueString(), - } - } - return &headers - } - } - return nil - } + return +} - doHash := func() *struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` - } { - if utils.IsKnown(kafkaModel.Hash) { - hashModels := utils.ListTypeAs[outputHashModel](ctx, kafkaModel.Hash, path.Root("kafka").AtName("hash"), &diags) - if len(hashModels) > 0 { - return &struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` - }{ - Hash: hashModels[0].Hash.ValueStringPointer(), - Random: hashModels[0].Random.ValueBoolPointer(), - } - } - } - return nil - } +func (model outputModel) toAPICreateElasticsearchModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := model.toAPISSL(ctx) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } - doRandom := func() *struct { - GroupEvents *float32 `json:"group_events,omitempty"` - } { - if utils.IsKnown(kafkaModel.Random) { - randomModels := utils.ListTypeAs[outputRandomModel](ctx, kafkaModel.Random, path.Root("kafka").AtName("random"), &diags) - if len(randomModels) > 0 { - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !randomModels[0].GroupEvents.IsNull() { - val := float32(randomModels[0].GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - } - } - } - return nil - } + body := kbapi.NewOutputElasticsearch{ + Type: kbapi.NewOutputElasticsearchTypeElasticsearch, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + } - doRoundRobin := func() *struct { - GroupEvents *float32 `json:"group_events,omitempty"` - } { - if utils.IsKnown(kafkaModel.RoundRobin) { - roundRobinModels := utils.ListTypeAs[outputRoundRobinModel](ctx, kafkaModel.RoundRobin, path.Root("kafka").AtName("round_robin"), 
&diags) - if len(roundRobinModels) > 0 { - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !roundRobinModels[0].GroupEvents.IsNull() { - val := float32(roundRobinModels[0].GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - } - } - } - return nil - } + var union kbapi.NewOutputUnion + err := union.FromNewOutputElasticsearch(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags + } - doSasl := func() *struct { - Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` - } { - if utils.IsKnown(kafkaModel.Sasl) { - saslModels := utils.ListTypeAs[outputSaslModel](ctx, kafkaModel.Sasl, path.Root("kafka").AtName("sasl"), &diags) - if len(saslModels) > 0 && !saslModels[0].Mechanism.IsNull() { - mechanism := kbapi.NewOutputKafkaSaslMechanism(saslModels[0].Mechanism.ValueString()) - return &struct { - Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` - }{ - Mechanism: &mechanism, - } - } - } - return nil - } + return union, diags +} - body := kbapi.NewOutputKafka{ - Type: kbapi.NewOutputKafkaTypeKafka, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: doSsl(), - // Kafka-specific fields - AuthType: func() kbapi.NewOutputKafkaAuthType { - if !kafkaModel.AuthType.IsNull() { - return kbapi.NewOutputKafkaAuthType(kafkaModel.AuthType.ValueString()) - } - return kbapi.NewOutputKafkaAuthTypeNone - }(), - BrokerTimeout: func() *float32 { - if !kafkaModel.BrokerTimeout.IsNull() { - val := float32(kafkaModel.BrokerTimeout.ValueFloat64()) - return &val - } +func (model outputModel) toAPICreateLogstashModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := model.toAPISSL(ctx) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } + body := kbapi.NewOutputLogstash{ + Type: kbapi.NewOutputLogstashTypeLogstash, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + } + + var union kbapi.NewOutputUnion + err := union.FromNewOutputLogstash(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags + } + + return union, diags +} + +func (model outputModel) toAPICreateKafkaModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := model.toAPISSL(ctx) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } + + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } + + hash, hashDiags := kafkaModel.toAPIHash(ctx) + 
diags.Append(hashDiags...) + + headers, headersDiags := kafkaModel.toAPIHeaders(ctx) + diags.Append(headersDiags...) + + random, randomDiags := kafkaModel.toAPIRandom(ctx) + diags.Append(randomDiags...) + + roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) + diags.Append(rrDiags...) + + sasl, saslDiags := kafkaModel.toAPISasl(ctx) + diags.Append(saslDiags...) + + body := kbapi.NewOutputKafka{ + Type: kbapi.NewOutputKafkaTypeKafka, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + // Kafka-specific fields + AuthType: kafkaModel.toAuthType(), + BrokerTimeout: func() *float32 { + if !utils.IsKnown(kafkaModel.BrokerTimeout) { return nil - }(), - ClientId: kafkaModel.ClientId.ValueStringPointer(), - Compression: func() *kbapi.NewOutputKafkaCompression { - if !kafkaModel.Compression.IsNull() { - comp := kbapi.NewOutputKafkaCompression(kafkaModel.Compression.ValueString()) - return &comp - } + } + val := kafkaModel.BrokerTimeout.ValueFloat32() + return &val + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.NewOutputKafkaCompression { + if !utils.IsKnown(kafkaModel.Compression) { return nil - }(), - CompressionLevel: func() *float32 { - if !kafkaModel.CompressionLevel.IsNull() && !kafkaModel.Compression.IsNull() && kafkaModel.Compression.ValueString() == "gzip" { - val := float32(kafkaModel.CompressionLevel.ValueFloat64()) - return &val - } + } + comp := kbapi.NewOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + }(), + CompressionLevel: func() *int { + if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { return nil - }(), - ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), - Topic: kafkaModel.Topic.ValueStringPointer(), - Partition: func() *kbapi.NewOutputKafkaPartition { - if !kafkaModel.Partition.IsNull() { - part := kbapi.NewOutputKafkaPartition(kafkaModel.Partition.ValueString()) - return &part - } + } + + val := int(kafkaModel.CompressionLevel.ValueInt64()) + return &val + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.NewOutputKafkaPartition { + if !utils.IsKnown(kafkaModel.Partition) { return nil - }(), - RequiredAcks: func() *kbapi.NewOutputKafkaRequiredAcks { - if !kafkaModel.RequiredAcks.IsNull() { - acks := kbapi.NewOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) - return &acks - } + } + part := kbapi.NewOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + }(), + RequiredAcks: func() *kbapi.NewOutputKafkaRequiredAcks { + if !utils.IsKnown(kafkaModel.RequiredAcks) { return nil - }(), - Timeout: func() *float32 { - if !kafkaModel.Timeout.IsNull() { - val := float32(kafkaModel.Timeout.ValueFloat64()) - return &val - } + } + val := kbapi.NewOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &val + }(), + Timeout: func() *float32 { + if !utils.IsKnown(kafkaModel.Timeout) { return nil - }(), - Version: kafkaModel.Version.ValueStringPointer(), - Username: 
kafkaModel.Username.ValueStringPointer(), - Password: kafkaModel.Password.ValueStringPointer(), - Key: kafkaModel.Key.ValueStringPointer(), - Headers: doHeaders(), - Hash: doHash(), - Random: doRandom(), - RoundRobin: doRoundRobin(), - Sasl: doSasl(), - } + } - err := union.FromNewOutputKafka(body) - if err != nil { - diags.AddError(err.Error(), "") - return - } + val := kafkaModel.Timeout.ValueFloat32() + return &val + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: headers, + Hash: hash, + Random: random, + RoundRobin: roundRobin, + Sasl: sasl, + } - default: - diags.AddError(fmt.Sprintf("unhandled output type: %s", outputType), "") + var union kbapi.NewOutputUnion + err := union.FromNewOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags } - return + return union, diags } -func (model outputModel) toAPIUpdateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.UpdateOutputUnion, diags diag.Diagnostics) { - doSsl := func() *kbapi.UpdateOutputSsl { - if utils.IsKnown(model.Ssl) { - sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) - if sslModel != nil { - return &kbapi.UpdateOutputSsl{ - Certificate: sslModel.Certificate.ValueStringPointer(), - CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), - Key: sslModel.Key.ValueStringPointer(), - } - } - } - return nil +func (model outputModel) toAPIUpdateElasticsearchModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := model.toUpdateAPISSL(ctx) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } + body := kbapi.UpdateOutputElasticsearch{ + Type: utils.Pointer(kbapi.Elasticsearch), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueStringPointer(), + Ssl: ssl, } - outputType := model.Type.ValueString() - switch outputType { - case "elasticsearch": - body := kbapi.UpdateOutputElasticsearch{ - Type: utils.Pointer(kbapi.Elasticsearch), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueStringPointer(), - Ssl: doSsl(), - } - - err := union.FromUpdateOutputElasticsearch(body) - if err != nil { - diags.AddError(err.Error(), "") - return - } - - case "logstash": - body := kbapi.UpdateOutputLogstash{ - Type: utils.Pointer(kbapi.Logstash), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, 
path.Root("hosts"), &diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueStringPointer(), - Ssl: doSsl(), - } - - err := union.FromUpdateOutputLogstash(body) - if err != nil { - diags.AddError(err.Error(), "") - return - } - - case "kafka": - // Check minimum version requirement for Kafka output type - if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { - diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) - return - } else if !supported { - diags.AddError("Unsupported version for Kafka output", - fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) - return - } + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputElasticsearch(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } - // Extract kafka model from nested structure - var kafkaModel outputKafkaModel - if !model.Kafka.IsNull() { - kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) - kafkaModel = *kafkaObj - } + return union, diags +} - // Helper functions for Kafka-specific complex types (Update version) - doHeaders := func() *[]struct { - Key string `json:"key"` - Value string `json:"value"` - } { - if utils.IsKnown(kafkaModel.Headers) { - headerModels := utils.ListTypeAs[outputHeadersModel](ctx, kafkaModel.Headers, path.Root("kafka").AtName("headers"), &diags) - if len(headerModels) > 0 { - headers := make([]struct { - Key string `json:"key"` - Value string `json:"value"` - }, len(headerModels)) - for i, h := range headerModels { - headers[i] = struct { - Key string `json:"key"` - Value string `json:"value"` - }{ - Key: h.Key.ValueString(), - Value: h.Value.ValueString(), - } - } - return &headers - } - } - return nil - } +func (model outputModel) toAPIUpdateLogstashModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := model.toUpdateAPISSL(ctx) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } + body := kbapi.UpdateOutputLogstash{ + Type: utils.Pointer(kbapi.Logstash), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueStringPointer(), + Ssl: ssl, + } - doHash := func() *struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` - } { - if utils.IsKnown(kafkaModel.Hash) { - hashModels := utils.ListTypeAs[outputHashModel](ctx, kafkaModel.Hash, path.Root("kafka").AtName("hash"), &diags) - if len(hashModels) > 0 { - return &struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` - }{ - Hash: hashModels[0].Hash.ValueStringPointer(), - Random: hashModels[0].Random.ValueBoolPointer(), - } - } - } - return nil - } + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputLogstash(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } - doRandom := func() *struct { - GroupEvents *float32 `json:"group_events,omitempty"` - } { - if utils.IsKnown(kafkaModel.Random) { - 
randomModels := utils.ListTypeAs[outputRandomModel](ctx, kafkaModel.Random, path.Root("kafka").AtName("random"), &diags) - if len(randomModels) > 0 { - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !randomModels[0].GroupEvents.IsNull() { - val := float32(randomModels[0].GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - } - } - } - return nil - } + return union, diags +} - doRoundRobin := func() *struct { - GroupEvents *float32 `json:"group_events,omitempty"` - } { - if utils.IsKnown(kafkaModel.RoundRobin) { - roundRobinModels := utils.ListTypeAs[outputRoundRobinModel](ctx, kafkaModel.RoundRobin, path.Root("kafka").AtName("round_robin"), &diags) - if len(roundRobinModels) > 0 { - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !roundRobinModels[0].GroupEvents.IsNull() { - val := float32(roundRobinModels[0].GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - } - } - } - return nil - } +func (model outputModel) toAPIUpdateKafkaModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := model.toUpdateAPISSL(ctx) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } - doSasl := func() *struct { - Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` - } { - if utils.IsKnown(kafkaModel.Sasl) { - saslModels := utils.ListTypeAs[outputSaslModel](ctx, kafkaModel.Sasl, path.Root("kafka").AtName("sasl"), &diags) - if len(saslModels) > 0 && !saslModels[0].Mechanism.IsNull() { - mechanism := kbapi.UpdateOutputKafkaSaslMechanism(saslModels[0].Mechanism.ValueString()) - return &struct { - Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` - }{ - Mechanism: &mechanism, - } - } - } - return nil - } + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } - body := kbapi.UpdateOutputKafka{ - Type: utils.Pointer(kbapi.Kafka), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: doSsl(), - // Kafka-specific fields - AuthType: func() *kbapi.UpdateOutputKafkaAuthType { - if !kafkaModel.AuthType.IsNull() { - authType := kbapi.UpdateOutputKafkaAuthType(kafkaModel.AuthType.ValueString()) - return &authType - } - return nil - }(), - BrokerTimeout: func() *float32 { - if !kafkaModel.BrokerTimeout.IsNull() { - val := float32(kafkaModel.BrokerTimeout.ValueFloat64()) - return &val - } + hash, hashDiags := kafkaModel.toAPIHash(ctx) + diags.Append(hashDiags...) + + headers, headersDiags := kafkaModel.toAPIHeaders(ctx) + diags.Append(headersDiags...) + + random, randomDiags := kafkaModel.toAPIRandom(ctx) + diags.Append(randomDiags...) + + roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) + diags.Append(rrDiags...) + + sasl, saslDiags := kafkaModel.toUpdateAPISasl(ctx) + diags.Append(saslDiags...) 
+ + body := kbapi.UpdateOutputKafka{ + Type: utils.Pointer(kbapi.Kafka), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + // Kafka-specific fields + AuthType: kafkaModel.toUpdateAuthType(), + BrokerTimeout: func() *float32 { + if !utils.IsKnown(kafkaModel.BrokerTimeout) { return nil - }(), - ClientId: kafkaModel.ClientId.ValueStringPointer(), - Compression: func() *kbapi.UpdateOutputKafkaCompression { - if !kafkaModel.Compression.IsNull() { - comp := kbapi.UpdateOutputKafkaCompression(kafkaModel.Compression.ValueString()) - return &comp - } + } + val := kafkaModel.BrokerTimeout.ValueFloat32() + return &val + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.UpdateOutputKafkaCompression { + if !utils.IsKnown(kafkaModel.Compression) { return nil - }(), - CompressionLevel: func() *float32 { - if !kafkaModel.CompressionLevel.IsNull() && !kafkaModel.Compression.IsNull() && kafkaModel.Compression.ValueString() == "gzip" { - val := float32(kafkaModel.CompressionLevel.ValueFloat64()) - return &val - } + } + comp := kbapi.UpdateOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + }(), + CompressionLevel: func() *int { + if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { return nil - }(), - ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), - Topic: kafkaModel.Topic.ValueStringPointer(), - Partition: func() *kbapi.UpdateOutputKafkaPartition { - if !kafkaModel.Partition.IsNull() { - part := kbapi.UpdateOutputKafkaPartition(kafkaModel.Partition.ValueString()) - return &part - } + } + val := int(kafkaModel.CompressionLevel.ValueInt64()) + return &val + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.UpdateOutputKafkaPartition { + if !utils.IsKnown(kafkaModel.Partition) { return nil - }(), - RequiredAcks: func() *kbapi.UpdateOutputKafkaRequiredAcks { - if !kafkaModel.RequiredAcks.IsNull() { - acks := kbapi.UpdateOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) - return &acks - } + } + part := kbapi.UpdateOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + }(), + RequiredAcks: func() *kbapi.UpdateOutputKafkaRequiredAcks { + if !utils.IsKnown(kafkaModel.RequiredAcks) { return nil - }(), - Timeout: func() *float32 { - if !kafkaModel.Timeout.IsNull() { - val := float32(kafkaModel.Timeout.ValueFloat64()) - return &val - } + } + val := kbapi.UpdateOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &val + }(), + Timeout: func() *float32 { + if !utils.IsKnown(kafkaModel.Timeout) { return nil - }(), - Version: kafkaModel.Version.ValueStringPointer(), - Username: kafkaModel.Username.ValueStringPointer(), - Password: kafkaModel.Password.ValueStringPointer(), - Key: kafkaModel.Key.ValueStringPointer(), - Headers: doHeaders(), - Hash: doHash(), - Random: doRandom(), - RoundRobin: doRoundRobin(), - Sasl: doSasl(), - } + } + val := kafkaModel.Timeout.ValueFloat32() + return &val + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: 
kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: headers, + Hash: hash, + Random: random, + RoundRobin: roundRobin, + Sasl: sasl, + } - err := union.FromUpdateOutputKafka(body) - if err != nil { - diags.AddError(err.Error(), "") - return - } + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } - default: - diags.AddError(fmt.Sprintf("unhandled output type: %s", outputType), "") + return union, diags +} + +func (model outputModel) toAPISSL(ctx context.Context) (*kbapi.NewOutputSsl, diag.Diagnostics) { + if !utils.IsKnown(model.Ssl) { + return nil, nil + } + var diags diag.Diagnostics + sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) + if diags.HasError() { + return nil, diags } - return + if sslModel == nil { + return nil, diags + } + + return &kbapi.NewOutputSsl{ + Certificate: sslModel.Certificate.ValueStringPointer(), + CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), + Key: sslModel.Key.ValueStringPointer(), + }, diags +} + +func (model outputModel) toUpdateAPISSL(ctx context.Context) (*kbapi.UpdateOutputSsl, diag.Diagnostics) { + ssl, diags := model.toAPISSL(ctx) + if diags.HasError() || ssl == nil { + return nil, diags + } + + return &kbapi.UpdateOutputSsl{ + Certificate: ssl.Certificate, + CertificateAuthorities: ssl.CertificateAuthorities, + Key: ssl.Key, + }, diags } diff --git a/internal/fleet/output/output_kafka_model.go b/internal/fleet/output/output_kafka_model.go new file mode 100644 index 000000000..6c3d1ae0e --- /dev/null +++ b/internal/fleet/output/output_kafka_model.go @@ -0,0 +1,216 @@ +package output + +import ( + "context" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type outputKafkaModel struct { + AuthType types.String `tfsdk:"auth_type"` + BrokerTimeout types.Float32 `tfsdk:"broker_timeout"` + ClientId types.String `tfsdk:"client_id"` + Compression types.String `tfsdk:"compression"` + CompressionLevel types.Int64 `tfsdk:"compression_level"` + ConnectionType types.String `tfsdk:"connection_type"` + Topic types.String `tfsdk:"topic"` + Partition types.String `tfsdk:"partition"` + RequiredAcks types.Int64 `tfsdk:"required_acks"` + Timeout types.Float32 `tfsdk:"timeout"` + Version types.String `tfsdk:"version"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Key types.String `tfsdk:"key"` + Headers types.List `tfsdk:"headers"` //> outputHeadersModel + Hash types.Object `tfsdk:"hash"` //> outputHashModel + Random types.Object `tfsdk:"random"` //> outputRandomModel + RoundRobin types.Object `tfsdk:"round_robin"` //> outputRoundRobinModel + Sasl types.Object `tfsdk:"sasl"` //> outputSaslModel +} + +type outputHeadersModel struct { + Key types.String `tfsdk:"key"` + Value types.String `tfsdk:"value"` +} + +type outputHashModel struct { + Hash types.String `tfsdk:"hash"` + Random types.Bool `tfsdk:"random"` +} + +type 
outputRandomModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputRoundRobinModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputSaslModel struct { + Mechanism types.String `tfsdk:"mechanism"` +} + +func (m outputKafkaModel) toAPIHash(ctx context.Context) (*struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Hash) { + return nil, nil + } + + var hashModel outputHashModel + diags := m.Hash.As(ctx, &hashModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + return &struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + }{ + Hash: hashModel.Hash.ValueStringPointer(), + Random: hashModel.Random.ValueBoolPointer(), + }, diags +} + +func (m outputKafkaModel) toAPIHeaders(ctx context.Context) (*[]struct { + Key string `json:"key"` + Value string `json:"value"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Headers) { + return nil, nil + } + + var diags diag.Diagnostics + headerModels := utils.ListTypeAs[outputHeadersModel](ctx, m.Headers, path.Root("kafka").AtName("headers"), &diags) + if len(headerModels) == 0 { + return nil, diags + } + + headers := make([]struct { + Key string `json:"key"` + Value string `json:"value"` + }, len(headerModels)) + for i, h := range headerModels { + headers[i] = struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + Key: h.Key.ValueString(), + Value: h.Value.ValueString(), + } + } + return &headers, diags +} + +func (m outputKafkaModel) toAPIRandom(ctx context.Context) (*struct { + GroupEvents *float32 `json:"group_events,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Random) { + return nil, nil + } + + var randomModel outputRandomModel + diags := m.Random.As(ctx, &randomModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !randomModel.GroupEvents.IsNull() { + val := float32(randomModel.GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + }, diags +} + +func (m outputKafkaModel) toAPIRoundRobin(ctx context.Context) (*struct { + GroupEvents *float32 `json:"group_events,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.RoundRobin) { + return nil, nil + } + + var roundRobinModel outputRoundRobinModel + diags := m.RoundRobin.As(ctx, &roundRobinModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !roundRobinModel.GroupEvents.IsNull() { + val := float32(roundRobinModel.GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + }, nil +} + +func (m outputKafkaModel) toAPISasl(ctx context.Context) (*struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Sasl) { + return nil, nil + } + var saslModel outputSaslModel + diags := m.Sasl.As(ctx, &saslModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + if saslModel.Mechanism.IsNull() { + return nil, diags + } + + mechanism := kbapi.NewOutputKafkaSaslMechanism(saslModel.Mechanism.ValueString()) + return &struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + Mechanism: &mechanism, + }, diags +} + +func (m outputKafkaModel) 
toUpdateAPISasl(ctx context.Context) (*struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` +}, diag.Diagnostics) { + sasl, diags := m.toAPISasl(ctx) + if diags.HasError() || sasl == nil { + return nil, diags + } + + mechanism := kbapi.UpdateOutputKafkaSaslMechanism(*sasl.Mechanism) + return &struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism "json:\"mechanism,omitempty\"" + }{ + Mechanism: &mechanism, + }, diags +} + +func (m outputKafkaModel) toAuthType() kbapi.NewOutputKafkaAuthType { + if !utils.IsKnown(m.AuthType) { + return kbapi.NewOutputKafkaAuthTypeNone + } + + return kbapi.NewOutputKafkaAuthType(m.AuthType.ValueString()) +} + +func (m outputKafkaModel) toUpdateAuthType() *kbapi.UpdateOutputKafkaAuthType { + if !utils.IsKnown(m.AuthType) { + return nil + } + + return utils.Pointer(kbapi.UpdateOutputKafkaAuthType(m.AuthType.ValueString())) +} diff --git a/internal/fleet/output/output_kafka_model_test.go b/internal/fleet/output/output_kafka_model_test.go new file mode 100644 index 000000000..fc76760dc --- /dev/null +++ b/internal/fleet/output/output_kafka_model_test.go @@ -0,0 +1,393 @@ +package output + +import ( + "context" + "testing" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_outputKafkaModel_toAPIHash(t *testing.T) { + type fields struct { + Hash types.Object + } + tests := []struct { + name string + fields fields + want *struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + } + wantErr bool + }{ + { + name: "returns nil when hash is unknown", + fields: fields{ + Hash: types.ObjectUnknown(getHashAttrTypes()), + }, + }, + { + name: "returns a hash object when all fields are set", + fields: fields{ + Hash: types.ObjectValueMust( + getHashAttrTypes(), + map[string]attr.Value{ + "hash": types.StringValue("field"), + "random": types.BoolValue(true), + }, + ), + }, + want: &struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + }{ + Hash: utils.Pointer("field"), + Random: utils.Pointer(true), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + Hash: tt.fields.Hash, + } + got, diags := m.toAPIHash(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toAPIHash() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toAPIHeaders(t *testing.T) { + type fields struct { + Headers types.List + } + tests := []struct { + name string + fields fields + want *[]struct { + Key string `json:"key"` + Value string `json:"value"` + } + wantErr bool + }{ + { + name: "returns nil when headers are unknown", + fields: fields{ + Headers: types.ListUnknown(getHeadersAttrTypes()), + }, + }, + { + name: "returns headers when populated", + fields: fields{ + Headers: types.ListValueMust( + getHeadersAttrTypes(), + []attr.Value{ + types.ObjectValueMust(getHeadersAttrTypes().(types.ObjectType).AttrTypes, map[string]attr.Value{ + "key": types.StringValue("key-1"), + "value": types.StringValue("value-1"), + }), + types.ObjectValueMust(getHeadersAttrTypes().(types.ObjectType).AttrTypes, map[string]attr.Value{ + "key": types.StringValue("key-2"), + "value": 
types.StringValue("value-2"), + }), + }, + ), + }, + want: &[]struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + {Key: "key-1", Value: "value-1"}, + {Key: "key-2", Value: "value-2"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + Headers: tt.fields.Headers, + } + got, diags := m.toAPIHeaders(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toAPIHeaders() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toAPIRandom(t *testing.T) { + type fields struct { + Random types.Object + } + tests := []struct { + name string + fields fields + want *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } + wantErr bool + }{ + { + name: "returns nil when random is unknown", + fields: fields{ + Random: types.ObjectUnknown(getRandomAttrTypes()), + }, + }, + { + name: "returns a random object when populated", + fields: fields{ + Random: types.ObjectValueMust( + getRandomAttrTypes(), + map[string]attr.Value{ + "group_events": types.Float64Value(1), + }, + ), + }, + want: &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: utils.Pointer(float32(1)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + Random: tt.fields.Random, + } + got, diags := m.toAPIRandom(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toAPIRandom() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toAPIRoundRobin(t *testing.T) { + type fields struct { + RoundRobin types.Object + } + tests := []struct { + name string + fields fields + want *struct { + GroupEvents *float32 `json:"group_events,omitempty"` + } + wantErr bool + }{ + { + name: "returns nil when round_robin is unknown", + fields: fields{ + RoundRobin: types.ObjectUnknown(getRoundRobinAttrTypes()), + }, + }, + { + name: "returns a round_robin object when populated", + fields: fields{ + RoundRobin: types.ObjectValueMust( + getRoundRobinAttrTypes(), + map[string]attr.Value{ + "group_events": types.Float64Value(1), + }, + ), + }, + want: &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: utils.Pointer(float32(1)), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + RoundRobin: tt.fields.RoundRobin, + } + got, diags := m.toAPIRoundRobin(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toAPIRoundRobin() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toAPISasl(t *testing.T) { + type fields struct { + Sasl types.Object + } + tests := []struct { + name string + fields fields + want *struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + } + wantErr bool + }{ + { + name: "returns nil when sasl is unknown", + fields: fields{ + Sasl: types.ObjectUnknown(getSaslAttrTypes()), + }, + }, + { + name: "returns a sasl object when populated", + fields: fields{ + Sasl: types.ObjectValueMust( + getSaslAttrTypes(), + map[string]attr.Value{ + "mechanism": types.StringValue("plain"), + }, + ), + }, + want: &struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + 
Mechanism: utils.Pointer(kbapi.NewOutputKafkaSaslMechanism("plain")), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + Sasl: tt.fields.Sasl, + } + got, diags := m.toAPISasl(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toAPISasl() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toUpdateAPISasl(t *testing.T) { + type fields struct { + Sasl types.Object + } + tests := []struct { + name string + fields fields + want *struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + } + wantErr bool + }{ + { + name: "returns nil when sasl is unknown", + fields: fields{ + Sasl: types.ObjectUnknown(getSaslAttrTypes()), + }, + }, + { + name: "returns a sasl object when populated", + fields: fields{ + Sasl: types.ObjectValueMust( + getSaslAttrTypes(), + map[string]attr.Value{ + "mechanism": types.StringValue("plain"), + }, + ), + }, + want: &struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + Mechanism: utils.Pointer(kbapi.UpdateOutputKafkaSaslMechanism("plain")), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + Sasl: tt.fields.Sasl, + } + got, diags := m.toUpdateAPISasl(context.Background()) + if (diags.HasError()) != tt.wantErr { + t.Errorf("outputKafkaModel.toUpdateAPISasl() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_outputKafkaModel_toAuthType(t *testing.T) { + type fields struct { + AuthType types.String + } + tests := []struct { + name string + fields fields + want kbapi.NewOutputKafkaAuthType + }{ + { + name: "returns none when auth_type is unknown", + fields: fields{ + AuthType: types.StringUnknown(), + }, + want: kbapi.NewOutputKafkaAuthTypeNone, + }, + { + name: "returns an auth_type object when populated", + fields: fields{ + AuthType: types.StringValue("user"), + }, + want: kbapi.NewOutputKafkaAuthType("user"), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + AuthType: tt.fields.AuthType, + } + assert.Equal(t, tt.want, m.toAuthType()) + }) + } +} + +func Test_outputKafkaModel_toUpdateAuthType(t *testing.T) { + type fields struct { + AuthType types.String + } + tests := []struct { + name string + fields fields + want *kbapi.UpdateOutputKafkaAuthType + }{ + { + name: "returns nil when auth_type is unknown", + fields: fields{ + AuthType: types.StringUnknown(), + }, + }, + { + name: "returns an auth_type object when populated", + fields: fields{ + AuthType: types.StringValue("user"), + }, + want: utils.Pointer(kbapi.UpdateOutputKafkaAuthType("user")), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + m := outputKafkaModel{ + AuthType: tt.fields.AuthType, + } + assert.Equal(t, tt.want, m.toUpdateAuthType()) + }) + } +} diff --git a/internal/fleet/output/schema.go b/internal/fleet/output/schema.go index 66cc91040..590374c21 100644 --- a/internal/fleet/output/schema.go +++ b/internal/fleet/output/schema.go @@ -12,7 +12,8 @@ import ( "github.com/hashicorp/terraform-plugin-framework/resource" "github.com/hashicorp/terraform-plugin-framework/resource/schema" "github.com/hashicorp/terraform-plugin-framework/resource/schema/booldefault" - 
"github.com/hashicorp/terraform-plugin-framework/resource/schema/float64planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/float32planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/listplanmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier" "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier" @@ -120,12 +121,12 @@ func getSchema() schema.Schema { stringvalidator.OneOf("none", "user_pass", "ssl", "kerberos"), }, }, - "broker_timeout": schema.Float64Attribute{ + "broker_timeout": schema.Float32Attribute{ Description: "Kafka broker timeout.", Optional: true, Computed: true, - PlanModifiers: []planmodifier.Float64{ - float64planmodifier.UseStateForUnknown(), + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), }, }, "client_id": schema.StringAttribute{ @@ -143,15 +144,15 @@ func getSchema() schema.Schema { stringvalidator.OneOf("gzip", "snappy", "lz4", "none"), }, }, - "compression_level": schema.Float64Attribute{ + "compression_level": schema.Int64Attribute{ Description: "Compression level for Kafka output.", Optional: true, Computed: true, - PlanModifiers: []planmodifier.Float64{ - float64planmodifier.UseStateForUnknown(), + PlanModifiers: []planmodifier.Int64{ + int64planmodifier.UseStateForUnknown(), }, - Validators: []validator.Float64{ - validators.Float64ConditionalRequirement( + Validators: []validator.Int64{ + validators.Int64ConditionalRequirement( path.Root("kafka").AtName("compression"), []string{"gzip"}, ), @@ -186,12 +187,12 @@ func getSchema() schema.Schema { int64validator.OneOf(-1, 0, 1), }, }, - "timeout": schema.Float64Attribute{ + "timeout": schema.Float32Attribute{ Description: "Timeout for Kafka output.", Optional: true, Computed: true, - PlanModifiers: []planmodifier.Float64{ - float64planmodifier.UseStateForUnknown(), + PlanModifiers: []planmodifier.Float32{ + float32planmodifier.UseStateForUnknown(), }, }, "version": schema.StringAttribute{ @@ -235,69 +236,49 @@ func getSchema() schema.Schema { }, }, }, - "hash": schema.ListNestedAttribute{ + "hash": schema.SingleNestedAttribute{ Description: "Hash configuration for Kafka partition.", Optional: true, - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - }, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "hash": schema.StringAttribute{ - Description: "Hash field.", - Optional: true, - }, - "random": schema.BoolAttribute{ - Description: "Use random hash.", - Optional: true, - }, + Attributes: map[string]schema.Attribute{ + "hash": schema.StringAttribute{ + Description: "Hash field.", + Optional: true, + }, + "random": schema.BoolAttribute{ + Description: "Use random hash.", + Optional: true, }, }, }, - "random": schema.ListNestedAttribute{ + "random": schema.SingleNestedAttribute{ Description: "Random configuration for Kafka partition.", Optional: true, - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - }, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "group_events": schema.Float64Attribute{ - Description: "Number of events to group.", - Optional: true, - }, + Attributes: map[string]schema.Attribute{ + "group_events": schema.Float64Attribute{ + Description: "Number of events to group.", + Optional: true, }, }, }, - "round_robin": schema.ListNestedAttribute{ + 
"round_robin": schema.SingleNestedAttribute{ Description: "Round robin configuration for Kafka partition.", Optional: true, - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - }, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "group_events": schema.Float64Attribute{ - Description: "Number of events to group.", - Optional: true, - }, + Attributes: map[string]schema.Attribute{ + "group_events": schema.Float64Attribute{ + Description: "Number of events to group.", + Optional: true, }, }, }, - "sasl": schema.ListNestedAttribute{ + "sasl": schema.SingleNestedAttribute{ Description: "SASL configuration for Kafka authentication.", Optional: true, - Validators: []validator.List{ - listvalidator.SizeAtMost(1), - }, - NestedObject: schema.NestedAttributeObject{ - Attributes: map[string]schema.Attribute{ - "mechanism": schema.StringAttribute{ - Description: "SASL mechanism.", - Optional: true, - Validators: []validator.String{ - stringvalidator.OneOf("PLAIN", "SCRAM-SHA-256", "SCRAM-SHA-512"), - }, + Attributes: map[string]schema.Attribute{ + "mechanism": schema.StringAttribute{ + Description: "SASL mechanism.", + Optional: true, + Validators: []validator.String{ + stringvalidator.OneOf("PLAIN", "SCRAM-SHA-256", "SCRAM-SHA-512"), }, }, }, @@ -316,20 +297,20 @@ func getHeadersAttrTypes() attr.Type { return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["headers"].GetType().(attr.TypeWithElementType).ElementType() } -func getHashAttrTypes() attr.Type { - return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["hash"].GetType().(attr.TypeWithElementType).ElementType() +func getHashAttrTypes() map[string]attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["hash"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes() } -func getRandomAttrTypes() attr.Type { - return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["random"].GetType().(attr.TypeWithElementType).ElementType() +func getRandomAttrTypes() map[string]attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["random"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes() } -func getRoundRobinAttrTypes() attr.Type { - return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["round_robin"].GetType().(attr.TypeWithElementType).ElementType() +func getRoundRobinAttrTypes() map[string]attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["round_robin"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes() } -func getSaslAttrTypes() attr.Type { - return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["sasl"].GetType().(attr.TypeWithElementType).ElementType() +func getSaslAttrTypes() map[string]attr.Type { + return getSchema().Attributes["kafka"].(schema.SingleNestedAttribute).Attributes["sasl"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes() } func getKafkaAttrTypes() map[string]attr.Type { diff --git a/internal/utils/validators/conditional.go b/internal/utils/validators/conditional.go index b540801af..a13d400a5 100644 --- a/internal/utils/validators/conditional.go +++ b/internal/utils/validators/conditional.go @@ -96,8 +96,8 @@ func (v conditionalRequirement) ValidateString(ctx context.Context, request vali response.Diagnostics.Append(v.validate(ctx, request.Config, request.ConfigValue, request.Path)...) 
} -// ValidateFloat64 performs the validation for float64 attributes. -func (v conditionalRequirement) ValidateFloat64(ctx context.Context, request validator.Float64Request, response *validator.Float64Response) { +// ValidateInt64 performs the validation for int64 attributes. +func (v conditionalRequirement) ValidateInt64(ctx context.Context, request validator.Int64Request, response *validator.Int64Response) { response.Diagnostics.Append(v.validate(ctx, request.Config, request.ConfigValue, request.Path)...) } @@ -131,32 +131,14 @@ func StringConditionalRequirementSingle(dependentPath path.Path, requiredValue s return StringConditionalRequirement(dependentPath, []string{requiredValue}) } -// Float64ConditionalRequirement returns a validator which ensures that a float64 attribute -// can only be set if another attribute at the specified path equals one of the specified values. -// -// The dependentPath parameter should use path.Root() to specify the attribute path. -// For example: path.Root("compression") -// -// Example usage: -// -// "compression_level": schema.Float64Attribute{ -// Optional: true, -// Validators: []validator.Float64{ -// validators.Float64ConditionalRequirement( -// path.Root("compression"), -// []string{"gzip"}, -// "compression_level can only be set when compression is 'gzip'", -// ), -// }, -// }, -func Float64ConditionalRequirement(dependentPath path.Path, allowedValues []string) validator.Float64 { +func Int64ConditionalRequirement(dependentPath path.Path, allowedValues []string) validator.Int64 { return conditionalRequirement{ dependentPath: dependentPath, allowedValues: allowedValues, } } -// Float64ConditionalRequirementSingle is a convenience function for when there's only one allowed value. -func Float64ConditionalRequirementSingle(dependentPath path.Path, requiredValue string) validator.Float64 { - return Float64ConditionalRequirement(dependentPath, []string{requiredValue}) +// Int64ConditionalRequirementSingle is a convenience function for when there's only one allowed value. 
+func Int64ConditionalRequirementSingle(dependentPath path.Path, requiredValue string) validator.Int64 { + return Int64ConditionalRequirement(dependentPath, []string{requiredValue}) } diff --git a/internal/utils/validators/conditional_test.go b/internal/utils/validators/conditional_test.go index b4715b3ec..faca2f8e5 100644 --- a/internal/utils/validators/conditional_test.go +++ b/internal/utils/validators/conditional_test.go @@ -153,12 +153,12 @@ func TestStringConditionalRequirement_Description(t *testing.T) { } } -func TestFloat64ConditionalRequirement(t *testing.T) { +func TestInt64ConditionalRequirement(t *testing.T) { t.Parallel() type testCase struct { name string - currentValue types.Float64 + currentValue types.Int64 dependentValue types.String expectedError bool } @@ -166,37 +166,37 @@ func TestFloat64ConditionalRequirement(t *testing.T) { testCases := []testCase{ { name: "valid - current null, dependent any value", - currentValue: types.Float64Null(), + currentValue: types.Int64Null(), dependentValue: types.StringValue("none"), expectedError: false, }, { name: "valid - current unknown, dependent any value", - currentValue: types.Float64Unknown(), + currentValue: types.Int64Unknown(), dependentValue: types.StringValue("none"), expectedError: false, }, { name: "valid - current set, dependent matches required value", - currentValue: types.Float64Value(6.0), + currentValue: types.Int64Value(6), dependentValue: types.StringValue("gzip"), expectedError: false, }, { name: "invalid - current set, dependent doesn't match required value", - currentValue: types.Float64Value(6.0), + currentValue: types.Int64Value(6), dependentValue: types.StringValue("none"), expectedError: true, }, { name: "invalid - current set, dependent is null", - currentValue: types.Float64Value(6.0), + currentValue: types.Int64Value(6), dependentValue: types.StringNull(), expectedError: true, }, { name: "invalid - current set, dependent is unknown", - currentValue: types.Float64Value(6.0), + currentValue: types.Int64Value(6), dependentValue: types.StringUnknown(), expectedError: true, }, @@ -250,21 +250,21 @@ func TestFloat64ConditionalRequirement(t *testing.T) { } // Create validator - v := Float64ConditionalRequirement( + v := Int64ConditionalRequirement( path.Root("compression"), []string{"gzip"}, ) // Create validation request - request := validator.Float64Request{ + request := validator.Int64Request{ Path: path.Root("compression_level"), ConfigValue: testCase.currentValue, Config: config, } // Run validation - response := &validator.Float64Response{} - v.ValidateFloat64(context.Background(), request, response) + response := &validator.Int64Response{} + v.ValidateInt64(context.Background(), request, response) // Check result if testCase.expectedError { From eb678eebcad58a1b9aab317442d599d7c56e8af3 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Thu, 18 Sep 2025 21:25:10 +1000 Subject: [PATCH 8/9] Update docs from schema change --- docs/resources/fleet_output.md | 26 ++++++++----------- .../kafka_advanced.tf | 18 +++++-------- 2 files changed, 18 insertions(+), 26 deletions(-) diff --git a/docs/resources/fleet_output.md b/docs/resources/fleet_output.md index 23035fb44..23b909903 100644 --- a/docs/resources/fleet_output.md +++ b/docs/resources/fleet_output.md @@ -142,19 +142,15 @@ resource "elasticstack_fleet_output" "kafka_advanced" { ] # Hash-based partitioning - hash = [ - { - hash = "host.name" - random = false - } - ] + hash = { + hash = "host.name" + random = false + } # SASL configuration - sasl = [ - { - 
mechanism = "SCRAM-SHA-256" - } - ] + sasl = { + mechanism = "SCRAM-SHA-256" + } } # SSL configuration (reusing common SSL block) @@ -234,15 +230,15 @@ Optional: - `compression` (String) Compression type for Kafka output. - `compression_level` (Number) Compression level for Kafka output. - `connection_type` (String) Connection type for Kafka output. -- `hash` (Attributes List) Hash configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--hash)) +- `hash` (Attributes) Hash configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--hash)) - `headers` (Attributes List) Headers for Kafka messages. (see [below for nested schema](#nestedatt--kafka--headers)) - `key` (String) Key field for Kafka messages. - `partition` (String) Partition strategy for Kafka output. - `password` (String, Sensitive) Password for Kafka authentication. -- `random` (Attributes List) Random configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--random)) +- `random` (Attributes) Random configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--random)) - `required_acks` (Number) Number of acknowledgments required for Kafka output. -- `round_robin` (Attributes List) Round robin configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--round_robin)) -- `sasl` (Attributes List) SASL configuration for Kafka authentication. (see [below for nested schema](#nestedatt--kafka--sasl)) +- `round_robin` (Attributes) Round robin configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--round_robin)) +- `sasl` (Attributes) SASL configuration for Kafka authentication. (see [below for nested schema](#nestedatt--kafka--sasl)) - `timeout` (Number) Timeout for Kafka output. - `topic` (String) Kafka topic. - `username` (String) Username for Kafka authentication. 
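
For reference, a minimal HCL sketch (not part of the patch; names and values are illustrative) of how the updated single-object `hash`/`sasl` syntax combines with the gzip-only `compression_level` constraint enforced by the new `Int64ConditionalRequirement` validator:

```hcl
resource "elasticstack_fleet_output" "kafka_example" {
  name  = "kafka-example"
  type  = "kafka"
  hosts = ["kafka-broker:9092"]

  kafka = {
    # compression_level is only accepted when compression is "gzip"
    compression       = "gzip"
    compression_level = 4

    # hash and sasl are now single nested objects rather than single-element lists
    hash = {
      hash   = "host.name"
      random = false
    }

    sasl = {
      mechanism = "SCRAM-SHA-256"
    }
  }
}
```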
diff --git a/examples/resources/elasticstack_fleet_output/kafka_advanced.tf b/examples/resources/elasticstack_fleet_output/kafka_advanced.tf index 497ba563a..3b093a8dd 100644 --- a/examples/resources/elasticstack_fleet_output/kafka_advanced.tf +++ b/examples/resources/elasticstack_fleet_output/kafka_advanced.tf @@ -55,19 +55,15 @@ resource "elasticstack_fleet_output" "kafka_advanced" { ] # Hash-based partitioning - hash = [ - { - hash = "host.name" - random = false - } - ] + hash = { + hash = "host.name" + random = false + } # SASL configuration - sasl = [ - { - mechanism = "SCRAM-SHA-256" - } - ] + sasl = { + mechanism = "SCRAM-SHA-256" + } } # SSL configuration (reusing common SSL block) From 4be20381540a1f78367921880df7acc4772bc0d1 Mon Sep 17 00:00:00 2001 From: Toby Brain Date: Fri, 19 Sep 2025 09:02:55 +1000 Subject: [PATCH 9/9] Split the models file up even more --- internal/fleet/output/models.go | 583 +----------------- internal/fleet/output/models_elasticsearch.go | 82 +++ internal/fleet/output/models_kafka.go | 559 +++++++++++++++++ ...fka_model_test.go => models_kafka_test.go} | 0 internal/fleet/output/models_logstash.go | 81 +++ internal/fleet/output/models_ssl.go | 69 +++ internal/fleet/output/models_ssl_test.go | 154 +++++ internal/fleet/output/output_kafka_model.go | 216 ------- 8 files changed, 964 insertions(+), 780 deletions(-) create mode 100644 internal/fleet/output/models_elasticsearch.go create mode 100644 internal/fleet/output/models_kafka.go rename internal/fleet/output/{output_kafka_model_test.go => models_kafka_test.go} (100%) create mode 100644 internal/fleet/output/models_logstash.go create mode 100644 internal/fleet/output/models_ssl.go create mode 100644 internal/fleet/output/models_ssl_test.go delete mode 100644 internal/fleet/output/output_kafka_model.go diff --git a/internal/fleet/output/models.go b/internal/fleet/output/models.go index c715651a5..3c1847368 100644 --- a/internal/fleet/output/models.go +++ b/internal/fleet/output/models.go @@ -8,7 +8,6 @@ import ( "github.com/elastic/terraform-provider-elasticstack/internal/clients" "github.com/elastic/terraform-provider-elasticstack/internal/utils" "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" "github.com/hashicorp/terraform-plugin-framework/types" ) @@ -27,212 +26,28 @@ type outputModel struct { Kafka types.Object `tfsdk:"kafka"` //> outputKafkaModel } -type outputSslModel struct { - CertificateAuthorities types.List `tfsdk:"certificate_authorities"` //> string - Certificate types.String `tfsdk:"certificate"` - Key types.String `tfsdk:"key"` -} - func (model *outputModel) populateFromAPI(ctx context.Context, union *kbapi.OutputUnion) (diags diag.Diagnostics) { if union == nil { return } - doSsl := func(ssl *kbapi.OutputSsl) types.Object { - if ssl != nil { - p := path.Root("ssl") - sslModel := outputSslModel{ - CertificateAuthorities: utils.SliceToListType_String(ctx, utils.Deref(ssl.CertificateAuthorities), p.AtName("certificate_authorities"), &diags), - Certificate: types.StringPointerValue(ssl.Certificate), - Key: types.StringPointerValue(ssl.Key), - } - obj, nd := types.ObjectValueFrom(ctx, getSslAttrTypes(), sslModel) - diags.Append(nd...) 
- return obj - } else { - return types.ObjectNull(getSslAttrTypes()) - } - } - - discriminator, err := union.Discriminator() + output, err := union.ValueByDiscriminator() if err != nil { diags.AddError(err.Error(), "") return } - switch discriminator { - case "elasticsearch": - data, err := union.AsOutputElasticsearch() - if err != nil { - diags.AddError(err.Error(), "") - return - } - - model.ID = types.StringPointerValue(data.Id) - model.OutputID = types.StringPointerValue(data.Id) - model.Name = types.StringValue(data.Name) - model.Type = types.StringValue(string(data.Type)) - model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) - model.CaSha256 = types.StringPointerValue(data.CaSha256) - model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) - model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) - model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) - model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) - model.Ssl = doSsl(data.Ssl) - - case "logstash": - data, err := union.AsOutputLogstash() - if err != nil { - diags.AddError(err.Error(), "") - return - } - - model.ID = types.StringPointerValue(data.Id) - model.OutputID = types.StringPointerValue(data.Id) - model.Name = types.StringValue(data.Name) - model.Type = types.StringValue(string(data.Type)) - model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) - model.CaSha256 = types.StringPointerValue(data.CaSha256) - model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) - model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) - model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) - model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) - model.Ssl = doSsl(data.Ssl) - - case "kafka": - data, err := union.AsOutputKafka() - if err != nil { - diags.AddError(err.Error(), "") - return - } - - model.ID = types.StringPointerValue(data.Id) - model.OutputID = types.StringPointerValue(data.Id) - model.Name = types.StringValue(data.Name) - model.Type = types.StringValue(string(data.Type)) - model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) - model.CaSha256 = types.StringPointerValue(data.CaSha256) - model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) - model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) - model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) - model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) - model.Ssl = doSsl(data.Ssl) - - // Kafka-specific fields - initialize kafka nested object - kafkaModel := outputKafkaModel{} - kafkaModel.AuthType = types.StringValue(string(data.AuthType)) - kafkaModel.BrokerTimeout = types.Float32PointerValue(data.BrokerTimeout) - kafkaModel.ClientId = types.StringPointerValue(data.ClientId) - kafkaModel.Compression = types.StringPointerValue((*string)(data.Compression)) - // Handle CompressionLevel - if data.CompressionLevel != nil { - kafkaModel.CompressionLevel = types.Int64Value(int64(*data.CompressionLevel)) - } else { - kafkaModel.CompressionLevel = types.Int64Null() - } - // Handle ConnectionType - kafkaModel.ConnectionType = types.StringPointerValue(data.ConnectionType) - kafkaModel.Topic = types.StringPointerValue(data.Topic) - kafkaModel.Partition = types.StringPointerValue((*string)(data.Partition)) - if data.RequiredAcks != nil { - kafkaModel.RequiredAcks = 
types.Int64Value(int64(*data.RequiredAcks)) - } else { - kafkaModel.RequiredAcks = types.Int64Null() - } + switch output := output.(type) { + case kbapi.OutputElasticsearch: + diags.Append(model.fromAPIElasticsearchModel(ctx, &output)...) - kafkaModel.Timeout = types.Float32PointerValue(data.Timeout) - kafkaModel.Version = types.StringPointerValue(data.Version) - kafkaModel.Username = types.StringPointerValue(data.Username) - kafkaModel.Password = types.StringPointerValue(data.Password) - kafkaModel.Key = types.StringPointerValue(data.Key) - - // Handle headers - if data.Headers != nil { - headerModels := make([]outputHeadersModel, len(*data.Headers)) - for i, header := range *data.Headers { - headerModels[i] = outputHeadersModel{ - Key: types.StringValue(header.Key), - Value: types.StringValue(header.Value), - } - } - list, nd := types.ListValueFrom(ctx, getHeadersAttrTypes(), headerModels) - diags.Append(nd...) - kafkaModel.Headers = list - } else { - kafkaModel.Headers = types.ListNull(getHeadersAttrTypes()) - } - - // Handle hash - if data.Hash != nil { - hashModel := outputHashModel{ - Hash: types.StringPointerValue(data.Hash.Hash), - Random: types.BoolPointerValue(data.Hash.Random), - } - obj, nd := types.ObjectValueFrom(ctx, getHashAttrTypes(), hashModel) - diags.Append(nd...) - kafkaModel.Hash = obj - } else { - kafkaModel.Hash = types.ObjectNull(getHashAttrTypes()) - } - - // Handle random - if data.Random != nil { - randomModel := outputRandomModel{ - GroupEvents: func() types.Float64 { - if data.Random.GroupEvents != nil { - return types.Float64Value(float64(*data.Random.GroupEvents)) - } - return types.Float64Null() - }(), - } - obj, nd := types.ObjectValueFrom(ctx, getRandomAttrTypes(), randomModel) - diags.Append(nd...) - kafkaModel.Random = obj - } else { - kafkaModel.Random = types.ObjectNull(getRandomAttrTypes()) - } - - // Handle round_robin - if data.RoundRobin != nil { - roundRobinModel := outputRoundRobinModel{ - GroupEvents: func() types.Float64 { - if data.RoundRobin.GroupEvents != nil { - return types.Float64Value(float64(*data.RoundRobin.GroupEvents)) - } - return types.Float64Null() - }(), - } - obj, nd := types.ObjectValueFrom(ctx, getRoundRobinAttrTypes(), roundRobinModel) - diags.Append(nd...) - kafkaModel.RoundRobin = obj - } else { - kafkaModel.RoundRobin = types.ObjectNull(getRoundRobinAttrTypes()) - } - - // Handle sasl - if data.Sasl != nil { - saslModel := outputSaslModel{ - Mechanism: func() types.String { - if data.Sasl.Mechanism != nil { - return types.StringValue(string(*data.Sasl.Mechanism)) - } - return types.StringNull() - }(), - } - obj, nd := types.ObjectValueFrom(ctx, getSaslAttrTypes(), saslModel) - diags.Append(nd...) - kafkaModel.Sasl = obj - } else { - kafkaModel.Sasl = types.ObjectNull(getSaslAttrTypes()) - } - - // Set the kafka nested object on the main model - kafkaObj, nd := types.ObjectValueFrom(ctx, getKafkaAttrTypes(), kafkaModel) - diags.Append(nd...) - model.Kafka = kafkaObj + case kbapi.OutputLogstash: + diags.Append(model.fromAPILogstashModel(ctx, &output)...) + case kbapi.OutputKafka: + diags.Append(model.fromAPIKafkaModel(ctx, &output)...) 
default: - diags.AddError(fmt.Sprintf("unhandled output type: %s", discriminator), "") + diags.AddError(fmt.Sprintf("unhandled output type: %T", output), "") } return @@ -259,22 +74,6 @@ func (model outputModel) toAPICreateModel(ctx context.Context, client *clients.A } } -func assertKafkaSupport(ctx context.Context, client *clients.ApiClient) diag.Diagnostics { - var diags diag.Diagnostics - - // Check minimum version requirement for Kafka output type - if supported, versionDiags := client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { - diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) - return diags - } else if !supported { - diags.AddError("Unsupported version for Kafka output", - fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) - return diags - } - - return nil -} - func (model outputModel) toAPIUpdateModel(ctx context.Context, client *clients.ApiClient) (union kbapi.UpdateOutputUnion, diags diag.Diagnostics) { outputType := model.Type.ValueString() @@ -296,362 +95,18 @@ func (model outputModel) toAPIUpdateModel(ctx context.Context, client *clients.A return } -func (model outputModel) toAPICreateElasticsearchModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { - ssl, diags := model.toAPISSL(ctx) - if diags.HasError() { - return kbapi.NewOutputUnion{}, diags - } - - body := kbapi.NewOutputElasticsearch{ - Type: kbapi.NewOutputElasticsearchTypeElasticsearch, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: ssl, - } - - var union kbapi.NewOutputUnion - err := union.FromNewOutputElasticsearch(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.NewOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPICreateLogstashModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { - ssl, diags := model.toAPISSL(ctx) - if diags.HasError() { - return kbapi.NewOutputUnion{}, diags - } - body := kbapi.NewOutputLogstash{ - Type: kbapi.NewOutputLogstashTypeLogstash, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: ssl, - } - - var union kbapi.NewOutputUnion - err := union.FromNewOutputLogstash(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.NewOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPICreateKafkaModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { - ssl, diags := model.toAPISSL(ctx) - if diags.HasError() { - return kbapi.NewOutputUnion{}, diags - } - - // Extract kafka model from nested structure - var kafkaModel outputKafkaModel - if !model.Kafka.IsNull() { - kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, 
path.Root("kafka"), &diags) - kafkaModel = *kafkaObj - } - - hash, hashDiags := kafkaModel.toAPIHash(ctx) - diags.Append(hashDiags...) - - headers, headersDiags := kafkaModel.toAPIHeaders(ctx) - diags.Append(headersDiags...) - - random, randomDiags := kafkaModel.toAPIRandom(ctx) - diags.Append(randomDiags...) - - roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) - diags.Append(rrDiags...) - - sasl, saslDiags := kafkaModel.toAPISasl(ctx) - diags.Append(saslDiags...) - - body := kbapi.NewOutputKafka{ - Type: kbapi.NewOutputKafkaTypeKafka, - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), - Id: model.OutputID.ValueStringPointer(), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: ssl, - // Kafka-specific fields - AuthType: kafkaModel.toAuthType(), - BrokerTimeout: func() *float32 { - if !utils.IsKnown(kafkaModel.BrokerTimeout) { - return nil - } - val := kafkaModel.BrokerTimeout.ValueFloat32() - return &val - }(), - ClientId: kafkaModel.ClientId.ValueStringPointer(), - Compression: func() *kbapi.NewOutputKafkaCompression { - if !utils.IsKnown(kafkaModel.Compression) { - return nil - } - comp := kbapi.NewOutputKafkaCompression(kafkaModel.Compression.ValueString()) - return &comp - }(), - CompressionLevel: func() *int { - if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { - return nil - } - - val := int(kafkaModel.CompressionLevel.ValueInt64()) - return &val - }(), - ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), - Topic: kafkaModel.Topic.ValueStringPointer(), - Partition: func() *kbapi.NewOutputKafkaPartition { - if !utils.IsKnown(kafkaModel.Partition) { - return nil - } - part := kbapi.NewOutputKafkaPartition(kafkaModel.Partition.ValueString()) - return &part - }(), - RequiredAcks: func() *kbapi.NewOutputKafkaRequiredAcks { - if !utils.IsKnown(kafkaModel.RequiredAcks) { - return nil - } - val := kbapi.NewOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) - return &val - }(), - Timeout: func() *float32 { - if !utils.IsKnown(kafkaModel.Timeout) { - return nil - } - - val := kafkaModel.Timeout.ValueFloat32() - return &val - }(), - Version: kafkaModel.Version.ValueStringPointer(), - Username: kafkaModel.Username.ValueStringPointer(), - Password: kafkaModel.Password.ValueStringPointer(), - Key: kafkaModel.Key.ValueStringPointer(), - Headers: headers, - Hash: hash, - Random: random, - RoundRobin: roundRobin, - Sasl: sasl, - } - - var union kbapi.NewOutputUnion - err := union.FromNewOutputKafka(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.NewOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPIUpdateElasticsearchModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { - ssl, diags := model.toUpdateAPISSL(ctx) - if diags.HasError() { - return kbapi.UpdateOutputUnion{}, diags - } - body := kbapi.UpdateOutputElasticsearch{ - Type: utils.Pointer(kbapi.Elasticsearch), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), 
&diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueStringPointer(), - Ssl: ssl, - } - - var union kbapi.UpdateOutputUnion - err := union.FromUpdateOutputElasticsearch(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.UpdateOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPIUpdateLogstashModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { - ssl, diags := model.toUpdateAPISSL(ctx) - if diags.HasError() { - return kbapi.UpdateOutputUnion{}, diags - } - body := kbapi.UpdateOutputLogstash{ - Type: utils.Pointer(kbapi.Logstash), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueStringPointer(), - Ssl: ssl, - } - - var union kbapi.UpdateOutputUnion - err := union.FromUpdateOutputLogstash(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.UpdateOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPIUpdateKafkaModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { - ssl, diags := model.toUpdateAPISSL(ctx) - if diags.HasError() { - return kbapi.UpdateOutputUnion{}, diags - } - - // Extract kafka model from nested structure - var kafkaModel outputKafkaModel - if !model.Kafka.IsNull() { - kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) - kafkaModel = *kafkaObj - } - - hash, hashDiags := kafkaModel.toAPIHash(ctx) - diags.Append(hashDiags...) - - headers, headersDiags := kafkaModel.toAPIHeaders(ctx) - diags.Append(headersDiags...) - - random, randomDiags := kafkaModel.toAPIRandom(ctx) - diags.Append(randomDiags...) - - roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) - diags.Append(rrDiags...) - - sasl, saslDiags := kafkaModel.toUpdateAPISasl(ctx) - diags.Append(saslDiags...) 
- - body := kbapi.UpdateOutputKafka{ - Type: utils.Pointer(kbapi.Kafka), - CaSha256: model.CaSha256.ValueStringPointer(), - CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), - ConfigYaml: model.ConfigYaml.ValueStringPointer(), - Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), - IsDefault: model.DefaultIntegrations.ValueBoolPointer(), - IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), - Name: model.Name.ValueString(), - Ssl: ssl, - // Kafka-specific fields - AuthType: kafkaModel.toUpdateAuthType(), - BrokerTimeout: func() *float32 { - if !utils.IsKnown(kafkaModel.BrokerTimeout) { - return nil - } - val := kafkaModel.BrokerTimeout.ValueFloat32() - return &val - }(), - ClientId: kafkaModel.ClientId.ValueStringPointer(), - Compression: func() *kbapi.UpdateOutputKafkaCompression { - if !utils.IsKnown(kafkaModel.Compression) { - return nil - } - comp := kbapi.UpdateOutputKafkaCompression(kafkaModel.Compression.ValueString()) - return &comp - }(), - CompressionLevel: func() *int { - if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { - return nil - } - val := int(kafkaModel.CompressionLevel.ValueInt64()) - return &val - }(), - ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), - Topic: kafkaModel.Topic.ValueStringPointer(), - Partition: func() *kbapi.UpdateOutputKafkaPartition { - if !utils.IsKnown(kafkaModel.Partition) { - return nil - } - part := kbapi.UpdateOutputKafkaPartition(kafkaModel.Partition.ValueString()) - return &part - }(), - RequiredAcks: func() *kbapi.UpdateOutputKafkaRequiredAcks { - if !utils.IsKnown(kafkaModel.RequiredAcks) { - return nil - } - val := kbapi.UpdateOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) - return &val - }(), - Timeout: func() *float32 { - if !utils.IsKnown(kafkaModel.Timeout) { - return nil - } - val := kafkaModel.Timeout.ValueFloat32() - return &val - }(), - Version: kafkaModel.Version.ValueStringPointer(), - Username: kafkaModel.Username.ValueStringPointer(), - Password: kafkaModel.Password.ValueStringPointer(), - Key: kafkaModel.Key.ValueStringPointer(), - Headers: headers, - Hash: hash, - Random: random, - RoundRobin: roundRobin, - Sasl: sasl, - } - - var union kbapi.UpdateOutputUnion - err := union.FromUpdateOutputKafka(body) - if err != nil { - diags.AddError(err.Error(), "") - return kbapi.UpdateOutputUnion{}, diags - } - - return union, diags -} - -func (model outputModel) toAPISSL(ctx context.Context) (*kbapi.NewOutputSsl, diag.Diagnostics) { - if !utils.IsKnown(model.Ssl) { - return nil, nil - } +func assertKafkaSupport(ctx context.Context, client *clients.ApiClient) diag.Diagnostics { var diags diag.Diagnostics - sslModel := utils.ObjectTypeAs[outputSslModel](ctx, model.Ssl, path.Root("ssl"), &diags) - if diags.HasError() { - return nil, diags - } - - if sslModel == nil { - return nil, diags - } - - return &kbapi.NewOutputSsl{ - Certificate: sslModel.Certificate.ValueStringPointer(), - CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), - Key: sslModel.Key.ValueStringPointer(), - }, diags -} -func (model outputModel) toUpdateAPISSL(ctx context.Context) (*kbapi.UpdateOutputSsl, diag.Diagnostics) { - ssl, diags := model.toAPISSL(ctx) - if diags.HasError() || ssl == nil { - return nil, diags + // Check minimum version requirement for Kafka output type + if supported, versionDiags := 
client.EnforceMinVersion(ctx, MinVersionOutputKafka); versionDiags.HasError() { + diags.Append(utils.FrameworkDiagsFromSDK(versionDiags)...) + return diags + } else if !supported { + diags.AddError("Unsupported version for Kafka output", + fmt.Sprintf("Kafka output type requires server version %s or higher", MinVersionOutputKafka.String())) + return diags } - return &kbapi.UpdateOutputSsl{ - Certificate: ssl.Certificate, - CertificateAuthorities: ssl.CertificateAuthorities, - Key: ssl.Key, - }, diags + return nil } diff --git a/internal/fleet/output/models_elasticsearch.go b/internal/fleet/output/models_elasticsearch.go new file mode 100644 index 000000000..d9fba285a --- /dev/null +++ b/internal/fleet/output/models_elasticsearch.go @@ -0,0 +1,82 @@ +package output + +import ( + "context" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func (model *outputModel) fromAPIElasticsearchModel(ctx context.Context, data *kbapi.OutputElasticsearch) (diags diag.Diagnostics) { + model.ID = types.StringPointerValue(data.Id) + model.OutputID = types.StringPointerValue(data.Id) + model.Name = types.StringValue(data.Name) + model.Type = types.StringValue(string(data.Type)) + model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) + model.CaSha256 = types.StringPointerValue(data.CaSha256) + model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) + model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) + model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) + model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) + model.Ssl, diags = sslToObjectValue(ctx, data.Ssl) + return +} + +func (model outputModel) toAPICreateElasticsearchModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSL(ctx, model.Ssl) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } + + body := kbapi.NewOutputElasticsearch{ + Type: kbapi.NewOutputElasticsearchTypeElasticsearch, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + } + + var union kbapi.NewOutputUnion + err := union.FromNewOutputElasticsearch(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags + } + + return union, diags +} + +func (model outputModel) toAPIUpdateElasticsearchModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSLUpdate(ctx, model.Ssl) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } + body := kbapi.UpdateOutputElasticsearch{ + Type: utils.Pointer(kbapi.Elasticsearch), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, 
model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueStringPointer(), + Ssl: ssl, + } + + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputElasticsearch(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } + + return union, diags +} diff --git a/internal/fleet/output/models_kafka.go b/internal/fleet/output/models_kafka.go new file mode 100644 index 000000000..a5b831624 --- /dev/null +++ b/internal/fleet/output/models_kafka.go @@ -0,0 +1,559 @@ +package output + +import ( + "context" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-framework/types/basetypes" +) + +type outputKafkaModel struct { + AuthType types.String `tfsdk:"auth_type"` + BrokerTimeout types.Float32 `tfsdk:"broker_timeout"` + ClientId types.String `tfsdk:"client_id"` + Compression types.String `tfsdk:"compression"` + CompressionLevel types.Int64 `tfsdk:"compression_level"` + ConnectionType types.String `tfsdk:"connection_type"` + Topic types.String `tfsdk:"topic"` + Partition types.String `tfsdk:"partition"` + RequiredAcks types.Int64 `tfsdk:"required_acks"` + Timeout types.Float32 `tfsdk:"timeout"` + Version types.String `tfsdk:"version"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Key types.String `tfsdk:"key"` + Headers types.List `tfsdk:"headers"` //> outputHeadersModel + Hash types.Object `tfsdk:"hash"` //> outputHashModel + Random types.Object `tfsdk:"random"` //> outputRandomModel + RoundRobin types.Object `tfsdk:"round_robin"` //> outputRoundRobinModel + Sasl types.Object `tfsdk:"sasl"` //> outputSaslModel +} + +type outputHeadersModel struct { + Key types.String `tfsdk:"key"` + Value types.String `tfsdk:"value"` +} + +type outputHashModel struct { + Hash types.String `tfsdk:"hash"` + Random types.Bool `tfsdk:"random"` +} + +type outputRandomModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputRoundRobinModel struct { + GroupEvents types.Float64 `tfsdk:"group_events"` +} + +type outputSaslModel struct { + Mechanism types.String `tfsdk:"mechanism"` +} + +func (m outputKafkaModel) toAPIHash(ctx context.Context) (*struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Hash) { + return nil, nil + } + + var hashModel outputHashModel + diags := m.Hash.As(ctx, &hashModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + return &struct { + Hash *string `json:"hash,omitempty"` + Random *bool `json:"random,omitempty"` + }{ + Hash: hashModel.Hash.ValueStringPointer(), + Random: hashModel.Random.ValueBoolPointer(), + }, diags +} + +func (m outputKafkaModel) toAPIHeaders(ctx context.Context) (*[]struct { + Key string `json:"key"` + Value string `json:"value"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Headers) { + return nil, nil + } + + var diags diag.Diagnostics + headerModels := utils.ListTypeAs[outputHeadersModel](ctx, m.Headers, path.Root("kafka").AtName("headers"), &diags) + if len(headerModels) == 0 { + return nil, diags + } + + 
headers := make([]struct { + Key string `json:"key"` + Value string `json:"value"` + }, len(headerModels)) + for i, h := range headerModels { + headers[i] = struct { + Key string `json:"key"` + Value string `json:"value"` + }{ + Key: h.Key.ValueString(), + Value: h.Value.ValueString(), + } + } + return &headers, diags +} + +func (m outputKafkaModel) toAPIRandom(ctx context.Context) (*struct { + GroupEvents *float32 `json:"group_events,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Random) { + return nil, nil + } + + var randomModel outputRandomModel + diags := m.Random.As(ctx, &randomModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !randomModel.GroupEvents.IsNull() { + val := float32(randomModel.GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + }, diags +} + +func (m outputKafkaModel) toAPIRoundRobin(ctx context.Context) (*struct { + GroupEvents *float32 `json:"group_events,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.RoundRobin) { + return nil, nil + } + + var roundRobinModel outputRoundRobinModel + diags := m.RoundRobin.As(ctx, &roundRobinModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + return &struct { + GroupEvents *float32 `json:"group_events,omitempty"` + }{ + GroupEvents: func() *float32 { + if !roundRobinModel.GroupEvents.IsNull() { + val := float32(roundRobinModel.GroupEvents.ValueFloat64()) + return &val + } + return nil + }(), + }, nil +} + +func (m outputKafkaModel) toAPISasl(ctx context.Context) (*struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` +}, diag.Diagnostics) { + if !utils.IsKnown(m.Sasl) { + return nil, nil + } + var saslModel outputSaslModel + diags := m.Sasl.As(ctx, &saslModel, basetypes.ObjectAsOptions{}) + if diags.HasError() { + return nil, diags + } + + if saslModel.Mechanism.IsNull() { + return nil, diags + } + + mechanism := kbapi.NewOutputKafkaSaslMechanism(saslModel.Mechanism.ValueString()) + return &struct { + Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` + }{ + Mechanism: &mechanism, + }, diags +} + +func (m outputKafkaModel) toUpdateAPISasl(ctx context.Context) (*struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` +}, diag.Diagnostics) { + sasl, diags := m.toAPISasl(ctx) + if diags.HasError() || sasl == nil { + return nil, diags + } + + mechanism := kbapi.UpdateOutputKafkaSaslMechanism(*sasl.Mechanism) + return &struct { + Mechanism *kbapi.UpdateOutputKafkaSaslMechanism "json:\"mechanism,omitempty\"" + }{ + Mechanism: &mechanism, + }, diags +} + +func (m outputKafkaModel) toAuthType() kbapi.NewOutputKafkaAuthType { + if !utils.IsKnown(m.AuthType) { + return kbapi.NewOutputKafkaAuthTypeNone + } + + return kbapi.NewOutputKafkaAuthType(m.AuthType.ValueString()) +} + +func (m outputKafkaModel) toUpdateAuthType() *kbapi.UpdateOutputKafkaAuthType { + if !utils.IsKnown(m.AuthType) { + return nil + } + + return utils.Pointer(kbapi.UpdateOutputKafkaAuthType(m.AuthType.ValueString())) +} + +func (model outputModel) toAPICreateKafkaModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSL(ctx, model.Ssl) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } + + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := 
utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } + + hash, hashDiags := kafkaModel.toAPIHash(ctx) + diags.Append(hashDiags...) + + headers, headersDiags := kafkaModel.toAPIHeaders(ctx) + diags.Append(headersDiags...) + + random, randomDiags := kafkaModel.toAPIRandom(ctx) + diags.Append(randomDiags...) + + roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) + diags.Append(rrDiags...) + + sasl, saslDiags := kafkaModel.toAPISasl(ctx) + diags.Append(saslDiags...) + + body := kbapi.NewOutputKafka{ + Type: kbapi.NewOutputKafkaTypeKafka, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + // Kafka-specific fields + AuthType: kafkaModel.toAuthType(), + BrokerTimeout: func() *float32 { + if !utils.IsKnown(kafkaModel.BrokerTimeout) { + return nil + } + val := kafkaModel.BrokerTimeout.ValueFloat32() + return &val + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.NewOutputKafkaCompression { + if !utils.IsKnown(kafkaModel.Compression) { + return nil + } + comp := kbapi.NewOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + }(), + CompressionLevel: func() *int { + if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { + return nil + } + + val := int(kafkaModel.CompressionLevel.ValueInt64()) + return &val + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.NewOutputKafkaPartition { + if !utils.IsKnown(kafkaModel.Partition) { + return nil + } + part := kbapi.NewOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + }(), + RequiredAcks: func() *kbapi.NewOutputKafkaRequiredAcks { + if !utils.IsKnown(kafkaModel.RequiredAcks) { + return nil + } + val := kbapi.NewOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &val + }(), + Timeout: func() *float32 { + if !utils.IsKnown(kafkaModel.Timeout) { + return nil + } + + val := kafkaModel.Timeout.ValueFloat32() + return &val + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: headers, + Hash: hash, + Random: random, + RoundRobin: roundRobin, + Sasl: sasl, + } + + var union kbapi.NewOutputUnion + err := union.FromNewOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags + } + + return union, diags +} + +func (model outputModel) toAPIUpdateKafkaModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSLUpdate(ctx, model.Ssl) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } + + // Extract kafka model from nested structure + var kafkaModel outputKafkaModel + if !model.Kafka.IsNull() { + kafkaObj := utils.ObjectTypeAs[outputKafkaModel](ctx, model.Kafka, path.Root("kafka"), &diags) + kafkaModel = *kafkaObj + } + + hash, hashDiags := kafkaModel.toAPIHash(ctx) + 
diags.Append(hashDiags...) + + headers, headersDiags := kafkaModel.toAPIHeaders(ctx) + diags.Append(headersDiags...) + + random, randomDiags := kafkaModel.toAPIRandom(ctx) + diags.Append(randomDiags...) + + roundRobin, rrDiags := kafkaModel.toAPIRoundRobin(ctx) + diags.Append(rrDiags...) + + sasl, saslDiags := kafkaModel.toUpdateAPISasl(ctx) + diags.Append(saslDiags...) + + body := kbapi.UpdateOutputKafka{ + Type: utils.Pointer(kbapi.Kafka), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + // Kafka-specific fields + AuthType: kafkaModel.toUpdateAuthType(), + BrokerTimeout: func() *float32 { + if !utils.IsKnown(kafkaModel.BrokerTimeout) { + return nil + } + val := kafkaModel.BrokerTimeout.ValueFloat32() + return &val + }(), + ClientId: kafkaModel.ClientId.ValueStringPointer(), + Compression: func() *kbapi.UpdateOutputKafkaCompression { + if !utils.IsKnown(kafkaModel.Compression) { + return nil + } + comp := kbapi.UpdateOutputKafkaCompression(kafkaModel.Compression.ValueString()) + return &comp + }(), + CompressionLevel: func() *int { + if !utils.IsKnown(kafkaModel.CompressionLevel) || kafkaModel.Compression.ValueString() != "gzip" { + return nil + } + val := int(kafkaModel.CompressionLevel.ValueInt64()) + return &val + }(), + ConnectionType: kafkaModel.ConnectionType.ValueStringPointer(), + Topic: kafkaModel.Topic.ValueStringPointer(), + Partition: func() *kbapi.UpdateOutputKafkaPartition { + if !utils.IsKnown(kafkaModel.Partition) { + return nil + } + part := kbapi.UpdateOutputKafkaPartition(kafkaModel.Partition.ValueString()) + return &part + }(), + RequiredAcks: func() *kbapi.UpdateOutputKafkaRequiredAcks { + if !utils.IsKnown(kafkaModel.RequiredAcks) { + return nil + } + val := kbapi.UpdateOutputKafkaRequiredAcks(kafkaModel.RequiredAcks.ValueInt64()) + return &val + }(), + Timeout: func() *float32 { + if !utils.IsKnown(kafkaModel.Timeout) { + return nil + } + val := kafkaModel.Timeout.ValueFloat32() + return &val + }(), + Version: kafkaModel.Version.ValueStringPointer(), + Username: kafkaModel.Username.ValueStringPointer(), + Password: kafkaModel.Password.ValueStringPointer(), + Key: kafkaModel.Key.ValueStringPointer(), + Headers: headers, + Hash: hash, + Random: random, + RoundRobin: roundRobin, + Sasl: sasl, + } + + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputKafka(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } + + return union, diags +} + +func (model *outputModel) fromAPIKafkaModel(ctx context.Context, data *kbapi.OutputKafka) (diags diag.Diagnostics) { + model.ID = types.StringPointerValue(data.Id) + model.OutputID = types.StringPointerValue(data.Id) + model.Name = types.StringValue(data.Name) + model.Type = types.StringValue(string(data.Type)) + model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) + model.CaSha256 = types.StringPointerValue(data.CaSha256) + model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) + model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) + model.DefaultMonitoring = 
types.BoolPointerValue(data.IsDefaultMonitoring) + model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) + model.Ssl, diags = sslToObjectValue(ctx, data.Ssl) + + // Kafka-specific fields - initialize kafka nested object + kafkaModel := outputKafkaModel{} + kafkaModel.AuthType = types.StringValue(string(data.AuthType)) + kafkaModel.BrokerTimeout = types.Float32PointerValue(data.BrokerTimeout) + kafkaModel.ClientId = types.StringPointerValue(data.ClientId) + kafkaModel.Compression = types.StringPointerValue((*string)(data.Compression)) + // Handle CompressionLevel + if data.CompressionLevel != nil { + kafkaModel.CompressionLevel = types.Int64Value(int64(*data.CompressionLevel)) + } else { + kafkaModel.CompressionLevel = types.Int64Null() + } + // Handle ConnectionType + kafkaModel.ConnectionType = types.StringPointerValue(data.ConnectionType) + kafkaModel.Topic = types.StringPointerValue(data.Topic) + kafkaModel.Partition = types.StringPointerValue((*string)(data.Partition)) + if data.RequiredAcks != nil { + kafkaModel.RequiredAcks = types.Int64Value(int64(*data.RequiredAcks)) + } else { + kafkaModel.RequiredAcks = types.Int64Null() + } + + kafkaModel.Timeout = types.Float32PointerValue(data.Timeout) + kafkaModel.Version = types.StringPointerValue(data.Version) + kafkaModel.Username = types.StringPointerValue(data.Username) + kafkaModel.Password = types.StringPointerValue(data.Password) + kafkaModel.Key = types.StringPointerValue(data.Key) + + // Handle headers + if data.Headers != nil { + headerModels := make([]outputHeadersModel, len(*data.Headers)) + for i, header := range *data.Headers { + headerModels[i] = outputHeadersModel{ + Key: types.StringValue(header.Key), + Value: types.StringValue(header.Value), + } + } + list, nd := types.ListValueFrom(ctx, getHeadersAttrTypes(), headerModels) + diags.Append(nd...) + kafkaModel.Headers = list + } else { + kafkaModel.Headers = types.ListNull(getHeadersAttrTypes()) + } + + // Handle hash + if data.Hash != nil { + hashModel := outputHashModel{ + Hash: types.StringPointerValue(data.Hash.Hash), + Random: types.BoolPointerValue(data.Hash.Random), + } + obj, nd := types.ObjectValueFrom(ctx, getHashAttrTypes(), hashModel) + diags.Append(nd...) + kafkaModel.Hash = obj + } else { + kafkaModel.Hash = types.ObjectNull(getHashAttrTypes()) + } + + // Handle random + if data.Random != nil { + randomModel := outputRandomModel{ + GroupEvents: func() types.Float64 { + if data.Random.GroupEvents != nil { + return types.Float64Value(float64(*data.Random.GroupEvents)) + } + return types.Float64Null() + }(), + } + obj, nd := types.ObjectValueFrom(ctx, getRandomAttrTypes(), randomModel) + diags.Append(nd...) + kafkaModel.Random = obj + } else { + kafkaModel.Random = types.ObjectNull(getRandomAttrTypes()) + } + + // Handle round_robin + if data.RoundRobin != nil { + roundRobinModel := outputRoundRobinModel{ + GroupEvents: func() types.Float64 { + if data.RoundRobin.GroupEvents != nil { + return types.Float64Value(float64(*data.RoundRobin.GroupEvents)) + } + return types.Float64Null() + }(), + } + obj, nd := types.ObjectValueFrom(ctx, getRoundRobinAttrTypes(), roundRobinModel) + diags.Append(nd...) 
+ kafkaModel.RoundRobin = obj + } else { + kafkaModel.RoundRobin = types.ObjectNull(getRoundRobinAttrTypes()) + } + + // Handle sasl + if data.Sasl != nil { + saslModel := outputSaslModel{ + Mechanism: func() types.String { + if data.Sasl.Mechanism != nil { + return types.StringValue(string(*data.Sasl.Mechanism)) + } + return types.StringNull() + }(), + } + obj, nd := types.ObjectValueFrom(ctx, getSaslAttrTypes(), saslModel) + diags.Append(nd...) + kafkaModel.Sasl = obj + } else { + kafkaModel.Sasl = types.ObjectNull(getSaslAttrTypes()) + } + + // Set the kafka nested object on the main model + kafkaObj, nd := types.ObjectValueFrom(ctx, getKafkaAttrTypes(), kafkaModel) + diags.Append(nd...) + model.Kafka = kafkaObj + return +} diff --git a/internal/fleet/output/output_kafka_model_test.go b/internal/fleet/output/models_kafka_test.go similarity index 100% rename from internal/fleet/output/output_kafka_model_test.go rename to internal/fleet/output/models_kafka_test.go diff --git a/internal/fleet/output/models_logstash.go b/internal/fleet/output/models_logstash.go new file mode 100644 index 000000000..680d5809b --- /dev/null +++ b/internal/fleet/output/models_logstash.go @@ -0,0 +1,81 @@ +package output + +import ( + "context" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func (model *outputModel) fromAPILogstashModel(ctx context.Context, data *kbapi.OutputLogstash) (diags diag.Diagnostics) { + model.ID = types.StringPointerValue(data.Id) + model.OutputID = types.StringPointerValue(data.Id) + model.Name = types.StringValue(data.Name) + model.Type = types.StringValue(string(data.Type)) + model.Hosts = utils.SliceToListType_String(ctx, data.Hosts, path.Root("hosts"), &diags) + model.CaSha256 = types.StringPointerValue(data.CaSha256) + model.CaTrustedFingerprint = types.StringPointerValue(data.CaTrustedFingerprint) + model.DefaultIntegrations = types.BoolPointerValue(data.IsDefault) + model.DefaultMonitoring = types.BoolPointerValue(data.IsDefaultMonitoring) + model.ConfigYaml = types.StringPointerValue(data.ConfigYaml) + model.Ssl, diags = sslToObjectValue(ctx, data.Ssl) + return +} + +func (model outputModel) toAPICreateLogstashModel(ctx context.Context) (kbapi.NewOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSL(ctx, model.Ssl) + if diags.HasError() { + return kbapi.NewOutputUnion{}, diags + } + body := kbapi.NewOutputLogstash{ + Type: kbapi.NewOutputLogstashTypeLogstash, + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags), + Id: model.OutputID.ValueStringPointer(), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueString(), + Ssl: ssl, + } + + var union kbapi.NewOutputUnion + err := union.FromNewOutputLogstash(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.NewOutputUnion{}, diags + } + + return union, diags +} + +func (model outputModel) toAPIUpdateLogstashModel(ctx context.Context) (kbapi.UpdateOutputUnion, diag.Diagnostics) { + ssl, diags := objectValueToSSLUpdate(ctx, 
model.Ssl) + if diags.HasError() { + return kbapi.UpdateOutputUnion{}, diags + } + body := kbapi.UpdateOutputLogstash{ + Type: utils.Pointer(kbapi.Logstash), + CaSha256: model.CaSha256.ValueStringPointer(), + CaTrustedFingerprint: model.CaTrustedFingerprint.ValueStringPointer(), + ConfigYaml: model.ConfigYaml.ValueStringPointer(), + Hosts: utils.SliceRef(utils.ListTypeToSlice_String(ctx, model.Hosts, path.Root("hosts"), &diags)), + IsDefault: model.DefaultIntegrations.ValueBoolPointer(), + IsDefaultMonitoring: model.DefaultMonitoring.ValueBoolPointer(), + Name: model.Name.ValueStringPointer(), + Ssl: ssl, + } + + var union kbapi.UpdateOutputUnion + err := union.FromUpdateOutputLogstash(body) + if err != nil { + diags.AddError(err.Error(), "") + return kbapi.UpdateOutputUnion{}, diags + } + + return union, diags +} diff --git a/internal/fleet/output/models_ssl.go b/internal/fleet/output/models_ssl.go new file mode 100644 index 000000000..e1f05d09a --- /dev/null +++ b/internal/fleet/output/models_ssl.go @@ -0,0 +1,69 @@ +package output + +import ( + "context" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type outputSslModel struct { + CertificateAuthorities types.List `tfsdk:"certificate_authorities"` //> string + Certificate types.String `tfsdk:"certificate"` + Key types.String `tfsdk:"key"` +} + +func objectValueToSSL(ctx context.Context, obj types.Object) (*kbapi.NewOutputSsl, diag.Diagnostics) { + if !utils.IsKnown(obj) { + return nil, nil + } + + var diags diag.Diagnostics + sslModel := utils.ObjectTypeAs[outputSslModel](ctx, obj, path.Root("ssl"), &diags) + if diags.HasError() { + return nil, diags + } + + if sslModel == nil { + return nil, diags + } + + return &kbapi.NewOutputSsl{ + Certificate: sslModel.Certificate.ValueStringPointer(), + CertificateAuthorities: utils.SliceRef(utils.ListTypeToSlice_String(ctx, sslModel.CertificateAuthorities, path.Root("certificate_authorities"), &diags)), + Key: sslModel.Key.ValueStringPointer(), + }, diags +} + +func objectValueToSSLUpdate(ctx context.Context, obj types.Object) (*kbapi.UpdateOutputSsl, diag.Diagnostics) { + ssl, diags := objectValueToSSL(ctx, obj) + if diags.HasError() || ssl == nil { + return nil, diags + } + + return &kbapi.UpdateOutputSsl{ + Certificate: ssl.Certificate, + CertificateAuthorities: ssl.CertificateAuthorities, + Key: ssl.Key, + }, diags +} + +func sslToObjectValue(ctx context.Context, ssl *kbapi.OutputSsl) (types.Object, diag.Diagnostics) { + if ssl == nil { + return types.ObjectNull(getSslAttrTypes()), nil + } + + var diags diag.Diagnostics + p := path.Root("ssl") + sslModel := outputSslModel{ + CertificateAuthorities: utils.SliceToListType_String(ctx, utils.Deref(ssl.CertificateAuthorities), p.AtName("certificate_authorities"), &diags), + Certificate: types.StringPointerValue(ssl.Certificate), + Key: types.StringPointerValue(ssl.Key), + } + obj, diagTemp := types.ObjectValueFrom(ctx, getSslAttrTypes(), sslModel) + diags.Append(diagTemp...) 
+ return obj, diags +} diff --git a/internal/fleet/output/models_ssl_test.go b/internal/fleet/output/models_ssl_test.go new file mode 100644 index 000000000..19f9b79eb --- /dev/null +++ b/internal/fleet/output/models_ssl_test.go @@ -0,0 +1,154 @@ +package output + +import ( + "context" + "testing" + + "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" + "github.com/elastic/terraform-provider-elasticstack/internal/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_objectValueToSSL(t *testing.T) { + type args struct { + obj types.Object + } + tests := []struct { + name string + args args + want *kbapi.NewOutputSsl + wantErr bool + }{ + { + name: "returns nil when object is unknown", + args: args{ + obj: types.ObjectUnknown(getSslAttrTypes()), + }, + }, + { + name: "returns an ssl object when populated", + args: args{ + obj: types.ObjectValueMust( + getSslAttrTypes(), + map[string]attr.Value{ + "certificate_authorities": types.ListValueMust(types.StringType, []attr.Value{types.StringValue("ca")}), + "certificate": types.StringValue("cert"), + "key": types.StringValue("key"), + }, + ), + }, + want: &kbapi.NewOutputSsl{ + Certificate: utils.Pointer("cert"), + CertificateAuthorities: &[]string{"ca"}, + Key: utils.Pointer("key"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, diags := objectValueToSSL(context.Background(), tt.args.obj) + if (diags.HasError()) != tt.wantErr { + t.Errorf("objectValueToSSL() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_objectValueToSSLUpdate(t *testing.T) { + type args struct { + obj types.Object + } + tests := []struct { + name string + args args + want *kbapi.UpdateOutputSsl + wantErr bool + }{ + { + name: "returns nil when object is unknown", + args: args{ + obj: types.ObjectUnknown(getSslAttrTypes()), + }, + }, + { + name: "returns an ssl object when populated", + args: args{ + obj: types.ObjectValueMust( + getSslAttrTypes(), + map[string]attr.Value{ + "certificate_authorities": types.ListValueMust(types.StringType, []attr.Value{types.StringValue("ca")}), + "certificate": types.StringValue("cert"), + "key": types.StringValue("key"), + }, + ), + }, + want: &kbapi.UpdateOutputSsl{ + Certificate: utils.Pointer("cert"), + CertificateAuthorities: &[]string{"ca"}, + Key: utils.Pointer("key"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, diags := objectValueToSSLUpdate(context.Background(), tt.args.obj) + if (diags.HasError()) != tt.wantErr { + t.Errorf("objectValueToSSLUpdate() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} + +func Test_sslToObjectValue(t *testing.T) { + type args struct { + ssl *kbapi.OutputSsl + } + tests := []struct { + name string + args args + want types.Object + wantErr bool + }{ + { + name: "returns nil when ssl is nil", + args: args{ + ssl: nil, + }, + want: types.ObjectNull(getSslAttrTypes()), + }, + { + name: "returns an object when populated", + args: args{ + ssl: &kbapi.OutputSsl{ + Certificate: utils.Pointer("cert"), + CertificateAuthorities: &[]string{"ca"}, + Key: utils.Pointer("key"), + }, + }, + want: types.ObjectValueMust( + getSslAttrTypes(), + map[string]attr.Value{ + "certificate_authorities": types.ListValueMust(types.StringType, 
[]attr.Value{types.StringValue("ca")}), + "certificate": types.StringValue("cert"), + "key": types.StringValue("key"), + }, + ), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, diags := sslToObjectValue(context.Background(), tt.args.ssl) + if (diags.HasError()) != tt.wantErr { + t.Errorf("sslToObjectValue() error = %v, wantErr %v", diags.HasError(), tt.wantErr) + return + } + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/internal/fleet/output/output_kafka_model.go b/internal/fleet/output/output_kafka_model.go deleted file mode 100644 index 6c3d1ae0e..000000000 --- a/internal/fleet/output/output_kafka_model.go +++ /dev/null @@ -1,216 +0,0 @@ -package output - -import ( - "context" - - "github.com/elastic/terraform-provider-elasticstack/generated/kbapi" - "github.com/elastic/terraform-provider-elasticstack/internal/utils" - "github.com/hashicorp/terraform-plugin-framework/diag" - "github.com/hashicorp/terraform-plugin-framework/path" - "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/hashicorp/terraform-plugin-framework/types/basetypes" -) - -type outputKafkaModel struct { - AuthType types.String `tfsdk:"auth_type"` - BrokerTimeout types.Float32 `tfsdk:"broker_timeout"` - ClientId types.String `tfsdk:"client_id"` - Compression types.String `tfsdk:"compression"` - CompressionLevel types.Int64 `tfsdk:"compression_level"` - ConnectionType types.String `tfsdk:"connection_type"` - Topic types.String `tfsdk:"topic"` - Partition types.String `tfsdk:"partition"` - RequiredAcks types.Int64 `tfsdk:"required_acks"` - Timeout types.Float32 `tfsdk:"timeout"` - Version types.String `tfsdk:"version"` - Username types.String `tfsdk:"username"` - Password types.String `tfsdk:"password"` - Key types.String `tfsdk:"key"` - Headers types.List `tfsdk:"headers"` //> outputHeadersModel - Hash types.Object `tfsdk:"hash"` //> outputHashModel - Random types.Object `tfsdk:"random"` //> outputRandomModel - RoundRobin types.Object `tfsdk:"round_robin"` //> outputRoundRobinModel - Sasl types.Object `tfsdk:"sasl"` //> outputSaslModel -} - -type outputHeadersModel struct { - Key types.String `tfsdk:"key"` - Value types.String `tfsdk:"value"` -} - -type outputHashModel struct { - Hash types.String `tfsdk:"hash"` - Random types.Bool `tfsdk:"random"` -} - -type outputRandomModel struct { - GroupEvents types.Float64 `tfsdk:"group_events"` -} - -type outputRoundRobinModel struct { - GroupEvents types.Float64 `tfsdk:"group_events"` -} - -type outputSaslModel struct { - Mechanism types.String `tfsdk:"mechanism"` -} - -func (m outputKafkaModel) toAPIHash(ctx context.Context) (*struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` -}, diag.Diagnostics) { - if !utils.IsKnown(m.Hash) { - return nil, nil - } - - var hashModel outputHashModel - diags := m.Hash.As(ctx, &hashModel, basetypes.ObjectAsOptions{}) - if diags.HasError() { - return nil, diags - } - - return &struct { - Hash *string `json:"hash,omitempty"` - Random *bool `json:"random,omitempty"` - }{ - Hash: hashModel.Hash.ValueStringPointer(), - Random: hashModel.Random.ValueBoolPointer(), - }, diags -} - -func (m outputKafkaModel) toAPIHeaders(ctx context.Context) (*[]struct { - Key string `json:"key"` - Value string `json:"value"` -}, diag.Diagnostics) { - if !utils.IsKnown(m.Headers) { - return nil, nil - } - - var diags diag.Diagnostics - headerModels := utils.ListTypeAs[outputHeadersModel](ctx, m.Headers, path.Root("kafka").AtName("headers"), &diags) - if len(headerModels) 
== 0 { - return nil, diags - } - - headers := make([]struct { - Key string `json:"key"` - Value string `json:"value"` - }, len(headerModels)) - for i, h := range headerModels { - headers[i] = struct { - Key string `json:"key"` - Value string `json:"value"` - }{ - Key: h.Key.ValueString(), - Value: h.Value.ValueString(), - } - } - return &headers, diags -} - -func (m outputKafkaModel) toAPIRandom(ctx context.Context) (*struct { - GroupEvents *float32 `json:"group_events,omitempty"` -}, diag.Diagnostics) { - if !utils.IsKnown(m.Random) { - return nil, nil - } - - var randomModel outputRandomModel - diags := m.Random.As(ctx, &randomModel, basetypes.ObjectAsOptions{}) - if diags.HasError() { - return nil, diags - } - - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !randomModel.GroupEvents.IsNull() { - val := float32(randomModel.GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - }, diags -} - -func (m outputKafkaModel) toAPIRoundRobin(ctx context.Context) (*struct { - GroupEvents *float32 `json:"group_events,omitempty"` -}, diag.Diagnostics) { - if !utils.IsKnown(m.RoundRobin) { - return nil, nil - } - - var roundRobinModel outputRoundRobinModel - diags := m.RoundRobin.As(ctx, &roundRobinModel, basetypes.ObjectAsOptions{}) - if diags.HasError() { - return nil, diags - } - return &struct { - GroupEvents *float32 `json:"group_events,omitempty"` - }{ - GroupEvents: func() *float32 { - if !roundRobinModel.GroupEvents.IsNull() { - val := float32(roundRobinModel.GroupEvents.ValueFloat64()) - return &val - } - return nil - }(), - }, nil -} - -func (m outputKafkaModel) toAPISasl(ctx context.Context) (*struct { - Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` -}, diag.Diagnostics) { - if !utils.IsKnown(m.Sasl) { - return nil, nil - } - var saslModel outputSaslModel - diags := m.Sasl.As(ctx, &saslModel, basetypes.ObjectAsOptions{}) - if diags.HasError() { - return nil, diags - } - - if saslModel.Mechanism.IsNull() { - return nil, diags - } - - mechanism := kbapi.NewOutputKafkaSaslMechanism(saslModel.Mechanism.ValueString()) - return &struct { - Mechanism *kbapi.NewOutputKafkaSaslMechanism `json:"mechanism,omitempty"` - }{ - Mechanism: &mechanism, - }, diags -} - -func (m outputKafkaModel) toUpdateAPISasl(ctx context.Context) (*struct { - Mechanism *kbapi.UpdateOutputKafkaSaslMechanism `json:"mechanism,omitempty"` -}, diag.Diagnostics) { - sasl, diags := m.toAPISasl(ctx) - if diags.HasError() || sasl == nil { - return nil, diags - } - - mechanism := kbapi.UpdateOutputKafkaSaslMechanism(*sasl.Mechanism) - return &struct { - Mechanism *kbapi.UpdateOutputKafkaSaslMechanism "json:\"mechanism,omitempty\"" - }{ - Mechanism: &mechanism, - }, diags -} - -func (m outputKafkaModel) toAuthType() kbapi.NewOutputKafkaAuthType { - if !utils.IsKnown(m.AuthType) { - return kbapi.NewOutputKafkaAuthTypeNone - } - - return kbapi.NewOutputKafkaAuthType(m.AuthType.ValueString()) -} - -func (m outputKafkaModel) toUpdateAuthType() *kbapi.UpdateOutputKafkaAuthType { - if !utils.IsKnown(m.AuthType) { - return nil - } - - return utils.Pointer(kbapi.UpdateOutputKafkaAuthType(m.AuthType.ValueString())) -}