Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@
- Migrate `elasticstack_kibana_action_connector` to the Terraform plugin framework ([#1269](https://github.com/elastic/terraform-provider-elasticstack/pull/1269))
- Migrate `elasticstack_elasticsearch_security_role_mapping` resource and data source to Terraform Plugin Framework ([#1279](https://github.com/elastic/terraform-provider-elasticstack/pull/1279))
- Add support for `inactivity_timeout` in `elasticstack_fleet_agent_policy` ([#641](https://github.com/elastic/terraform-provider-elasticstack/issues/641))
- Add support for `kafka` output types in `elasticstack_fleet_output` ([#1302](https://github.com/elastic/terraform-provider-elasticstack/pull/1302))
- Add support for `prevent_initial_backfill` to `elasticstack_kibana_slo` ([#1071](https://github.com/elastic/terraform-provider-elasticstack/pull/1071))
- [Refactor] Regenerate the SLO client using the current OpenAPI spec ([#1303](https://github.com/elastic/terraform-provider-elasticstack/pull/1303))
- Add support for `data_view_id` in the `elasticstack_kibana_slo` resource ([#1305](https://github.com/elastic/terraform-provider-elasticstack/pull/1305))
Expand Down
238 changes: 235 additions & 3 deletions docs/resources/fleet_output.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@

---
# generated by https://github.com/hashicorp/terraform-plugin-docs
page_title: "elasticstack_fleet_output Resource - terraform-provider-elasticstack"
Expand All @@ -13,6 +12,8 @@ Creates a new Fleet Output.

## Example Usage

### Basic output

```terraform
provider "elasticstack" {
kibana {}
Expand All @@ -32,6 +33,168 @@ resource "elasticstack_fleet_output" "test_output" {
}
```

### Basic Kafka output

```terraform
terraform {
required_providers {
elasticstack = {
source = "elastic/elasticstack"
version = "~> 0.11"
}
}
}

provider "elasticstack" {
elasticsearch {}
kibana {}
}

# Basic Kafka Fleet Output
resource "elasticstack_fleet_output" "kafka_basic" {
name = "Basic Kafka Output"
output_id = "kafka-basic-output"
type = "kafka"
default_integrations = false
default_monitoring = false

hosts = [
"kafka:9092"
]

# Basic Kafka configuration
kafka = {
auth_type = "user_pass"
username = "kafka_user"
password = "kafka_password"
topic = "elastic-beats"
partition = "hash"
compression = "gzip"
required_acks = 1

headers = [
{
key = "environment"
value = "production"
}
]
}
}
```

### Advanced Kafka output

```terraform
terraform {
required_providers {
elasticstack = {
source = "elastic/elasticstack"
version = "~> 0.11"
}
}
}

provider "elasticstack" {
elasticsearch {}
kibana {}
}

# Advanced Kafka Fleet Output with SSL authentication
resource "elasticstack_fleet_output" "kafka_advanced" {
name = "Advanced Kafka Output"
output_id = "kafka-advanced-output"
type = "kafka"
default_integrations = false
default_monitoring = false

hosts = [
"kafka1:9092",
"kafka2:9092",
"kafka3:9092"
]

# Advanced Kafka configuration
kafka = {
auth_type = "ssl"
topic = "elastic-logs"
partition = "hash"
compression = "snappy"
required_acks = -1
broker_timeout = 10
timeout = 30
version = "2.6.0"
client_id = "elastic-beats-client"

# Custom headers for message metadata
headers = [
{
key = "datacenter"
value = "us-west-1"
},
{
key = "service"
value = "beats"
},
{
key = "environment"
value = "production"
}
]

# Hash-based partitioning
hash = {
hash = "host.name"
random = false
}

# SASL configuration
sasl = {
mechanism = "SCRAM-SHA-256"
}
}

# SSL configuration (reusing common SSL block)
ssl = {
certificate_authorities = [
file("${path.module}/ca.crt")
]
certificate = file("${path.module}/client.crt")
key = file("${path.module}/client.key")
}

# Additional YAML configuration for advanced settings
config_yaml = yamlencode({
"ssl.verification_mode" = "full"
"ssl.supported_protocols" = ["TLSv1.2", "TLSv1.3"]
"max.message.bytes" = 1000000
})
}

# Example showing round-robin partitioning with event grouping
resource "elasticstack_fleet_output" "kafka_round_robin" {
name = "Kafka Round Robin Output"
output_id = "kafka-round-robin-output"
type = "kafka"
default_integrations = false
default_monitoring = false

hosts = ["kafka:9092"]

kafka = {
auth_type = "none"
topic = "elastic-metrics"
partition = "round_robin"
compression = "lz4"

round_robin = {
group_events = 100
}
}
}
```

<!-- schema generated by tfplugindocs -->
## Schema

Expand All @@ -48,14 +211,83 @@ resource "elasticstack_fleet_output" "test_output" {
- `default_integrations` (Boolean) Make this output the default for agent integrations.
- `default_monitoring` (Boolean) Make this output the default for agent monitoring.
- `hosts` (List of String) A list of hosts.
- `kafka` (Attributes) Kafka-specific configuration. (see [below for nested schema](#nestedatt--kafka))
- `output_id` (String) Unique identifier of the output.
- `ssl` (Block List) SSL configuration. (see [below for nested schema](#nestedblock--ssl))
- `ssl` (Attributes) SSL configuration. (see [below for nested schema](#nestedatt--ssl))

### Read-Only

- `id` (String) The ID of this resource.

<a id="nestedblock--ssl"></a>
<a id="nestedatt--kafka"></a>
### Nested Schema for `kafka`

Optional:

- `auth_type` (String) Authentication type for Kafka output.
- `broker_timeout` (Number) Kafka broker timeout.
- `client_id` (String) Kafka client ID.
- `compression` (String) Compression type for Kafka output.
- `compression_level` (Number) Compression level for Kafka output.
- `connection_type` (String) Connection type for Kafka output.
- `hash` (Attributes) Hash configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--hash))
- `headers` (Attributes List) Headers for Kafka messages. (see [below for nested schema](#nestedatt--kafka--headers))
- `key` (String) Key field for Kafka messages.
- `partition` (String) Partition strategy for Kafka output.
- `password` (String, Sensitive) Password for Kafka authentication.
- `random` (Attributes) Random configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--random))
- `required_acks` (Number) Number of acknowledgments required for Kafka output.
- `round_robin` (Attributes) Round robin configuration for Kafka partition. (see [below for nested schema](#nestedatt--kafka--round_robin))
- `sasl` (Attributes) SASL configuration for Kafka authentication. (see [below for nested schema](#nestedatt--kafka--sasl))
- `timeout` (Number) Timeout for Kafka output.
- `topic` (String) Kafka topic.
- `username` (String) Username for Kafka authentication.
- `version` (String) Kafka version.

<a id="nestedatt--kafka--hash"></a>
### Nested Schema for `kafka.hash`

Optional:

- `hash` (String) Hash field.
- `random` (Boolean) Use random hash.


<a id="nestedatt--kafka--headers"></a>
### Nested Schema for `kafka.headers`

Required:

- `key` (String) Header key.
- `value` (String) Header value.


<a id="nestedatt--kafka--random"></a>
### Nested Schema for `kafka.random`

Optional:

- `group_events` (Number) Number of events to group.


<a id="nestedatt--kafka--round_robin"></a>
### Nested Schema for `kafka.round_robin`

Optional:

- `group_events` (Number) Number of events to group.


<a id="nestedatt--kafka--sasl"></a>
### Nested Schema for `kafka.sasl`

Optional:

- `mechanism` (String) SASL mechanism.



<a id="nestedatt--ssl"></a>
### Nested Schema for `ssl`

Required:
Expand Down
108 changes: 108 additions & 0 deletions examples/resources/elasticstack_fleet_output/kafka_advanced.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,108 @@
# Terraform settings: pin the elasticstack provider source and the
# version range these Kafka output examples were written against.
terraform {
required_providers {
elasticstack = {
source = "elastic/elasticstack"
version = "~> 0.11"
}
}
}

# Provider configuration. Connection details (endpoints, credentials) are
# expected to come from the environment; both the Elasticsearch and Kibana
# clients are enabled since Fleet resources are managed through Kibana.
provider "elasticstack" {
elasticsearch {}
kibana {}
}

# Advanced Kafka Fleet Output with SSL authentication.
# Demonstrates multiple brokers, custom message headers, hash-based
# partitioning, SASL, a reusable SSL block, and extra YAML settings.
resource "elasticstack_fleet_output" "kafka_advanced" {
  name                 = "Advanced Kafka Output"
  output_id            = "kafka-advanced-output"
  type                 = "kafka"
  default_integrations = false
  default_monitoring   = false

  hosts = [
    "kafka1:9092",
    "kafka2:9092",
    "kafka3:9092"
  ]

  # Advanced Kafka configuration
  kafka = {
    auth_type = "ssl"
    topic     = "elastic-logs"
    # "hash" matches the hash { ... } block configured below; previously this
    # was "round_robin", which contradicted the hash partition settings.
    partition      = "hash"
    compression    = "snappy"
    required_acks  = -1 # -1 waits for acknowledgment from all in-sync replicas
    broker_timeout = 10
    timeout        = 30
    version        = "2.6.0"
    client_id      = "elastic-beats-client"

    # Custom headers for message metadata
    headers = [
      {
        key   = "datacenter"
        value = "us-west-1"
      },
      {
        key   = "service"
        value = "beats"
      },
      {
        key   = "environment"
        value = "production"
      }
    ]

    # Hash-based partitioning: route events by the given field rather than
    # randomly.
    hash = {
      hash   = "host.name"
      random = false
    }

    # SASL configuration
    sasl = {
      mechanism = "SCRAM-SHA-256"
    }
  }

  # SSL configuration (reusing common SSL block)
  ssl = {
    certificate_authorities = [
      file("${path.module}/ca.crt")
    ]
    certificate = file("${path.module}/client.crt")
    key         = file("${path.module}/client.key")
  }

  # Additional YAML configuration for advanced settings
  config_yaml = yamlencode({
    "ssl.verification_mode"   = "full"
    "ssl.supported_protocols" = ["TLSv1.2", "TLSv1.3"]
    "max.message.bytes"       = 1000000
  })
}

# Example showing round-robin partitioning with event grouping.
resource "elasticstack_fleet_output" "kafka_round_robin" {
  name                 = "Kafka Round Robin Output"
  output_id            = "kafka-round-robin-output"
  type                 = "kafka"
  default_integrations = false
  default_monitoring   = false

  hosts = ["kafka:9092"]

  kafka = {
    auth_type   = "none"
    topic       = "elastic-metrics"
    partition   = "round_robin"
    compression = "lz4"

    # round_robin is a single nested attribute object in the provider schema
    # (not a list); previously this example wrapped it in a one-element list.
    round_robin = {
      group_events = 100
    }
  }
}
Loading
Loading