diff --git a/.meta/sinks/clickhouse.toml.erb b/.meta/sinks/clickhouse.toml.erb
index 1ccfa1d2932c0..e9d3b6f5b52e4 100644
--- a/.meta/sinks/clickhouse.toml.erb
+++ b/.meta/sinks/clickhouse.toml.erb
@@ -92,12 +92,12 @@ description = "The token to use for bearer authentication"
 }
 ) %>
 
-[sinks.clickhouse.options.host]
+[sinks.clickhouse.options.endpoint]
 type = "string"
 common = true
 examples = ["http://localhost:8123"]
 required = true
-description = "The host url of the [Clickhouse][urls.clickhouse] server."
+description = "The endpoint of the [Clickhouse][urls.clickhouse] server."
 
 [sinks.clickhouse.options.table]
 type = "string"
diff --git a/.meta/sinks/datadog_metrics.toml.erb b/.meta/sinks/datadog_metrics.toml.erb
index b7e47b3c25e6c..84ddb17c84f98 100644
--- a/.meta/sinks/datadog_metrics.toml.erb
+++ b/.meta/sinks/datadog_metrics.toml.erb
@@ -41,7 +41,7 @@ examples = ["${DATADOG_API_KEY}", "ef8d5de700e7989468166c40fc8a0ccd"]
 required = true
 description = "Datadog [API key](https://docs.datadoghq.com/api/?lang=bash#authentication)"
 
-[sinks.datadog_metrics.options.host]
+[sinks.datadog_metrics.options.endpoint]
 type = "string"
 examples = ["https://api.datadoghq.com", "https://api.datadoghq.eu"]
 default = "https://api.datadoghq.com"
diff --git a/.meta/sinks/elasticsearch.toml.erb b/.meta/sinks/elasticsearch.toml.erb
index acc14cc563f0d..5ef0a8b3bb46b 100644
--- a/.meta/sinks/elasticsearch.toml.erb
+++ b/.meta/sinks/elasticsearch.toml.erb
@@ -124,7 +124,7 @@ relevant_when = {strategy = "aws"}
 required = false
 description = """\
 The [AWS region][urls.aws_regions] of the target service. \
-This defaults to the region named in the host parameter, \
+This defaults to the region named in the endpoint parameter, \
 or the value of the `$AWS_REGION` or `$AWS_DEFAULT_REGION` environment \
 variables if that cannot be determined, or "us-east-1".\
 """
@@ -160,12 +160,12 @@ description = """\
 A custom header to be added to each outgoing Elasticsearch request.\
 """
 
-[sinks.elasticsearch.options.host]
+[sinks.elasticsearch.options.endpoint]
 type = "string"
 common = true
 examples = ["http://10.24.32.122:9000"]
 description = """\
-The host of your Elasticsearch cluster. This should be the full URL as shown \
+The endpoint of your Elasticsearch cluster. This should be the full URL as shown \
 in the example.\
 """
diff --git a/.meta/sinks/humio_logs.toml.erb b/.meta/sinks/humio_logs.toml.erb
index 1d38a480d0cd6..c5e0350c94c08 100644
--- a/.meta/sinks/humio_logs.toml.erb
+++ b/.meta/sinks/humio_logs.toml.erb
@@ -78,11 +78,11 @@ to use to ingest the data. If unset, Humio will default it to none.
 """
 
-[sinks.humio_logs.options.host]
+[sinks.humio_logs.options.endpoint]
 type = "string"
 default = "https://cloud.humio.com"
 examples = ["http://myhumiohost.com"]
-description = "The optional host to send Humio logs to."
+description = "The optional endpoint to send Humio logs to."
 
 <%= render("_partials/fields/_compression_options.toml",
   namespace: "sinks.humio_logs.options",
diff --git a/.meta/sinks/logdna.toml.erb b/.meta/sinks/logdna.toml.erb
index a78a889f9458e..9db9597dde52a 100644
--- a/.meta/sinks/logdna.toml.erb
+++ b/.meta/sinks/logdna.toml.erb
@@ -51,11 +51,11 @@ required = true
 examples = ["${LOGDNA_API_KEY}", "ef8d5de700e7989468166c40fc8a0ccd"]
 description = "The Ingestion API key."
 
-[sinks.logdna.options.host]
+[sinks.logdna.options.endpoint]
 type = "string"
 required = false
 examples = ["http://127.0.0.1", "http://example.com"]
-description = "An optional host that will override the default one."
+description = "An optional endpoint that will override the default one." [sinks.logdna.options.hostname] type = "string" diff --git a/.meta/sinks/pulsar.toml.erb b/.meta/sinks/pulsar.toml.erb index 2cde6c77a0386..154dc9cf9ed49 100644 --- a/.meta/sinks/pulsar.toml.erb +++ b/.meta/sinks/pulsar.toml.erb @@ -30,12 +30,12 @@ write_to_description = "[Apache Pulsar][urls.pulsar] via the [Pulsar protocol][u default: "text" ) %> -[sinks.pulsar.options.address] +[sinks.pulsar.options.endpoint] type = "string" common = true examples = ["pulsar://127.0.0.1:6650"] required = true -description = "A host and port pair that the pulsar client should connect to." +description = "Endpoint to which the pulsar client should connect to." [sinks.pulsar.options.topic] type = "string" diff --git a/.meta/sinks/sematext_logs.toml.erb b/.meta/sinks/sematext_logs.toml.erb index 13e8563ae0df4..19819276b1f2c 100644 --- a/.meta/sinks/sematext_logs.toml.erb +++ b/.meta/sinks/sematext_logs.toml.erb @@ -48,13 +48,13 @@ write_to_description = "[Sematext][urls.sematext] via the [Elasticsearch API][ur type = "string" required = false examples = ["na", "eu"] -description = "The region destination to send logs to. This option is required if `host` is not set." +description = "The region destination to send logs to. This option is required if `endpoint` is not set." -[sinks.sematext_logs.options.host] +[sinks.sematext_logs.options.endpoint] type = "string" required = false examples = ["http://127.0.0.1", "http://example.com"] -description = "The host that will be used to send logs to. This option is required if `region` is not set." +description = "The endpoint that will be used to send logs to. This option is required if `region` is not set." [sinks.sematext_logs.options.token] type = "string" diff --git a/.meta/sinks/splunk_hec.toml.erb b/.meta/sinks/splunk_hec.toml.erb index ead84550dd0a1..7d8e6b9b48a53 100644 --- a/.meta/sinks/splunk_hec.toml.erb +++ b/.meta/sinks/splunk_hec.toml.erb @@ -48,12 +48,12 @@ write_to_description = "a [Splunk's HTTP Event Collector][urls.splunk_hec]" default: "text" ) %> -[sinks.splunk_hec.options.host] +[sinks.splunk_hec.options.endpoint] type = "string" common = true examples = ["http://my-splunk-host.com"] required = true -description = "Your Splunk HEC host." +description = "Your Splunk HEC endpoint." [sinks.splunk_hec.options.host_key] type = "string" diff --git a/.meta/sources/prometheus.toml.erb b/.meta/sources/prometheus.toml.erb index 59115dd71635d..f0aa238dfbba1 100644 --- a/.meta/sources/prometheus.toml.erb +++ b/.meta/sources/prometheus.toml.erb @@ -18,12 +18,12 @@ through_description = "the [Prometheus text exposition format][urls.prometheus_t <%= render("_partials/fields/_component_options.toml", type: "source", name: "prometheus") %> -[sources.prometheus.options.hosts] +[sources.prometheus.options.endpoints] type = "[string]" common = true required = true examples = [["http://localhost:9090"]] -description = "Host addresses to scrape metrics from." +description = "Endpoints to scrape metrics from." [sources.prometheus.options.scrape_interval_secs] type = "uint" diff --git a/.meta/transforms/aws_ec2_metadata.toml.erb b/.meta/transforms/aws_ec2_metadata.toml.erb index 250453da5ad5c..921ef5c227a54 100644 --- a/.meta/transforms/aws_ec2_metadata.toml.erb +++ b/.meta/transforms/aws_ec2_metadata.toml.erb @@ -40,11 +40,11 @@ common = true default = 10 description = "The interval in seconds at which the EC2 Metadata api will be called." 
-[transforms.aws_ec2_metadata.options.host]
+[transforms.aws_ec2_metadata.options.endpoint]
 type = "string"
 common = true
 default = "http://169.254.169.254"
-description = "Override the default EC2 Metadata host."
+description = "Override the default EC2 Metadata endpoint."
 
 [transforms.aws_ec2_metadata.fields.log.fields.ami-id]
 type = "string"
diff --git a/src/sinks/aws_kinesis_firehose.rs b/src/sinks/aws_kinesis_firehose.rs
index d79cb58b49a80..0ba54c11c2e6e 100644
--- a/src/sinks/aws_kinesis_firehose.rs
+++ b/src/sinks/aws_kinesis_firehose.rs
@@ -332,7 +332,7 @@ mod integration_tests {
 
         let config = ElasticSearchConfig {
             auth: Some(ElasticSearchAuth::Aws { assume_role: None }),
-            host: "http://localhost:4571".into(),
+            endpoint: "http://localhost:4571".into(),
             index: Some(stream.clone()),
             ..Default::default()
         };
diff --git a/src/sinks/clickhouse.rs b/src/sinks/clickhouse.rs
index cbc7bfd724343..05472cb5688bb 100644
--- a/src/sinks/clickhouse.rs
+++ b/src/sinks/clickhouse.rs
@@ -21,7 +21,9 @@ use snafu::ResultExt;
 #[derive(Deserialize, Serialize, Debug, Clone, Default)]
 #[serde(deny_unknown_fields)]
 pub struct ClickhouseConfig {
-    pub host: String,
+    // Deprecated name
+    #[serde(alias = "host")]
+    pub endpoint: String,
     pub table: String,
     pub database: Option<String>,
     #[serde(default = "Compression::default_gzip")]
@@ -114,7 +116,7 @@ impl HttpSink for ClickhouseConfig {
             "default"
         };
 
-        let uri = encode_uri(&self.host, database, &self.table).expect("Unable to encode uri");
+        let uri = encode_uri(&self.endpoint, database, &self.table).expect("Unable to encode uri");
 
        let mut builder = Request::post(&uri).header("Content-Type", "application/x-ndjson");
 
@@ -134,7 +136,7 @@
 
 async fn healthcheck(mut client: HttpClient, config: ClickhouseConfig) -> crate::Result<()> {
     // TODO: check if table exists?
-    let uri = format!("{}/?query=SELECT%201", config.host);
+    let uri = format!("{}/?query=SELECT%201", config.endpoint);
     let mut request = Request::get(uri).body(Body::empty()).unwrap();
 
     if let Some(auth) = &config.auth {
@@ -251,7 +253,7 @@ mod integration_tests {
         let host = String::from("http://localhost:8123");
 
         let config = ClickhouseConfig {
-            host: host.clone(),
+            endpoint: host.clone(),
             table: table.clone(),
             compression: Compression::None,
             batch: BatchConfig {
@@ -294,7 +296,7 @@
         encoding.timestamp_format = Some(TimestampFormat::Unix);
 
         let config = ClickhouseConfig {
-            host: host.clone(),
+            endpoint: host.clone(),
             table: table.clone(),
             compression: Compression::None,
             encoding,
@@ -411,7 +413,7 @@ timestamp_format = "unix""#,
         let host = String::from("http://localhost:8123");
 
         let config = ClickhouseConfig {
-            host: host.clone(),
+            endpoint: host.clone(),
             table: table.clone(),
             compression: Compression::None,
             batch: BatchConfig {
diff --git a/src/sinks/datadog/logs.rs b/src/sinks/datadog/logs.rs
index 490a5d3b011a4..a78dc2c7c049e 100644
--- a/src/sinks/datadog/logs.rs
+++ b/src/sinks/datadog/logs.rs
@@ -33,10 +33,10 @@ impl SinkConfig for DatadogLogsConfig {
         let (host, port, tls) = if let Some(uri) = &self.endpoint {
             let host = uri
                 .host()
-                .ok_or_else(|| "A host is required for endpoints".to_string())?;
+                .ok_or_else(|| "A host is required for endpoint".to_string())?;
             let port = uri
                 .port_u16()
-                .ok_or_else(|| "A port is required for endpoints".to_string())?;
+                .ok_or_else(|| "A port is required for endpoint".to_string())?;
 
             (host.to_string(), port, self.tls.clone())
         } else {
diff --git a/src/sinks/datadog/metrics.rs b/src/sinks/datadog/metrics.rs
index 9480a63c5c9e8..827eea994a74c 100644
--- a/src/sinks/datadog/metrics.rs
+++ b/src/sinks/datadog/metrics.rs
@@ -35,8 +35,9 @@ struct DatadogState {
 #[serde(deny_unknown_fields)]
 pub struct DatadogConfig {
     pub namespace: String,
-    #[serde(default = "default_host")]
-    pub host: String,
+    // Deprecated name
+    #[serde(alias = "host", default = "default_endpoint")]
+    pub endpoint: String,
     pub api_key: String,
     #[serde(default)]
     pub batch: BatchConfig,
@@ -63,7 +64,7 @@ struct DatadogRequest {
     series: Vec,
 }
 
-pub fn default_host() -> String {
+pub fn default_endpoint() -> String {
     String::from("https://api.datadoghq.com")
 }
@@ -114,7 +115,7 @@ impl SinkConfig for DatadogConfig {
             .parse_config(self.batch)?;
         let request = self.request.unwrap_with(&REQUEST_DEFAULTS);
 
-        let uri = build_uri(&self.host)?;
+        let uri = build_uri(&self.endpoint)?;
         let timestamp = Utc::now().timestamp();
 
         let sink = DatadogSink {
@@ -179,7 +180,7 @@ fn build_uri(host: &str) -> crate::Result<Uri> {
 }
 
 async fn healthcheck(config: DatadogConfig, mut client: HttpClient) -> crate::Result<()> {
-    let uri = format!("{}/api/v1/validate", config.host)
+    let uri = format!("{}/api/v1/validate", config.endpoint)
         .parse::<Uri>()
         .context(super::UriParseError)?;
 
@@ -417,7 +418,7 @@ mod tests {
         let timestamp = Utc::now().timestamp();
 
         let sink = DatadogSink {
            config: sink,
-            uri: build_uri(&default_host()).unwrap(),
+            uri: build_uri(&default_endpoint()).unwrap(),
             last_sent_timestamp: AtomicI64::new(timestamp),
         };
diff --git a/src/sinks/elasticsearch.rs b/src/sinks/elasticsearch.rs
index a258c0c6abc64..90d37529e9ee6 100644
--- a/src/sinks/elasticsearch.rs
+++ b/src/sinks/elasticsearch.rs
@@ -35,7 +35,9 @@ use std::convert::TryFrom;
 #[derive(Deserialize, Serialize, Debug, Clone, Default)]
 #[serde(deny_unknown_fields)]
 pub struct ElasticSearchConfig {
-    pub host: String,
+    // Deprecated name
+    #[serde(alias = "host")]
+    pub endpoint: String,
     pub index: Option<String>,
     pub doc_type: Option<String>,
     pub id_key: Option<String>,
@@ -358,20 +360,20 @@ impl ElasticSearchCommon {
             _ => None,
         };
 
-        let base_url = config.host.clone();
+        let base_url = config.endpoint.clone();
 
         let region = match &config.aws {
             Some(region) => Region::try_from(region)?,
-            None => region_from_endpoint(&config.host)?,
+            None => region_from_endpoint(&config.endpoint)?,
         };
 
         // Test the configured host, but ignore the result
-        let uri = format!("{}/_test", &config.host);
+        let uri = format!("{}/_test", &config.endpoint);
         let uri = uri
             .parse::<Uri>()
             .with_context(|| InvalidHost { host: &base_url })?;
         if uri.host().is_none() {
             return Err(ParseError::HostMustIncludeHostname {
-                host: config.host.clone(),
+                host: config.endpoint.clone(),
             }
             .into());
        }
@@ -588,7 +590,7 @@ mod integration_tests {
         let pipeline = String::from("test-pipeline");
         let config = ElasticSearchConfig {
-            host: "http://localhost:9200".into(),
+            endpoint: "http://localhost:9200".into(),
             index: Some(index.clone()),
             pipeline: Some(pipeline.clone()),
             ..config()
         };
@@ -602,7 +604,7 @@
     async fn structures_events_correctly() {
         let index = gen_index();
         let config = ElasticSearchConfig {
-            host: "http://localhost:9200".into(),
+            endpoint: "http://localhost:9200".into(),
             index: Some(index.clone()),
             doc_type: Some("log_lines".into()),
             id_key: Some("my_id".into()),
@@ -659,7 +661,7 @@
 
         run_insert_tests(
             ElasticSearchConfig {
-                host: "http://localhost:9200".into(),
+                endpoint: "http://localhost:9200".into(),
                 doc_type: Some("log_lines".into()),
                 compression: Compression::None,
                 ..config()
@@ -675,7 +677,7 @@
 
         run_insert_tests(
             ElasticSearchConfig {
-                host: "https://localhost:9201".into(),
+                endpoint: "https://localhost:9201".into(),
                 doc_type: Some("log_lines".into()),
                 compression: Compression::None,
                 tls: Some(TlsOptions {
@@ -696,7 +698,7 @@
         run_insert_tests(
             ElasticSearchConfig {
                 auth: Some(ElasticSearchAuth::Aws { assume_role: None }),
-                host: "http://localhost:4571".into(),
+                endpoint: "http://localhost:4571".into(),
                 ..config()
             },
             false,
@@ -710,7 +712,7 @@
 
         run_insert_tests(
             ElasticSearchConfig {
-                host: "http://localhost:9200".into(),
+                endpoint: "http://localhost:9200".into(),
                 doc_type: Some("log_lines".into()),
                 compression: Compression::None,
                 ..config()
diff --git a/src/sinks/humio_logs.rs b/src/sinks/humio_logs.rs
index 1d19fd5002b5b..1935b17721424 100644
--- a/src/sinks/humio_logs.rs
+++ b/src/sinks/humio_logs.rs
@@ -13,7 +13,9 @@ const HOST: &str = "https://cloud.humio.com";
 #[derive(Clone, Debug, Deserialize, Serialize, Default)]
 pub struct HumioLogsConfig {
     token: String,
-    host: Option<String>,
+    // Deprecated name
+    #[serde(alias = "host")]
+    endpoint: Option<String>,
     source: Option
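
The backward-compatible rename above leans entirely on serde's `alias` attribute: each struct field becomes `endpoint`, while `#[serde(alias = "host")]` keeps existing configs that still say `host` deserializing into the same field. Below is a minimal standalone sketch of that behavior, not code from this diff: `ExampleConfig` is an illustrative stand-in, and the `toml` crate stands in for Vector's config loader.

```rust
use serde::Deserialize;

#[derive(Debug, Deserialize, PartialEq)]
#[serde(deny_unknown_fields)]
struct ExampleConfig {
    // New canonical key; `alias` still accepts the deprecated `host` key,
    // even with `deny_unknown_fields` in effect.
    #[serde(alias = "host")]
    endpoint: String,
}

fn main() {
    // A config written before the rename still parses...
    let old: ExampleConfig = toml::from_str(r#"host = "http://localhost:8123""#).unwrap();
    // ...and lands in the same field as the new spelling.
    let new: ExampleConfig = toml::from_str(r#"endpoint = "http://localhost:8123""#).unwrap();
    assert_eq!(old, new);
}
```

Note that `alias` only affects deserialization: serializing one of these configs emits `endpoint`, and supplying both keys at once is rejected as a duplicate field.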
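Where the old field also carried a default, as in `datadog_metrics` above, the diff combines `alias` with serde's `default` function, so omitting both keys falls back to the hosted API URL. A sketch under the same assumptions (`MetricsExample` is hypothetical; `default_endpoint` mirrors the function in the diff):

```rust
use serde::Deserialize;

fn default_endpoint() -> String {
    String::from("https://api.datadoghq.com")
}

#[derive(Debug, Deserialize)]
struct MetricsExample {
    // `alias` handles the deprecated key; `default` covers the absent case.
    #[serde(alias = "host", default = "default_endpoint")]
    endpoint: String,
}

fn main() {
    // Neither `host` nor `endpoint` is set, so the default applies.
    let cfg: MetricsExample = toml::from_str("").unwrap();
    assert_eq!(cfg.endpoint, "https://api.datadoghq.com");
}
```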