diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c9ecaa12..99ba9ab42 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,9 @@
## [Unreleased]
+### Added
+- Introduce `elasticstack_kibana_import_saved_objects` resource as an additive-only way to manage Kibana saved objects ([#343](https://github.com/elastic/terraform-provider-elasticstack/pull/343)).
+- Add support for Terraform Plugin Framework ([#343](https://github.com/elastic/terraform-provider-elasticstack/pull/343)).
+
## [0.9.0] - 2023-10-09
### Added
diff --git a/docs/index.md b/docs/index.md
index 3d358c5d7..d302157d7 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -145,7 +145,7 @@ Optional:
Optional:
-- `api_key` (String, Sensitive) API key to use for API authentication to Fleet.
+- `api_key` (String, Sensitive) API Key to use for authentication to Fleet.
- `ca_certs` (List of String) A list of paths to CA certificates to validate the certificate presented by the Fleet server.
- `endpoint` (String, Sensitive) The Fleet server where the terraform provider will point to, this must include the http(s) schema and port number.
- `insecure` (Boolean) Disable TLS certificate validation
@@ -158,7 +158,7 @@ Optional:
Optional:
-- `endpoints` (List of String, Sensitive) A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.
+- `endpoints` (List of String, Sensitive) A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.
- `insecure` (Boolean) Disable TLS certificate validation
- `password` (String, Sensitive) Password to use for API authentication to Kibana.
- `username` (String) Username to use for API authentication to Kibana.
diff --git a/docs/resources/kibana_import_saved_objects.md b/docs/resources/kibana_import_saved_objects.md
new file mode 100644
index 000000000..efbe94ac6
--- /dev/null
+++ b/docs/resources/kibana_import_saved_objects.md
@@ -0,0 +1,99 @@
+---
+subcategory: "Kibana"
+layout: ""
+page_title: "Elasticstack: elasticstack_kibana_import_saved_objects Resource"
+description: |-
+ Create sets of Kibana saved objects from a file created by the export API.
+---
+
+# Resource: elasticstack_kibana_import_saved_objects
+
+Create sets of Kibana saved objects from a file created by the export API. See https://www.elastic.co/guide/en/kibana/current/saved-objects-api-import.html
+
+## Example Usage
+
+```terraform
+provider "elasticstack" {
+ kibana {}
+}
+
+resource "elasticstack_kibana_import_saved_objects" "settings" {
+ overwrite = true
+ file_contents = <<-EOT
+{"attributes":{"buildNum":42747,"defaultIndex":"metricbeat-*","theme:darkMode":true},"coreMigrationVersion":"7.0.0","id":"7.14.0","managed":false,"references":[],"type":"config","typeMigrationVersion":"7.0.0","updated_at":"2021-08-04T02:04:43.306Z","version":"WzY1MiwyXQ=="}
+{"excludedObjects":[],"excludedObjectsCount":0,"exportedCount":1,"missingRefCount":0,"missingReferences":[]}
+EOT
+}
+```
+
+
+## Schema
+
+### Required
+
+- `file_contents` (String) The contents of the exported saved objects file.
+
+### Optional
+
+- `ignore_import_errors` (Boolean) If set to true, errors during the import process will not fail the configuration application.
+- `overwrite` (Boolean) Overwrites saved objects when they already exist. When used, potential conflict errors are automatically resolved by overwriting the destination object.
+- `space_id` (String) An identifier for the space. If space_id is not provided, the default space is used.
+
+### Read-Only
+
+- `errors` (List of Object) (see [below for nested schema](#nestedatt--errors))
+- `id` (String) Generated ID for the import.
+- `success` (Boolean) Indicates when the import was successfully completed. When set to false, some objects may not have been created. For additional information, refer to the errors and success_results properties.
+- `success_count` (Number) Indicates the number of successfully imported records.
+- `success_results` (List of Object) (see [below for nested schema](#nestedatt--success_results))
+
+
+### Nested Schema for `errors`
+
+Read-Only:
+
+- `error` (Object) (see [below for nested schema](#nestedobjatt--errors--error))
+- `id` (String)
+- `meta` (Object) (see [below for nested schema](#nestedobjatt--errors--meta))
+- `title` (String)
+- `type` (String)
+
+
+### Nested Schema for `errors.error`
+
+Read-Only:
+
+- `type` (String)
+
+
+
+### Nested Schema for `errors.meta`
+
+Read-Only:
+
+- `icon` (String)
+- `title` (String)
+
+
+
+
+### Nested Schema for `success_results`
+
+Read-Only:
+
+- `destination_id` (String)
+- `id` (String)
+- `meta` (Object) (see [below for nested schema](#nestedobjatt--success_results--meta))
+- `type` (String)
+
+
+### Nested Schema for `success_results.meta`
+
+Read-Only:
+
+- `icon` (String)
+- `title` (String)
+
+## Import
+
+Import is not supported.
diff --git a/examples/resources/elasticstack_kibana_import_saved_objects/resource.tf b/examples/resources/elasticstack_kibana_import_saved_objects/resource.tf
new file mode 100644
index 000000000..20a239cca
--- /dev/null
+++ b/examples/resources/elasticstack_kibana_import_saved_objects/resource.tf
@@ -0,0 +1,11 @@
+provider "elasticstack" {
+ kibana {}
+}
+
+resource "elasticstack_kibana_import_saved_objects" "settings" {
+ overwrite = true
+ file_contents = <<-EOT
+{"attributes":{"buildNum":42747,"defaultIndex":"metricbeat-*","theme:darkMode":true},"coreMigrationVersion":"7.0.0","id":"7.14.0","managed":false,"references":[],"type":"config","typeMigrationVersion":"7.0.0","updated_at":"2021-08-04T02:04:43.306Z","version":"WzY1MiwyXQ=="}
+{"excludedObjects":[],"excludedObjectsCount":0,"exportedCount":1,"missingRefCount":0,"missingReferences":[]}
+EOT
+}
diff --git a/go.mod b/go.mod
index eee00d8e9..87d0e4e8c 100644
--- a/go.mod
+++ b/go.mod
@@ -8,6 +8,8 @@ require (
github.com/elastic/go-elasticsearch/v7 v7.17.10
github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320
github.com/hashicorp/go-version v1.6.0
+ github.com/hashicorp/terraform-plugin-framework v1.4.0
+ github.com/hashicorp/terraform-plugin-framework-validators v0.12.0
github.com/hashicorp/terraform-plugin-go v0.19.0
github.com/hashicorp/terraform-plugin-log v0.9.0
github.com/hashicorp/terraform-plugin-mux v0.12.0
diff --git a/go.sum b/go.sum
index b89a3b9d2..b6a729878 100644
--- a/go.sum
+++ b/go.sum
@@ -80,6 +80,14 @@ github.com/hashicorp/terraform-exec v0.19.0 h1:FpqZ6n50Tk95mItTSS9BjeOVUb4eg81Sp
github.com/hashicorp/terraform-exec v0.19.0/go.mod h1:tbxUpe3JKruE9Cuf65mycSIT8KiNPZ0FkuTE3H4urQg=
github.com/hashicorp/terraform-json v0.17.1 h1:eMfvh/uWggKmY7Pmb3T85u86E2EQg6EQHgyRwf3RkyA=
github.com/hashicorp/terraform-json v0.17.1/go.mod h1:Huy6zt6euxaY9knPAFKjUITn8QxUFIe9VuSzb4zn/0o=
+github.com/hashicorp/terraform-plugin-framework v1.2.0 h1:MZjFFfULnFq8fh04FqrKPcJ/nGpHOvX4buIygT3MSNY=
+github.com/hashicorp/terraform-plugin-framework v1.2.0/go.mod h1:nToI62JylqXDq84weLJ/U3umUsBhZAaTmU0HXIVUOcw=
+github.com/hashicorp/terraform-plugin-framework v1.4.0 h1:WKbtCRtNrjsh10eA7NZvC/Qyr7zp77j+D21aDO5th9c=
+github.com/hashicorp/terraform-plugin-framework v1.4.0/go.mod h1:XC0hPcQbBvlbxwmjxuV/8sn8SbZRg4XwGMs22f+kqV0=
+github.com/hashicorp/terraform-plugin-framework-validators v0.10.0 h1:4L0tmy/8esP6OcvocVymw52lY0HyQ5OxB7VNl7k4bS0=
+github.com/hashicorp/terraform-plugin-framework-validators v0.10.0/go.mod h1:qdQJCdimB9JeX2YwOpItEu+IrfoJjWQ5PhLpAOMDQAE=
+github.com/hashicorp/terraform-plugin-framework-validators v0.12.0 h1:HOjBuMbOEzl7snOdOoUfE2Jgeto6JOjLVQ39Ls2nksc=
+github.com/hashicorp/terraform-plugin-framework-validators v0.12.0/go.mod h1:jfHGE/gzjxYz6XoUwi/aYiiKrJDeutQNUtGQXkaHklg=
github.com/hashicorp/terraform-plugin-go v0.19.0 h1:BuZx/6Cp+lkmiG0cOBk6Zps0Cb2tmqQpDM3iAtnhDQU=
github.com/hashicorp/terraform-plugin-go v0.19.0/go.mod h1:EhRSkEPNoylLQntYsk5KrDHTZJh9HQoumZXbOGOXmec=
github.com/hashicorp/terraform-plugin-log v0.9.0 h1:i7hOA+vdAItN1/7UrfBqBwvYPQ9TFvymaRGZED3FCV0=
diff --git a/internal/clients/api_client.go b/internal/clients/api_client.go
index 7da6d38de..5db6c8e1c 100644
--- a/internal/clients/api_client.go
+++ b/internal/clients/api_client.go
@@ -2,13 +2,10 @@ package clients
import (
"context"
- "crypto/tls"
"encoding/json"
"errors"
"fmt"
"net/http"
- "os"
- "strconv"
"strings"
"github.com/deepmap/oapi-codegen/pkg/securityprovider"
@@ -17,10 +14,12 @@ import (
"github.com/elastic/terraform-provider-elasticstack/generated/alerting"
"github.com/elastic/terraform-provider-elasticstack/generated/connectors"
"github.com/elastic/terraform-provider-elasticstack/generated/slo"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/config"
"github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
"github.com/elastic/terraform-provider-elasticstack/internal/models"
"github.com/elastic/terraform-provider-elasticstack/internal/utils"
"github.com/hashicorp/go-version"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
"github.com/hashicorp/terraform-plugin-log/tflog"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
@@ -74,88 +73,32 @@ type ApiClient struct {
version string
}
-func NewApiClientFunc(version string) func(context.Context, *schema.ResourceData) (interface{}, diag.Diagnostics) {
+func NewApiClientFuncFromSDK(version string) func(context.Context, *schema.ResourceData) (interface{}, diag.Diagnostics) {
return func(ctx context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) {
- return newApiClient(d, version)
+ return newApiClientFromSDK(d, version)
}
}
func NewAcceptanceTestingClient() (*ApiClient, error) {
- ua := buildUserAgent("tf-acceptance-testing")
- baseConfig := BaseConfig{
- UserAgent: ua,
- Header: http.Header{"User-Agent": []string{ua}},
- Username: os.Getenv("ELASTICSEARCH_USERNAME"),
- Password: os.Getenv("ELASTICSEARCH_PASSWORD"),
- }
-
- buildEsAccClient := func() (*elasticsearch.Client, error) {
- config := elasticsearch.Config{
- Header: baseConfig.Header,
- }
-
- if apiKey := os.Getenv("ELASTICSEARCH_API_KEY"); apiKey != "" {
- config.APIKey = apiKey
- } else {
- config.Username = baseConfig.Username
- config.Password = baseConfig.Password
- }
-
- if es := os.Getenv("ELASTICSEARCH_ENDPOINTS"); es != "" {
- endpoints := make([]string, 0)
- for _, e := range strings.Split(es, ",") {
- endpoints = append(endpoints, strings.TrimSpace(e))
- }
- config.Addresses = endpoints
- }
-
- if insecure := os.Getenv("ELASTICSEARCH_INSECURE"); insecure != "" {
- if insecureValue, _ := strconv.ParseBool(insecure); insecureValue {
- tlsClientConfig := ensureTLSClientConfig(&config)
- tlsClientConfig.InsecureSkipVerify = true
- }
- }
-
- return elasticsearch.NewClient(config)
- }
-
- kibanaConfig := kibana.Config{
- Username: baseConfig.Username,
- Password: baseConfig.Password,
- Address: os.Getenv("KIBANA_ENDPOINT"),
- }
- if insecure := os.Getenv("KIBANA_INSECURE"); insecure != "" {
- if insecureValue, _ := strconv.ParseBool(insecure); insecureValue {
- kibanaConfig.DisableVerifySSL = true
- }
- }
+ version := "tf-acceptance-testing"
+ cfg := config.NewFromEnv(version)
- es, err := buildEsAccClient()
+ es, err := elasticsearch.NewClient(*cfg.Elasticsearch)
if err != nil {
return nil, err
}
- kib, err := kibana.NewClient(kibanaConfig)
+ kib, err := kibana.NewClient(*cfg.Kibana)
if err != nil {
return nil, err
}
- actionConnectors, err := buildConnectorsClient(baseConfig, kibanaConfig)
+ actionConnectors, err := buildConnectorsClient(cfg)
if err != nil {
return nil, fmt.Errorf("cannot create Kibana action connectors client: [%w]", err)
}
- fleetCfg := fleet.Config{
- URL: kibanaConfig.Address,
- Username: kibanaConfig.Username,
- Password: kibanaConfig.Password,
- APIKey: os.Getenv("FLEET_API_KEY"),
- Insecure: kibanaConfig.DisableVerifySSL,
- }
- if v := os.Getenv("FLEET_CA_CERTS"); v != "" {
- fleetCfg.CACerts = strings.Split(os.Getenv("FLEET_CA_CERTS"), ",")
- }
- fleetClient, err := fleet.NewClient(fleetCfg)
+ fleetClient, err := fleet.NewClient(*cfg.Fleet)
if err != nil {
return nil, err
}
@@ -163,33 +106,68 @@ func NewAcceptanceTestingClient() (*ApiClient, error) {
return &ApiClient{
elasticsearch: es,
kibana: kib,
- alerting: buildAlertingClient(baseConfig, kibanaConfig).AlertingApi,
- slo: buildSloClient(baseConfig, kibanaConfig).SloAPI,
+ alerting: buildAlertingClient(cfg).AlertingApi,
+ slo: buildSloClient(cfg).SloAPI,
connectors: actionConnectors,
- kibanaConfig: kibanaConfig,
+ kibanaConfig: *cfg.Kibana,
fleet: fleetClient,
- version: "acceptance-testing",
+ version: version,
},
nil
}
-const esConnectionKey string = "elasticsearch_connection"
+func NewApiClientFromFramework(ctx context.Context, cfg config.ProviderConfiguration, version string) (*ApiClient, fwdiags.Diagnostics) {
+ clientCfg, diags := config.NewFromFramework(ctx, cfg, version)
+ if diags.HasError() {
+ return nil, diags
+ }
-func NewApiClient(d *schema.ResourceData, meta interface{}) (*ApiClient, diag.Diagnostics) {
- defaultClient := meta.(*ApiClient)
+ client, err := newApiClientFromConfig(clientCfg, version)
+ if err != nil {
+ return nil, fwdiags.Diagnostics{
+ fwdiags.NewErrorDiagnostic("Failed to create API client", err.Error()),
+ }
+ }
- if _, ok := d.GetOk(esConnectionKey); !ok {
- return defaultClient, nil
+ return client, nil
+}
+
+func ConvertProviderData(providerData any) (*ApiClient, fwdiags.Diagnostics) {
+ var diags fwdiags.Diagnostics
+
+ if providerData == nil {
+ return nil, diags
}
- version := defaultClient.version
- baseConfig := buildBaseConfig(d, version, esConnectionKey)
+ client, ok := providerData.(*ApiClient)
+ if !ok {
+ diags.AddError(
+ "Unexpected Provider Data",
+ fmt.Sprintf("Expected *ApiClient, got: %T. Please report this issue to the provider developers.", providerData),
+ )
- esClient, diags := buildEsClient(d, baseConfig, false, esConnectionKey)
+ return nil, diags
+ }
+ return client, diags
+}
+
+func NewApiClientFromSDKResource(d *schema.ResourceData, meta interface{}) (*ApiClient, diag.Diagnostics) {
+ defaultClient := meta.(*ApiClient)
+ version := defaultClient.version
+ resourceConfig, diags := config.NewFromSDKResource(d, version)
if diags.HasError() {
return nil, diags
}
+ if resourceConfig == nil {
+ return defaultClient, nil
+ }
+
+ esClient, err := buildEsClient(*resourceConfig)
+ if err != nil {
+ return nil, diag.FromErr(err)
+ }
+
return &ApiClient{
elasticsearch: esClient,
elasticsearchClusterInfo: defaultClient.elasticsearchClusterInfo,
@@ -199,16 +177,6 @@ func NewApiClient(d *schema.ResourceData, meta interface{}) (*ApiClient, diag.Di
}, diags
}
-func ensureTLSClientConfig(config *elasticsearch.Config) *tls.Config {
- if config.Transport == nil {
- config.Transport = http.DefaultTransport.(*http.Transport)
- }
- if config.Transport.(*http.Transport).TLSClientConfig == nil {
- config.Transport.(*http.Transport).TLSClientConfig = &tls.Config{}
- }
- return config.Transport.(*http.Transport).TLSClientConfig
-}
-
func (a *ApiClient) GetESClient() (*elasticsearch.Client, error) {
if a.elasticsearch == nil {
return nil, errors.New("elasticsearch client not found")
@@ -344,223 +312,27 @@ func (a *ApiClient) ClusterID(ctx context.Context) (*string, diag.Diagnostics) {
return nil, diags
}
-type BaseConfig struct {
- Username string
- Password string
- UserAgent string
- Header http.Header
-}
-
-// Build base config from ES which can be shared for other resources
-func buildBaseConfig(d *schema.ResourceData, version string, esKey string) BaseConfig {
- baseConfig := BaseConfig{}
- baseConfig.UserAgent = buildUserAgent(version)
- baseConfig.Header = http.Header{"User-Agent": []string{baseConfig.UserAgent}}
-
- if esConn, ok := d.GetOk(esKey); ok {
- if resource := esConn.([]interface{})[0]; resource != nil {
- config := resource.(map[string]interface{})
-
- if username, ok := config["username"]; ok {
- baseConfig.Username = username.(string)
- }
- if password, ok := config["password"]; ok {
- baseConfig.Password = password.(string)
- }
- }
- }
-
- return baseConfig
-}
-
-func buildUserAgent(version string) string {
- return fmt.Sprintf("elasticstack-terraform-provider/%s", version)
-}
-
-func buildEsClient(d *schema.ResourceData, baseConfig BaseConfig, useEnvAsDefault bool, key string) (*elasticsearch.Client, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- esConn, ok := d.GetOk(key)
- if !ok {
- return nil, diags
- }
-
- config := elasticsearch.Config{
- Header: baseConfig.Header,
- Username: baseConfig.Username,
- Password: baseConfig.Password,
+func buildEsClient(cfg config.Client) (*elasticsearch.Client, error) {
+ if cfg.Elasticsearch == nil {
+ return nil, nil
}
- // if defined, then we only have a single entry
- if es := esConn.([]interface{})[0]; es != nil {
- esConfig := es.(map[string]interface{})
-
- if apikey, ok := esConfig["api_key"]; ok {
- config.APIKey = apikey.(string)
- }
-
- if useEnvAsDefault {
- if endpoints := os.Getenv("ELASTICSEARCH_ENDPOINTS"); endpoints != "" {
- var addrs []string
- for _, e := range strings.Split(endpoints, ",") {
- addrs = append(addrs, strings.TrimSpace(e))
- }
- config.Addresses = addrs
- }
- }
-
- if endpoints, ok := esConfig["endpoints"]; ok && len(endpoints.([]interface{})) > 0 {
- var addrs []string
- for _, e := range endpoints.([]interface{}) {
- addrs = append(addrs, e.(string))
- }
- config.Addresses = addrs
- }
-
- if insecure, ok := esConfig["insecure"]; ok && insecure.(bool) {
- tlsClientConfig := ensureTLSClientConfig(&config)
- tlsClientConfig.InsecureSkipVerify = true
- }
-
- if caFile, ok := esConfig["ca_file"]; ok && caFile.(string) != "" {
- caCert, err := os.ReadFile(caFile.(string))
- if err != nil {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to read CA File",
- Detail: err.Error(),
- })
- return nil, diags
- }
- config.CACert = caCert
- }
- if caData, ok := esConfig["ca_data"]; ok && caData.(string) != "" {
- config.CACert = []byte(caData.(string))
- }
-
- if certFile, ok := esConfig["cert_file"]; ok && certFile.(string) != "" {
- if keyFile, ok := esConfig["key_file"]; ok && keyFile.(string) != "" {
- cert, err := tls.LoadX509KeyPair(certFile.(string), keyFile.(string))
- if err != nil {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to read certificate or key file",
- Detail: err.Error(),
- })
- return nil, diags
- }
- tlsClientConfig := ensureTLSClientConfig(&config)
- tlsClientConfig.Certificates = []tls.Certificate{cert}
- } else {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to read key file",
- Detail: "Path to key file has not been configured or is empty",
- })
- return nil, diags
- }
- }
- if certData, ok := esConfig["cert_data"]; ok && certData.(string) != "" {
- if keyData, ok := esConfig["key_data"]; ok && keyData.(string) != "" {
- cert, err := tls.X509KeyPair([]byte(certData.(string)), []byte(keyData.(string)))
- if err != nil {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to parse certificate or key",
- Detail: err.Error(),
- })
- return nil, diags
- }
- tlsClientConfig := ensureTLSClientConfig(&config)
- tlsClientConfig.Certificates = []tls.Certificate{cert}
- } else {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to parse key",
- Detail: "Key data has not been configured or is empty",
- })
- return nil, diags
- }
- }
- }
-
- if logging.IsDebugOrHigher() {
- config.EnableDebugLogger = true
- config.Logger = &debugLogger{Name: "elasticsearch"}
- }
-
- es, err := elasticsearch.NewClient(config)
+ es, err := elasticsearch.NewClient(*cfg.Elasticsearch)
if err != nil {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to create Elasticsearch client",
- Detail: err.Error(),
- })
- return nil, diags
+ return nil, fmt.Errorf("Unable to create Elasticsearch client: %w", err)
}
- return es, diags
+ return es, nil
}
-func buildKibanaConfig(d *schema.ResourceData, baseConfig BaseConfig) (kibana.Config, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- kibConn, ok := d.GetOk("kibana")
- if !ok {
- return kibana.Config{}, diags
- }
-
- // Use ES details by default
- config := kibana.Config{
- Username: baseConfig.Username,
- Password: baseConfig.Password,
- }
-
- // if defined, then we only have a single entry
- if kib := kibConn.([]interface{})[0]; kib != nil {
- kibConfig := kib.(map[string]interface{})
-
- if username := os.Getenv("KIBANA_USERNAME"); username != "" {
- config.Username = strings.TrimSpace(username)
- }
- if password := os.Getenv("KIBANA_PASSWORD"); password != "" {
- config.Password = strings.TrimSpace(password)
- }
- if endpoint := os.Getenv("KIBANA_ENDPOINT"); endpoint != "" {
- config.Address = endpoint
- }
- if insecure := os.Getenv("KIBANA_INSECURE"); insecure != "" {
- if insecureValue, _ := strconv.ParseBool(insecure); insecureValue {
- config.DisableVerifySSL = true
- }
- }
-
- if username, ok := kibConfig["username"]; ok && username != "" {
- config.Username = username.(string)
- }
- if password, ok := kibConfig["password"]; ok && password != "" {
- config.Password = password.(string)
- }
-
- if endpoints, ok := kibConfig["endpoints"]; ok && len(endpoints.([]interface{})) > 0 {
- // We're curently limited by the API to a single endpoint
- if endpoint := endpoints.([]interface{})[0]; endpoint != nil {
- config.Address = endpoint.(string)
- }
- }
-
- if insecure, ok := kibConfig["insecure"]; ok && insecure.(bool) {
- config.DisableVerifySSL = true
- }
+func buildKibanaClient(cfg config.Client) (*kibana.Client, error) {
+ if cfg.Kibana == nil {
+ return nil, nil
}
- return config, nil
-}
-
-func buildKibanaClient(config kibana.Config) (*kibana.Client, diag.Diagnostics) {
- kib, err := kibana.NewClient(config)
+ kib, err := kibana.NewClient(*cfg.Kibana)
if err != nil {
- return nil, diag.FromErr(err)
+ return nil, err
}
if logging.IsDebugOrHigher() {
@@ -570,12 +342,12 @@ func buildKibanaClient(config kibana.Config) (*kibana.Client, diag.Diagnostics)
return kib, nil
}
-func buildAlertingClient(baseConfig BaseConfig, config kibana.Config) *alerting.APIClient {
+func buildAlertingClient(cfg config.Client) *alerting.APIClient {
alertingConfig := alerting.Configuration{
- UserAgent: baseConfig.UserAgent,
+ UserAgent: cfg.UserAgent,
Servers: alerting.ServerConfigurations{
{
- URL: config.Address,
+ URL: cfg.Kibana.Address,
},
},
Debug: logging.IsDebugOrHigher(),
@@ -583,8 +355,8 @@ func buildAlertingClient(baseConfig BaseConfig, config kibana.Config) *alerting.
return alerting.NewAPIClient(&alertingConfig)
}
-func buildConnectorsClient(baseConfig BaseConfig, config kibana.Config) (*connectors.Client, error) {
- basicAuthProvider, err := securityprovider.NewSecurityProviderBasicAuth(config.Username, config.Password)
+func buildConnectorsClient(cfg config.Client) (*connectors.Client, error) {
+ basicAuthProvider, err := securityprovider.NewSecurityProviderBasicAuth(cfg.Kibana.Username, cfg.Kibana.Password)
if err != nil {
return nil, fmt.Errorf("unable to create basic auth provider: %w", err)
}
@@ -598,18 +370,18 @@ func buildConnectorsClient(baseConfig BaseConfig, config kibana.Config) (*connec
}
return connectors.NewClient(
- config.Address,
+ cfg.Kibana.Address,
connectors.WithRequestEditorFn(basicAuthProvider.Intercept),
connectors.WithHTTPClient(httpClient),
)
}
-func buildSloClient(baseConfig BaseConfig, config kibana.Config) *slo.APIClient {
+func buildSloClient(cfg config.Client) *slo.APIClient {
sloConfig := slo.Configuration{
- UserAgent: baseConfig.UserAgent,
+ UserAgent: cfg.UserAgent,
Servers: slo.ServerConfigurations{
{
- URL: config.Address,
+ URL: cfg.Kibana.Address,
},
},
Debug: logging.IsDebugOrHigher(),
@@ -617,127 +389,68 @@ func buildSloClient(baseConfig BaseConfig, config kibana.Config) *slo.APIClient
return slo.NewAPIClient(&sloConfig)
}
-func buildFleetClient(d *schema.ResourceData, kibanaCfg kibana.Config) (*fleet.Client, diag.Diagnostics) {
- var diags diag.Diagnostics
-
- // Order of precedence for config options:
- // 1 (highest): environment variables
- // 2: resource config
- // 3: kibana config
-
- // Set variables from kibana config.
- config := fleet.Config{
- URL: kibanaCfg.Address,
- Username: kibanaCfg.Username,
- Password: kibanaCfg.Password,
- Insecure: kibanaCfg.DisableVerifySSL,
- }
-
- // Set variables from resource config.
- if fleetDataRaw, ok := d.GetOk("fleet"); ok {
- fleetData, ok := fleetDataRaw.([]interface{})[0].(map[string]any)
- if !ok {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to parse Fleet configuration",
- Detail: "Fleet configuration data has not been configured correctly or is empty",
- })
- return nil, diags
- }
- if v, ok := fleetData["endpoint"].(string); ok && v != "" {
- config.URL = v
- }
- if v, ok := fleetData["username"].(string); ok && v != "" {
- config.Username = v
- }
- if v, ok := fleetData["password"].(string); ok && v != "" {
- config.Password = v
- }
- if v, ok := fleetData["api_key"].(string); ok && v != "" {
- config.APIKey = v
- }
- if v, ok := fleetData["ca_certs"].([]interface{}); ok && len(v) > 0 {
- for _, elem := range v {
- if vStr, elemOk := elem.(string); elemOk {
- config.CACerts = append(config.CACerts, vStr)
- }
- }
- }
- if v, ok := fleetData["insecure"].(bool); ok {
- config.Insecure = v
- }
+func buildFleetClient(cfg config.Client) (*fleet.Client, error) {
+ client, err := fleet.NewClient(*cfg.Fleet)
+ if err != nil {
+ return nil, fmt.Errorf("Unable to create Fleet client: %w", err)
}
- if v := os.Getenv("FLEET_ENDPOINT"); v != "" {
- config.URL = v
- }
- if v := os.Getenv("FLEET_USERNAME"); v != "" {
- config.Username = v
- }
- if v := os.Getenv("FLEET_PASSWORD"); v != "" {
- config.Password = v
- }
- if v := os.Getenv("FLEET_API_KEY"); v != "" {
- config.APIKey = v
- }
- if v := os.Getenv("FLEET_CA_CERTS"); v != "" {
- config.CACerts = strings.Split(v, ",")
+ return client, nil
+}
+
+func newApiClientFromSDK(d *schema.ResourceData, version string) (*ApiClient, diag.Diagnostics) {
+ cfg, diags := config.NewFromSDK(d, version)
+ if diags.HasError() {
+ return nil, diags
}
- client, err := fleet.NewClient(config)
+ client, err := newApiClientFromConfig(cfg, version)
if err != nil {
- diags = append(diags, diag.Diagnostic{
- Severity: diag.Error,
- Summary: "Unable to create Fleet client",
- Detail: err.Error(),
- })
- return nil, diags
+ return nil, diag.FromErr(err)
}
- return client, diags
+ return client, nil
}
-const esKey string = "elasticsearch"
-
-func newApiClient(d *schema.ResourceData, version string) (*ApiClient, diag.Diagnostics) {
- baseConfig := buildBaseConfig(d, version, esKey)
- kibanaConfig, diags := buildKibanaConfig(d, baseConfig)
- if diags.HasError() {
- return nil, diags
+func newApiClientFromConfig(cfg config.Client, version string) (*ApiClient, error) {
+ client := &ApiClient{
+ kibanaConfig: *cfg.Kibana,
+ version: version,
}
- esClient, diags := buildEsClient(d, baseConfig, true, esKey)
- if diags.HasError() {
- return nil, diags
+ if cfg.Elasticsearch != nil {
+ esClient, err := buildEsClient(cfg)
+ if err != nil {
+ return nil, err
+ }
+ client.elasticsearch = esClient
}
- kibanaClient, diags := buildKibanaClient(kibanaConfig)
- if diags.HasError() {
- return nil, diags
- }
+ if cfg.Kibana != nil {
+ kibanaClient, err := buildKibanaClient(cfg)
+ if err != nil {
+ return nil, err
+ }
- alertingClient := buildAlertingClient(baseConfig, kibanaConfig)
- sloClient := buildSloClient(baseConfig, kibanaConfig)
+ connectorsClient, err := buildConnectorsClient(cfg)
+ if err != nil {
+ return nil, fmt.Errorf("cannot create Kibana connectors client: [%w]", err)
+ }
- connectorsClient, err := buildConnectorsClient(baseConfig, kibanaConfig)
- if err != nil {
- return nil, diag.FromErr(fmt.Errorf("cannot create Kibana connectors client: [%w]", err))
+ client.kibana = kibanaClient
+ client.alerting = buildAlertingClient(cfg).AlertingApi
+ client.slo = buildSloClient(cfg).SloAPI
+ client.connectors = connectorsClient
}
- fleetClient, diags := buildFleetClient(d, kibanaConfig)
- if diags.HasError() {
- return nil, diags
+ if cfg.Fleet != nil {
+ fleetClient, err := buildFleetClient(cfg)
+ if err != nil {
+ return nil, err
+ }
+
+ client.fleet = fleetClient
}
- return &ApiClient{
- elasticsearch: esClient,
- elasticsearchClusterInfo: nil,
- kibana: kibanaClient,
- kibanaConfig: kibanaConfig,
- alerting: alertingClient.AlertingApi,
- connectors: connectorsClient,
- slo: sloClient.SloAPI,
- fleet: fleetClient,
- version: version,
- }, nil
+ return client, nil
}
diff --git a/internal/clients/config/base.go b/internal/clients/config/base.go
new file mode 100644
index 000000000..89e2f3b45
--- /dev/null
+++ b/internal/clients/config/base.go
@@ -0,0 +1,97 @@
+package config
+
+import (
+ "fmt"
+ "net/http"
+ "os"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+type baseConfig struct {
+ Username string
+ Password string
+ ApiKey string
+ UserAgent string
+ Header http.Header
+}
+
+func newBaseConfigFromSDK(d *schema.ResourceData, version string, esKey string) baseConfig {
+ userAgent := buildUserAgent(version)
+ baseConfig := baseConfig{
+ UserAgent: userAgent,
+ Header: http.Header{"User-Agent": []string{userAgent}},
+ }
+
+ if esConn, ok := d.GetOk(esKey); ok {
+ if resource := esConn.([]interface{})[0]; resource != nil {
+ config := resource.(map[string]interface{})
+
+ if apiKey, ok := config["api_key"]; ok && apiKey != "" {
+ baseConfig.ApiKey = apiKey.(string)
+ } else {
+ if username, ok := config["username"]; ok {
+ baseConfig.Username = username.(string)
+ }
+ if password, ok := config["password"]; ok {
+ baseConfig.Password = password.(string)
+ }
+ }
+ }
+ }
+
+ return baseConfig.withEnvironmentOverrides()
+}
+
+func newBaseConfigFromFramework(config ProviderConfiguration, version string) baseConfig {
+ userAgent := buildUserAgent(version)
+ baseConfig := baseConfig{
+ UserAgent: userAgent,
+ Header: http.Header{"User-Agent": []string{userAgent}},
+ }
+
+ if len(config.Elasticsearch) > 0 {
+ esConfig := config.Elasticsearch[0]
+ baseConfig.Username = esConfig.Username.ValueString()
+ baseConfig.Password = esConfig.Password.ValueString()
+ baseConfig.ApiKey = esConfig.APIKey.ValueString()
+ }
+
+ return baseConfig.withEnvironmentOverrides()
+}
+
+func (b baseConfig) withEnvironmentOverrides() baseConfig {
+ b.Username = withEnvironmentOverride(b.Username, "ELASTICSEARCH_USERNAME")
+ b.Password = withEnvironmentOverride(b.Password, "ELASTICSEARCH_PASSWORD")
+ b.ApiKey = withEnvironmentOverride(b.ApiKey, "ELASTICSEARCH_API_KEY")
+
+ return b
+}
+
+func (b baseConfig) toKibanaConfig() kibanaConfig {
+ return kibanaConfig{
+ Username: b.Username,
+ Password: b.Password,
+ }
+}
+
+func (b baseConfig) toElasticsearchConfig() elasticsearchConfig {
+ return elasticsearchConfig{
+ Header: b.Header,
+ Username: b.Username,
+ Password: b.Password,
+ APIKey: b.ApiKey,
+ }
+}
+
+func withEnvironmentOverride(currentValue, envOverrideKey string) string {
+ if envValue, ok := os.LookupEnv(envOverrideKey); ok {
+ return envValue
+ }
+
+ return currentValue
+}
+
+func buildUserAgent(version string) string {
+ return fmt.Sprintf("elasticstack-terraform-provider/%s", version)
+}
diff --git a/internal/clients/config/base_test.go b/internal/clients/config/base_test.go
new file mode 100644
index 000000000..e78f275a5
--- /dev/null
+++ b/internal/clients/config/base_test.go
@@ -0,0 +1,89 @@
+package config
+
+import (
+ "net/http"
+ "os"
+ "testing"
+
+ providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/stretchr/testify/require"
+)
+
+func TestNewBaseConfigFromSDK(t *testing.T) {
+ os.Unsetenv("ELASTICSEARCH_USERNAME")
+ os.Unsetenv("ELASTICSEARCH_PASSWORD")
+ os.Unsetenv("ELASTICSEARCH_API_KEY")
+
+ rd := schema.TestResourceDataRaw(t, map[string]*schema.Schema{
+ "elasticsearch": providerSchema.GetEsConnectionSchema("elasticsearch", true),
+ }, map[string]interface{}{
+ "elasticsearch": []interface{}{
+ map[string]interface{}{
+ "username": "elastic",
+ "password": "changeme",
+ },
+ },
+ })
+
+ baseCfg := newBaseConfigFromSDK(rd, "unit-testing", "elasticsearch")
+ ua := "elasticstack-terraform-provider/unit-testing"
+ require.Equal(t, baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ UserAgent: ua,
+ Header: http.Header{"User-Agent": []string{ua}},
+ }, baseCfg)
+}
+
+func TestNewBaseConfigFromFramework(t *testing.T) {
+ os.Unsetenv("ELASTICSEARCH_USERNAME")
+ os.Unsetenv("ELASTICSEARCH_PASSWORD")
+ os.Unsetenv("ELASTICSEARCH_API_KEY")
+
+ expectedUA := "elasticstack-terraform-provider/unit-testing"
+
+ tests := []struct {
+ name string
+ config ProviderConfiguration
+ expectedBaseConfig baseConfig
+ }{
+ {
+ name: "with es config defined",
+ config: ProviderConfiguration{
+ Elasticsearch: []ElasticsearchConnection{
+ {
+ Username: types.StringValue("elastic"),
+ Password: types.StringValue("changeme"),
+ APIKey: types.StringValue("apikey"),
+ },
+ },
+ },
+ expectedBaseConfig: baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ ApiKey: "apikey",
+ UserAgent: expectedUA,
+ Header: http.Header{"User-Agent": []string{expectedUA}},
+ },
+ },
+ {
+ name: "should not set credentials if no configuration available",
+ config: ProviderConfiguration{
+ Elasticsearch: []ElasticsearchConnection{},
+ },
+ expectedBaseConfig: baseConfig{
+ UserAgent: expectedUA,
+ Header: http.Header{"User-Agent": []string{expectedUA}},
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ baseCfg := newBaseConfigFromFramework(tt.config, "unit-testing")
+ require.Equal(t, tt.expectedBaseConfig, baseCfg)
+ })
+ }
+}
diff --git a/internal/clients/config/client.go b/internal/clients/config/client.go
new file mode 100644
index 000000000..83723d840
--- /dev/null
+++ b/internal/clients/config/client.go
@@ -0,0 +1,14 @@
+package config
+
+import (
+ "github.com/disaster37/go-kibana-rest/v8"
+ "github.com/elastic/go-elasticsearch/v7"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
+)
+
+type Client struct {
+ UserAgent string
+ Kibana *kibana.Config
+ Elasticsearch *elasticsearch.Config
+ Fleet *fleet.Config
+}
diff --git a/internal/clients/debug.go b/internal/clients/config/debug.go
similarity index 99%
rename from internal/clients/debug.go
rename to internal/clients/config/debug.go
index 93633b8fa..6c69ff2ff 100644
--- a/internal/clients/debug.go
+++ b/internal/clients/config/debug.go
@@ -1,4 +1,4 @@
-package clients
+package config
import (
"context"
diff --git a/internal/clients/config/elasticsearch.go b/internal/clients/config/elasticsearch.go
new file mode 100644
index 000000000..a47f0a527
--- /dev/null
+++ b/internal/clients/config/elasticsearch.go
@@ -0,0 +1,217 @@
+package config
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/elastic/go-elasticsearch/v7"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/logging"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+type elasticsearchConfig elasticsearch.Config
+
+func newElasticsearchConfigFromSDK(d *schema.ResourceData, base baseConfig, key string, useEnvAsDefault bool) (*elasticsearchConfig, sdkdiags.Diagnostics) {
+ esConn, ok := d.GetOk(key)
+ if !ok {
+ return nil, nil
+ }
+
+ var diags sdkdiags.Diagnostics
+ config := base.toElasticsearchConfig()
+
+ // if defined, then we only have a single entry
+ if es := esConn.([]interface{})[0]; es != nil {
+ esConfig := es.(map[string]interface{})
+
+ if endpoints, ok := esConfig["endpoints"]; ok && len(endpoints.([]interface{})) > 0 {
+ var addrs []string
+ for _, e := range endpoints.([]interface{}) {
+ addrs = append(addrs, e.(string))
+ }
+ config.Addresses = addrs
+ }
+
+ if insecure, ok := esConfig["insecure"]; ok && insecure.(bool) {
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.InsecureSkipVerify = true
+ }
+
+ if caFile, ok := esConfig["ca_file"]; ok && caFile.(string) != "" {
+ caCert, err := os.ReadFile(caFile.(string))
+ if err != nil {
+ diags = append(diags, sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to read CA File",
+ Detail: err.Error(),
+ })
+ return nil, diags
+ }
+ config.CACert = caCert
+ }
+ if caData, ok := esConfig["ca_data"]; ok && caData.(string) != "" {
+ config.CACert = []byte(caData.(string))
+ }
+
+ if certFile, ok := esConfig["cert_file"]; ok && certFile.(string) != "" {
+ if keyFile, ok := esConfig["key_file"]; ok && keyFile.(string) != "" {
+ cert, err := tls.LoadX509KeyPair(certFile.(string), keyFile.(string))
+ if err != nil {
+ diags = append(diags, sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to read certificate or key file",
+ Detail: err.Error(),
+ })
+ return nil, diags
+ }
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.Certificates = []tls.Certificate{cert}
+ } else {
+ diags = append(diags, sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to read key file",
+ Detail: "Path to key file has not been configured or is empty",
+ })
+ return nil, diags
+ }
+ }
+ if certData, ok := esConfig["cert_data"]; ok && certData.(string) != "" {
+ if keyData, ok := esConfig["key_data"]; ok && keyData.(string) != "" {
+ cert, err := tls.X509KeyPair([]byte(certData.(string)), []byte(keyData.(string)))
+ if err != nil {
+ diags = append(diags, sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to parse certificate or key",
+ Detail: err.Error(),
+ })
+ return nil, diags
+ }
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.Certificates = []tls.Certificate{cert}
+ } else {
+ diags = append(diags, sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to parse key",
+ Detail: "Key data has not been configured or is empty",
+ })
+ return nil, diags
+ }
+ }
+ }
+
+ if logging.IsDebugOrHigher() {
+ config.EnableDebugLogger = true
+ config.Logger = &debugLogger{Name: "elasticsearch"}
+ }
+
+ config = config.withEnvironmentOverrides()
+ return &config, nil
+}
+
+func newElasticsearchConfigFromFramework(ctx context.Context, cfg ProviderConfiguration, base baseConfig) (*elasticsearchConfig, fwdiags.Diagnostics) {
+ if len(cfg.Elasticsearch) == 0 {
+ return nil, nil
+ }
+
+ config := base.toElasticsearchConfig()
+ esConfig := cfg.Elasticsearch[0]
+
+ var endpoints []string
+ diags := esConfig.Endpoints.ElementsAs(ctx, &endpoints, true)
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ if len(endpoints) > 0 {
+ config.Addresses = endpoints
+ }
+
+ if esConfig.Insecure.ValueBool() {
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.InsecureSkipVerify = true
+ }
+
+ if caFile := esConfig.CAFile.ValueString(); caFile != "" {
+ caCert, err := os.ReadFile(caFile)
+ if err != nil {
+ diags.Append(fwdiags.NewErrorDiagnostic("Unable to read CA file", err.Error()))
+ return nil, diags
+ }
+ config.CACert = caCert
+ }
+ if caData := esConfig.CAData.ValueString(); caData != "" {
+ config.CACert = []byte(caData)
+ }
+
+ if certFile := esConfig.CertFile.ValueString(); certFile != "" {
+ if keyFile := esConfig.KeyFile.ValueString(); keyFile != "" {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ diags.Append(fwdiags.NewErrorDiagnostic("Unable to read certificate or key file", err.Error()))
+ return nil, diags
+ }
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.Certificates = []tls.Certificate{cert}
+ } else {
+ diags.Append(fwdiags.NewErrorDiagnostic("Unable to read key file", "Path to key file has not been configured or is empty"))
+ return nil, diags
+ }
+ }
+ if certData := esConfig.CertData.ValueString(); certData != "" {
+ if keyData := esConfig.KeyData.ValueString(); keyData != "" {
+ cert, err := tls.X509KeyPair([]byte(certData), []byte(keyData))
+ if err != nil {
+ diags.Append(fwdiags.NewErrorDiagnostic("Unable to parse certificate or key", err.Error()))
+ return nil, diags
+ }
+ tlsClientConfig := config.ensureTLSClientConfig()
+ tlsClientConfig.Certificates = []tls.Certificate{cert}
+ } else {
+ diags.Append(fwdiags.NewErrorDiagnostic("Unable to parse key", "Key data has not been configured or is empty"))
+ return nil, diags
+ }
+ }
+
+ if logging.IsDebugOrHigher() {
+ config.EnableDebugLogger = true
+ config.Logger = &debugLogger{Name: "elasticsearch"}
+ }
+
+ config = config.withEnvironmentOverrides()
+ return &config, nil
+}
+
+func (c *elasticsearchConfig) ensureTLSClientConfig() *tls.Config {
+ if c.Transport == nil {
+		c.Transport = http.DefaultTransport.(*http.Transport).Clone()
+ }
+ if c.Transport.(*http.Transport).TLSClientConfig == nil {
+ c.Transport.(*http.Transport).TLSClientConfig = &tls.Config{}
+ }
+ return c.Transport.(*http.Transport).TLSClientConfig
+}
+
+func (c elasticsearchConfig) withEnvironmentOverrides() elasticsearchConfig {
+ if endpointsCSV, ok := os.LookupEnv("ELASTICSEARCH_ENDPOINTS"); ok {
+ endpoints := make([]string, 0)
+ for _, e := range strings.Split(endpointsCSV, ",") {
+ endpoints = append(endpoints, strings.TrimSpace(e))
+ }
+ c.Addresses = endpoints
+ }
+
+ if insecure, ok := os.LookupEnv("ELASTICSEARCH_INSECURE"); ok {
+		if insecureValue, err := strconv.ParseBool(insecure); err == nil {
+ tlsClientConfig := c.ensureTLSClientConfig()
+ tlsClientConfig.InsecureSkipVerify = insecureValue
+ }
+ }
+
+ return c
+}
diff --git a/internal/clients/config/elasticsearch_test.go b/internal/clients/config/elasticsearch_test.go
new file mode 100644
index 000000000..cd93bfd1a
--- /dev/null
+++ b/internal/clients/config/elasticsearch_test.go
@@ -0,0 +1,229 @@
+package config
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+
+ providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_newElasticsearchConfigFromSDK(t *testing.T) {
+ type args struct {
+ resourceData map[string]interface{}
+ base baseConfig
+ env map[string]string
+ expectedESConfig *elasticsearchConfig
+ expectedDiags sdkdiags.Diagnostics
+ }
+ tests := []struct {
+ name string
+ args func(string) args
+ }{
+ {
+ name: "should return nil if no config is specified",
+ args: func(key string) args {
+ return args{}
+ },
+ },
+ {
+ name: "should use the options set in config",
+ args: func(key string) args {
+ base := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ config := base.toElasticsearchConfig()
+ config.Addresses = []string{"localhost", "example.com"}
+ tlsConfig := config.ensureTLSClientConfig()
+ tlsConfig.InsecureSkipVerify = true
+
+ return args{
+ resourceData: map[string]interface{}{
+ key: []interface{}{
+ map[string]interface{}{
+ "endpoints": []interface{}{"localhost", "example.com"},
+ "insecure": true,
+ },
+ },
+ },
+ base: base,
+ expectedESConfig: &config,
+ }
+ },
+ },
+ {
+ name: "should prefer config defined in environment variables",
+ args: func(key string) args {
+ base := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ config := base.toElasticsearchConfig()
+ config.Addresses = []string{"127.0.0.1", "example.com/elastic"}
+ tlsConfig := config.ensureTLSClientConfig()
+ tlsConfig.InsecureSkipVerify = false
+
+ return args{
+ resourceData: map[string]interface{}{
+ key: []interface{}{
+ map[string]interface{}{
+ "endpoints": []interface{}{"localhost", "example.com"},
+ "insecure": true,
+ },
+ },
+ },
+ env: map[string]string{
+ "ELASTICSEARCH_ENDPOINTS": "127.0.0.1,example.com/elastic",
+ "ELASTICSEARCH_INSECURE": "false",
+ },
+ base: base,
+ expectedESConfig: &config,
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("ELASTICSEARCH_ENDPOINTS")
+ os.Unsetenv("ELASTICSEARCH_INSECURE")
+
+ key := "elasticsearch"
+ args := tt.args(key)
+ rd := schema.TestResourceDataRaw(t, map[string]*schema.Schema{
+ key: providerSchema.GetEsConnectionSchema(key, true),
+ }, args.resourceData)
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ esConfig, diags := newElasticsearchConfigFromSDK(rd, args.base, key, false)
+
+ require.Equal(t, args.expectedESConfig, esConfig)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
+
+func Test_newElasticsearchConfigFromFramework(t *testing.T) {
+ type args struct {
+ providerConfig ProviderConfiguration
+ base baseConfig
+ env map[string]string
+ expectedESConfig *elasticsearchConfig
+ expectedDiags fwdiags.Diagnostics
+ }
+ tests := []struct {
+ name string
+ args func() args
+ }{
+ {
+ name: "should return nil if no config is specified",
+ args: func() args {
+ return args{
+ providerConfig: ProviderConfiguration{},
+ }
+ },
+ },
+ {
+ name: "should use the options set in config",
+ args: func() args {
+ base := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ config := base.toElasticsearchConfig()
+ config.Addresses = []string{"localhost", "example.com"}
+ tlsConfig := config.ensureTLSClientConfig()
+ tlsConfig.InsecureSkipVerify = true
+
+ return args{
+ providerConfig: ProviderConfiguration{
+ Elasticsearch: []ElasticsearchConnection{
+ {
+ Endpoints: basetypes.NewListValueMust(
+ basetypes.StringType{},
+ []attr.Value{
+ basetypes.NewStringValue("localhost"),
+ basetypes.NewStringValue("example.com"),
+ },
+ ),
+ Insecure: basetypes.NewBoolPointerValue(utils.Pointer(true)),
+ },
+ },
+ },
+ base: base,
+ expectedESConfig: &config,
+ }
+ },
+ },
+ {
+ name: "should prefer config defined in environment variables",
+ args: func() args {
+ base := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ config := base.toElasticsearchConfig()
+ config.Addresses = []string{"127.0.0.1", "example.com/elastic"}
+ tlsConfig := config.ensureTLSClientConfig()
+ tlsConfig.InsecureSkipVerify = false
+
+ return args{
+ providerConfig: ProviderConfiguration{
+ Elasticsearch: []ElasticsearchConnection{
+ {
+ Endpoints: basetypes.NewListValueMust(
+ basetypes.StringType{},
+ []attr.Value{
+ basetypes.NewStringValue("localhost"),
+ basetypes.NewStringValue("example.com"),
+ },
+ ),
+ Insecure: basetypes.NewBoolPointerValue(utils.Pointer(true)),
+ },
+ },
+ },
+ env: map[string]string{
+ "ELASTICSEARCH_ENDPOINTS": "127.0.0.1,example.com/elastic",
+ "ELASTICSEARCH_INSECURE": "false",
+ },
+ base: base,
+ expectedESConfig: &config,
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("ELASTICSEARCH_ENDPOINTS")
+ os.Unsetenv("ELASTICSEARCH_INSECURE")
+
+ args := tt.args()
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ esConfig, diags := newElasticsearchConfigFromFramework(context.Background(), args.providerConfig, args.base)
+
+ require.Equal(t, args.expectedESConfig, esConfig)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
diff --git a/internal/clients/config/env.go b/internal/clients/config/env.go
new file mode 100644
index 000000000..a01845350
--- /dev/null
+++ b/internal/clients/config/env.go
@@ -0,0 +1,29 @@
+package config
+
+import (
+ "github.com/disaster37/go-kibana-rest/v8"
+ "github.com/elastic/go-elasticsearch/v7"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+)
+
+func NewFromEnv(version string) Client {
+ base := baseConfig{
+ UserAgent: buildUserAgent(version),
+ }.withEnvironmentOverrides()
+
+ client := Client{
+ UserAgent: base.UserAgent,
+ }
+
+ esCfg := base.toElasticsearchConfig().withEnvironmentOverrides()
+ client.Elasticsearch = utils.Pointer(elasticsearch.Config(esCfg))
+
+ kibanaCfg := base.toKibanaConfig().withEnvironmentOverrides()
+ client.Kibana = (*kibana.Config)(&kibanaCfg)
+
+ fleetCfg := kibanaCfg.toFleetConfig().withEnvironmentOverrides()
+ client.Fleet = (*fleet.Config)(&fleetCfg)
+
+ return client
+}
diff --git a/internal/clients/config/fleet.go b/internal/clients/config/fleet.go
new file mode 100644
index 000000000..58b46fc51
--- /dev/null
+++ b/internal/clients/config/fleet.go
@@ -0,0 +1,113 @@
+package config
+
+import (
+ "context"
+ "os"
+ "strings"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+type fleetConfig fleet.Config
+
+func newFleetConfigFromSDK(d *schema.ResourceData, kibanaCfg kibanaConfig) (fleetConfig, sdkdiags.Diagnostics) {
+ config := kibanaCfg.toFleetConfig()
+
+ // Set variables from resource config.
+ if fleetDataRaw, ok := d.GetOk("fleet"); ok {
+ fleetData, ok := fleetDataRaw.([]interface{})[0].(map[string]any)
+ if !ok {
+ diags := sdkdiags.Diagnostics{
+ sdkdiags.Diagnostic{
+ Severity: sdkdiags.Error,
+ Summary: "Unable to parse Fleet configuration",
+ Detail: "Fleet configuration data has not been configured correctly or is empty",
+ },
+ }
+ return fleetConfig{}, diags
+ }
+ if v, ok := fleetData["endpoint"].(string); ok && v != "" {
+ config.URL = v
+ }
+ if v, ok := fleetData["username"].(string); ok && v != "" {
+ config.Username = v
+ }
+ if v, ok := fleetData["password"].(string); ok && v != "" {
+ config.Password = v
+ }
+ if v, ok := fleetData["api_key"].(string); ok && v != "" {
+ config.APIKey = v
+ }
+ if v, ok := fleetData["ca_certs"].([]interface{}); ok && len(v) > 0 {
+ for _, elem := range v {
+ if vStr, elemOk := elem.(string); elemOk {
+ config.CACerts = append(config.CACerts, vStr)
+ }
+ }
+ }
+ if v, ok := fleetData["insecure"].(bool); ok {
+ config.Insecure = v
+ }
+ }
+
+ return config.withEnvironmentOverrides(), nil
+}
+
+func newFleetConfigFromFramework(ctx context.Context, cfg ProviderConfiguration, kibanaCfg kibanaConfig) (fleetConfig, fwdiags.Diagnostics) {
+ config := kibanaCfg.toFleetConfig()
+
+ if len(cfg.Fleet) > 0 {
+ fleetCfg := cfg.Fleet[0]
+ if fleetCfg.Username.ValueString() != "" {
+ config.Username = fleetCfg.Username.ValueString()
+ }
+ if fleetCfg.Password.ValueString() != "" {
+ config.Password = fleetCfg.Password.ValueString()
+ }
+ if fleetCfg.Endpoint.ValueString() != "" {
+ config.URL = fleetCfg.Endpoint.ValueString()
+ }
+ if fleetCfg.APIKey.ValueString() != "" {
+ config.APIKey = fleetCfg.APIKey.ValueString()
+ }
+
+ if !fleetCfg.Insecure.IsNull() && !fleetCfg.Insecure.IsUnknown() {
+ config.Insecure = fleetCfg.Insecure.ValueBool()
+ }
+
+ var caCerts []string
+ diags := fleetCfg.CACerts.ElementsAs(ctx, &caCerts, true)
+ if diags.HasError() {
+ return fleetConfig{}, diags
+ }
+
+ if len(caCerts) > 0 {
+ config.CACerts = caCerts
+ }
+ }
+
+ return config.withEnvironmentOverrides(), nil
+}
+
+func (c fleetConfig) withEnvironmentOverrides() fleetConfig {
+ if v, ok := os.LookupEnv("FLEET_ENDPOINT"); ok {
+ c.URL = v
+ }
+ if v, ok := os.LookupEnv("FLEET_USERNAME"); ok {
+ c.Username = v
+ }
+ if v, ok := os.LookupEnv("FLEET_PASSWORD"); ok {
+ c.Password = v
+ }
+ if v, ok := os.LookupEnv("FLEET_API_KEY"); ok {
+ c.APIKey = v
+ }
+ if v, ok := os.LookupEnv("FLEET_CA_CERTS"); ok {
+ c.CACerts = strings.Split(v, ",")
+ }
+
+ return c
+}
diff --git a/internal/clients/config/fleet_test.go b/internal/clients/config/fleet_test.go
new file mode 100644
index 000000000..decc53ff8
--- /dev/null
+++ b/internal/clients/config/fleet_test.go
@@ -0,0 +1,284 @@
+package config
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_newFleetConfigFromSDK(t *testing.T) {
+ type args struct {
+ kibanaCfg kibanaConfig
+ resourceData map[string]interface{}
+ expectedConfig fleetConfig
+ expectedDiags sdkdiags.Diagnostics
+ env map[string]string
+ }
+ tests := []struct {
+ name string
+ args func() args
+ }{
+ {
+ name: "should return kibana config if no fleet config defined",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ resourceData: map[string]interface{}{},
+ expectedConfig: kibanaCfg.toFleetConfig(),
+ }
+ },
+ },
+ {
+			name: "should use the provided config options",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ resourceData: map[string]interface{}{
+ "fleet": []interface{}{
+ map[string]interface{}{
+ "endpoint": "example.com/fleet",
+ "username": "fleet",
+ "password": "baltic",
+ "api_key": "leviosa",
+ "ca_certs": []interface{}{"internal", "lets_decrypt"},
+ "insecure": false,
+ },
+ },
+ },
+ expectedConfig: fleetConfig{
+ URL: "example.com/fleet",
+ Username: "fleet",
+ Password: "baltic",
+ APIKey: "leviosa",
+ CACerts: []string{"internal", "lets_decrypt"},
+ Insecure: false,
+ },
+ }
+ },
+ },
+ {
+ name: "should prefer environment variables",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ resourceData: map[string]interface{}{
+ "fleet": []interface{}{
+ map[string]interface{}{
+ "endpoint": "example.com/fleet",
+ "username": "fleet",
+ "password": "baltic",
+ "api_key": "leviosa",
+ "ca_certs": []interface{}{"internal", "lets_decrypt"},
+ "insecure": false,
+ },
+ },
+ },
+ env: map[string]string{
+ "FLEET_ENDPOINT": "example.com/black_sea_fleet",
+ "FLEET_USERNAME": "black_sea",
+ "FLEET_PASSWORD": "fleet",
+ "FLEET_API_KEY": "stupefy",
+ "FLEET_CA_CERTS": "black,sea",
+ },
+ expectedConfig: fleetConfig{
+ URL: "example.com/black_sea_fleet",
+ Username: "black_sea",
+ Password: "fleet",
+ APIKey: "stupefy",
+ CACerts: []string{"black", "sea"},
+ Insecure: false,
+ },
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("FLEET_ENDPOINT")
+ os.Unsetenv("FLEET_USERNAME")
+ os.Unsetenv("FLEET_PASSWORD")
+ os.Unsetenv("FLEET_API_KEY")
+ os.Unsetenv("FLEET_CA_CERTS")
+
+ args := tt.args()
+ rd := schema.TestResourceDataRaw(t, map[string]*schema.Schema{
+ "fleet": providerSchema.GetFleetConnectionSchema(),
+ }, args.resourceData)
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ fleetConfig, diags := newFleetConfigFromSDK(rd, args.kibanaCfg)
+
+ require.Equal(t, args.expectedConfig, fleetConfig)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
+
+func Test_newFleetConfigFromFramework(t *testing.T) {
+ type args struct {
+ kibanaCfg kibanaConfig
+ providerConfig ProviderConfiguration
+ expectedConfig fleetConfig
+ expectedDiags fwdiags.Diagnostics
+ env map[string]string
+ }
+ tests := []struct {
+ name string
+ args func() args
+ }{
+ {
+ name: "should return kibana config if no fleet config defined",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ providerConfig: ProviderConfiguration{},
+ expectedConfig: kibanaCfg.toFleetConfig(),
+ }
+ },
+ },
+ {
+			name: "should use the provided config options",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ providerConfig: ProviderConfiguration{
+ Fleet: []FleetConnection{
+ {
+ Username: types.StringValue("fleet"),
+ Password: types.StringValue("baltic"),
+ Endpoint: types.StringValue("example.com/fleet"),
+ APIKey: types.StringValue("leviosa"),
+ CACerts: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("internal"),
+ types.StringValue("lets_decrypt"),
+ }),
+ Insecure: types.BoolValue(false),
+ },
+ },
+ },
+ expectedConfig: fleetConfig{
+ URL: "example.com/fleet",
+ Username: "fleet",
+ Password: "baltic",
+ APIKey: "leviosa",
+ CACerts: []string{"internal", "lets_decrypt"},
+ Insecure: false,
+ },
+ }
+ },
+ },
+ {
+ name: "should prefer environment variables",
+ args: func() args {
+ kibanaCfg := kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "elastic",
+ Password: "changeme",
+ DisableVerifySSL: true,
+ }
+
+ return args{
+ kibanaCfg: kibanaCfg,
+ providerConfig: ProviderConfiguration{
+ Fleet: []FleetConnection{
+ {
+ Username: types.StringValue("fleet"),
+ Password: types.StringValue("baltic"),
+ Endpoint: types.StringValue("example.com/fleet"),
+ APIKey: types.StringValue("leviosa"),
+ CACerts: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("internal"),
+ types.StringValue("lets_decrypt"),
+ }),
+ Insecure: types.BoolValue(false),
+ },
+ },
+ },
+ env: map[string]string{
+ "FLEET_ENDPOINT": "example.com/black_sea_fleet",
+ "FLEET_USERNAME": "black_sea",
+ "FLEET_PASSWORD": "fleet",
+ "FLEET_API_KEY": "stupefy",
+ "FLEET_CA_CERTS": "black,sea",
+ },
+ expectedConfig: fleetConfig{
+ URL: "example.com/black_sea_fleet",
+ Username: "black_sea",
+ Password: "fleet",
+ APIKey: "stupefy",
+ CACerts: []string{"black", "sea"},
+ Insecure: false,
+ },
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("FLEET_ENDPOINT")
+ os.Unsetenv("FLEET_USERNAME")
+ os.Unsetenv("FLEET_PASSWORD")
+ os.Unsetenv("FLEET_API_KEY")
+ os.Unsetenv("FLEET_CA_CERTS")
+
+ args := tt.args()
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ fleetConfig, diags := newFleetConfigFromFramework(context.Background(), args.providerConfig, args.kibanaCfg)
+
+ require.Equal(t, args.expectedConfig, fleetConfig)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
diff --git a/internal/clients/config/framework.go b/internal/clients/config/framework.go
new file mode 100644
index 000000000..87540e97f
--- /dev/null
+++ b/internal/clients/config/framework.go
@@ -0,0 +1,43 @@
+package config
+
+import (
+ "context"
+
+ "github.com/disaster37/go-kibana-rest/v8"
+ "github.com/elastic/go-elasticsearch/v7"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+)
+
+func NewFromFramework(ctx context.Context, cfg ProviderConfiguration, version string) (Client, diag.Diagnostics) {
+ base := newBaseConfigFromFramework(cfg, version)
+ client := Client{
+ UserAgent: base.UserAgent,
+ }
+
+ esCfg, diags := newElasticsearchConfigFromFramework(ctx, cfg, base)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ if esCfg != nil {
+ client.Elasticsearch = utils.Pointer(elasticsearch.Config(*esCfg))
+ }
+
+ kibanaCfg, diags := newKibanaConfigFromFramework(ctx, cfg, base)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ client.Kibana = (*kibana.Config)(&kibanaCfg)
+
+ fleetCfg, diags := newFleetConfigFromFramework(ctx, cfg, kibanaCfg)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ client.Fleet = (*fleet.Config)(&fleetCfg)
+
+ return client, nil
+}
diff --git a/internal/clients/config/kibana.go b/internal/clients/config/kibana.go
new file mode 100644
index 000000000..b0f80c806
--- /dev/null
+++ b/internal/clients/config/kibana.go
@@ -0,0 +1,100 @@
+package config
+
+import (
+ "context"
+ "os"
+ "strconv"
+
+ "github.com/disaster37/go-kibana-rest/v8"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+type kibanaConfig kibana.Config
+
+func newKibanaConfigFromSDK(d *schema.ResourceData, base baseConfig) (kibanaConfig, sdkdiags.Diagnostics) {
+ var diags sdkdiags.Diagnostics
+
+ // Use ES details by default
+ config := base.toKibanaConfig()
+ kibConn, ok := d.GetOk("kibana")
+ if !ok {
+ return config, diags
+ }
+
+ // if defined, then we only have a single entry
+ if kib := kibConn.([]interface{})[0]; kib != nil {
+ kibConfig := kib.(map[string]interface{})
+
+ if username, ok := kibConfig["username"]; ok && username != "" {
+ config.Username = username.(string)
+ }
+ if password, ok := kibConfig["password"]; ok && password != "" {
+ config.Password = password.(string)
+ }
+
+ if endpoints, ok := kibConfig["endpoints"]; ok && len(endpoints.([]interface{})) > 0 {
+			// We're currently limited by the API to a single endpoint
+ if endpoint := endpoints.([]interface{})[0]; endpoint != nil {
+ config.Address = endpoint.(string)
+ }
+ }
+
+ if insecure, ok := kibConfig["insecure"]; ok && insecure.(bool) {
+ config.DisableVerifySSL = true
+ }
+ }
+
+ return config.withEnvironmentOverrides(), nil
+}
+
+func newKibanaConfigFromFramework(ctx context.Context, cfg ProviderConfiguration, base baseConfig) (kibanaConfig, fwdiags.Diagnostics) {
+ config := base.toKibanaConfig()
+
+ if len(cfg.Kibana) > 0 {
+ kibConfig := cfg.Kibana[0]
+ if kibConfig.Username.ValueString() != "" {
+ config.Username = kibConfig.Username.ValueString()
+ }
+ if kibConfig.Password.ValueString() != "" {
+ config.Password = kibConfig.Password.ValueString()
+ }
+ var endpoints []string
+ diags := kibConfig.Endpoints.ElementsAs(ctx, &endpoints, true)
+ if diags.HasError() {
+ return kibanaConfig{}, diags
+ }
+
+ if len(endpoints) > 0 {
+ config.Address = endpoints[0]
+ }
+
+ config.DisableVerifySSL = kibConfig.Insecure.ValueBool()
+ }
+
+ return config.withEnvironmentOverrides(), nil
+}
+
+func (k kibanaConfig) withEnvironmentOverrides() kibanaConfig {
+ k.Username = withEnvironmentOverride(k.Username, "KIBANA_USERNAME")
+ k.Password = withEnvironmentOverride(k.Password, "KIBANA_PASSWORD")
+ k.Address = withEnvironmentOverride(k.Address, "KIBANA_ENDPOINT")
+
+ if insecure, ok := os.LookupEnv("KIBANA_INSECURE"); ok {
+ if insecureValue, err := strconv.ParseBool(insecure); err == nil {
+ k.DisableVerifySSL = insecureValue
+ }
+ }
+
+ return k
+}
+
+func (k kibanaConfig) toFleetConfig() fleetConfig {
+ return fleetConfig{
+ URL: k.Address,
+ Username: k.Username,
+ Password: k.Password,
+ Insecure: k.DisableVerifySSL,
+ }
+}
diff --git a/internal/clients/config/kibana_test.go b/internal/clients/config/kibana_test.go
new file mode 100644
index 000000000..a7b8fbbac
--- /dev/null
+++ b/internal/clients/config/kibana_test.go
@@ -0,0 +1,250 @@
+package config
+
+import (
+ "context"
+ "os"
+ "testing"
+
+ providerSchema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ sdkdiags "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/stretchr/testify/require"
+)
+
+func Test_newKibanaConfigFromSDK(t *testing.T) {
+ type args struct {
+ baseCfg baseConfig
+ resourceData map[string]interface{}
+ expectedConfig kibanaConfig
+ expectedDiags sdkdiags.Diagnostics
+ env map[string]string
+ }
+ tests := []struct {
+ name string
+ args func() args
+ }{
+ {
+ name: "should return kibana config if no kibana config defined",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ resourceData: map[string]interface{}{},
+ expectedConfig: baseCfg.toKibanaConfig(),
+ }
+ },
+ },
+ {
+ name: "should use the provided config options",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ resourceData: map[string]interface{}{
+ "kibana": []interface{}{
+ map[string]interface{}{
+ "endpoints": []interface{}{"example.com/kibana"},
+ "username": "kibana",
+ "password": "baltic",
+ "insecure": true,
+ },
+ },
+ },
+ expectedConfig: kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "kibana",
+ Password: "baltic",
+ DisableVerifySSL: true,
+ },
+ }
+ },
+ },
+ {
+ name: "should prefer environment variables",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ resourceData: map[string]interface{}{
+ "kibana": []interface{}{
+ map[string]interface{}{
+ "endpoints": []interface{}{"example.com/kibana"},
+ "username": "kibana",
+ "password": "baltic",
+ "insecure": true,
+ },
+ },
+ },
+ env: map[string]string{
+ "KIBANA_ENDPOINT": "example.com/cabana",
+ "KIBANA_USERNAME": "elastic",
+ "KIBANA_PASSWORD": "thin-lines",
+ "KIBANA_INSECURE": "false",
+ },
+ expectedConfig: kibanaConfig{
+ Address: "example.com/cabana",
+ Username: "elastic",
+ Password: "thin-lines",
+ DisableVerifySSL: false,
+ },
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("KIBANA_USERNAME")
+ os.Unsetenv("KIBANA_PASSWORD")
+ os.Unsetenv("KIBANA_ENDPOINT")
+ os.Unsetenv("KIBANA_INSECURE")
+
+ args := tt.args()
+ rd := schema.TestResourceDataRaw(t, map[string]*schema.Schema{
+ "kibana": providerSchema.GetKibanaConnectionSchema(),
+ }, args.resourceData)
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ kibanaCfg, diags := newKibanaConfigFromSDK(rd, args.baseCfg)
+
+ require.Equal(t, args.expectedConfig, kibanaCfg)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
+
+func Test_newKibanaConfigFromFramework(t *testing.T) {
+ type args struct {
+ baseCfg baseConfig
+ providerConfig ProviderConfiguration
+ expectedConfig kibanaConfig
+ expectedDiags fwdiags.Diagnostics
+ env map[string]string
+ }
+ tests := []struct {
+ name string
+ args func() args
+ }{
+ {
+ name: "should return kibana config if no kibana config defined",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ providerConfig: ProviderConfiguration{},
+ expectedConfig: baseCfg.toKibanaConfig(),
+ }
+ },
+ },
+ {
+ name: "should use the provided config options",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ providerConfig: ProviderConfiguration{
+ Kibana: []KibanaConnection{
+ {
+ Username: types.StringValue("kibana"),
+ Password: types.StringValue("baltic"),
+ Endpoints: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("example.com/kibana"),
+ }),
+ Insecure: types.BoolValue(true),
+ },
+ },
+ },
+ expectedConfig: kibanaConfig{
+ Address: "example.com/kibana",
+ Username: "kibana",
+ Password: "baltic",
+ DisableVerifySSL: true,
+ },
+ }
+ },
+ },
+ {
+ name: "should prefer environment variables",
+ args: func() args {
+ baseCfg := baseConfig{
+ Username: "elastic",
+ Password: "changeme",
+ }
+
+ return args{
+ baseCfg: baseCfg,
+ providerConfig: ProviderConfiguration{
+ Kibana: []KibanaConnection{
+ {
+ Username: types.StringValue("kibana"),
+ Password: types.StringValue("baltic"),
+ Endpoints: types.ListValueMust(types.StringType, []attr.Value{
+ types.StringValue("example.com/kibana"),
+ }),
+ Insecure: types.BoolValue(true),
+ },
+ },
+ },
+ env: map[string]string{
+ "KIBANA_ENDPOINT": "example.com/cabana",
+ "KIBANA_USERNAME": "elastic",
+ "KIBANA_PASSWORD": "thin-lines",
+ "KIBANA_INSECURE": "false",
+ },
+ expectedConfig: kibanaConfig{
+ Address: "example.com/cabana",
+ Username: "elastic",
+ Password: "thin-lines",
+ DisableVerifySSL: false,
+ },
+ }
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ os.Unsetenv("KIBANA_USERNAME")
+ os.Unsetenv("KIBANA_PASSWORD")
+ os.Unsetenv("KIBANA_ENDPOINT")
+ os.Unsetenv("KIBANA_INSECURE")
+
+ args := tt.args()
+
+ for key, val := range args.env {
+ os.Setenv(key, val)
+ }
+
+ kibanaCfg, diags := newKibanaConfigFromFramework(context.Background(), args.providerConfig, args.baseCfg)
+
+ require.Equal(t, args.expectedConfig, kibanaCfg)
+ require.Equal(t, args.expectedDiags, diags)
+ })
+ }
+}
diff --git a/internal/clients/config/provider.go b/internal/clients/config/provider.go
new file mode 100644
index 000000000..7da987528
--- /dev/null
+++ b/internal/clients/config/provider.go
@@ -0,0 +1,39 @@
+package config
+
+import "github.com/hashicorp/terraform-plugin-framework/types"
+
+type ProviderConfiguration struct {
+ Elasticsearch []ElasticsearchConnection `tfsdk:"elasticsearch"`
+ Kibana []KibanaConnection `tfsdk:"kibana"`
+ Fleet []FleetConnection `tfsdk:"fleet"`
+}
+
+type ElasticsearchConnection struct {
+ Username types.String `tfsdk:"username"`
+ Password types.String `tfsdk:"password"`
+ APIKey types.String `tfsdk:"api_key"`
+ Endpoints types.List `tfsdk:"endpoints"`
+ Insecure types.Bool `tfsdk:"insecure"`
+ CAFile types.String `tfsdk:"ca_file"`
+ CAData types.String `tfsdk:"ca_data"`
+ CertFile types.String `tfsdk:"cert_file"`
+ KeyFile types.String `tfsdk:"key_file"`
+ CertData types.String `tfsdk:"cert_data"`
+ KeyData types.String `tfsdk:"key_data"`
+}
+
+type KibanaConnection struct {
+ Username types.String `tfsdk:"username"`
+ Password types.String `tfsdk:"password"`
+ Endpoints types.List `tfsdk:"endpoints"`
+ Insecure types.Bool `tfsdk:"insecure"`
+}
+
+type FleetConnection struct {
+ Username types.String `tfsdk:"username"`
+ Password types.String `tfsdk:"password"`
+ APIKey types.String `tfsdk:"api_key"`
+ Endpoint types.String `tfsdk:"endpoint"`
+ Insecure types.Bool `tfsdk:"insecure"`
+ CACerts types.List `tfsdk:"ca_certs"`
+}
diff --git a/internal/clients/config/sdk.go b/internal/clients/config/sdk.go
new file mode 100644
index 000000000..da7cf84e2
--- /dev/null
+++ b/internal/clients/config/sdk.go
@@ -0,0 +1,60 @@
+package config
+
+import (
+ "github.com/disaster37/go-kibana-rest/v8"
+ "github.com/elastic/go-elasticsearch/v7"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/fleet"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+const (
+ esKey string = "elasticsearch"
+ esConnectionKey string = "elasticsearch_connection"
+)
+
+func NewFromSDK(d *schema.ResourceData, version string) (Client, diag.Diagnostics) {
+ return newFromSDK(d, version, esKey)
+}
+
+func NewFromSDKResource(d *schema.ResourceData, version string) (*Client, diag.Diagnostics) {
+ if _, ok := d.GetOk(esConnectionKey); !ok {
+ return nil, nil
+ }
+
+ client, diags := newFromSDK(d, version, esConnectionKey)
+ return &client, diags
+}
+
+func newFromSDK(d *schema.ResourceData, version, esConfigKey string) (Client, diag.Diagnostics) {
+ base := newBaseConfigFromSDK(d, version, esConfigKey)
+ client := Client{
+ UserAgent: base.UserAgent,
+ }
+
+ esCfg, diags := newElasticsearchConfigFromSDK(d, base, esConfigKey, true)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ if esCfg != nil {
+ client.Elasticsearch = utils.Pointer(elasticsearch.Config(*esCfg))
+ }
+
+ kibanaCfg, diags := newKibanaConfigFromSDK(d, base)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ client.Kibana = (*kibana.Config)(&kibanaCfg)
+
+ fleetCfg, diags := newFleetConfigFromSDK(d, kibanaCfg)
+ if diags.HasError() {
+ return Client{}, diags
+ }
+
+ client.Fleet = (*fleet.Config)(&fleetCfg)
+
+ return client, nil
+}
diff --git a/internal/elasticsearch/cluster/script.go b/internal/elasticsearch/cluster/script.go
index 35eac8d29..8b09c443e 100644
--- a/internal/elasticsearch/cluster/script.go
+++ b/internal/elasticsearch/cluster/script.go
@@ -66,7 +66,7 @@ func ResourceScript() *schema.Resource {
}
func resourceScriptRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -100,7 +100,7 @@ func resourceScriptRead(ctx context.Context, d *schema.ResourceData, meta interf
}
func resourceScriptPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -137,7 +137,7 @@ func resourceScriptPut(ctx context.Context, d *schema.ResourceData, meta interfa
}
func resourceScriptDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/cluster/settings.go b/internal/elasticsearch/cluster/settings.go
index a37b0e32a..09910e7c8 100644
--- a/internal/elasticsearch/cluster/settings.go
+++ b/internal/elasticsearch/cluster/settings.go
@@ -86,7 +86,7 @@ func ResourceSettings() *schema.Resource {
}
func resourceClusterSettingsPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -204,7 +204,7 @@ func expandSettings(s interface{}) (map[string]interface{}, diag.Diagnostics) {
}
func resourceClusterSettingsRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -258,7 +258,7 @@ func flattenSettings(name string, old, new map[string]interface{}) []interface{}
}
func resourceClusterSettingsDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/cluster/slm.go b/internal/elasticsearch/cluster/slm.go
index 06fb9b834..77cd8a827 100644
--- a/internal/elasticsearch/cluster/slm.go
+++ b/internal/elasticsearch/cluster/slm.go
@@ -155,7 +155,7 @@ func ResourceSlm() *schema.Resource {
}
func resourceSlmPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -237,7 +237,7 @@ func resourceSlmPut(ctx context.Context, d *schema.ResourceData, meta interface{
}
func resourceSlmRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -329,7 +329,7 @@ func resourceSlmRead(ctx context.Context, d *schema.ResourceData, meta interface
}
func resourceSlmDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/cluster/snapshot_repository.go b/internal/elasticsearch/cluster/snapshot_repository.go
index b6c21a8fa..ec38f1420 100644
--- a/internal/elasticsearch/cluster/snapshot_repository.go
+++ b/internal/elasticsearch/cluster/snapshot_repository.go
@@ -322,7 +322,7 @@ func ResourceSnapshotRepository() *schema.Resource {
}
func resourceSnapRepoPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -367,7 +367,7 @@ func expandFsSettings(source, target map[string]interface{}) {
}
func resourceSnapRepoRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -450,7 +450,7 @@ func flattenRepoSettings(r *models.SnapshotRepository, s map[string]*schema.Sche
}
func resourceSnapRepoDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/cluster/snapshot_repository_data_source.go b/internal/elasticsearch/cluster/snapshot_repository_data_source.go
index 71c03f3be..f29a58899 100644
--- a/internal/elasticsearch/cluster/snapshot_repository_data_source.go
+++ b/internal/elasticsearch/cluster/snapshot_repository_data_source.go
@@ -258,7 +258,7 @@ func DataSourceSnapshotRespository() *schema.Resource {
}
func dataSourceSnapRepoRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/enrich/policy.go b/internal/elasticsearch/enrich/policy.go
index 20e3ebcbf..16179c441 100644
--- a/internal/elasticsearch/enrich/policy.go
+++ b/internal/elasticsearch/enrich/policy.go
@@ -92,7 +92,7 @@ func ResourceEnrichPolicy() *schema.Resource {
}
func resourceEnrichPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -133,7 +133,7 @@ func resourceEnrichPolicyRead(ctx context.Context, d *schema.ResourceData, meta
}
func resourceEnrichPolicyPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -169,7 +169,7 @@ func resourceEnrichPolicyPut(ctx context.Context, d *schema.ResourceData, meta i
}
func resourceEnrichPolicyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/enrich/policy_data_source.go b/internal/elasticsearch/enrich/policy_data_source.go
index a90dc11e3..975c50294 100644
--- a/internal/elasticsearch/enrich/policy_data_source.go
+++ b/internal/elasticsearch/enrich/policy_data_source.go
@@ -61,7 +61,7 @@ func DataSourceEnrichPolicy() *schema.Resource {
}
func dataSourceEnrichPolicyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/index/component_template.go b/internal/elasticsearch/index/component_template.go
index a3a9559a7..8d0448dd8 100644
--- a/internal/elasticsearch/index/component_template.go
+++ b/internal/elasticsearch/index/component_template.go
@@ -135,7 +135,7 @@ func ResourceComponentTemplate() *schema.Resource {
}
func resourceComponentTemplatePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -228,7 +228,7 @@ func resourceComponentTemplatePut(ctx context.Context, d *schema.ResourceData, m
}
func resourceComponentTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -282,7 +282,7 @@ func resourceComponentTemplateRead(ctx context.Context, d *schema.ResourceData,
}
func resourceComponentTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/index/data_stream.go b/internal/elasticsearch/index/data_stream.go
index 28024a0cf..1c81f3215 100644
--- a/internal/elasticsearch/index/data_stream.go
+++ b/internal/elasticsearch/index/data_stream.go
@@ -119,7 +119,7 @@ func ResourceDataStream() *schema.Resource {
}
func resourceDataStreamPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -138,7 +138,7 @@ func resourceDataStreamPut(ctx context.Context, d *schema.ResourceData, meta int
}
func resourceDataStreamRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -211,7 +211,7 @@ func resourceDataStreamRead(ctx context.Context, d *schema.ResourceData, meta in
}
func resourceDataStreamDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/index/ilm.go b/internal/elasticsearch/index/ilm.go
index 420c2d442..5a8fc9650 100644
--- a/internal/elasticsearch/index/ilm.go
+++ b/internal/elasticsearch/index/ilm.go
@@ -409,7 +409,7 @@ func getSchema(actions ...string) map[string]*schema.Schema {
}
func resourceIlmPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -587,7 +587,7 @@ func expandAction(a []interface{}, serverVersion *version.Version, settings ...s
}
func resourceIlmRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -703,7 +703,7 @@ func flattenPhase(phaseName string, p models.Phase, d *schema.ResourceData) (int
}
func resourceIlmDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/index/index.go b/internal/elasticsearch/index/index.go
index c253dee00..f193ae3e0 100644
--- a/internal/elasticsearch/index/index.go
+++ b/internal/elasticsearch/index/index.go
@@ -582,7 +582,7 @@ If specified, this mapping can include: field names, [field data types](https://
return nil, fmt.Errorf("unable to import requested index")
}
- client, diags := clients.NewApiClient(d, m)
+ client, diags := clients.NewApiClientFromSDKResource(d, m)
if diags.HasError() {
return nil, fmt.Errorf("Unabled to create API client %v", diags)
}
@@ -661,7 +661,7 @@ If specified, this mapping can include: field names, [field data types](https://
}
func resourceIndexCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -795,7 +795,7 @@ func resourceIndexCreate(ctx context.Context, d *schema.ResourceData, meta inter
// Because of limitation of ES API we must handle changes to aliases, mappings and settings separately
func resourceIndexUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -899,7 +899,7 @@ func flattenIndexSettings(settings []interface{}) map[string]interface{} {
}
func resourceIndexRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -960,7 +960,7 @@ func resourceIndexDelete(ctx context.Context, d *schema.ResourceData, meta inter
if d.Get("deletion_protection").(bool) {
return diag.Errorf("cannot destroy index without setting deletion_protection=false and running `terraform apply`")
}
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/index/template.go b/internal/elasticsearch/index/template.go
index b062677f6..9df3b649f 100644
--- a/internal/elasticsearch/index/template.go
+++ b/internal/elasticsearch/index/template.go
@@ -182,7 +182,7 @@ func ResourceTemplate() *schema.Resource {
}
func resourceIndexTemplatePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -304,7 +304,7 @@ func resourceIndexTemplatePut(ctx context.Context, d *schema.ResourceData, meta
}
func resourceIndexTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -408,7 +408,7 @@ func flattenTemplateData(template *models.Template) ([]interface{}, diag.Diagnos
}
func resourceIndexTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/ingest/pipeline.go b/internal/elasticsearch/ingest/pipeline.go
index 0c7f985a9..f0f4cd16a 100644
--- a/internal/elasticsearch/ingest/pipeline.go
+++ b/internal/elasticsearch/ingest/pipeline.go
@@ -84,7 +84,7 @@ func ResourceIngestPipeline() *schema.Resource {
}
func resourceIngestPipelineTemplatePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -138,7 +138,7 @@ func resourceIngestPipelineTemplatePut(ctx context.Context, d *schema.ResourceDa
}
func resourceIngestPipelineTemplateRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -206,7 +206,7 @@ func resourceIngestPipelineTemplateRead(ctx context.Context, d *schema.ResourceD
}
func resourceIngestPipelineTemplateDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/logstash/pipeline.go b/internal/elasticsearch/logstash/pipeline.go
index 01adee829..34fb98b42 100644
--- a/internal/elasticsearch/logstash/pipeline.go
+++ b/internal/elasticsearch/logstash/pipeline.go
@@ -183,7 +183,7 @@ func ResourceLogstashPipeline() *schema.Resource {
}
func resourceLogstashPipelinePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -223,7 +223,7 @@ func resourceLogstashPipelinePut(ctx context.Context, d *schema.ResourceData, me
}
func resourceLogstashPipelineRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -287,7 +287,7 @@ func resourceLogstashPipelineRead(ctx context.Context, d *schema.ResourceData, m
}
func resourceLogstashPipelineDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/api_key.go b/internal/elasticsearch/security/api_key.go
index c6d23780e..ef44aa1ad 100644
--- a/internal/elasticsearch/security/api_key.go
+++ b/internal/elasticsearch/security/api_key.go
@@ -92,7 +92,7 @@ func ResourceApiKey() *schema.Resource {
}
func resourceSecurityApiKeyCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -166,7 +166,7 @@ func resourceSecurityApiKeyUpdate(ctx context.Context, d *schema.ResourceData, m
}
func resourceSecurityApiKeyRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -216,7 +216,7 @@ func resourceSecurityApiKeyRead(ctx context.Context, d *schema.ResourceData, met
}
func resourceSecurityApiKeyDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/role.go b/internal/elasticsearch/security/role.go
index 6d957c091..3b886753c 100644
--- a/internal/elasticsearch/security/role.go
+++ b/internal/elasticsearch/security/role.go
@@ -174,7 +174,7 @@ func ResourceRole() *schema.Resource {
}
func resourceSecurityRolePut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -312,7 +312,7 @@ func resourceSecurityRolePut(ctx context.Context, d *schema.ResourceData, meta i
}
func resourceSecurityRoleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -420,7 +420,7 @@ func flattenIndicesData(indices *[]models.IndexPerms) []interface{} {
}
func resourceSecurityRoleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/role_data_source.go b/internal/elasticsearch/security/role_data_source.go
index 06e0a7531..cc263de74 100644
--- a/internal/elasticsearch/security/role_data_source.go
+++ b/internal/elasticsearch/security/role_data_source.go
@@ -149,7 +149,7 @@ func DataSourceRole() *schema.Resource {
}
func dataSourceSecurityRoleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/role_mapping.go b/internal/elasticsearch/security/role_mapping.go
index f666292e4..811b61c85 100644
--- a/internal/elasticsearch/security/role_mapping.go
+++ b/internal/elasticsearch/security/role_mapping.go
@@ -84,7 +84,7 @@ func ResourceRoleMapping() *schema.Resource {
}
func resourceSecurityRoleMappingPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -123,7 +123,7 @@ func resourceSecurityRoleMappingPut(ctx context.Context, d *schema.ResourceData,
}
func resourceSecurityRoleMappingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -182,7 +182,7 @@ func resourceSecurityRoleMappingRead(ctx context.Context, d *schema.ResourceData
}
func resourceSecurityRoleMappingDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/role_mapping_data_source.go b/internal/elasticsearch/security/role_mapping_data_source.go
index 19baeec83..9484ca3fc 100644
--- a/internal/elasticsearch/security/role_mapping_data_source.go
+++ b/internal/elasticsearch/security/role_mapping_data_source.go
@@ -61,7 +61,7 @@ func DataSourceRoleMapping() *schema.Resource {
}
func dataSourceSecurityRoleMappingRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/system_user.go b/internal/elasticsearch/security/system_user.go
index 52d4e9f55..fc2b3cf2a 100644
--- a/internal/elasticsearch/security/system_user.go
+++ b/internal/elasticsearch/security/system_user.go
@@ -71,7 +71,7 @@ func ResourceSystemUser() *schema.Resource {
}
func resourceSecuritySystemUserPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -121,7 +121,7 @@ func resourceSecuritySystemUserPut(ctx context.Context, d *schema.ResourceData,
}
func resourceSecuritySystemUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/user.go b/internal/elasticsearch/security/user.go
index 430af00c6..ab58c1a64 100644
--- a/internal/elasticsearch/security/user.go
+++ b/internal/elasticsearch/security/user.go
@@ -106,7 +106,7 @@ func ResourceUser() *schema.Resource {
}
func resourceSecurityUserPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -161,7 +161,7 @@ func resourceSecurityUserPut(ctx context.Context, d *schema.ResourceData, meta i
}
func resourceSecurityUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -210,7 +210,7 @@ func resourceSecurityUserRead(ctx context.Context, d *schema.ResourceData, meta
}
func resourceSecurityUserDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/security/user_data_source.go b/internal/elasticsearch/security/user_data_source.go
index 5e7380940..77fd08269 100644
--- a/internal/elasticsearch/security/user_data_source.go
+++ b/internal/elasticsearch/security/user_data_source.go
@@ -63,7 +63,7 @@ func DataSourceUser() *schema.Resource {
}
func dataSourceSecurityUserRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/transform/transform.go b/internal/elasticsearch/transform/transform.go
index ff376c97c..412c3f0b6 100644
--- a/internal/elasticsearch/transform/transform.go
+++ b/internal/elasticsearch/transform/transform.go
@@ -297,7 +297,7 @@ func ResourceTransform() *schema.Resource {
func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -339,7 +339,7 @@ func resourceTransformCreate(ctx context.Context, d *schema.ResourceData, meta i
func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -384,7 +384,7 @@ func resourceTransformRead(ctx context.Context, d *schema.ResourceData, meta int
func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -430,7 +430,7 @@ func resourceTransformUpdate(ctx context.Context, d *schema.ResourceData, meta i
func resourceTransformDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/elasticsearch/watcher/watch.go b/internal/elasticsearch/watcher/watch.go
index b8a4dd0d8..7190e4db8 100644
--- a/internal/elasticsearch/watcher/watch.go
+++ b/internal/elasticsearch/watcher/watch.go
@@ -105,7 +105,7 @@ func ResourceWatch() *schema.Resource {
}
func resourceWatchPut(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -169,7 +169,7 @@ func resourceWatchPut(ctx context.Context, d *schema.ResourceData, meta interfac
}
func resourceWatchRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -253,7 +253,7 @@ func resourceWatchRead(ctx context.Context, d *schema.ResourceData, meta interfa
}
func resourceWatchDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/fleet/shared.go b/internal/fleet/shared.go
index ee8a590cf..da806e246 100644
--- a/internal/fleet/shared.go
+++ b/internal/fleet/shared.go
@@ -8,7 +8,7 @@ import (
)
func getFleetClient(d *schema.ResourceData, meta interface{}) (*fleet.Client, diag.Diagnostics) {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return nil, diags
}
diff --git a/internal/kibana/alerting.go b/internal/kibana/alerting.go
index 161074d51..6878769cb 100644
--- a/internal/kibana/alerting.go
+++ b/internal/kibana/alerting.go
@@ -216,7 +216,7 @@ func getActionsFromResourceData(d *schema.ResourceData) ([]models.AlertingRuleAc
}
func resourceRuleCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -239,7 +239,7 @@ func resourceRuleCreate(ctx context.Context, d *schema.ResourceData, meta interf
}
func resourceRuleUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -268,7 +268,7 @@ func resourceRuleUpdate(ctx context.Context, d *schema.ResourceData, meta interf
}
func resourceRuleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -357,7 +357,7 @@ func resourceRuleRead(ctx context.Context, d *schema.ResourceData, meta interfac
}
func resourceRuleDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/kibana/connector.go b/internal/kibana/connector.go
index eed6ed8fc..1a57b7fb7 100644
--- a/internal/kibana/connector.go
+++ b/internal/kibana/connector.go
@@ -124,7 +124,7 @@ func connectorCustomizeDiff(ctx context.Context, rd *schema.ResourceDiff, in int
}
func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -147,7 +147,7 @@ func resourceConnectorCreate(ctx context.Context, d *schema.ResourceData, meta i
}
func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -176,7 +176,7 @@ func resourceConnectorUpdate(ctx context.Context, d *schema.ResourceData, meta i
}
func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -199,7 +199,7 @@ func resourceConnectorRead(ctx context.Context, d *schema.ResourceData, meta int
}
func resourceConnectorDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/kibana/import_saved_objects/acc_test.go b/internal/kibana/import_saved_objects/acc_test.go
new file mode 100644
index 000000000..f8a39df6f
--- /dev/null
+++ b/internal/kibana/import_saved_objects/acc_test.go
@@ -0,0 +1,69 @@
+package import_saved_objects_test
+
+import (
+ "testing"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/acctest"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+func TestAccResourceImportSavedObjects(t *testing.T) {
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ProtoV5ProviderFactories: acctest.Providers,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccResourceImportSavedObjects(),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success", "true"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success_count", "1"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success_results.#", "1"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "errors.#", "0"),
+ ),
+ },
+ {
+ Config: testAccResourceImportSavedObjectsUpdate(),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success", "true"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success_count", "1"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "success_results.#", "1"),
+ resource.TestCheckResourceAttr("elasticstack_kibana_import_saved_objects.settings", "errors.#", "0"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccResourceImportSavedObjects() string {
+ return `
+provider "elasticstack" {
+ elasticsearch {}
+ kibana {}
+}
+
+resource "elasticstack_kibana_import_saved_objects" "settings" {
+ overwrite = true
+ file_contents = <<-EOT
+{"attributes":{"buildNum":42747,"defaultIndex":"metricbeat-*","theme:darkMode":true},"coreMigrationVersion":"7.0.0","id":"7.14.0","managed":false,"references":[],"type":"config","typeMigrationVersion":"7.0.0","updated_at":"2021-08-04T02:04:43.306Z","version":"WzY1MiwyXQ=="}
+{"excludedObjects":[],"excludedObjectsCount":0,"exportedCount":1,"missingRefCount":0,"missingReferences":[]}
+EOT
+}
+ `
+}
+
+func testAccResourceImportSavedObjectsUpdate() string {
+ return `
+provider "elasticstack" {
+ elasticsearch {}
+ kibana {}
+}
+
+resource "elasticstack_kibana_import_saved_objects" "settings" {
+ overwrite = true
+ file_contents = <<-EOT
+{"attributes":{"buildNum":42747,"defaultIndex":"metricbeat-*","theme:darkMode":false},"coreMigrationVersion":"7.0.0","id":"7.14.0","managed":false,"references":[],"type":"config","typeMigrationVersion":"7.0.0","updated_at":"2021-08-04T02:04:43.306Z","version":"WzY1MiwyXQ=="}
+{"excludedObjects":[],"excludedObjectsCount":0,"exportedCount":1,"missingRefCount":0,"missingReferences":[]}
+EOT
+}
+ `
+}
diff --git a/internal/kibana/import_saved_objects/create.go b/internal/kibana/import_saved_objects/create.go
new file mode 100644
index 000000000..358a06bff
--- /dev/null
+++ b/internal/kibana/import_saved_objects/create.go
@@ -0,0 +1,107 @@
+package import_saved_objects
+
+import (
+ "context"
+
+ "github.com/google/uuid"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/tfsdk"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/mitchellh/mapstructure"
+)
+
+func (r *Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) {
+ r.importObjects(ctx, request.Plan, &response.State, &response.Diagnostics)
+}
+
+func (r *Resource) importObjects(ctx context.Context, plan tfsdk.Plan, state *tfsdk.State, diags *diag.Diagnostics) {
+ if !resourceReady(r, diags) {
+ return
+ }
+
+ var model modelV0
+
+ diags.Append(plan.Get(ctx, &model)...)
+ if diags.HasError() {
+ return
+ }
+
+ kibanaClient, err := r.client.GetKibanaClient()
+ if err != nil {
+ diags.AddError("unable to get kibana client", err.Error())
+ return
+ }
+
+ resp, err := kibanaClient.KibanaSavedObject.Import([]byte(model.FileContents.ValueString()), model.Overwrite.ValueBool(), model.SpaceID.ValueString())
+ if err != nil {
+ diags.AddError("failed to import saved objects", err.Error())
+ return
+ }
+
+ var respModel responseModel
+
+ decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
+ Result: &respModel,
+ TagName: "json",
+ })
+ if err != nil {
+ diags.AddError("failed to create model decoder", err.Error())
+ return
+ }
+
+ err = decoder.Decode(resp)
+ if err != nil {
+ diags.AddError("failed to decode response", err.Error())
+ return
+ }
+
+ if model.ID.IsUnknown() {
+ model.ID = types.StringValue(uuid.NewString())
+ }
+
+ diags.Append(state.Set(ctx, model)...)
+ diags.Append(state.SetAttribute(ctx, path.Root("success"), respModel.Success)...)
+ diags.Append(state.SetAttribute(ctx, path.Root("success_count"), respModel.SuccessCount)...)
+ diags.Append(state.SetAttribute(ctx, path.Root("errors"), respModel.Errors)...)
+ diags.Append(state.SetAttribute(ctx, path.Root("success_results"), respModel.SuccessResults)...)
+ if diags.HasError() {
+ return
+ }
+
+ if !respModel.Success && !model.IgnoreImportErrors.ValueBool() {
+ diags.AddError("not all objects were imported successfully", "see errors attribute for more details")
+ }
+}
+
+type responseModel struct {
+ Success bool `json:"success"`
+ SuccessCount int `json:"successCount"`
+ Errors []importError `json:"errors"`
+ SuccessResults []importSuccess `json:"successResults"`
+}
+
+type importSuccess struct {
+ ID string `tfsdk:"id" json:"id"`
+ Type string `tfsdk:"type" json:"type"`
+ DestinationID string `tfsdk:"destination_id" json:"destinationId"`
+ Meta importMeta `tfsdk:"meta" json:"meta"`
+}
+
+type importError struct {
+	ID    string          `tfsdk:"id" json:"id"`
+	Type  string          `tfsdk:"type" json:"type"`
+	Title string          `tfsdk:"title" json:"title"`
+	Error importErrorType `tfsdk:"error" json:"error"`
+	Meta  importMeta      `tfsdk:"meta" json:"meta"`
+}
+
+type importErrorType struct {
+	Type string `tfsdk:"type" json:"type"`
+}
+
+type importMeta struct {
+ Icon string `tfsdk:"icon" json:"icon"`
+ Title string `tfsdk:"title" json:"title"`
+}
diff --git a/internal/kibana/import_saved_objects/delete.go b/internal/kibana/import_saved_objects/delete.go
new file mode 100644
index 000000000..46e65848e
--- /dev/null
+++ b/internal/kibana/import_saved_objects/delete.go
@@ -0,0 +1,12 @@
+package import_saved_objects
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) {
+ tflog.Info(ctx, "Delete isn't supported for elasticstack_kibana_import_saved_objects")
+}
diff --git a/internal/kibana/import_saved_objects/read.go b/internal/kibana/import_saved_objects/read.go
new file mode 100644
index 000000000..454d1123a
--- /dev/null
+++ b/internal/kibana/import_saved_objects/read.go
@@ -0,0 +1,12 @@
+package import_saved_objects
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) {
+ tflog.Info(ctx, "Read isn't supported for elasticstack_kibana_import_saved_objects")
+}
diff --git a/internal/kibana/import_saved_objects/schema.go b/internal/kibana/import_saved_objects/schema.go
new file mode 100644
index 000000000..1d2b68706
--- /dev/null
+++ b/internal/kibana/import_saved_objects/schema.go
@@ -0,0 +1,161 @@
+package import_saved_objects
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+)
+
+// Ensure provider defined types fully satisfy framework interfaces
+var _ resource.Resource = &Resource{}
+var _ resource.ResourceWithConfigure = &Resource{}
+
+// TODO - Uncomment these lines when we're using a kibana client which supports create_new_copies and compatibility_mode
+// create_new_copies and compatibility_mode aren't supported by the current version of the Kibana client
+// We can add these ourselves once https://github.com/elastic/terraform-provider-elasticstack/pull/372 is merged
+
+// var _ resource.ResourceWithConfigValidators = &Resource{}
+
+// func (r *Resource) ConfigValidators(context.Context) []resource.ConfigValidator {
+// return []resource.ConfigValidator{
+// resourcevalidator.Conflicting(
+// path.MatchRoot("create_new_copies"),
+// path.MatchRoot("overwrite"),
+// path.MatchRoot("compatibility_mode"),
+// ),
+// }
+// }
+
+func (r *Resource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = schema.Schema{
+ Description: "Imports saved objects from the referenced file",
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ Computed: true,
+ MarkdownDescription: "Generated ID for the import.",
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "space_id": schema.StringAttribute{
+ Description: "An identifier for the space. If space_id is not provided, the default space is used.",
+ Optional: true,
+ },
+ "ignore_import_errors": schema.BoolAttribute{
+ Description: "If set to true, errors during the import process will not fail the configuration application",
+ Optional: true,
+ },
+ // create_new_copies and compatibility_mode aren't supported by the current version of the Kibana client
+ // We can add these ourselves once https://github.com/elastic/terraform-provider-elasticstack/pull/372 is merged
+ // "create_new_copies": schema.BoolAttribute{
+ // Description: "Creates copies of saved objects, regenerates each object ID, and resets the origin. When used, potential conflict errors are avoided.",
+ // Optional: true,
+ // },
+ "overwrite": schema.BoolAttribute{
+ Description: "Overwrites saved objects when they already exist. When used, potential conflict errors are automatically resolved by overwriting the destination object.",
+ Optional: true,
+ },
+ // "compatibility_mode": schema.BoolAttribute{
+ // Description: "Applies various adjustments to the saved objects that are being imported to maintain compatibility between different Kibana versions. Use this option only if you encounter issues with imported saved objects.",
+ // Optional: true,
+ // },
+ "file_contents": schema.StringAttribute{
+ Description: "The contents of the exported saved objects file.",
+ Required: true,
+ },
+
+ "success": schema.BoolAttribute{
+ Description: "Indicates when the import was successfully completed. When set to false, some objects may not have been created. For additional information, refer to the errors and success_results properties.",
+ Computed: true,
+ },
+ "success_count": schema.Int64Attribute{
+ Description: "Indicates the number of successfully imported records.",
+ Computed: true,
+ },
+ "errors": schema.ListAttribute{
+ Computed: true,
+ ElementType: types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "id": types.StringType,
+ "type": types.StringType,
+ "title": types.StringType,
+ "error": types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "type": types.StringType,
+ },
+ },
+ "meta": types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "icon": types.StringType,
+ "title": types.StringType,
+ },
+ },
+ },
+ },
+ },
+ "success_results": schema.ListAttribute{
+ Computed: true,
+ ElementType: types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "id": types.StringType,
+ "type": types.StringType,
+ "destination_id": types.StringType,
+ "meta": types.ObjectType{
+ AttrTypes: map[string]attr.Type{
+ "icon": types.StringType,
+ "title": types.StringType,
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+}
+
+type Resource struct {
+ client *clients.ApiClient
+}
+
+func resourceReady(r *Resource, dg *diag.Diagnostics) bool {
+ if r.client == nil {
+ dg.AddError(
+ "Unconfigured Client",
+ "Expected configured client. Please report this issue to the provider developers.",
+ )
+
+ return false
+ }
+ return true
+}
+
+func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) {
+ client, diags := clients.ConvertProviderData(request.ProviderData)
+ response.Diagnostics.Append(diags...)
+ r.client = client
+}
+
+func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) {
+ response.TypeName = request.ProviderTypeName + "_kibana_import_saved_objects"
+}
+
+type modelV0 struct {
+ ID types.String `tfsdk:"id"`
+ SpaceID types.String `tfsdk:"space_id"`
+ IgnoreImportErrors types.Bool `tfsdk:"ignore_import_errors"`
+ // CreateNewCopies types.Bool `tfsdk:"create_new_copies"`
+ Overwrite types.Bool `tfsdk:"overwrite"`
+ // CompatibilityMode types.Bool `tfsdk:"compatibility_mode"`
+ FileContents types.String `tfsdk:"file_contents"`
+ Success types.Bool `tfsdk:"success"`
+ SuccessCount types.Int64 `tfsdk:"success_count"`
+ Errors types.List `tfsdk:"errors"`
+ SuccessResults types.List `tfsdk:"success_results"`
+}
diff --git a/internal/kibana/import_saved_objects/update.go b/internal/kibana/import_saved_objects/update.go
new file mode 100644
index 000000000..0731c452c
--- /dev/null
+++ b/internal/kibana/import_saved_objects/update.go
@@ -0,0 +1,11 @@
+package import_saved_objects
+
+import (
+ "context"
+
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+func (r *Resource) Update(ctx context.Context, request resource.UpdateRequest, response *resource.UpdateResponse) {
+ r.importObjects(ctx, request.Plan, &response.State, &response.Diagnostics)
+}
diff --git a/internal/kibana/slo.go b/internal/kibana/slo.go
index 1d12ffe27..bbb1eea3d 100644
--- a/internal/kibana/slo.go
+++ b/internal/kibana/slo.go
@@ -603,7 +603,7 @@ func getSloFromResourceData(d *schema.ResourceData) (models.Slo, diag.Diagnostic
}
func resourceSloCreate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -626,7 +626,7 @@ func resourceSloCreate(ctx context.Context, d *schema.ResourceData, meta interfa
}
func resourceSloUpdate(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -655,7 +655,7 @@ func resourceSloUpdate(ctx context.Context, d *schema.ResourceData, meta interfa
}
func resourceSloRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -832,7 +832,7 @@ func resourceSloRead(ctx context.Context, d *schema.ResourceData, meta interface
}
func resourceSloDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/kibana/space.go b/internal/kibana/space.go
index 716b1f172..908e3e213 100644
--- a/internal/kibana/space.go
+++ b/internal/kibana/space.go
@@ -71,7 +71,7 @@ func ResourceSpace() *schema.Resource {
}
func resourceSpaceUpsert(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -132,7 +132,7 @@ func resourceSpaceUpsert(ctx context.Context, d *schema.ResourceData, meta inter
}
func resourceSpaceRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
@@ -180,7 +180,7 @@ func resourceSpaceRead(ctx context.Context, d *schema.ResourceData, meta interfa
}
func resourceSpaceDelete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
- client, diags := clients.NewApiClient(d, meta)
+ client, diags := clients.NewApiClientFromSDKResource(d, meta)
if diags.HasError() {
return diags
}
diff --git a/internal/schema/connection.go b/internal/schema/connection.go
index f8e961447..39b9692de 100644
--- a/internal/schema/connection.go
+++ b/internal/schema/connection.go
@@ -3,9 +3,207 @@ package schema
import (
"fmt"
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ fwschema "github.com/hashicorp/terraform-plugin-framework/provider/schema"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
+func GetEsFWConnectionBlock(keyName string) fwschema.Block {
+ usernamePath := path.MatchRelative().AtParent().AtName("username")
+ passwordPath := path.MatchRelative().AtParent().AtName("password")
+ caFilePath := path.MatchRelative().AtParent().AtName("ca_file")
+ caDataPath := path.MatchRelative().AtParent().AtName("ca_data")
+ certFilePath := path.MatchRelative().AtParent().AtName("cert_file")
+ certDataPath := path.MatchRelative().AtParent().AtName("cert_data")
+ keyFilePath := path.MatchRelative().AtParent().AtName("key_file")
+ keyDataPath := path.MatchRelative().AtParent().AtName("key_data")
+
+ return fwschema.ListNestedBlock{
+ MarkdownDescription: "Elasticsearch connection configuration block. ",
+ Description: "Elasticsearch connection configuration block. ",
+ NestedObject: fwschema.NestedBlockObject{
+ Attributes: map[string]fwschema.Attribute{
+ "username": fwschema.StringAttribute{
+ MarkdownDescription: "Username to use for API authentication to Elasticsearch.",
+ Optional: true,
+ Validators: []validator.String{stringvalidator.AlsoRequires(passwordPath)},
+ },
+ "password": fwschema.StringAttribute{
+ MarkdownDescription: "Password to use for API authentication to Elasticsearch.",
+ Optional: true,
+ Sensitive: true,
+ Validators: []validator.String{stringvalidator.AlsoRequires(usernamePath)},
+ },
+ "api_key": fwschema.StringAttribute{
+ MarkdownDescription: "API Key to use for authentication to Elasticsearch",
+ Optional: true,
+ Sensitive: true,
+ Validators: []validator.String{
+ stringvalidator.ConflictsWith(usernamePath, passwordPath),
+ },
+ },
+ "endpoints": fwschema.ListAttribute{
+ MarkdownDescription: "A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.",
+ Optional: true,
+ Sensitive: true,
+ ElementType: types.StringType,
+ },
+ "insecure": fwschema.BoolAttribute{
+ MarkdownDescription: "Disable TLS certificate validation",
+ Optional: true,
+ },
+ "ca_file": fwschema.StringAttribute{
+ MarkdownDescription: "Path to a custom Certificate Authority certificate",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.ConflictsWith(caDataPath),
+ },
+ },
+ "ca_data": fwschema.StringAttribute{
+ MarkdownDescription: "PEM-encoded custom Certificate Authority certificate",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.ConflictsWith(caFilePath),
+ },
+ },
+ "cert_file": fwschema.StringAttribute{
+ MarkdownDescription: "Path to a file containing the PEM encoded certificate for client auth",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.AlsoRequires(keyFilePath),
+					stringvalidator.ConflictsWith(certDataPath, keyDataPath),
+ },
+ },
+ "key_file": fwschema.StringAttribute{
+ MarkdownDescription: "Path to a file containing the PEM encoded private key for client auth",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.AlsoRequires(certFilePath),
+ stringvalidator.ConflictsWith(certDataPath, keyDataPath),
+ },
+ },
+ "cert_data": fwschema.StringAttribute{
+ MarkdownDescription: "PEM encoded certificate for client auth",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.AlsoRequires(keyDataPath),
+ stringvalidator.ConflictsWith(certFilePath, keyFilePath),
+ },
+ },
+ "key_data": fwschema.StringAttribute{
+ MarkdownDescription: "PEM encoded private key for client auth",
+ Optional: true,
+ Sensitive: true,
+ Validators: []validator.String{
+ stringvalidator.AlsoRequires(certDataPath),
+ stringvalidator.ConflictsWith(certFilePath, keyFilePath),
+ },
+ },
+ },
+ },
+ Validators: []validator.List{
+ listvalidator.SizeAtMost(1),
+ },
+ }
+}
+
+func GetKbFWConnectionBlock(keyName string) fwschema.Block {
+	usernamePath := path.MatchRelative().AtParent().AtName("username")
+	passwordPath := path.MatchRelative().AtParent().AtName("password")
+
+	usernameValidators := []validator.String{stringvalidator.AlsoRequires(passwordPath)}
+	passwordValidators := []validator.String{stringvalidator.AlsoRequires(usernamePath)}
+
+ return fwschema.ListNestedBlock{
+ MarkdownDescription: "Kibana connection configuration block.",
+ NestedObject: fwschema.NestedBlockObject{
+ Attributes: map[string]fwschema.Attribute{
+ "username": fwschema.StringAttribute{
+ MarkdownDescription: "Username to use for API authentication to Kibana.",
+ Optional: true,
+ Validators: usernameValidators,
+ },
+ "password": fwschema.StringAttribute{
+ MarkdownDescription: "Password to use for API authentication to Kibana.",
+ Optional: true,
+ Sensitive: true,
+ Validators: passwordValidators,
+ },
+ "endpoints": fwschema.ListAttribute{
+ MarkdownDescription: "A comma-separated list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.",
+ Optional: true,
+ Sensitive: true,
+ ElementType: types.StringType,
+ },
+ "insecure": fwschema.BoolAttribute{
+ MarkdownDescription: "Disable TLS certificate validation",
+ Optional: true,
+ },
+ },
+ },
+ Validators: []validator.List{
+ listvalidator.SizeAtMost(1),
+ },
+ }
+}
+
+func GetFleetFWConnectionBlock(keyName string) fwschema.Block {
+	usernamePath := path.MatchRelative().AtParent().AtName("username")
+	passwordPath := path.MatchRelative().AtParent().AtName("password")
+
+	usernameValidators := []validator.String{stringvalidator.AlsoRequires(passwordPath)}
+	passwordValidators := []validator.String{stringvalidator.AlsoRequires(usernamePath)}
+
+	return fwschema.ListNestedBlock{
+		MarkdownDescription: "Fleet connection configuration block.",
+		NestedObject: fwschema.NestedBlockObject{
+			Attributes: map[string]fwschema.Attribute{
+				"username": fwschema.StringAttribute{
+					MarkdownDescription: "Username to use for API authentication to Fleet.",
+					Optional:            true,
+					Validators:          usernameValidators,
+				},
+				"password": fwschema.StringAttribute{
+					MarkdownDescription: "Password to use for API authentication to Fleet.",
+					Optional:            true,
+					Sensitive:           true,
+					Validators:          passwordValidators,
+				},
+				"api_key": fwschema.StringAttribute{
+					MarkdownDescription: "API Key to use for authentication to Fleet.",
+					Optional:            true,
+					Sensitive:           true,
+					Validators: []validator.String{
+						stringvalidator.ConflictsWith(usernamePath),
+						stringvalidator.ConflictsWith(passwordPath),
+					},
+ },
+ "endpoint": fwschema.StringAttribute{
+ MarkdownDescription: "The Fleet server where the terraform provider will point to, this must include the http(s) schema and port number.",
+ Optional: true,
+ Sensitive: true,
+ },
+ "ca_certs": fwschema.ListAttribute{
+ MarkdownDescription: "A list of paths to CA certificates to validate the certificate presented by the Fleet server.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "insecure": fwschema.BoolAttribute{
+ MarkdownDescription: "Disable TLS certificate validation",
+ Optional: true,
+ },
+ },
+ },
+ Validators: []validator.List{
+ listvalidator.SizeAtMost(1),
+ },
+ }
+}
+
func GetEsConnectionSchema(keyName string, isProviderConfiguration bool) *schema.Schema {
usernamePath := makePathRef(keyName, "username")
passwordPath := makePathRef(keyName, "password")
@@ -20,11 +218,9 @@ func GetEsConnectionSchema(keyName string, isProviderConfiguration bool) *schema
passwordRequiredWithValidation := []string{usernamePath}
withEnvDefault := func(key string, dv interface{}) schema.SchemaDefaultFunc { return nil }
- deprecationMessage := "This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead."
if isProviderConfiguration {
withEnvDefault = func(key string, dv interface{}) schema.SchemaDefaultFunc { return schema.EnvDefaultFunc(key, dv) }
- deprecationMessage = ""
// RequireWith validation isn't compatible when used in conjunction with DefaultFunc
usernameRequiredWithValidation = nil
@@ -32,8 +228,8 @@ func GetEsConnectionSchema(keyName string, isProviderConfiguration bool) *schema
}
return &schema.Schema{
- Description: fmt.Sprintf("Elasticsearch connection configuration block. %s", deprecationMessage),
- Deprecated: deprecationMessage,
+ Description: fmt.Sprintf("Elasticsearch connection configuration block. %s", getDeprecationMessage(isProviderConfiguration)),
+ Deprecated: getDeprecationMessage(isProviderConfiguration),
Type: schema.TypeList,
MaxItems: 1,
Optional: true,
@@ -145,7 +341,7 @@ func GetKibanaConnectionSchema() *schema.Schema {
RequiredWith: []string{"kibana.0.username"},
},
"endpoints": {
- Description: "A list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.",
+ Description: "A comma-separated list of endpoints where the terraform provider will point to, this must include the http(s) schema and port number.",
Type: schema.TypeList,
Optional: true,
Sensitive: true,
@@ -187,7 +383,7 @@ func GetFleetConnectionSchema() *schema.Schema {
RequiredWith: []string{"fleet.0.username"},
},
"api_key": {
- Description: "API key to use for API authentication to Fleet.",
+ Description: "API Key to use for authentication to Fleet.",
Type: schema.TypeString,
Optional: true,
Sensitive: true,
@@ -220,3 +416,10 @@ func GetFleetConnectionSchema() *schema.Schema {
func makePathRef(keyName string, keyValue string) string {
return fmt.Sprintf("%s.0.%s", keyName, keyValue)
}
+
+func getDeprecationMessage(isProviderConfiguration bool) string {
+ if isProviderConfiguration {
+ return ""
+ }
+ return "This property will be removed in a future provider version. Configure the Elasticsearch connection via the provider configuration instead."
+}
diff --git a/provider/factory.go b/provider/factory.go
index cf7404f61..1c3745f7d 100644
--- a/provider/factory.go
+++ b/provider/factory.go
@@ -2,7 +2,9 @@ package provider
import (
"context"
+ "fmt"
+ "github.com/hashicorp/terraform-plugin-framework/providerserver"
"github.com/hashicorp/terraform-plugin-go/tfprotov5"
"github.com/hashicorp/terraform-plugin-mux/tf5muxserver"
)
@@ -10,14 +12,16 @@ import (
// ProtoV5ProviderServerFactory returns a muxed terraform-plugin-go protocol v5 provider factory function.
func ProtoV5ProviderServerFactory(ctx context.Context, version string) (func() tfprotov5.ProviderServer, error) {
sdkv2Provider := New(version)
+ frameworkProvider := providerserver.NewProtocol5(NewFrameworkProvider(version))
servers := []func() tfprotov5.ProviderServer{
+ frameworkProvider,
sdkv2Provider.GRPCProvider,
}
muxServer, err := tf5muxserver.NewMuxServer(ctx, servers...)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("initialize mux server: %w", err)
}
return muxServer.ProviderServer, nil
diff --git a/provider/factory_test.go b/provider/factory_test.go
new file mode 100644
index 000000000..568360e58
--- /dev/null
+++ b/provider/factory_test.go
@@ -0,0 +1,40 @@
+package provider
+
+import (
+	"context"
+	"testing"
+
+	"github.com/hashicorp/terraform-plugin-go/tfprotov5"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+)
+
+// TestMuxServer verifies the muxed SDKv2 + Plugin Framework server accepts a minimal configuration.
+func TestMuxServer(t *testing.T) {
+	const providerConfig = `
+	provider "elasticstack" {
+		elasticsearch {
+			username  = "sup"
+			password  = "dawg"
+			endpoints = ["http://localhost:9200"]
+		}
+	}
+	`
+	resource.Test(t, resource.TestCase{
+		ProtoV5ProviderFactories: map[string]func() (tfprotov5.ProviderServer, error){
+			"elasticstack": func() (tfprotov5.ProviderServer, error) {
+				version := "acceptance_test"
+				server, err := ProtoV5ProviderServerFactory(context.Background(), version)
+				if err != nil {
+					return nil, err
+				}
+
+				return server(), nil
+			},
+		},
+		Steps: []resource.TestStep{
+			{
+				Config: providerConfig,
+			},
+		},
+	})
+}
diff --git a/provider/plugin_framework.go b/provider/plugin_framework.go
new file mode 100644
index 000000000..ad5895141
--- /dev/null
+++ b/provider/plugin_framework.go
@@ -0,0 +1,68 @@
+package provider
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients/config"
+ "github.com/elastic/terraform-provider-elasticstack/internal/kibana/import_saved_objects"
+ "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+ "github.com/hashicorp/terraform-plugin-framework/datasource"
+ fwprovider "github.com/hashicorp/terraform-plugin-framework/provider"
+ fwschema "github.com/hashicorp/terraform-plugin-framework/provider/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+type Provider struct {
+ version string
+}
+
+// NewFrameworkProvider instantiates plugin framework's provider
+func NewFrameworkProvider(version string) fwprovider.Provider {
+ return &Provider{
+ version: version,
+ }
+}
+
+func (p *Provider) Metadata(_ context.Context, _ fwprovider.MetadataRequest, res *fwprovider.MetadataResponse) {
+ res.TypeName = "elasticstack"
+ res.Version = p.version
+}
+
+func (p *Provider) Schema(ctx context.Context, req fwprovider.SchemaRequest, res *fwprovider.SchemaResponse) {
+ res.Schema = fwschema.Schema{
+ Blocks: map[string]fwschema.Block{
+ esKeyName: schema.GetEsFWConnectionBlock(esKeyName),
+ kbKeyName: schema.GetKbFWConnectionBlock(kbKeyName),
+ fleetKeyName: schema.GetFleetFWConnectionBlock(fleetKeyName),
+ },
+ }
+}
+
+func (p *Provider) Configure(ctx context.Context, req fwprovider.ConfigureRequest, res *fwprovider.ConfigureResponse) {
+ var config config.ProviderConfiguration
+
+ res.Diagnostics.Append(req.Config.Get(ctx, &config)...)
+ if res.Diagnostics.HasError() {
+ return
+ }
+
+ client, diags := clients.NewApiClientFromFramework(ctx, config, p.version)
+ res.Diagnostics.Append(diags...)
+ if res.Diagnostics.HasError() {
+ return
+ }
+
+ res.DataSourceData = client
+ res.ResourceData = client
+}
+
+func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSource {
+ return []func() datasource.DataSource{}
+}
+
+func (p *Provider) Resources(ctx context.Context) []func() resource.Resource {
+ return []func() resource.Resource{
+ func() resource.Resource { return &import_saved_objects.Resource{} },
+ }
+}
diff --git a/provider/provider.go b/provider/provider.go
index 408e3b58a..beb174451 100644
--- a/provider/provider.go
+++ b/provider/provider.go
@@ -17,6 +17,8 @@ import (
)
const esKeyName = "elasticsearch"
+const kbKeyName = "kibana"
+const fleetKeyName = "fleet"
func init() {
// Set descriptions to support markdown syntax, this will be used in document generation
@@ -27,9 +29,9 @@ func init() {
func New(version string) *schema.Provider {
p := &schema.Provider{
Schema: map[string]*schema.Schema{
- esKeyName: providerSchema.GetEsConnectionSchema(esKeyName, true),
- "kibana": providerSchema.GetKibanaConnectionSchema(),
- "fleet": providerSchema.GetFleetConnectionSchema(),
+ esKeyName: providerSchema.GetEsConnectionSchema(esKeyName, true),
+ kbKeyName: providerSchema.GetKibanaConnectionSchema(),
+ fleetKeyName: providerSchema.GetFleetConnectionSchema(),
},
DataSourcesMap: map[string]*schema.Resource{
"elasticstack_elasticsearch_ingest_processor_append": ingest.DataSourceProcessorAppend(),
@@ -110,7 +112,7 @@ func New(version string) *schema.Provider {
},
}
- p.ConfigureContextFunc = clients.NewApiClientFunc(version)
+ p.ConfigureContextFunc = clients.NewApiClientFuncFromSDK(version)
return p
}
diff --git a/templates/resources/kibana_import_saved_objects.md.tmpl b/templates/resources/kibana_import_saved_objects.md.tmpl
new file mode 100644
index 000000000..7679d6f13
--- /dev/null
+++ b/templates/resources/kibana_import_saved_objects.md.tmpl
@@ -0,0 +1,21 @@
+---
+subcategory: "Kibana"
+layout: ""
+page_title: "Elasticstack: elasticstack_kibana_import_saved_objects Resource"
+description: |-
+ Create sets of Kibana saved objects from a file created by the export API.
+---
+
+# Resource: elasticstack_kibana_import_saved_objects
+
+Create sets of Kibana saved objects from a file created by the export API. See the [saved objects import API documentation](https://www.elastic.co/guide/en/kibana/current/saved-objects-api-import.html).
+
+## Example Usage
+
+{{ tffile "examples/resources/elasticstack_kibana_import_saved_objects/resource.tf" }}
+
+{{ .SchemaMarkdown | trimspace }}
+
+## Import
+
+Import is not supported.