diff --git a/go.mod b/go.mod index 144c296741..7d2a2f31b8 100644 --- a/go.mod +++ b/go.mod @@ -33,7 +33,7 @@ require ( github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/clb v1.0.283 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cloudaudit v1.0.199 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cls v1.0.412 - github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.438 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.443 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm v1.0.385 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cynosdb v1.0.359 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/dayu v1.0.335 @@ -46,7 +46,7 @@ require ( github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.199 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/lighthouse v1.0.413 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/mongodb v1.0.199 - github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.438 + github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.443 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/postgres v1.0.391 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/privatedns v1.0.290 github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/redis v1.0.199 diff --git a/go.sum b/go.sum index 27b7ff6a38..ea59f7be97 100644 --- a/go.sum +++ b/go.sum @@ -496,6 +496,8 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.430 h1:mGlG github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.430/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.438 h1:tz7YTepMgtehsPI7nTXEZ6W9eOaXR0rvLMtqwHDzhZ8= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.438/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.443 h1:5kS48/GyzW49t4eB8dkNAoDGMeVLEadfTrB1Wpr07qw= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.443/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm v1.0.385 h1:8bwloRxRwSADSK48KxaUeO9JHmmgniNGJbA7Or/HUEk= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cvm v1.0.385/go.mod h1:PMxA0L4o8Fbx/6+ju1cAMAU7x2bV4C6e/LTqVe745yM= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/cynosdb v1.0.359 h1:cNKqelPgUxrJkLY0Azd2QHr/UMYOPPnmqs88clt2akk= @@ -527,6 +529,8 @@ github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.430 h1:cnB github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.430/go.mod h1:Iw2Q3oExnpSR7pGnC/2nKfUaUS2GAniG1HKSfWU5Pgg= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.438 h1:zf+NkKb0fgurjgCdpApnLjip7bxU7fxejI8HW5pTHl8= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.438/go.mod h1:VgHc7QaLfYqdurDBb+HwXaDRuFbpsnZIKkjSPl748JA= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.443 h1:5uPNOJb4uC9oZuXzSv0GtQOGGBEWA9JA1Yhoyh+9mfk= +github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor v1.0.443/go.mod h1:QGyCdlNx2tu94GMlJDQAwz5eVFa2Gfq3JD0WZ65QqGE= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/postgres v1.0.391 h1:1yZh5MrlFqawVGhLdd38hi5HoaKc4LgB+8cEgLT2Qo4= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/postgres v1.0.391/go.mod h1:BZSQiBjNQ+6/gL1fFXBr/0BOuPTdHmSYoIg4/AkmZB4= 
github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/privatedns v1.0.290 h1:osYZxh9ApEc8UpvIMwjAUfdl7ytRcWUpcnnqLIpiJ/U= diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index f3885fcaae..83ef85d498 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -426,9 +426,12 @@ Monitor tencentcloud_monitor_alarm_policy tencentcloud_monitor_tmp_instance tencentcloud_monitor_tmp_cvm_agent + tencentcloud_monitor_tmp_scrape_job + tencentcloud_monitor_tmp_exporter_integration tencentcloud_monitor_tmp_alert_rule tencentcloud_monitor_tmp_recording_rule tencentcloud_monitor_tmp_tke_template + tencentcloud_monitor_tmp_tke_alert_policy PostgreSQL Data Source @@ -1073,9 +1076,12 @@ func Provider() terraform.ResourceProvider { "tencentcloud_monitor_alarm_policy": resourceTencentCloudMonitorAlarmPolicy(), "tencentcloud_monitor_tmp_instance": resourceTencentCloudMonitorTmpInstance(), "tencentcloud_monitor_tmp_cvm_agent": resourceTencentCloudMonitorTmpCvmAgent(), + "tencentcloud_monitor_tmp_scrape_job": resourceTencentCloudMonitorTmpScrapeJob(), + "tencentcloud_monitor_tmp_exporter_integration": resourceTencentCloudMonitorTmpExporterIntegration(), "tencentcloud_monitor_tmp_alert_rule": resourceTencentCloudMonitorTmpAlertRule(), "tencentcloud_monitor_tmp_recording_rule": resourceTencentCloudMonitorTmpRecordingRule(), "tencentcloud_monitor_tmp_tke_template": resourceTencentCloudMonitorTmpTkeTemplate(), + "tencentcloud_monitor_tmp_tke_alert_policy": resourceTencentCloudMonitorTmpTkeAlertPolicy(), "tencentcloud_mongodb_standby_instance": resourceTencentCloudMongodbStandbyInstance(), "tencentcloud_elasticsearch_instance": resourceTencentCloudElasticsearchInstance(), "tencentcloud_postgresql_instance": resourceTencentCloudPostgresqlInstance(), diff --git a/tencentcloud/resource_tc_cls_index.go b/tencentcloud/resource_tc_cls_index.go index 0d6cf471ad..d01f980814 100644 --- a/tencentcloud/resource_tc_cls_index.go +++ b/tencentcloud/resource_tc_cls_index.go @@ -95,6 +95,7 @@ func resourceTencentCloudClsIndex() *schema.Resource { Type: schema.TypeList, MaxItems: 1, Optional: true, + Computed: true, Description: "Index rule.", Elem: &schema.Resource{ Schema: map[string]*schema.Schema{ @@ -438,38 +439,54 @@ func resourceTencentCloudClsIndexRead(d *schema.ResourceData, meta interface{}) ruleMap := map[string]interface{}{} if res.Rule.FullText != nil { - fullTextMap := map[string]interface{}{ - "case_sensitive": res.Rule.FullText.CaseSensitive, - "tokenizer": res.Rule.FullText.Tokenizer, - "contain_z_h": res.Rule.FullText.ContainZH, + FullTextMap := map[string]interface{}{} + if res.Rule.FullText.CaseSensitive != nil { + FullTextMap["case_sensitive"] = res.Rule.FullText.CaseSensitive + } + if res.Rule.FullText.Tokenizer != nil { + FullTextMap["tokenizer"] = res.Rule.FullText.Tokenizer + } + if res.Rule.FullText.ContainZH != nil { + FullTextMap["contain_z_h"] = res.Rule.FullText.ContainZH } - ruleMap["full_text"] = []interface{}{fullTextMap} + + ruleMap["full_text"] = []interface{}{FullTextMap} } if res.Rule.KeyValue != nil { - ruleKeyValueMap := map[string]interface{}{ - "case_sensitive": res.Rule.KeyValue.CaseSensitive, + RuleKeyValueMap := map[string]interface{}{} + if res.Rule.KeyValue.CaseSensitive != nil { + RuleKeyValueMap["case_sensitive"] = res.Rule.KeyValue.CaseSensitive } + if res.Rule.KeyValue.KeyValues != nil { keyValuesList := []interface{}{} for _, keyValueInfo := range res.Rule.KeyValue.KeyValues { - keyValueInfoMap := map[string]interface{}{ - "key": keyValueInfo.Key, + 
keyValueInfoMap := map[string]interface{}{} + if keyValueInfo.Key != nil { + keyValueInfoMap["key"] = keyValueInfo.Key } if keyValueInfo.Value != nil { - valueInfoMap := map[string]interface{}{ - "type": keyValueInfo.Value.Type, - "tokenizer": keyValueInfo.Value.Tokenizer, - "sql_flag": keyValueInfo.Value.SqlFlag, - "contain_z_h": keyValueInfo.Value.ContainZH, + valueInfoMap := map[string]interface{}{} + if keyValueInfo.Value.Type != nil { + valueInfoMap["type"] = keyValueInfo.Value.Type + } + if keyValueInfo.Value.Tokenizer != nil { + valueInfoMap["tokenizer"] = keyValueInfo.Value.Tokenizer + } + if keyValueInfo.Value.SqlFlag != nil { + valueInfoMap["sql_flag"] = keyValueInfo.Value.SqlFlag + } + if keyValueInfo.Value.ContainZH != nil { + valueInfoMap["contain_z_h"] = keyValueInfo.Value.ContainZH } keyValueInfoMap["value"] = []interface{}{valueInfoMap} } keyValuesList = append(keyValuesList, keyValueInfoMap) } - ruleKeyValueMap["key_values"] = keyValuesList + RuleKeyValueMap["key_values"] = keyValuesList } - ruleMap["key_value"] = []interface{}{ruleKeyValueMap} + ruleMap["key_value"] = []interface{}{RuleKeyValueMap} } if res.Rule.Tag != nil { @@ -529,7 +546,7 @@ func resourceTencentCloudClsIndexUpdate(d *schema.ResourceData, meta interface{} request.TopicId = &id - if d.HasChange("rule") || d.HasChange("status") || d.HasChange("include_internal_fields") || d.HasChange("metadata_flag") { + if d.HasChange("rule") { if dMap, ok := helper.InterfacesHeadMap(d, "rule"); ok { ruleInfo := cls.RuleInfo{} if fullTextMap, ok := helper.InterfaceToMap(dMap, "full_text"); ok { @@ -618,34 +635,38 @@ func resourceTencentCloudClsIndexUpdate(d *schema.ResourceData, meta interface{} } request.Rule = &ruleInfo } + } + if d.HasChange("status") { if v, ok := d.GetOk("status"); ok { request.Status = helper.Bool(v.(bool)) } - + } + if d.HasChange("include_internal_fields") { if v, ok := d.GetOk("include_internal_fields"); ok { request.IncludeInternalFields = helper.Bool(v.(bool)) } - + } + if d.HasChange("metadata_flag") { if v, ok := d.GetOk("metadata_flag"); ok { request.MetadataFlag = helper.IntUint64(v.(int)) } - - err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { - result, e := meta.(*TencentCloudClient).apiV3Conn.UseClsClient().ModifyIndex(request) - if e != nil { - return retryError(e) - } else { - log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", - logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) - } - return nil - }) - - if err != nil { - return err + } + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseClsClient().ModifyIndex(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } + return nil + }) + + if err != nil { + return err } + return resourceTencentCloudClsIndexRead(d, meta) } diff --git a/tencentcloud/resource_tc_monitor_tmp_cvm_agent.go b/tencentcloud/resource_tc_monitor_tmp_cvm_agent.go index 90f0361a8f..6fb4e8ffe9 100644 --- a/tencentcloud/resource_tc_monitor_tmp_cvm_agent.go +++ b/tencentcloud/resource_tc_monitor_tmp_cvm_agent.go @@ -5,8 +5,8 @@ Example Usage ```hcl resource "tencentcloud_monitor_tmp_cvm_agent" "tmpCvmAgent" { - instance_id = "prom-c89b3b3u" - name = "test" + instance_id = "prom-dko9d0nu" + name = "agent" } ``` @@ -14,7 +14,7 @@ Import monitor tmpCvmAgent can 
be imported using the id, e.g. ``` -$ terraform import tencentcloud_monitor_tmp_cvm_agent.tmpCvmAgent instanceId#agentName +$ terraform import tencentcloud_monitor_tmp_cvm_agent.tmpCvmAgent instanceId#agentId ``` */ package tencentcloud @@ -35,7 +35,7 @@ func resourceTencentCloudMonitorTmpCvmAgent() *schema.Resource { return &schema.Resource{ Read: resourceTencentCloudMonitorTmpCvmAgentRead, Create: resourceTencentCloudMonitorTmpCvmAgentCreate, - Update: resourceTencentCloudMonitorTmpCvmAgentUpdate, + //Update: resourceTencentCloudMonitorTmpCvmAgentUpdate, Delete: resourceTencentCloudMonitorTmpCvmAgentDelete, Importer: &schema.ResourceImporter{ State: schema.ImportStatePassthrough, @@ -44,11 +44,14 @@ func resourceTencentCloudMonitorTmpCvmAgent() *schema.Resource { "instance_id": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: "Instance id.", }, + "name": { Type: schema.TypeString, Required: true, + ForceNew: true, Description: "Agent name.", }, }, @@ -62,20 +65,19 @@ func resourceTencentCloudMonitorTmpCvmAgentCreate(d *schema.ResourceData, meta i logId := getLogId(contextNil) var ( - request = monitor.NewCreatePrometheusAgentRequest() - //response *monitor.CreatePrometheusAgentResponse + request = monitor.NewCreatePrometheusAgentRequest() + response *monitor.CreatePrometheusAgentResponse ) var instanceId string - var agentName string if v, ok := d.GetOk("instance_id"); ok { instanceId = v.(string) request.InstanceId = helper.String(instanceId) } + if v, ok := d.GetOk("name"); ok { - agentName = v.(string) - request.Name = helper.String(agentName) + request.Name = helper.String(v.(string)) } err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { @@ -86,7 +88,7 @@ func resourceTencentCloudMonitorTmpCvmAgentCreate(d *schema.ResourceData, meta i log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) } - //response = result + response = result return nil }) @@ -95,13 +97,14 @@ func resourceTencentCloudMonitorTmpCvmAgentCreate(d *schema.ResourceData, meta i return err } - d.SetId(strings.Join([]string{instanceId, agentName}, FILED_SP)) + tmpCvmAgentId := *response.Response.AgentId + d.SetId(strings.Join([]string{instanceId, tmpCvmAgentId}, FILED_SP)) return resourceTencentCloudMonitorTmpCvmAgentRead(d, meta) } func resourceTencentCloudMonitorTmpCvmAgentRead(d *schema.ResourceData, meta interface{}) error { - defer logElapsed("resource.tencentcloud_monitor_tmp_cvm_agent.read")() + defer logElapsed("resource.tencentcloud_monitor_tmpCvmAgent.read")() defer inconsistentCheck(d, meta)() logId := getLogId(contextNil) @@ -114,7 +117,7 @@ func resourceTencentCloudMonitorTmpCvmAgentRead(d *schema.ResourceData, meta int return fmt.Errorf("id is broken, id is %s", d.Id()) } - tmpCvmAgent, err := service.DescribeMonitorTmpCvmAgentById(ctx, ids[0], ids[1]) + tmpCvmAgent, err := service.DescribeMonitorTmpCvmAgent(ctx, ids[0], ids[1]) if err != nil { return err @@ -122,12 +125,13 @@ func resourceTencentCloudMonitorTmpCvmAgentRead(d *schema.ResourceData, meta int if tmpCvmAgent == nil { d.SetId("") - return fmt.Errorf("resource `tencentcloud_monitor_tmp_cvm_agent` does not exist") + return fmt.Errorf("resource `tmpCvmAgent` %s does not exist", ids[1]) } if tmpCvmAgent.InstanceId != nil { _ = d.Set("instance_id", tmpCvmAgent.InstanceId) } + if tmpCvmAgent.Name != nil { _ = d.Set("name", tmpCvmAgent.Name) } @@ -146,5 +150,5 @@ func 
resourceTencentCloudMonitorTmpCvmAgentDelete(d *schema.ResourceData, meta i defer logElapsed("resource.tencentcloud_monitor_tmp_cvm_agent.delete")() defer inconsistentCheck(d, meta)() - return fmt.Errorf("resource `tencentcloud_monitor_tmp_cvm_agent` does not support delete") + return nil } diff --git a/tencentcloud/resource_tc_monitor_tmp_exporter_integration.go b/tencentcloud/resource_tc_monitor_tmp_exporter_integration.go new file mode 100644 index 0000000000..aee9c4c18f --- /dev/null +++ b/tencentcloud/resource_tc_monitor_tmp_exporter_integration.go @@ -0,0 +1,229 @@ +/* +Provides a resource to create a monitor tmpExporterIntegration + +Example Usage + +```hcl +resource "tencentcloud_monitor_tmp_exporter_integration" "tmpExporterIntegration" { + instance_id = "prom-dko9d0nu" + kind = "blackbox-exporter" + content = "{\"name\":\"test\",\"kind\":\"blackbox-exporter\",\"spec\":{\"instanceSpec\":{\"module\":\"http_get\",\"urls\":[\"xx\"]}}}" + kube_type = 1 + cluster_id = "cls-bmuaukfu" +} + +``` +*/ +package tencentcloud + +import ( + "context" + "fmt" + "log" + "strconv" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + monitor "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func resourceTencentCloudMonitorTmpExporterIntegration() *schema.Resource { + return &schema.Resource{ + Read: resourceTencentCloudMonitorTmpExporterIntegrationRead, + Create: resourceTencentCloudMonitorTmpExporterIntegrationCreate, + Update: resourceTencentCloudMonitorTmpExporterIntegrationUpdate, + Delete: resourceTencentCloudMonitorTmpExporterIntegrationDelete, + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Instance id.", + }, + + "kind": { + Type: schema.TypeString, + Required: true, + Description: "Type.", + }, + + "content": { + Type: schema.TypeString, + Required: true, + Description: "Integration config.", + }, + + "kube_type": { + Type: schema.TypeInt, + Required: true, + Description: "Kubernetes cluster type.", + }, + + "cluster_id": { + Type: schema.TypeString, + Required: true, + Description: "Cluster ID.", + }, + }, + } +} + +func resourceTencentCloudMonitorTmpExporterIntegrationCreate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_exporter_integration.create")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + var ( + instanceId string + kubeType int + clusterId string + ) + + var ( + request = monitor.NewCreateExporterIntegrationRequest() + response *monitor.CreateExporterIntegrationResponse + ) + + if v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + request.InstanceId = helper.String(instanceId) + } + + if v, ok := d.GetOk("kind"); ok { + request.Kind = helper.String(v.(string)) + } + + if v, ok := d.GetOk("content"); ok { + request.Content = helper.String(v.(string)) + } + + if v, ok := d.GetOk("kube_type"); ok { + kubeType = v.(int) + request.KubeType = helper.IntInt64(kubeType) + } + + if v, ok := d.GetOk("cluster_id"); ok { + clusterId = v.(string) + request.ClusterId = helper.String(clusterId) + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().CreateExporterIntegration(request) + if e != nil { + return retryError(e) + } else { + 
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + + if err != nil { + log.Printf("[CRITAL]%s create monitor tmpExporterIntegration failed, reason:%+v", logId, err) + return err + } + + tmpExporterIntegrationId := *response.Response.Names[0] + + d.SetId(strings.Join([]string{tmpExporterIntegrationId, instanceId, strconv.Itoa(kubeType), clusterId}, FILED_SP)) + + return resourceTencentCloudMonitorTmpExporterIntegrationRead(d, meta) +} + +func resourceTencentCloudMonitorTmpExporterIntegrationRead(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmpExporterIntegration.read")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} + + tmpExporterIntegrationId := d.Id() + + tmpExporterIntegration, err := service.DescribeMonitorTmpExporterIntegration(ctx, tmpExporterIntegrationId) + + if err != nil { + return err + } + + if tmpExporterIntegration == nil { + d.SetId("") + return fmt.Errorf("resource `tmpExporterIntegration` %s does not exist", tmpExporterIntegrationId) + } + + if tmpExporterIntegration.Kind != nil { + _ = d.Set("kind", tmpExporterIntegration.Kind) + } + + if tmpExporterIntegration.Content != nil { + _ = d.Set("content", tmpExporterIntegration.Content) + } + + return nil +} + +func resourceTencentCloudMonitorTmpExporterIntegrationUpdate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_exporter_integration.update")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + request := monitor.NewUpdateExporterIntegrationRequest() + + if v, ok := d.GetOk("instance_id"); ok { + request.InstanceId = helper.String(v.(string)) + } + + if v, ok := d.GetOk("kind"); ok { + request.Kind = helper.String(v.(string)) + } + + if v, ok := d.GetOk("content"); ok { + request.Content = helper.String(v.(string)) + } + + if v, ok := d.GetOk("kube_type"); ok { + request.KubeType = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("cluster_id"); ok { + request.ClusterId = helper.String(v.(string)) + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().UpdateExporterIntegration(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + + if err != nil { + return err + } + + return resourceTencentCloudMonitorTmpExporterIntegrationRead(d, meta) +} + +func resourceTencentCloudMonitorTmpExporterIntegrationDelete(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_exporter_integration.delete")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} + tmpExporterIntegrationId := d.Id() + + if err := service.DeleteMonitorTmpExporterIntegrationById(ctx, tmpExporterIntegrationId); err != nil { + return err + } + + return nil +} diff --git a/tencentcloud/resource_tc_monitor_tmp_instance.go 
b/tencentcloud/resource_tc_monitor_tmp_instance.go index 3d3b0b47bb..1d2e808616 100644 --- a/tencentcloud/resource_tc_monitor_tmp_instance.go +++ b/tencentcloud/resource_tc_monitor_tmp_instance.go @@ -5,17 +5,20 @@ Example Usage ```hcl resource "tencentcloud_monitor_tmp_instance" "tmpInstance" { - instance_name = "logset-hello" + instance_name = "demo" vpc_id = "vpc-2hfyray3" subnet_id = "subnet-rdkj0agk" data_retention_time = 30 zone = "ap-guangzhou-3" + tags = { + "createdBy" = "terraform" + } } ``` Import -monitor tmp instance can be imported using the id, e.g. +monitor tmpInstance can be imported using the id, e.g. ``` $ terraform import tencentcloud_monitor_tmp_instance.tmpInstance tmpInstance_id ``` @@ -48,30 +51,35 @@ func resourceTencentCloudMonitorTmpInstance() *schema.Resource { Required: true, Description: "Instance name.", }, + "vpc_id": { Type: schema.TypeString, Required: true, Description: "Vpc Id.", }, + "subnet_id": { Type: schema.TypeString, Required: true, Description: "Subnet Id.", }, + "data_retention_time": { Type: schema.TypeInt, Required: true, Description: "Data retention time.", }, + "zone": { Type: schema.TypeString, Required: true, Description: "Available zone.", }, - "grafana_instance_id": { - Type: schema.TypeString, + + "tags": { + Type: schema.TypeMap, Optional: true, - Description: "Associated grafana instance id.", + Description: "Tag description list.", }, }, } @@ -91,21 +99,22 @@ func resourceTencentCloudMonitorTmpInstanceCreate(d *schema.ResourceData, meta i if v, ok := d.GetOk("instance_name"); ok { request.InstanceName = helper.String(v.(string)) } + if v, ok := d.GetOk("vpc_id"); ok { request.VpcId = helper.String(v.(string)) } + if v, ok := d.GetOk("subnet_id"); ok { request.SubnetId = helper.String(v.(string)) } + if v, ok := d.GetOk("data_retention_time"); ok { request.DataRetentionTime = helper.IntInt64(v.(int)) } + if v, ok := d.GetOk("zone"); ok { request.Zone = helper.String(v.(string)) } - if v, ok := d.GetOk("grafana_instance_id"); ok { - request.GrafanaInstanceId = helper.String(v.(string)) - } err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().CreatePrometheusMultiTenantInstancePostPayMode(request) @@ -125,13 +134,41 @@ func resourceTencentCloudMonitorTmpInstanceCreate(d *schema.ResourceData, meta i } tmpInstanceId := *response.Response.InstanceId - d.SetId(tmpInstanceId) + service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + err = resource.Retry(1*readRetryTimeout, func() *resource.RetryError { + instance, errRet := service.DescribeMonitorTmpInstance(ctx, tmpInstanceId) + if errRet != nil { + return retryError(errRet, InternalError) + } + if *instance.InstanceStatus == 2 { + return nil + } + if *instance.InstanceStatus == 3 { + return resource.NonRetryableError(fmt.Errorf("tmpInstance status is %v, operate failed.", *instance.InstanceStatus)) + } + return resource.RetryableError(fmt.Errorf("tmpInstance status is %v, retry...", *instance.InstanceStatus)) + }) + if err != nil { + return err + } + + if tags := helper.GetTags(d, "tags"); len(tags) > 0 { + tagService := TagService{client: meta.(*TencentCloudClient).apiV3Conn} + region := meta.(*TencentCloudClient).apiV3Conn.Region + resourceName := fmt.Sprintf("qcs::monitor:%s:uin/:prom-instance/%s", region, tmpInstanceId) + if err := tagService.ModifyTags(ctx, resourceName, tags, nil); err != nil { + return err + } + } + 
d.SetId(tmpInstanceId) return resourceTencentCloudMonitorTmpInstanceRead(d, meta) } func resourceTencentCloudMonitorTmpInstanceRead(d *schema.ResourceData, meta interface{}) error { - defer logElapsed("resource.tencentcloud_monitor_tmp_instance.read")() + defer logElapsed("resource.tencentcloud_monitor_tmpInstance.read")() defer inconsistentCheck(d, meta)() logId := getLogId(contextNil) @@ -141,7 +178,7 @@ func resourceTencentCloudMonitorTmpInstanceRead(d *schema.ResourceData, meta int tmpInstanceId := d.Id() - tmpInstance, err := service.DescribeMonitorTmpInstanceById(ctx, tmpInstanceId) + tmpInstance, err := service.DescribeMonitorTmpInstance(ctx, tmpInstanceId) if err != nil { return err @@ -155,21 +192,30 @@ func resourceTencentCloudMonitorTmpInstanceRead(d *schema.ResourceData, meta int if tmpInstance.InstanceName != nil { _ = d.Set("instance_name", tmpInstance.InstanceName) } + if tmpInstance.VpcId != nil { _ = d.Set("vpc_id", tmpInstance.VpcId) } + if tmpInstance.SubnetId != nil { _ = d.Set("subnet_id", tmpInstance.SubnetId) } + if tmpInstance.DataRetentionTime != nil { _ = d.Set("data_retention_time", tmpInstance.DataRetentionTime) } + if tmpInstance.Zone != nil { _ = d.Set("zone", tmpInstance.Zone) } - if tmpInstance.GrafanaInstanceId != nil { - _ = d.Set("grafana_instance_id", tmpInstance.GrafanaInstanceId) + + tcClient := meta.(*TencentCloudClient).apiV3Conn + tagService := &TagService{client: tcClient} + tags, err := tagService.DescribeResourceTags(ctx, "monitor", "prom-instance", tcClient.Region, d.Id()) + if err != nil { + return err } + _ = d.Set("tags", tags) return nil } @@ -179,15 +225,22 @@ func resourceTencentCloudMonitorTmpInstanceUpdate(d *schema.ResourceData, meta i defer inconsistentCheck(d, meta)() logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) request := monitor.NewModifyPrometheusInstanceAttributesRequest() request.InstanceId = helper.String(d.Id()) - if d.HasChange("instance_name") { - if v, ok := d.GetOk("instance_name"); ok { - request.InstanceName = helper.String(v.(string)) - } + if v, ok := d.GetOk("instance_name"); ok { + request.InstanceName = helper.String(v.(string)) + } + + if d.HasChange("vpc_id") { + return fmt.Errorf("`vpc_id` does not support change now.") + } + + if d.HasChange("subnet_id") { + return fmt.Errorf("`subnet_id` does not support change now.") } if d.HasChange("data_retention_time") { @@ -196,6 +249,10 @@ func resourceTencentCloudMonitorTmpInstanceUpdate(d *schema.ResourceData, meta i } } + if d.HasChange("zone") { + return fmt.Errorf("`zone` does not support change now.") + } + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().ModifyPrometheusInstanceAttributes(request) if e != nil { @@ -211,6 +268,17 @@ func resourceTencentCloudMonitorTmpInstanceUpdate(d *schema.ResourceData, meta i return err } + if d.HasChange("tags") { + tcClient := meta.(*TencentCloudClient).apiV3Conn + tagService := &TagService{client: tcClient} + oldTags, newTags := d.GetChange("tags") + replaceTags, deleteTags := diffTags(oldTags.(map[string]interface{}), newTags.(map[string]interface{})) + resourceName := BuildTagResourceName("monitor", "prom-instance", tcClient.Region, d.Id()) + if err := tagService.ModifyTags(ctx, resourceName, replaceTags, deleteTags); err != nil { + return err + } + } + return resourceTencentCloudMonitorTmpInstanceRead(d, meta) } @@ -222,11 +290,31 @@ func resourceTencentCloudMonitorTmpInstanceDelete(d 
*schema.ResourceData, meta i ctx := context.WithValue(context.TODO(), logIdKey, logId) service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} - id := d.Id() + tmpInstanceId := d.Id() + + if err := service.IsolateMonitorTmpInstanceById(ctx, tmpInstanceId); err != nil { + return err + } - if err := service.DeleteMonitorTmpInstance(ctx, id); err != nil { + err := resource.Retry(1*readRetryTimeout, func() *resource.RetryError { + instance, errRet := service.DescribeMonitorTmpInstance(ctx, tmpInstanceId) + if errRet != nil { + return retryError(errRet, InternalError) + } + if *instance.InstanceStatus == 6 { + return nil + } + if *instance.InstanceStatus == 3 { + return resource.NonRetryableError(fmt.Errorf("tmpInstance status is %v, operate failed.", *instance.InstanceStatus)) + } + return resource.RetryableError(fmt.Errorf("tmpInstance status is %v, retry...", *instance.InstanceStatus)) + }) + if err != nil { return err } + if err := service.DeleteMonitorTmpInstanceById(ctx, tmpInstanceId); err != nil { + return err + } return nil } diff --git a/tencentcloud/resource_tc_monitor_tmp_scrape_job.go b/tencentcloud/resource_tc_monitor_tmp_scrape_job.go new file mode 100644 index 0000000000..675fe164fc --- /dev/null +++ b/tencentcloud/resource_tc_monitor_tmp_scrape_job.go @@ -0,0 +1,217 @@ +/* +Provides a resource to create a monitor tmpScrapeJob + +Example Usage + +```hcl +resource "tencentcloud_monitor_tmp_scrape_job" "tmpScrapeJob" { + instance_id = "prom-dko9d0nu" + agent_id = "agent-6a7g40k2" + config = <<-EOT +job_name: demo-config +honor_timestamps: true +metrics_path: /metrics +scheme: https +EOT +} + +``` +Import + +monitor tmpScrapeJob can be imported using the id, e.g. +``` +$ terraform import tencentcloud_monitor_tmp_scrape_job.tmpScrapeJob jobId#instanceId#agentId +``` +*/ +package tencentcloud + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + monitor "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func resourceTencentCloudMonitorTmpScrapeJob() *schema.Resource { + return &schema.Resource{ + Read: resourceTencentCloudMonitorTmpScrapeJobRead, + Create: resourceTencentCloudMonitorTmpScrapeJobCreate, + Update: resourceTencentCloudMonitorTmpScrapeJobUpdate, + Delete: resourceTencentCloudMonitorTmpScrapeJobDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Instance id.", + }, + + "agent_id": { + Type: schema.TypeString, + Required: true, + Description: "Agent id.", + }, + + "config": { + Type: schema.TypeString, + Optional: true, + Description: "Job content.", + }, + }, + } +} + +func resourceTencentCloudMonitorTmpScrapeJobCreate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_scrape_job.create")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + var instanceId string + var agentId string + + var ( + request = monitor.NewCreatePrometheusScrapeJobRequest() + response *monitor.CreatePrometheusScrapeJobResponse + ) + + if v, ok := d.GetOk("instance_id"); ok { + instanceId = v.(string) + request.InstanceId = helper.String(instanceId) + } + + if v, ok := d.GetOk("agent_id"); ok { + agentId = v.(string) 
+ request.AgentId = helper.String(agentId) + } + + if v, ok := d.GetOk("config"); ok { + request.Config = helper.String(v.(string)) + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().CreatePrometheusScrapeJob(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + + if err != nil { + log.Printf("[CRITAL]%s create monitor tmpScrapeJob failed, reason:%+v", logId, err) + return err + } + + tmpScrapeJobId := *response.Response.JobId + + d.SetId(strings.Join([]string{tmpScrapeJobId, instanceId, agentId}, FILED_SP)) + + return resourceTencentCloudMonitorTmpScrapeJobRead(d, meta) +} + +func resourceTencentCloudMonitorTmpScrapeJobRead(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmpScrapeJob.read")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} + + tmpScrapeJobId := d.Id() + + tmpScrapeJob, err := service.DescribeMonitorTmpScrapeJob(ctx, tmpScrapeJobId) + + if err != nil { + return err + } + + if tmpScrapeJob == nil { + d.SetId("") + return fmt.Errorf("resource `tmpScrapeJob` %s does not exist", tmpScrapeJobId) + } + + if tmpScrapeJob.AgentId != nil { + _ = d.Set("agent_id", tmpScrapeJob.AgentId) + } + + if tmpScrapeJob.Config != nil { + _ = d.Set("config", tmpScrapeJob.Config) + } + + return nil +} + +func resourceTencentCloudMonitorTmpScrapeJobUpdate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_scrape_job.update")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + request := monitor.NewUpdatePrometheusScrapeJobRequest() + + ids := strings.Split(d.Id(), FILED_SP) + + request.JobId = &ids[0] + request.InstanceId = &ids[1] + request.AgentId = &ids[2] + + if d.HasChange("instance_id") { + return fmt.Errorf("`instance_id` does not support change now.") + } + + if d.HasChange("agent_id") { + return fmt.Errorf("`agent_id` does not support change now.") + } + + if d.HasChange("config") { + if v, ok := d.GetOk("config"); ok { + request.Config = helper.String(v.(string)) + } + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseMonitorClient().UpdatePrometheusScrapeJob(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + + if err != nil { + return err + } + + return resourceTencentCloudMonitorTmpScrapeJobRead(d, meta) +} + +func resourceTencentCloudMonitorTmpScrapeJobDelete(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_monitor_tmp_scrape_job.delete")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := MonitorService{client: meta.(*TencentCloudClient).apiV3Conn} + tmpScrapeJobId := d.Id() + + if err := service.DeleteMonitorTmpScrapeJobById(ctx, tmpScrapeJobId); err != nil { + return err + } + + return nil +} 
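Note on the ID scheme shared by the new resources above: tmp_cvm_agent, tmp_scrape_job and tmp_exporter_integration all pack their parent identifiers into a single Terraform state ID joined with FILED_SP, and the read/update/delete paths split that ID back apart before calling the API. Below is a minimal self-contained sketch of the round trip, assuming FILED_SP is the provider's `#` field separator; the IDs shown are placeholders.

```go
package main

import (
	"fmt"
	"strings"
)

// FILED_SP mirrors the provider's composite-ID field separator (assumed "#").
const FILED_SP = "#"

// buildScrapeJobId mirrors the d.SetId call in the create path: the job ID is
// joined with the instance and agent IDs so one state ID carries all three.
func buildScrapeJobId(jobId, instanceId, agentId string) string {
	return strings.Join([]string{jobId, instanceId, agentId}, FILED_SP)
}

// parseScrapeJobId mirrors the strings.Split calls in the read/update/delete
// paths, with an explicit length check before the segments are indexed.
func parseScrapeJobId(id string) (jobId, instanceId, agentId string, err error) {
	ids := strings.Split(id, FILED_SP)
	if len(ids) != 3 {
		return "", "", "", fmt.Errorf("id is broken, id is %s", id)
	}
	return ids[0], ids[1], ids[2], nil
}

func main() {
	id := buildScrapeJobId("job-placeholder", "prom-dko9d0nu", "agent-6a7g40k2")
	fmt.Println(id) // job-placeholder#prom-dko9d0nu#agent-6a7g40k2

	jobId, instanceId, agentId, err := parseScrapeJobId(id)
	if err != nil {
		panic(err)
	}
	fmt.Println(jobId, instanceId, agentId)
}
```

Validating the segment count up front matters because ImportStatePassthrough accepts any string, so a malformed import ID would otherwise surface as an index-out-of-range panic in the read path.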
diff --git a/tencentcloud/resource_tc_monitor_tmp_tke_alert_policy.go b/tencentcloud/resource_tc_monitor_tmp_tke_alert_policy.go new file mode 100644 index 0000000000..7cfa7003e7 --- /dev/null +++ b/tencentcloud/resource_tc_monitor_tmp_tke_alert_policy.go @@ -0,0 +1,515 @@ +/* +Provides a resource to create a tke tmpAlertPolicy + +Example Usage + +```hcl + +resource "tencentcloud_monitor_tmp_tke_alert_policy" "tmpAlertPolicy" { + instance_id = "xxxxx" + alert_rule { + name = "xxx" + rules { + name = "xx" + rule = "xx" + template = "xx" + for = "xx" + labels { + name = "xx" + value = "xx" + } + annotations { + name = "xx" + value = "xx" + } + } + notification { + type = "xx" + enabled = true + alert_manager { + url = "xx" + cluster_id = "xx" + cluster_type = "xx" + } + } + } +} + +``` +*/ +package tencentcloud + +import ( + "context" + "fmt" + "log" + + "github.com/hashicorp/terraform-plugin-sdk/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/helper/schema" + tke "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func resourceTencentCloudMonitorTmpTkeAlertPolicy() *schema.Resource { + return &schema.Resource{ + Read: resourceTencentCloudTkeTmpAlertPolicyRead, + Create: resourceTencentCloudTkeTmpAlertPolicyCreate, + Update: resourceTencentCloudTkeTmpAlertPolicyUpdate, + Delete: resourceTencentCloudTkeTmpAlertPolicyDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "instance_id": { + Type: schema.TypeString, + Required: true, + Description: "Instance Id.", + }, + + "alert_rule": { + Type: schema.TypeList, + MaxItems: 1, + Required: true, + Description: "Alarm rule configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Policy name.", + }, + "rules": { + Type: schema.TypeList, + Required: true, + Description: "A list of rules.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Rule name.", + }, + "rule": { + Type: schema.TypeString, + Required: true, + Description: "Prometheus statement.", + }, + "labels": { + Required: true, + Description: "Extra labels.", + Type: schema.TypeList, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of map.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value of map.", + }, + }, + }, + }, + "template": { + Type: schema.TypeString, + Required: true, + Description: "Alert sending template.", + }, + "for": { + Type: schema.TypeString, + Required: true, + Description: "Duration for which the rule condition must hold before the alarm fires.", + }, + "describe": { + Type: schema.TypeString, + Optional: true, + Description: "A description of the rule.", + }, + "annotations": { + Type: schema.TypeList, + Optional: true, + Description: "Refer to annotations in prometheus rule.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: "Name of map.", + }, + "value": { + Type: schema.TypeString, + Required: true, + Description: "Value of map.", + }, + }, + }, + }, + "rule_state": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm rule status.", + }, + }, + }, + }, + "id": { + Type: schema.TypeString, + Optional: true, + 
Description: "Alarm policy ID. Note: This field may return null, indicating that a valid value could not be retrieved.", + }, + "template_id": { + Type: schema.TypeString, + Optional: true, + Description: "If the alarm is sent from a template, the TemplateId is the template id.", + }, + "notification": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "Alarm channels, which may be returned using null in the template.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "enabled": { + Type: schema.TypeBool, + Required: true, + Description: "Whether it is enabled.", + }, + "type": { + Type: schema.TypeString, + Required: true, + Description: "The channel type, which defaults to amp, supports the following `amp`, `webhook`, `alertmanager`.", + }, + "web_hook": { + Type: schema.TypeString, + Optional: true, + Description: "If Type is webhook, the field is required. Note: This field may return null, indicating that a valid value could not be retrieved.", + }, + "alert_manager": { + Type: schema.TypeList, + MaxItems: 1, + Optional: true, + Description: "If Type is alertmanager, the field is required. Note: This field may return null, indicating that a valid value could not be retrieved..", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "url": { + Type: schema.TypeString, + Required: true, + Description: "Alertmanager url.", + }, + "cluster_type": { + Type: schema.TypeString, + Optional: true, + Description: "Alertmanager is deployed in the cluster type. Note: This field may return null, indicating that a valid value could not be retrieved.", + }, + "cluster_id": { + Type: schema.TypeString, + Optional: true, + Description: "The ID of the cluster where the alertmanager is deployed. Note: This field may return null, indicating that a valid value could not be retrieved.", + }, + }, + }, + }, + "repeat_interval": { + Type: schema.TypeString, + Optional: true, + Description: "Convergence time.", + }, + "time_range_start": { + Type: schema.TypeString, + Optional: true, + Description: "The time from which it takes effect.", + }, + "time_range_end": { + Type: schema.TypeString, + Optional: true, + Description: "Effective end time.", + }, + "notify_way": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + Description: "Alarm notification method. At present, there are SMS, EMAIL, CALL, WECHAT methods.", + }, + "receiver_groups": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + Optional: true, + Description: "Alert Receiving Group (User Group).", + }, + "phone_notify_order": { + Type: schema.TypeSet, + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + Optional: true, + Description: "Telephone alarm sequence.", + }, + "phone_circle_times": { + Type: schema.TypeInt, + Optional: true, + Description: "PhoneCircleTimes.", + }, + "phone_inner_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Telephone alarm wheel intervals. Units: Seconds.", + }, + "phone_circle_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Effective end timeTelephone alarm wheel interval. 
Units: Seconds.", + }, + "phone_arrive_notice": { + Type: schema.TypeBool, + Optional: true, + Description: "Telephone alerts reach notifications.", + }, + }, + }, + }, + "updated_at": { + Type: schema.TypeString, + Optional: true, + Description: "Last modified time.", + }, + "cluster_id": { + Type: schema.TypeString, + Optional: true, + Description: "If the alarm policy is derived from the CRD resource definition of the user cluster, the ClusterId is the cluster ID to which it belongs.", + }, + }, + }, + }, + }, + } +} + +func resourceTencentCloudTkeTmpAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_tke_tmp_alert_policy.create")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + + var ( + request = tke.NewCreatePrometheusAlertPolicyRequest() + response *tke.CreatePrometheusAlertPolicyResponse + ) + + if v, ok := d.GetOk("instance_id"); ok { + request.InstanceId = helper.String(v.(string)) + } + + if dMap, ok := helper.InterfacesHeadMap(d, "alert_rule"); ok { + prometheusAlertPolicyItem := tke.PrometheusAlertPolicyItem{} + if v, ok := dMap["name"]; ok { + prometheusAlertPolicyItem.Name = helper.String(v.(string)) + } + if v, ok := dMap["rules"]; ok { + for _, item := range v.([]interface{}) { + RulesMap := item.(map[string]interface{}) + prometheusAlertRule := tke.PrometheusAlertRule{} + if v, ok := RulesMap["name"]; ok { + prometheusAlertRule.Name = helper.String(v.(string)) + } + if v, ok := RulesMap["rule"]; ok { + prometheusAlertRule.Rule = helper.String(v.(string)) + } + if v, ok := RulesMap["template"]; ok { + prometheusAlertRule.Template = helper.String(v.(string)) + } + if v, ok := RulesMap["for"]; ok { + prometheusAlertRule.For = helper.String(v.(string)) + } + if v, ok := RulesMap["describe"]; ok { + prometheusAlertRule.Describe = helper.String(v.(string)) + } + if v, ok := RulesMap["annotations"]; ok { + for _, item := range v.([]interface{}) { + AnnotationsMap := item.(map[string]interface{}) + label := tke.Label{} + if v, ok := AnnotationsMap["name"]; ok { + label.Name = helper.String(v.(string)) + } + if v, ok := AnnotationsMap["value"]; ok { + label.Value = helper.String(v.(string)) + } + prometheusAlertRule.Annotations = append(prometheusAlertRule.Annotations, &label) + } + } + if v, ok := RulesMap["rule_state"]; ok { + prometheusAlertRule.RuleState = helper.IntInt64(v.(int)) + } + prometheusAlertPolicyItem.Rules = append(prometheusAlertPolicyItem.Rules, &prometheusAlertRule) + } + } + if v, ok := dMap["id"]; ok { + prometheusAlertPolicyItem.Id = helper.String(v.(string)) + } + if v, ok := dMap["template_id"]; ok { + prometheusAlertPolicyItem.TemplateId = helper.String(v.(string)) + } + if NotificationMap, ok := helper.InterfaceToMap(dMap, "notification"); ok { + prometheusNotificationItem := tke.PrometheusNotificationItem{} + if v, ok := NotificationMap["enabled"]; ok { + prometheusNotificationItem.Enabled = helper.Bool(v.(bool)) + } + if v, ok := NotificationMap["type"]; ok { + prometheusNotificationItem.Type = helper.String(v.(string)) + } + if v, ok := NotificationMap["web_hook"]; ok { + prometheusNotificationItem.WebHook = helper.String(v.(string)) + } + if AlertManagerMap, ok := helper.InterfaceToMap(NotificationMap, "alert_manager"); ok { + prometheusAlertManagerConfig := tke.PrometheusAlertManagerConfig{} + if v, ok := AlertManagerMap["url"]; ok { + prometheusAlertManagerConfig.Url = helper.String(v.(string)) + } + if v, ok := AlertManagerMap["cluster_type"]; ok { + 
prometheusAlertManagerConfig.ClusterType = helper.String(v.(string)) + } + if v, ok := AlertManagerMap["cluster_id"]; ok { + prometheusAlertManagerConfig.ClusterId = helper.String(v.(string)) + } + prometheusNotificationItem.AlertManager = &prometheusAlertManagerConfig + } + if v, ok := NotificationMap["repeat_interval"]; ok { + prometheusNotificationItem.RepeatInterval = helper.String(v.(string)) + } + if v, ok := NotificationMap["time_range_start"]; ok { + prometheusNotificationItem.TimeRangeStart = helper.String(v.(string)) + } + if v, ok := NotificationMap["time_range_end"]; ok { + prometheusNotificationItem.TimeRangeEnd = helper.String(v.(string)) + } + if v, ok := NotificationMap["notify_way"]; ok { + notifyWaySet := v.(*schema.Set).List() + for i := range notifyWaySet { + notifyWay := notifyWaySet[i].(string) + prometheusNotificationItem.NotifyWay = append(prometheusNotificationItem.NotifyWay, &notifyWay) + } + } + if v, ok := NotificationMap["receiver_groups"]; ok { + receiverGroupsSet := v.(*schema.Set).List() + for i := range receiverGroupsSet { + receiverGroups := receiverGroupsSet[i].(string) + prometheusNotificationItem.ReceiverGroups = append(prometheusNotificationItem.ReceiverGroups, &receiverGroups) + } + } + if v, ok := NotificationMap["phone_notify_order"]; ok { + phoneNotifyOrderSet := v.(*schema.Set).List() + for i := range phoneNotifyOrderSet { + phoneNotifyOrder := phoneNotifyOrderSet[i].(int) + prometheusNotificationItem.PhoneNotifyOrder = append(prometheusNotificationItem.PhoneNotifyOrder, helper.IntUint64(phoneNotifyOrder)) + } + } + if v, ok := NotificationMap["phone_circle_times"]; ok { + prometheusNotificationItem.PhoneCircleTimes = helper.IntInt64(v.(int)) + } + if v, ok := NotificationMap["phone_inner_interval"]; ok { + prometheusNotificationItem.PhoneInnerInterval = helper.IntInt64(v.(int)) + } + if v, ok := NotificationMap["phone_circle_interval"]; ok { + prometheusNotificationItem.PhoneCircleInterval = helper.IntInt64(v.(int)) + } + if v, ok := NotificationMap["phone_arrive_notice"]; ok { + prometheusNotificationItem.PhoneArriveNotice = helper.Bool(v.(bool)) + } + prometheusAlertPolicyItem.Notification = &prometheusNotificationItem + } + if v, ok := dMap["updated_at"]; ok { + prometheusAlertPolicyItem.UpdatedAt = helper.String(v.(string)) + } + if v, ok := dMap["cluster_id"]; ok { + prometheusAlertPolicyItem.ClusterId = helper.String(v.(string)) + } + + request.AlertRule = &prometheusAlertPolicyItem + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseTkeClient().CreatePrometheusAlertPolicy(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + + if err != nil { + log.Printf("[CRITAL]%s create tke tmpAlertPolicy failed, reason:%+v", logId, err) + return err + } + + tmpAlertPolicyId := *response.Response.Id + + d.SetId(tmpAlertPolicyId) + return resourceTencentCloudTkeTmpAlertPolicyRead(d, meta) +} + +func resourceTencentCloudTkeTmpAlertPolicyRead(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_tke_tmpAlertPolicy.read")() + defer inconsistentCheck(d, meta)() + + return nil +} + +func resourceTencentCloudTkeTmpAlertPolicyUpdate(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_tke_tmp_alert_policy.update")() + defer inconsistentCheck(d, 
meta)() + + logId := getLogId(contextNil) + + request := tke.NewModifyPrometheusAlertPolicyRequest() + + request.InstanceId = helper.String(d.Id()) + + if d.HasChange("instance_id") { + return fmt.Errorf("`instance_id` does not support change now.") + } + + if d.HasChange("alert_rule") { + return fmt.Errorf("`alert_rule` does not support change now.") + } + + err := resource.Retry(writeRetryTimeout, func() *resource.RetryError { + result, e := meta.(*TencentCloudClient).apiV3Conn.UseTkeClient().ModifyPrometheusAlertPolicy(request) + if e != nil { + return retryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + + if err != nil { + return err + } + + return resourceTencentCloudTkeTmpAlertPolicyRead(d, meta) +} + +func resourceTencentCloudTkeTmpAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error { + defer logElapsed("resource.tencentcloud_tke_tmp_alert_policy.delete")() + defer inconsistentCheck(d, meta)() + + logId := getLogId(contextNil) + ctx := context.WithValue(context.TODO(), logIdKey, logId) + + service := TkeService{client: meta.(*TencentCloudClient).apiV3Conn} + tmpAlertPolicyId := d.Id() + + if err := service.DeleteTkeTmpAlertPolicyById(ctx, tmpAlertPolicyId); err != nil { + return err + } + + return nil +} diff --git a/tencentcloud/resource_tc_monitor_tmp_tke_template.go b/tencentcloud/resource_tc_monitor_tmp_tke_template.go index 678c00b050..1597e9de42 100644 --- a/tencentcloud/resource_tc_monitor_tmp_tke_template.go +++ b/tencentcloud/resource_tc_monitor_tmp_tke_template.go @@ -16,13 +16,6 @@ resource "tencentcloud_monitor_tmp_tke_template" "template" { } } -``` -Import - -tmp tke template can be imported using the id, e.g. 
-``` -$ terraform import tencentcloud_monitor_tmp_tke_template.template template_id -``` */ package tencentcloud diff --git a/tencentcloud/service_tencentcloud_monitor.go b/tencentcloud/service_tencentcloud_monitor.go index 702438c18b..f12f6a0bb6 100644 --- a/tencentcloud/service_tencentcloud_monitor.go +++ b/tencentcloud/service_tencentcloud_monitor.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "log" + "strconv" + "strings" "time" "github.com/hashicorp/terraform-plugin-sdk/helper/resource" @@ -262,7 +264,7 @@ func (me *MonitorService) DescribeBindingAlarmPolicyObjectList(ctx context.Conte } // tmp -func (me *MonitorService) DescribeMonitorTmpInstanceById(ctx context.Context, tmpInstanceId string) (instance *monitor.PrometheusInstancesItem, errRet error) { +func (me *MonitorService) DescribeMonitorTmpInstance(ctx context.Context, tmpInstanceId string) (tmpInstance *monitor.PrometheusInstancesItem, errRet error) { var ( logId = getLogId(ctx) request = monitor.NewDescribePrometheusInstancesRequest() @@ -274,10 +276,8 @@ func (me *MonitorService) DescribeMonitorTmpInstanceById(ctx context.Context, tm logId, "query object", request.ToJsonString(), errRet.Error()) } }() - request.InstanceIds = []*string{&tmpInstanceId} - ratelimit.Check(request.GetAction()) response, err := me.client.UseMonitorClient().DescribePrometheusInstances(request) if err != nil { log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", @@ -291,11 +291,11 @@ func (me *MonitorService) DescribeMonitorTmpInstanceById(ctx context.Context, tm if len(response.Response.InstanceSet) < 1 { return } - instance = response.Response.InstanceSet[0] + tmpInstance = response.Response.InstanceSet[0] return } -func (me *MonitorService) DeleteMonitorTmpInstance(ctx context.Context, tmpInstanceId string) (errRet error) { +func (me *MonitorService) IsolateMonitorTmpInstanceById(ctx context.Context, tmpInstanceId string) (errRet error) { logId := getLogId(ctx) request := monitor.NewTerminatePrometheusInstancesRequest() @@ -320,7 +320,32 @@ func (me *MonitorService) DeleteMonitorTmpInstance(ctx context.Context, tmpInsta return } -func (me *MonitorService) DescribeMonitorTmpCvmAgentById(ctx context.Context, tmpId string, tmpCvmAgentName string) (instance *monitor.PrometheusAgent, errRet error) { +func (me *MonitorService) DeleteMonitorTmpInstanceById(ctx context.Context, tmpInstanceId string) (errRet error) { + logId := getLogId(ctx) + + request := monitor.NewDestroyPrometheusInstanceRequest() + request.InstanceId = &tmpInstanceId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "delete object", request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + response, err := me.client.UseMonitorClient().DestroyPrometheusInstance(request) + if err != nil { + errRet = err + return err + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + return +} + +func (me *MonitorService) DescribeMonitorTmpCvmAgent(ctx context.Context, instanceId string, tmpCvmAgentId string) (tmpCvmAgent *monitor.PrometheusAgent, errRet error) { var ( logId = getLogId(ctx) request = monitor.NewDescribePrometheusAgentsRequest() @@ -332,11 +357,9 @@ func (me *MonitorService) DescribeMonitorTmpCvmAgentById(ctx context.Context, tm logId, "query object", request.ToJsonString(), errRet.Error()) } }() + request.InstanceId = &instanceId + request.AgentIds = 
[]*string{&tmpCvmAgentId} - request.InstanceId = &tmpId - request.Name = &tmpCvmAgentName - - ratelimit.Check(request.GetAction()) response, err := me.client.UseMonitorClient().DescribePrometheusAgents(request) if err != nil { log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", @@ -350,7 +373,137 @@ func (me *MonitorService) DescribeMonitorTmpCvmAgentById(ctx context.Context, tm if len(response.Response.AgentSet) < 1 { return } - instance = response.Response.AgentSet[0] + tmpCvmAgent = response.Response.AgentSet[0] + return +} + +func (me *MonitorService) DescribeMonitorTmpScrapeJob(ctx context.Context, tmpScrapeJobId string) (tmpScrapeJob *monitor.PrometheusScrapeJob, errRet error) { + var ( + logId = getLogId(ctx) + request = monitor.NewDescribePrometheusScrapeJobsRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "query object", request.ToJsonString(), errRet.Error()) + } + }() + + ids := strings.Split(tmpScrapeJobId, FILED_SP) + + request.JobIds = []*string{&ids[0]} + request.InstanceId = &ids[1] + request.AgentId = &ids[2] + + response, err := me.client.UseMonitorClient().DescribePrometheusScrapeJobs(request) + if err != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, request.GetAction(), request.ToJsonString(), err.Error()) + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.ScrapeJobSet) < 1 { + return + } + tmpScrapeJob = response.Response.ScrapeJobSet[0] + return +} + +func (me *MonitorService) DeleteMonitorTmpScrapeJobById(ctx context.Context, tmpScrapeJobId string) (errRet error) { + logId := getLogId(ctx) + + ids := strings.Split(tmpScrapeJobId, FILED_SP) + request := monitor.NewDeletePrometheusScrapeJobsRequest() + request.JobIds = []*string{&ids[0]} + request.InstanceId = &ids[1] + request.AgentId = &ids[2] + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "delete object", request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + response, err := me.client.UseMonitorClient().DeletePrometheusScrapeJobs(request) + if err != nil { + errRet = err + return err + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + return +} + +func (me *MonitorService) DescribeMonitorTmpExporterIntegration(ctx context.Context, tmpExporterIntegrationId string) (tmpExporterIntegration *monitor.IntegrationConfiguration, errRet error) { + var ( + logId = getLogId(ctx) + request = monitor.NewDescribeExporterIntegrationsRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "query object", request.ToJsonString(), errRet.Error()) + } + }() + + ids := strings.Split(tmpExporterIntegrationId, FILED_SP) + request.Name = &ids[0] + request.InstanceId = &ids[1] + kubeType, _ := strconv.Atoi(ids[2]) + request.KubeType = helper.IntInt64(kubeType) + request.ClusterId = &ids[3] + + response, err := me.client.UseMonitorClient().DescribeExporterIntegrations(request) + if err != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, request.GetAction(), request.ToJsonString(), 
err.Error()) + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.IntegrationSet) < 1 { + return + } + tmpExporterIntegration = response.Response.IntegrationSet[0] + return +} + +func (me *MonitorService) DeleteMonitorTmpExporterIntegrationById(ctx context.Context, tmpExporterIntegrationId string) (errRet error) { + logId := getLogId(ctx) + + request := monitor.NewDeleteExporterIntegrationRequest() + ids := strings.Split(tmpExporterIntegrationId, FILED_SP) + request.Name = &ids[0] + request.InstanceId = &ids[1] + kubeType, _ := strconv.Atoi(ids[2]) + request.KubeType = helper.IntInt64(kubeType) + request.ClusterId = &ids[3] + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "delete object", request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + response, err := me.client.UseMonitorClient().DeleteExporterIntegration(request) + if err != nil { + errRet = err + return err + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) return } diff --git a/tencentcloud/service_tencentcloud_tke.go b/tencentcloud/service_tencentcloud_tke.go index 02f202ef06..d790d35771 100644 --- a/tencentcloud/service_tencentcloud_tke.go +++ b/tencentcloud/service_tencentcloud_tke.go @@ -1833,3 +1833,59 @@ func (me *TkeService) DeleteTmpTkeTemplate(ctx context.Context, tempId string) ( return } + +func (me *TkeService) DescribeTkeTmpAlertPolicy(ctx context.Context, tmpAlertPolicyId string) (tmpAlertPolicy *tke.PrometheusAlertPolicyItem, errRet error) { + var ( + logId = getLogId(ctx) + request = tke.NewDescribePrometheusAlertPolicyRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "query object", request.ToJsonString(), errRet.Error()) + } + }() + request.InstanceId = &tmpAlertPolicyId + + response, err := me.client.UseTkeClient().DescribePrometheusAlertPolicy(request) + if err != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, request.GetAction(), request.ToJsonString(), err.Error()) + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if len(response.Response.AlertRules) < 1 { + return + } + tmpAlertPolicy = response.Response.AlertRules[0] + return +} + +func (me *TkeService) DeleteTkeTmpAlertPolicyById(ctx context.Context, tmpAlertPolicyId string) (errRet error) { + logId := getLogId(ctx) + + request := tke.NewDeletePrometheusAlertPolicyRequest() + request.InstanceId = &tmpAlertPolicyId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", + logId, "delete object", request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + response, err := me.client.UseTkeClient().DeletePrometheusAlertPolicy(request) + if err != nil { + errRet = err + return err + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", + logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + return +} diff --git 
a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go index 95441f0885..27a45f5374 100644 --- a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http/request.go @@ -254,7 +254,7 @@ func CompleteCommonParams(request Request, region string) { params["Action"] = request.GetAction() params["Timestamp"] = strconv.FormatInt(time.Now().Unix(), 10) params["Nonce"] = strconv.Itoa(rand.Int()) - params["RequestClient"] = "SDK_GO_1.0.438" + params["RequestClient"] = "SDK_GO_1.0.443" } func ConstructParams(req Request) (err error) { diff --git a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724/models.go b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724/models.go index 9e74490caf..941a3489a4 100644 --- a/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724/models.go +++ b/vendor/github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724/models.go @@ -1140,6 +1140,12 @@ type CreateExporterIntegrationRequestParams struct { // Instance ID InstanceId *string `json:"InstanceId,omitempty" name:"InstanceId"` + // Type + Kind *string `json:"Kind,omitempty" name:"Kind"` + + // Integration config + Content *string `json:"Content,omitempty" name:"Content"` + // Kubernetes cluster type. Valid values: //
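The SDK bump to v1.0.443 is what exposes the new `Kind` and `Content` request fields used by the exporter integration resource above. Below is a minimal standalone sketch of exercising the upgraded API directly through the SDK, under the assumption that credentials come from the environment and that the region, instance and cluster IDs are placeholders taken from the example configuration.

```go
package main

import (
	"fmt"
	"os"

	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
	"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
	monitor "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/monitor/v20180724"
)

func main() {
	// Credentials are read from the environment; any region with the managed
	// Prometheus service works here, ap-guangzhou is just an example.
	credential := common.NewCredential(
		os.Getenv("TENCENTCLOUD_SECRET_ID"),
		os.Getenv("TENCENTCLOUD_SECRET_KEY"),
	)
	client, err := monitor.NewClient(credential, "ap-guangzhou", profile.NewClientProfile())
	if err != nil {
		panic(err)
	}

	request := monitor.NewCreateExporterIntegrationRequest()
	request.InstanceId = common.StringPtr("prom-dko9d0nu") // placeholder instance ID
	request.Kind = common.StringPtr("blackbox-exporter")   // Kind: new in v1.0.443
	// Content: new in v1.0.443; the JSON mirrors the HCL example above.
	request.Content = common.StringPtr(`{"name":"test","kind":"blackbox-exporter","spec":{"instanceSpec":{"module":"http_get","urls":["xx"]}}}`)
	request.KubeType = common.Int64Ptr(1)
	request.ClusterId = common.StringPtr("cls-bmuaukfu") // placeholder cluster ID

	response, err := client.CreateExporterIntegration(request)
	if err != nil {
		panic(err)
	}
	fmt.Println(response.ToJsonString())
}
```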