diff --git a/.changelog/3547.txt b/.changelog/3547.txt new file mode 100644 index 0000000000..71c9817fad --- /dev/null +++ b/.changelog/3547.txt @@ -0,0 +1,63 @@ +```release-note:new-data-source +tencentcloud_wedata_ops_workflows +``` + +```release-note:new-data-source +tencentcloud_wedata_ops_workflow +``` + +```release-note:new-data-source +tencentcloud_wedata_ops_async_job +``` + +```release-note:new-data-source +tencentcloud_wedata_ops_alarm_rules +``` + +```release-note:new-data-source +tencentcloud_wedata_task_instance +``` + +```release-note:new-data-source +tencentcloud_wedata_task_instances +``` + +```release-note:new-data-source +tencentcloud_wedata_task_instance_log +``` + +```release-note:new-data-source +tencentcloud_wedata_upstream_task_instances +``` + +```release-note:new-data-source +tencentcloud_wedata_downstream_task_instances +``` + +```release-note:new-data-source +tencentcloud_wedata_task_instance_executions +``` + +```release-note:new-resource +tencentcloud_wedata_ops_stop_task_async +``` + +```release-note:new-resource +tencentcloud_wedata_ops_task_owner +``` + +```release-note:new-resource +tencentcloud_wedata_ops_alarm_rule +``` + +```release-note:new-resource +tencentcloud_wedata_task_kill_instance_async +``` + +```release-note:new-resource +tencentcloud_wedata_task_rerun_instance_async +``` + +```release-note:new-resource +tencentcloud_wedata_task_set_success_instance_async +``` diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index b8b1d43cdf..fd56f0d8aa 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1159,6 +1159,16 @@ func Provider() *schema.Provider { "tencentcloud_eb_platform_products": eb.DataSourceTencentCloudEbPlatformProducts(), "tencentcloud_eb_plateform_event_template": eb.DataSourceTencentCloudEbPlateformEventTemplate(), "tencentcloud_wedata_rule_templates": wedata.DataSourceTencentCloudWedataRuleTemplates(), + "tencentcloud_wedata_ops_workflows": wedata.DataSourceTencentCloudWedataOpsWorkflows(), + "tencentcloud_wedata_ops_workflow": wedata.DataSourceTencentCloudWedataOpsWorkflow(), + "tencentcloud_wedata_ops_async_job": wedata.DataSourceTencentCloudWedataOpsAsyncJob(), + "tencentcloud_wedata_ops_alarm_rules": wedata.DataSourceTencentCloudWedataOpsAlarmRules(), + "tencentcloud_wedata_task_instance": wedata.DataSourceTencentCloudWedataTaskInstance(), + "tencentcloud_wedata_task_instances": wedata.DataSourceTencentCloudWedataTaskInstances(), + "tencentcloud_wedata_task_instance_log": wedata.DataSourceTencentCloudWedataTaskInstanceLog(), + "tencentcloud_wedata_upstream_task_instances": wedata.DataSourceTencentCloudWedataUpstreamTaskInstances(), + "tencentcloud_wedata_downstream_task_instances": wedata.DataSourceTencentCloudWedataDownstreamTaskInstances(), + "tencentcloud_wedata_task_instance_executions": wedata.DataSourceTencentCloudWedataTaskInstanceExecutions(), "tencentcloud_wedata_data_source_list": wedata.DataSourceTencentCloudWedataDataSourceList(), "tencentcloud_private_dns_records": privatedns.DataSourceTencentCloudPrivateDnsRecords(), "tencentcloud_private_dns_private_zone_list": privatedns.DataSourceTencentCloudPrivateDnsPrivateZoneList(), @@ -2280,6 +2290,12 @@ func Provider() *schema.Provider { "tencentcloud_wedata_function": wedata.ResourceTencentCloudWedataFunction(), "tencentcloud_wedata_script": wedata.ResourceTencentCloudWedataScript(), "tencentcloud_wedata_dq_rule": wedata.ResourceTencentCloudWedataDqRule(), + "tencentcloud_wedata_ops_stop_task_async": wedata.ResourceTencentCloudWedataOpsStopTaskAsync(), + "tencentcloud_wedata_ops_task_owner": wedata.ResourceTencentCloudWedataOpsTaskOwner(), + "tencentcloud_wedata_ops_alarm_rule":
wedata.ResourceTencentCloudWedataOpsAlarmRule(), + "tencentcloud_wedata_task_kill_instance_async": wedata.ResourceTencentCloudWedataTaskKillInstanceAsync(), + "tencentcloud_wedata_task_rerun_instance_async": wedata.ResourceTencentCloudWedataTaskRerunInstanceAsync(), + "tencentcloud_wedata_task_set_success_instance_async": wedata.ResourceTencentCloudWedataTaskSetSuccessInstanceAsync(), "tencentcloud_wedata_integration_offline_task": wedata.ResourceTencentCloudWedataIntegrationOfflineTask(), "tencentcloud_wedata_integration_realtime_task": wedata.ResourceTencentCloudWedataIntegrationRealtimeTask(), "tencentcloud_wedata_integration_task_node": wedata.ResourceTencentCloudWedataIntegrationTaskNode(), diff --git a/tencentcloud/provider.md b/tencentcloud/provider.md index 7d90d39fb0..a8ace65e52 100644 --- a/tencentcloud/provider.md +++ b/tencentcloud/provider.md @@ -2220,6 +2220,16 @@ tencentcloud_waf_attack_white_rule Wedata Data Source tencentcloud_wedata_rule_templates +tencentcloud_wedata_ops_workflows +tencentcloud_wedata_ops_workflow +tencentcloud_wedata_ops_async_job +tencentcloud_wedata_ops_alarm_rules +tencentcloud_wedata_task_instance +tencentcloud_wedata_task_instances +tencentcloud_wedata_task_instance_log +tencentcloud_wedata_upstream_task_instances +tencentcloud_wedata_downstream_task_instances +tencentcloud_wedata_task_instance_executions tencentcloud_wedata_data_source_list Resource @@ -2228,6 +2238,12 @@ tencentcloud_wedata_function tencentcloud_wedata_script tencentcloud_wedata_dq_rule tencentcloud_wedata_rule_template +tencentcloud_wedata_ops_stop_task_async +tencentcloud_wedata_ops_task_owner +tencentcloud_wedata_ops_alarm_rule +tencentcloud_wedata_task_kill_instance_async +tencentcloud_wedata_task_rerun_instance_async +tencentcloud_wedata_task_set_success_instance_async tencentcloud_wedata_integration_offline_task tencentcloud_wedata_integration_realtime_task tencentcloud_wedata_integration_task_node diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances.go b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances.go new file mode 100644 index 0000000000..4f47eb3a4d --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances.go @@ -0,0 +1,396 @@ +// Code generated by iacg; DO NOT EDIT.
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataDownstreamTaskInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataDownstreamTaskInstancesRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Instance unique identifier.", + }, + + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "Time zone timeZone, default UTC+8.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Direct downstream task instances list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_count": { + Type: schema.TypeInt, + Required: true, + Description: "Total count.", + }, + "total_page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Total page number.", + }, + "page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Page number.", + }, + "page_size": { + Type: schema.TypeInt, + Required: true, + Description: "Page size.", + }, + "items": { + Type: schema.TypeList, + Required: true, + Description: "Data list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID to which it belongs.", + }, + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Instance unique identifier.", + }, + "folder_id": { + Type: schema.TypeString, + Required: true, + Description: "Folder ID.", + }, + "folder_name": { + Type: schema.TypeString, + Required: true, + Description: "Folder name.", + }, + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + "workflow_name": { + Type: schema.TypeString, + Required: true, + Description: "Workflow name.", + }, + "task_id": { + Type: schema.TypeString, + Required: true, + Description: "Task ID.", + }, + "task_name": { + Type: schema.TypeString, + Required: true, + Description: "Task name.", + }, + "cur_run_date": { + Type: schema.TypeString, + Required: true, + Description: "Instance data time.", + }, + "instance_state": { + Type: schema.TypeString, + Required: true, + Description: "Instance state: WAIT_EVENT: Waiting for event, WAIT_UPSTREAM: Waiting for upstream, WAIT_RUN: Waiting to run, RUNNING: Running, SKIP_RUNNING: Skip running, FAILED_RETRY: Failed and retrying, EXPIRED: Failed, COMPLETED: Completed.", + }, + "instance_type": { + Type: schema.TypeInt, + Required: true, + Description: "Instance type: 0: Backfill instance, 1: Periodic instance, 2: Non-periodic instance.", + }, + "owner_uin_list": { + Type: schema.TypeSet, + Required: true, + Description: "List of owners.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "total_run_num": { + Type: schema.TypeInt, + Required: true, + Description: "Total run count.", + }, + "task_type": { + Type: schema.TypeString, + Required: true, + Description: "Task type description.", + }, + "task_type_id": { + Type: 
schema.TypeInt, + Required: true, + Description: "Task type ID.", + }, + "cycle_type": { + Type: schema.TypeString, + Required: true, + Description: "Task cycle type. Supports filtering multiple conditions, and the relationship between conditions is OR. O: ONEOFF_CYCLE, Y: YEAR_CYCLE, M: MONTH_CYCLE, W: WEEK_CYCLE, D: DAY_CYCLE, H: HOUR_CYCLE, I: MINUTE_CYCLE, C: CRONTAB_CYCLE.", + }, + "try_limit": { + Type: schema.TypeInt, + Required: true, + Description: "Retry limit after each run failure.", + }, + "tries": { + Type: schema.TypeInt, + Required: true, + Description: "Failure retry count. When triggered again by manual rerun or backfill instance, it will be reset to 0 and recounted.", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + Description: "Run start time.", + }, + "end_time": { + Type: schema.TypeString, + Required: true, + Description: "Run end time.", + }, + "cost_time": { + Type: schema.TypeInt, + Required: true, + Description: "Cost time, in milliseconds.", + }, + "scheduler_time": { + Type: schema.TypeString, + Required: true, + Description: "Scheduled time", + }, + "last_update_time": { + Type: schema.TypeString, + Required: true, + Description: "Instance last update time, format: yyyy-MM-dd HH:mm:ss.", + }, + "executor_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Executor resource group ID.", + }, + "executor_group_name": { + Type: schema.TypeString, + Required: true, + Description: "Resource group name.", + }, + }, + }, + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataDownstreamTaskInstancesRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_downstream_task_instances.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("instance_key"); ok { + paramMap["InstanceKey"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("time_zone"); ok { + paramMap["TimeZone"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.ListDownstreamTaskInstancesResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataDownstreamTaskInstancesByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var taskId string + dataMap := map[string]interface{}{} + ids := []string{} + + if respData.Data != nil { + if respData.Data.TotalCount != nil { + dataMap["total_count"] = respData.Data.TotalCount + } + + if respData.Data.TotalPageNumber != nil { + dataMap["total_page_number"] = respData.Data.TotalPageNumber + } + + if respData.Data.PageNumber != nil { + dataMap["page_number"] = respData.Data.PageNumber + } + + if respData.Data.PageSize != nil { + dataMap["page_size"] = respData.Data.PageSize + } + + itemsList := make([]map[string]interface{}, 0, len(respData.Data.Items)) + if respData.Data.Items != nil { + for _, items := range respData.Data.Items { + itemsMap := 
map[string]interface{}{} + + if items.ProjectId != nil { + itemsMap["project_id"] = items.ProjectId + projectId = *items.ProjectId + } + + if items.InstanceKey != nil { + itemsMap["instance_key"] = items.InstanceKey + } + + if items.FolderId != nil { + itemsMap["folder_id"] = items.FolderId + } + + if items.FolderName != nil { + itemsMap["folder_name"] = items.FolderName + } + + if items.WorkflowId != nil { + itemsMap["workflow_id"] = items.WorkflowId + } + + if items.WorkflowName != nil { + itemsMap["workflow_name"] = items.WorkflowName + } + + if items.TaskId != nil { + itemsMap["task_id"] = items.TaskId + taskId = *items.TaskId + } + + if items.TaskName != nil { + itemsMap["task_name"] = items.TaskName + } + + if items.CurRunDate != nil { + itemsMap["cur_run_date"] = items.CurRunDate + } + + if items.InstanceState != nil { + itemsMap["instance_state"] = items.InstanceState + } + + if items.InstanceType != nil { + itemsMap["instance_type"] = items.InstanceType + } + + if items.OwnerUinList != nil { + itemsMap["owner_uin_list"] = items.OwnerUinList + } + + if items.TotalRunNum != nil { + itemsMap["total_run_num"] = items.TotalRunNum + } + + if items.TaskType != nil { + itemsMap["task_type"] = items.TaskType + } + + if items.TaskTypeId != nil { + itemsMap["task_type_id"] = items.TaskTypeId + } + + if items.CycleType != nil { + itemsMap["cycle_type"] = items.CycleType + } + + if items.TryLimit != nil { + itemsMap["try_limit"] = items.TryLimit + } + + if items.Tries != nil { + itemsMap["tries"] = items.Tries + } + + if items.StartTime != nil { + itemsMap["start_time"] = items.StartTime + } + + if items.EndTime != nil { + itemsMap["end_time"] = items.EndTime + } + + if items.CostTime != nil { + itemsMap["cost_time"] = items.CostTime + } + + if items.SchedulerTime != nil { + itemsMap["scheduler_time"] = items.SchedulerTime + } + + if items.LastUpdateTime != nil { + itemsMap["last_update_time"] = items.LastUpdateTime + } + + if items.ExecutorGroupId != nil { + itemsMap["executor_group_id"] = items.ExecutorGroupId + } + + if items.ExecutorGroupName != nil { + itemsMap["executor_group_name"] = items.ExecutorGroupName + } + + ids = append(ids, strings.Join([]string{projectId, taskId}, tccommon.FILED_SP)) + itemsList = append(itemsList, itemsMap) + } + + dataMap["items"] = itemsList + } + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(helper.DataResourceIdsHash(ids)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_test.go new file mode 100644 index 0000000000..78ffc8c803 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_down_task_instances_test.go @@ -0,0 +1,37 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func 
TestAccTencentCloudWedataDownstreamTaskInstancesDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataDownTaskInstancesDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_downstream_task_instances.wedata_down_task_instances"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_downstream_task_instances.wedata_down_task_instances", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_downstream_task_instances.wedata_down_task_instances", "data.#", "1"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_downstream_task_instances.wedata_down_task_instances", "data.0.items.#", "1"), + ), + }, + }, + }) +} + +const testAccWedataDownTaskInstancesDataSource = ` + +data "tencentcloud_wedata_downstream_task_instances" "wedata_down_task_instances" { + project_id = "1859317240494305280" + instance_key = "20250731151633120_2025-10-13 17:00:00" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_downstream_task_instances.md b/tencentcloud/services/wedata/data_source_tc_wedata_downstream_task_instances.md new file mode 100644 index 0000000000..f1671339a2 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_downstream_task_instances.md @@ -0,0 +1,10 @@ +Use this data source to query detailed information of wedata downstream task instances + +Example Usage + +```hcl +data "tencentcloud_wedata_downstream_task_instances" "wedata_down_task_instances" { + project_id = "1859317240494305280" + instance_key = "20250731151633120_2025-10-13 17:00:00" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.go new file mode 100644 index 0000000000..7b97b9df27 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.go @@ -0,0 +1,857 @@ +// Code generated by iacg; DO NOT EDIT. 
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataOpsAlarmRules() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataOpsAlarmRulesRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "monitor_object_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Monitoring object type. Task dimension monitoring can be configured by task/workflow/project: 1. Task, 2. Workflow, 3. Project (default is 1. Task). Project dimension monitoring: overall project task fluctuation alarm, 7: Project fluctuation monitoring alarm.", + }, + + "task_id": { + Type: schema.TypeString, + Optional: true, + Description: "Query alarm rules based on task ID.", + }, + + "alarm_type": { + Type: schema.TypeString, + Optional: true, + Description: "Alarm Rule Monitoring Types: failure: failure alarm; overtime: timeout alarm; success: success alarm; backTrackingOrRerunSuccess: backfill or rerun success alarm; backTrackingOrRerunFailure: backfill or rerun failure alarm. Project Fluctuation Alarms: projectFailureInstanceUpwardFluctuationAlarm: alarm if the upward fluctuation rate of failed instances exceeds the threshold. projectSuccessInstanceDownwardFluctuationAlarm: alarm if the downward fluctuation rate of successful instances exceeds the threshold. Offline Integration Task Reconciliation Alarms: reconciliationFailure: offline reconciliation task failure alarm; reconciliationOvertime: offline reconciliation task timeout alarm; reconciliationMismatch: alarm if the number of inconsistent entries in a data reconciliation task exceeds the threshold. Example value: [\"failure\"].", + }, + + "alarm_level": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm level: 1. Normal, 2. Major, 3. Urgent.", + }, + + "alarm_recipient_id": { + Type: schema.TypeString, + Optional: true, + Description: "Query the alarm rules configured for the corresponding alarm recipient.", + }, + + "keyword": { + Type: schema.TypeString, + Optional: true, + Description: "Query the corresponding alarm rule based on the alarm rule ID/rule name.", + }, + + "create_user_uin": { + Type: schema.TypeString, + Optional: true, + Description: "Alarm rule creator filtering.", + }, + + "create_time_from": { + Type: schema.TypeString, + Optional: true, + Description: "The start time of the alarm rule creation time range, in the format of \"2025-08-17 00:00:00\".", + }, + + "create_time_to": { + Type: schema.TypeString, + Optional: true, + Description: "The end time of the alarm rule creation time range, in the format of \"2025-08-26 23:59:59\".", + }, + + "update_time_from": { + Type: schema.TypeString, + Optional: true, + Description: "The start time of the alarm rule last update time range, in the format of \"2025-08-26 00:00:00\".", + }, + + "update_time_to": { + Type: schema.TypeString, + Optional: true, + Description: "The end time of the alarm rule last update time range, in the format of \"2025-08-26 23:59:59\".", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Alarm information response.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "page_number": { + Type: schema.TypeInt, + Required: true, + Description: "The current page number in the pagination.", + }, + "page_size": { + Type: schema.TypeInt, + Required: true, + Description: "Number of items displayed per page.", + }, + "total_page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Total number of pages.", + }, + "total_count": { + Type: schema.TypeInt, + Required: true, + Description: "The total number of alarm rules.", + }, + "items": { + Type: schema.TypeList, + Required: true, + Description: "Alarm rule information list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_rule_id": { + Type: schema.TypeString, + Required: true, + Description: "Alarm rule ID.", + }, + "alarm_rule_name": { + Type: schema.TypeString, + Required: true, + Description: "Alarm rule name.", + }, + "description": { + Type: schema.TypeString, + Required: true, + Description: "Alarm rule description.", + }, + "monitor_object_type": { + Type: schema.TypeInt, + Required: true, + Description: "Monitoring object type, Task-based monitoring: Configurable by task/workflow/project: 1. Task, 2. Workflow, 3. Project (default is 1. Task). Project-based monitoring: Alerts for overall project task fluctuations, 7: Project fluctuation monitoring alerts.", + }, + "monitor_object_ids": { + Type: schema.TypeSet, + Required: true, + Description: "A list of monitored object business IDs. Different business IDs are passed in based on the MonitorType setting. For example, 1 (Task) - MonitorObjectIds is a list of task IDs; 2 (Workflow) - MonitorObjectIds is a list of workflow IDs (workflow IDs can be obtained from the ListWorkflows interface); 3 (Project) - MonitorObjectIds is a list of project IDs.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "alarm_types": { + Type: schema.TypeSet, + Required: true, + Description: "Alarm Rule Monitoring Types: failure: failure alarm; overtime: timeout alarm; success: success alarm; backTrackingOrRerunSuccess: backfill or rerun success alarm; backTrackingOrRerunFailure: backfill or rerun failure alarm.
Project Fluctuation Alarms: projectFailureInstanceUpwardFluctuationAlarm: alarm if the upward fluctuation rate of failed instances exceeds the threshold. projectSuccessInstanceDownwardFluctuationAlarm: alarm if the downward fluctuation rate of successful instances exceeds the threshold. Offline Integration Task Reconciliation Alarms: reconciliationFailure: offline reconciliation task failure alarm; reconciliationOvertime: offline reconciliation task timeout alarm; reconciliationMismatch: alarm if the number of inconsistent entries in a data reconciliation task exceeds the threshold.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "status": { + Type: schema.TypeInt, + Required: true, + Description: "Whether the alarm rule is enabled: 0-disable, 1-enable.", + }, + "alarm_rule_detail": { + Type: schema.TypeList, + Required: true, + MaxItems: 1, + Description: "Alarm rule configuration information: Success alarms do not require configuration. Failure alarms can be configured as either first-failure alarms or all retry failure alarms. Timeout configuration requires the timeout type and timeout threshold. Project fluctuation alarms require the fluctuation rate and anti-shake period.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trigger": { + Type: schema.TypeInt, + Optional: true, + Description: "Failure trigger timing: 1 - Triggered on first failure; 2 -- Triggered when all retries complete (default).", + }, + "data_backfill_or_rerun_trigger": { + Type: schema.TypeInt, + Optional: true, + Description: "Re-recording trigger timing: 1 - Triggered by the first failure; 2 - Triggered by completion of all retries.", + }, + "time_out_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Periodic instance timeout configuration details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. Estimated completion within the period but not completed.", + }, + "type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout value configuration type: 1-Specified value; 2-Average value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in hours. The default value is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout value is specified in minutes. The default value is 1.", + }, + "schedule_time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "The time zone configuration corresponding to the timeout period, such as UTC+7, the default is UTC+8.", + }, + }, + }, + }, + "data_backfill_or_rerun_time_out_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Detailed configuration of re-running and re-recording instance timeout.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. 
Estimated completion within the period but not completed.", + }, + "type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout value configuration type: 1-Specified value; 2-Average value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in hours. The default value is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "The timeout value is specified in minutes. The default value is 1.", + }, + "schedule_time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "The time zone configuration corresponding to the timeout period, such as UTC+7, the default is UTC+8.", + }, + }, + }, + }, + "project_instance_statistics_alarm_info_list": { + Type: schema.TypeList, + Optional: true, + Description: "Project fluctuation alarm configuration details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_type": { + Type: schema.TypeString, + Required: true, + Description: "Alarm type: projectFailureInstanceUpwardFluctuationAlarm: Failure instance upward fluctuation alarm; projectSuccessInstanceDownwardFluctuationAlarm: Success instance downward fluctuation alarm.", + }, + "instance_threshold_count_percent": { + Type: schema.TypeInt, + Optional: true, + Description: "The alarm threshold for the proportion of instance successes fluctuating downwards; the alarm threshold for the proportion of instance failures fluctuating upwards.", + }, + "instance_threshold_count": { + Type: schema.TypeInt, + Optional: true, + Description: "The cumulative instance number fluctuation threshold.", + }, + "stabilize_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: "Stability threshold (number of statistical cycles for anti-shake configuration).", + }, + "stabilize_statistics_cycle": { + Type: schema.TypeInt, + Optional: true, + Description: "Stability statistics period (number of anti-shake configuration statistics periods).", + }, + "is_cumulant": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to calculate cumulatively, false: continuous, true: cumulative.", + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Description: "The cumulative number of instances on the day; the downward fluctuation of the number of failed instances on the day.", + }, + }, + }, + }, + "reconciliation_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Offline integrated reconciliation alarm configuration information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeString, + Optional: true, + Description: "Offline alarm rule types: reconciliationFailure: Offline reconciliation failure alarm; reconciliationOvertime: Offline reconciliation task timeout alarm (timeout must be configured); reconciliationMismatch: Offline reconciliation mismatch alarm (mismatch threshold must be configured).", + }, + "mismatch_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation inconsistency threshold, RuleType=reconciliationMismatch. 
This field needs to be configured and has no default value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation task timeout threshold: hours, default is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation task timeout threshold: minutes, default is 1.", + }, + }, + }, + }, + }, + }, + }, + "alarm_level": { + Type: schema.TypeInt, + Required: true, + Description: "Alarm level: 1. Normal, 2. Major, 3. Urgent.", + }, + "owner_uin": { + Type: schema.TypeString, + Required: true, + Description: "Alarm rule creator uid.", + }, + "bundle_id": { + Type: schema.TypeString, + Required: true, + Description: "Alarm rules bound to the bundle client: If it is empty, it is a normal alarm rule; if it is not empty, it corresponds to the rule bound to the bundle client.", + }, + "bundle_info": { + Type: schema.TypeString, + Required: true, + Description: "If bundleId is not empty, it indicates the name of the bound bundle client.", + }, + "alarm_groups": { + Type: schema.TypeList, + Required: true, + Description: "Alarm recipient configuration list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_escalation_recipient_ids": { + Type: schema.TypeSet, + Optional: true, + Description: "Alarm escalator ID list. If the alarm receiver or the upper escalator does not confirm the alarm within the alarm interval, the alarm will be sent to the next level escalator.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "alarm_escalation_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm escalation interval.", + }, + "notification_fatigue": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Alarm notification fatigue configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notify_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of alarms.", + }, + "notify_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm interval, in minutes.", + }, + "quiet_intervals": { + Type: schema.TypeList, + Optional: true, + Description: "Do not disturb time, for example, the example value [{DaysOfWeek: [1, 2], StartTime: \"00:00:00\", EndTime: \"09:00:00\"}] means do not disturb from 00:00 to 09:00 every Monday and Tuesday.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days_of_week": { + Type: schema.TypeSet, + Optional: true, + Description: "According to the ISO standard, 1 represents Monday and 7 represents Sunday.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: "Start time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss.", + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: "End time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss.", + }, + }, + }, + }, + }, + }, + }, + "alarm_ways": { + Type: schema.TypeSet, + Optional: true, + Description: "Alert Channels: 1: Email, 2: SMS, 3: WeChat, 4: Voice, 5: WeChat Enterprise, 6: Http, 7: WeChat Enterprise Group, 8: Lark Group, 9: DingTalk Group, 10: Slack Group, 11: Teams Group (Default: Email), Only one channel can be selected.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "web_hooks": { + Type: schema.TypeList, + Optional: true, + Description: "List of webhook addresses for corporate WeChat groups, Feishu groups, DingTalk groups, Slack groups, 
and Teams groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_way": { + Type: schema.TypeString, + Optional: true, + Description: "Alert channel value: 7. Enterprise WeChat group, 8. Feishu group, 9. DingTalk group, 10. Slack group, 11. Teams group.", + }, + "web_hooks": { + Type: schema.TypeSet, + Optional: true, + Description: "List of webhook addresses for the alarm group.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "alarm_recipient_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm Recipient Type: 1. Designated Personnel, 2. Task Responsible Personnel, 3. Duty Roster (Default: 1. Designated Personnel).", + }, + "alarm_recipient_ids": { + Type: schema.TypeSet, + Optional: true, + Description: "Depending on the type of AlarmRecipientType, this list has different business IDs: 1 (Specified Person): Alarm Recipient ID List; 2 (Task Responsible Person): No configuration required; 3 (Duty Roster): Duty Roster ID List.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataOpsAlarmRulesRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_ops_alarm_rules.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("monitor_object_type"); ok { + paramMap["MonitorObjectType"] = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("task_id"); ok { + paramMap["TaskId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("alarm_type"); ok { + paramMap["AlarmType"] = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("alarm_level"); ok { + paramMap["AlarmLevel"] = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("alarm_recipient_id"); ok { + paramMap["AlarmRecipientId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("keyword"); ok { + paramMap["Keyword"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("create_user_uin"); ok { + paramMap["CreateUserUin"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("create_time_from"); ok { + paramMap["CreateTimeFrom"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("create_time_to"); ok { + paramMap["CreateTimeTo"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("update_time_from"); ok { + paramMap["UpdateTimeFrom"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("update_time_to"); ok { + paramMap["UpdateTimeTo"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.ListOpsAlarmRulesResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataOpsAlarmRulesByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var alarmRuleId string + dataMap := map[string]interface{}{} + ids := []string{} + if respData.Data != nil { + if respData.Data.PageNumber != nil { 
+ dataMap["page_number"] = respData.Data.PageNumber + } + + if respData.Data.PageSize != nil { + dataMap["page_size"] = respData.Data.PageSize + } + + if respData.Data.TotalPageNumber != nil { + dataMap["total_page_number"] = respData.Data.TotalPageNumber + } + + if respData.Data.TotalCount != nil { + dataMap["total_count"] = respData.Data.TotalCount + } + + itemsList := make([]map[string]interface{}, 0, len(respData.Data.Items)) + if respData.Data.Items != nil { + for _, items := range respData.Data.Items { + itemsMap := map[string]interface{}{} + + if items.AlarmRuleId != nil { + itemsMap["alarm_rule_id"] = items.AlarmRuleId + alarmRuleId = *items.AlarmRuleId + } + + if items.AlarmRuleName != nil { + itemsMap["alarm_rule_name"] = items.AlarmRuleName + } + + if items.Description != nil { + itemsMap["description"] = items.Description + } + + if items.MonitorObjectType != nil { + itemsMap["monitor_object_type"] = items.MonitorObjectType + } + + if items.MonitorObjectIds != nil { + itemsMap["monitor_object_ids"] = items.MonitorObjectIds + } + + if items.AlarmTypes != nil { + itemsMap["alarm_types"] = items.AlarmTypes + } + + if items.Status != nil { + itemsMap["status"] = items.Status + } + + alarmRuleDetailMap := map[string]interface{}{} + + if items.AlarmRuleDetail != nil { + if items.AlarmRuleDetail.Trigger != nil { + alarmRuleDetailMap["trigger"] = items.AlarmRuleDetail.Trigger + } + + if items.AlarmRuleDetail.DataBackfillOrRerunTrigger != nil { + alarmRuleDetailMap["data_backfill_or_rerun_trigger"] = items.AlarmRuleDetail.DataBackfillOrRerunTrigger + } + + timeOutExtInfoList := make([]map[string]interface{}, 0, len(items.AlarmRuleDetail.TimeOutExtInfo)) + if items.AlarmRuleDetail.TimeOutExtInfo != nil { + for _, timeOutExtInfo := range items.AlarmRuleDetail.TimeOutExtInfo { + timeOutExtInfoMap := map[string]interface{}{} + + if timeOutExtInfo.RuleType != nil { + timeOutExtInfoMap["rule_type"] = timeOutExtInfo.RuleType + } + + if timeOutExtInfo.Type != nil { + timeOutExtInfoMap["type"] = timeOutExtInfo.Type + } + + if timeOutExtInfo.Hour != nil { + timeOutExtInfoMap["hour"] = timeOutExtInfo.Hour + } + + if timeOutExtInfo.Min != nil { + timeOutExtInfoMap["min"] = timeOutExtInfo.Min + } + + if timeOutExtInfo.ScheduleTimeZone != nil { + timeOutExtInfoMap["schedule_time_zone"] = timeOutExtInfo.ScheduleTimeZone + } + + timeOutExtInfoList = append(timeOutExtInfoList, timeOutExtInfoMap) + } + + alarmRuleDetailMap["time_out_ext_info"] = timeOutExtInfoList + } + dataBackfillOrRerunTimeOutExtInfoList := make([]map[string]interface{}, 0, len(items.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo)) + if items.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo != nil { + for _, dataBackfillOrRerunTimeOutExtInfo := range items.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo { + dataBackfillOrRerunTimeOutExtInfoMap := map[string]interface{}{} + + if dataBackfillOrRerunTimeOutExtInfo.RuleType != nil { + dataBackfillOrRerunTimeOutExtInfoMap["rule_type"] = dataBackfillOrRerunTimeOutExtInfo.RuleType + } + + if dataBackfillOrRerunTimeOutExtInfo.Type != nil { + dataBackfillOrRerunTimeOutExtInfoMap["type"] = dataBackfillOrRerunTimeOutExtInfo.Type + } + + if dataBackfillOrRerunTimeOutExtInfo.Hour != nil { + dataBackfillOrRerunTimeOutExtInfoMap["hour"] = dataBackfillOrRerunTimeOutExtInfo.Hour + } + + if dataBackfillOrRerunTimeOutExtInfo.Min != nil { + dataBackfillOrRerunTimeOutExtInfoMap["min"] = dataBackfillOrRerunTimeOutExtInfo.Min + } + + if dataBackfillOrRerunTimeOutExtInfo.ScheduleTimeZone != nil { + 
dataBackfillOrRerunTimeOutExtInfoMap["schedule_time_zone"] = dataBackfillOrRerunTimeOutExtInfo.ScheduleTimeZone + } + + dataBackfillOrRerunTimeOutExtInfoList = append(dataBackfillOrRerunTimeOutExtInfoList, dataBackfillOrRerunTimeOutExtInfoMap) + } + + alarmRuleDetailMap["data_backfill_or_rerun_time_out_ext_info"] = dataBackfillOrRerunTimeOutExtInfoList + } + projectInstanceStatisticsAlarmInfoListList := make([]map[string]interface{}, 0, len(items.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList)) + if items.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList != nil { + for _, projectInstanceStatisticsAlarmInfoList := range items.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList { + projectInstanceStatisticsAlarmInfoListMap := map[string]interface{}{} + + if projectInstanceStatisticsAlarmInfoList.AlarmType != nil { + projectInstanceStatisticsAlarmInfoListMap["alarm_type"] = projectInstanceStatisticsAlarmInfoList.AlarmType + } + + if projectInstanceStatisticsAlarmInfoList.InstanceThresholdCountPercent != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count_percent"] = projectInstanceStatisticsAlarmInfoList.InstanceThresholdCountPercent + } + + if projectInstanceStatisticsAlarmInfoList.InstanceThresholdCount != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count"] = projectInstanceStatisticsAlarmInfoList.InstanceThresholdCount + } + + if projectInstanceStatisticsAlarmInfoList.StabilizeThreshold != nil { + projectInstanceStatisticsAlarmInfoListMap["stabilize_threshold"] = projectInstanceStatisticsAlarmInfoList.StabilizeThreshold + } + + if projectInstanceStatisticsAlarmInfoList.StabilizeStatisticsCycle != nil { + projectInstanceStatisticsAlarmInfoListMap["stabilize_statistics_cycle"] = projectInstanceStatisticsAlarmInfoList.StabilizeStatisticsCycle + } + + if projectInstanceStatisticsAlarmInfoList.IsCumulant != nil { + projectInstanceStatisticsAlarmInfoListMap["is_cumulant"] = projectInstanceStatisticsAlarmInfoList.IsCumulant + } + + if projectInstanceStatisticsAlarmInfoList.InstanceCount != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_count"] = projectInstanceStatisticsAlarmInfoList.InstanceCount + } + + projectInstanceStatisticsAlarmInfoListList = append(projectInstanceStatisticsAlarmInfoListList, projectInstanceStatisticsAlarmInfoListMap) + } + + alarmRuleDetailMap["project_instance_statistics_alarm_info_list"] = projectInstanceStatisticsAlarmInfoListList + } + reconciliationExtInfoList := make([]map[string]interface{}, 0, len(items.AlarmRuleDetail.ReconciliationExtInfo)) + if items.AlarmRuleDetail.ReconciliationExtInfo != nil { + for _, reconciliationExtInfo := range items.AlarmRuleDetail.ReconciliationExtInfo { + reconciliationExtInfoMap := map[string]interface{}{} + + if reconciliationExtInfo.RuleType != nil { + reconciliationExtInfoMap["rule_type"] = reconciliationExtInfo.RuleType + } + + if reconciliationExtInfo.MismatchCount != nil { + reconciliationExtInfoMap["mismatch_count"] = reconciliationExtInfo.MismatchCount + } + + if reconciliationExtInfo.Hour != nil { + reconciliationExtInfoMap["hour"] = reconciliationExtInfo.Hour + } + + if reconciliationExtInfo.Min != nil { + reconciliationExtInfoMap["min"] = reconciliationExtInfo.Min + } + + reconciliationExtInfoList = append(reconciliationExtInfoList, reconciliationExtInfoMap) + } + + alarmRuleDetailMap["reconciliation_ext_info"] = reconciliationExtInfoList + } + itemsMap["alarm_rule_detail"] = []interface{}{alarmRuleDetailMap} + } + + if items.AlarmLevel != nil 
{ + itemsMap["alarm_level"] = items.AlarmLevel + } + + if items.OwnerUin != nil { + itemsMap["owner_uin"] = items.OwnerUin + } + + if items.BundleId != nil { + itemsMap["bundle_id"] = items.BundleId + } + + if items.BundleInfo != nil { + itemsMap["bundle_info"] = items.BundleInfo + } + + alarmGroupsList := make([]map[string]interface{}, 0, len(items.AlarmGroups)) + if items.AlarmGroups != nil { + for _, alarmGroups := range items.AlarmGroups { + alarmGroupsMap := map[string]interface{}{} + + if alarmGroups.AlarmEscalationRecipientIds != nil { + alarmGroupsMap["alarm_escalation_recipient_ids"] = alarmGroups.AlarmEscalationRecipientIds + } + + if alarmGroups.AlarmEscalationInterval != nil { + alarmGroupsMap["alarm_escalation_interval"] = alarmGroups.AlarmEscalationInterval + } + + notificationFatigueMap := map[string]interface{}{} + + if alarmGroups.NotificationFatigue != nil { + if alarmGroups.NotificationFatigue.NotifyCount != nil { + notificationFatigueMap["notify_count"] = alarmGroups.NotificationFatigue.NotifyCount + } + + if alarmGroups.NotificationFatigue.NotifyInterval != nil { + notificationFatigueMap["notify_interval"] = alarmGroups.NotificationFatigue.NotifyInterval + } + + quietIntervalsList := make([]map[string]interface{}, 0, len(alarmGroups.NotificationFatigue.QuietIntervals)) + if alarmGroups.NotificationFatigue.QuietIntervals != nil { + for _, quietIntervals := range alarmGroups.NotificationFatigue.QuietIntervals { + quietIntervalsMap := map[string]interface{}{} + + if quietIntervals.DaysOfWeek != nil { + quietIntervalsMap["days_of_week"] = quietIntervals.DaysOfWeek + } + + if quietIntervals.StartTime != nil { + quietIntervalsMap["start_time"] = quietIntervals.StartTime + } + + if quietIntervals.EndTime != nil { + quietIntervalsMap["end_time"] = quietIntervals.EndTime + } + + quietIntervalsList = append(quietIntervalsList, quietIntervalsMap) + } + + notificationFatigueMap["quiet_intervals"] = quietIntervalsList + } + alarmGroupsMap["notification_fatigue"] = []interface{}{notificationFatigueMap} + } + + if alarmGroups.AlarmWays != nil { + alarmGroupsMap["alarm_ways"] = alarmGroups.AlarmWays + } + + webHooksList := make([]map[string]interface{}, 0, len(alarmGroups.WebHooks)) + if alarmGroups.WebHooks != nil { + for _, webHooks := range alarmGroups.WebHooks { + webHooksMap := map[string]interface{}{} + + if webHooks.AlarmWay != nil { + webHooksMap["alarm_way"] = webHooks.AlarmWay + } + + if webHooks.WebHooks != nil { + webHooksMap["web_hooks"] = webHooks.WebHooks + } + + webHooksList = append(webHooksList, webHooksMap) + } + + alarmGroupsMap["web_hooks"] = webHooksList + } + if alarmGroups.AlarmRecipientType != nil { + alarmGroupsMap["alarm_recipient_type"] = alarmGroups.AlarmRecipientType + } + + if alarmGroups.AlarmRecipientIds != nil { + alarmGroupsMap["alarm_recipient_ids"] = alarmGroups.AlarmRecipientIds + } + + alarmGroupsList = append(alarmGroupsList, alarmGroupsMap) + } + + itemsMap["alarm_groups"] = alarmGroupsList + } + + ids = append(ids, strings.Join([]string{projectId, alarmRuleId}, tccommon.FILED_SP)) + itemsList = append(itemsList, itemsMap) + } + + dataMap["items"] = itemsList + } + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(helper.DataResourceIdsHash(ids)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.md 
b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.md new file mode 100644 index 0000000000..80ae95930a --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules.md @@ -0,0 +1,9 @@ +Use this data source to query detailed information of wedata ops alarm rules + +Example Usage + +```hcl +data "tencentcloud_wedata_ops_alarm_rules" "wedata_ops_alarm_rules" { + project_id = "1859317240494305280" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_test.go new file mode 100644 index 0000000000..20df39f27b --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_alarm_rules_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsAlarmRulesDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataOpsAlarmRulesDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_ops_alarm_rules.wedata_ops_alarm_rules"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_ops_alarm_rules.wedata_ops_alarm_rules", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_ops_alarm_rules.wedata_ops_alarm_rules", "data.#", "1"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_ops_alarm_rules.wedata_ops_alarm_rules", "data.0.items.#", "20"), + ), + }, + }, + }) +} + +const testAccWedataOpsAlarmRulesDataSource = ` + +data "tencentcloud_wedata_ops_alarm_rules" "wedata_ops_alarm_rules" { + project_id = "1859317240494305280" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.go new file mode 100644 index 0000000000..8443022611 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.go @@ -0,0 +1,200 @@ +// Code generated by iacg; DO NOT EDIT. 
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataOpsAsyncJob() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataOpsAsyncJobRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "async_id": { + Type: schema.TypeString, + Required: true, + Description: "Asynchronous operation ID.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Asynchronous operation detail result.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Optional: true, + Description: "Project ID.", + }, + "async_id": { + Type: schema.TypeString, + Optional: true, + Description: "Operation ID.", + }, + "async_type": { + Type: schema.TypeString, + Optional: true, + Description: "Asynchronous operation type.", + }, + "status": { + Type: schema.TypeString, + Optional: true, + Description: "Asynchronous operation status: Initial state: INIT; Running: RUNNING; Success: SUCCESS; Failure: FAIL; Partial success: PART_SUCCESS.", + }, + "error_desc": { + Type: schema.TypeString, + Optional: true, + Description: "Error message.", + }, + "total_sub_process_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Total number of sub-operations.", + }, + "finished_sub_process_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of completed sub-operations.", + }, + "success_sub_process_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of successful sub-operations.", + }, + "create_user_uin": { + Type: schema.TypeString, + Optional: true, + Description: "Operator ID.", + }, + "create_time": { + Type: schema.TypeString, + Optional: true, + Description: "Operation creation time.", + }, + "update_time": { + Type: schema.TypeString, + Optional: true, + Description: "Update time.", + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataOpsAsyncJobRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_ops_async_job.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("async_id"); ok { + paramMap["AsyncId"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.GetOpsAsyncJobResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataOpsAsyncJobByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return 
reqErr + } + + var projectId string + var asyncId string + dataMap := map[string]interface{}{} + + if respData.Data != nil { + if respData.Data.ProjectId != nil { + projectId = *respData.Data.ProjectId + dataMap["project_id"] = respData.Data.ProjectId + } + + if respData.Data.AsyncId != nil { + asyncId = *respData.Data.AsyncId + dataMap["async_id"] = respData.Data.AsyncId + } + + if respData.Data.AsyncType != nil { + dataMap["async_type"] = respData.Data.AsyncType + } + + if respData.Data.Status != nil { + dataMap["status"] = respData.Data.Status + } + + if respData.Data.ErrorDesc != nil { + dataMap["error_desc"] = respData.Data.ErrorDesc + } + + if respData.Data.TotalSubProcessCount != nil { + dataMap["total_sub_process_count"] = respData.Data.TotalSubProcessCount + } + + if respData.Data.FinishedSubProcessCount != nil { + dataMap["finished_sub_process_count"] = respData.Data.FinishedSubProcessCount + } + + if respData.Data.SuccessSubProcessCount != nil { + dataMap["success_sub_process_count"] = respData.Data.SuccessSubProcessCount + } + + if respData.Data.CreateUserUin != nil { + dataMap["create_user_uin"] = respData.Data.CreateUserUin + } + + if respData.Data.CreateTime != nil { + dataMap["create_time"] = respData.Data.CreateTime + } + + if respData.Data.UpdateTime != nil { + dataMap["update_time"] = respData.Data.UpdateTime + } + + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(strings.Join([]string{projectId, asyncId}, tccommon.FILED_SP)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.md b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.md new file mode 100644 index 0000000000..332951688c --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job.md @@ -0,0 +1,10 @@ +Use this data source to query detailed information of wedata ops async job + +Example Usage + +```hcl +data "tencentcloud_wedata_ops_async_job" "wedata_ops_async_job" { + project_id = "1859317240494305280" + async_id = "9ba294ff-46d9-4a77-ae4a-acd0b4bdca3c" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_test.go new file mode 100644 index 0000000000..881b071a5d --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_async_job_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsAsyncJobDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataOpsAsyncJobDataSource, + Check: resource.ComposeTestCheckFunc( +
tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_ops_async_job.wedata_ops_async_job"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_ops_async_job.wedata_ops_async_job", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_ops_async_job.wedata_ops_async_job", "data.#", "1"), + ), + }, + }, + }) +} + +const testAccWedataOpsAsyncJobDataSource = ` + +data "tencentcloud_wedata_ops_async_job" "wedata_ops_async_job" { + project_id = "1859317240494305280" + async_id = "9ba294ff-46d9-4a77-ae4a-acd0b4bdca3c" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.go new file mode 100644 index 0000000000..46cadc87f3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.go @@ -0,0 +1,264 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataOpsWorkflow() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataOpsWorkflowRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Workflow scheduling details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + "workflow_desc": { + Type: schema.TypeString, + Required: true, + Description: "Workflow description.", + }, + "workflow_type": { + Type: schema.TypeString, + Required: true, + Description: "Workflow type:\n - cycle: Periodic;\n - manual: Manual.", + }, + "create_time": { + Type: schema.TypeString, + Required: true, + Description: "Creation time.", + }, + "create_user_uin": { + Type: schema.TypeString, + Required: true, + Description: "Creator.", + }, + "update_time": { + Type: schema.TypeString, + Required: true, + Description: "Modification time.", + }, + "startup_time": { + Type: schema.TypeInt, + Required: true, + Description: "Delayed execution time, unit=minute.", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + Description: "Configuration effective date - start date.", + }, + "end_time": { + Type: schema.TypeString, + Required: true, + Description: "Configuration end date - end date.", + }, + "cycle_type": { + Type: schema.TypeString, + Required: true, + Description: "Task cycle type. 
`ONEOFF_CYCLE`: One-time; `YEAR_CYCLE`: Year; `MONTH_CYCLE`: Month; `WEEK_CYCLE`: Week; `DAY_CYCLE`: Day; `HOUR_CYCLE`: Hour; `MINUTE_CYCLE`: Minute; `CRONTAB_CYCLE`: Crontab expression type.",
+						},
+						"folder_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Folder ID.",
+						},
+						"instance_init_strategy": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Task instance initialization strategy:\n - T_PLUS_1 (T+1): Initialize after one day delay;\n - T_PLUS_0 (T+0): Initialize on the same day;\n - T_MINUS_1 (T-1): Initialize one day in advance.",
+						},
+						"scheduler_desc": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Scheduling plan description.",
+						},
+						"first_submit_time": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow first submission time.",
+						},
+						"latest_submit_time": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow latest submission time.",
+						},
+						"status": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow status: `ALL_RUNNING`: All scheduled; `ALL_FREEZED`: All paused; `ALL_STOPPTED`: All offline; `PART_RUNNING`: Partially scheduled; `ALL_NO_RUNNING`: All unscheduled; `ALL_INVALID`: All invalid.",
+						},
+						"owner_uin": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Responsible persons, multiple separated by ';'.",
+						},
+						"workflow_name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow name.",
+						},
+					},
+				},
+			},
+
+			"result_output_file": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Used to save results.",
+			},
+		},
+	}
+}
+
+func dataSourceTencentCloudWedataOpsWorkflowRead(d *schema.ResourceData, meta interface{}) error {
+	defer tccommon.LogElapsed("data_source.tencentcloud_wedata_ops_workflow.read")()
+	defer tccommon.InconsistentCheck(d, meta)()
+
+	logId := tccommon.GetLogId(nil)
+	ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
+
+	service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
+
+	var projectId string
+	paramMap := make(map[string]interface{})
+	if v, ok := d.GetOk("project_id"); ok {
+		projectId = v.(string)
+		paramMap["ProjectId"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("workflow_id"); ok {
+		paramMap["WorkflowId"] = helper.String(v.(string))
+	}
+
+	var respData *wedatav20250806.GetOpsWorkflowResponseParams
+	reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
+		result, e := service.DescribeWedataOpsWorkflowByFilter(ctx, paramMap)
+		if e != nil {
+			return tccommon.RetryError(e)
+		}
+		respData = result
+		return nil
+	})
+	if reqErr != nil {
+		return reqErr
+	}
+
+	var workflowId string
+	dataMap := map[string]interface{}{}
+
+	if respData.Data != nil {
+		if respData.Data.WorkflowId != nil {
+			dataMap["workflow_id"] = respData.Data.WorkflowId
+			workflowId = *respData.Data.WorkflowId
+		}
+
+		if respData.Data.WorkflowDesc != nil {
+			dataMap["workflow_desc"] = respData.Data.WorkflowDesc
+		}
+
+		if respData.Data.WorkflowType != nil {
+			dataMap["workflow_type"] = respData.Data.WorkflowType
+		}
+
+		if respData.Data.CreateTime != nil {
+			dataMap["create_time"] = respData.Data.CreateTime
+		}
+
+		if respData.Data.CreateUserUin != nil {
+			dataMap["create_user_uin"] = respData.Data.CreateUserUin
+		}
+
+		if respData.Data.UpdateTime != nil {
+			dataMap["update_time"] = respData.Data.UpdateTime
+		}
+
+		if respData.Data.StartupTime != nil {
+			dataMap["startup_time"] = 
respData.Data.StartupTime + } + + if respData.Data.StartTime != nil { + dataMap["start_time"] = respData.Data.StartTime + } + + if respData.Data.EndTime != nil { + dataMap["end_time"] = respData.Data.EndTime + } + + if respData.Data.CycleType != nil { + dataMap["cycle_type"] = respData.Data.CycleType + } + + if respData.Data.FolderId != nil { + dataMap["folder_id"] = respData.Data.FolderId + } + + if respData.Data.InstanceInitStrategy != nil { + dataMap["instance_init_strategy"] = respData.Data.InstanceInitStrategy + } + + if respData.Data.SchedulerDesc != nil { + dataMap["scheduler_desc"] = respData.Data.SchedulerDesc + } + + if respData.Data.FirstSubmitTime != nil { + dataMap["first_submit_time"] = respData.Data.FirstSubmitTime + } + + if respData.Data.LatestSubmitTime != nil { + dataMap["latest_submit_time"] = respData.Data.LatestSubmitTime + } + + if respData.Data.Status != nil { + dataMap["status"] = respData.Data.Status + } + + if respData.Data.OwnerUin != nil { + dataMap["owner_uin"] = respData.Data.OwnerUin + } + + if respData.Data.WorkflowName != nil { + dataMap["workflow_name"] = respData.Data.WorkflowName + } + + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(strings.Join([]string{projectId, workflowId}, tccommon.FILED_SP)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.md b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.md new file mode 100644 index 0000000000..9cf43731fb --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow.md @@ -0,0 +1,10 @@ +Use this data source to query detailed information of wedata ops workflow + +Example Usage + +```hcl +data "tencentcloud_wedata_ops_workflow" "wedata_ops_workflow" { + project_id = "2905622749543821312" + workflow_id = "f328ab83-62e1-4b0a-9a18-a79b42722792" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_test.go new file mode 100644 index 0000000000..d180ea7e36 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflow_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsWorkflowDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataOpsWorkflowDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_ops_workflow.wedata_ops_workflow"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_ops_workflow.wedata_ops_workflow", "id"), + 
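+					// GetOpsWorkflow describes a single workflow, so the computed
+					// `data` list is expected to hold exactly one element: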
resource.TestCheckResourceAttr("data.tencentcloud_wedata_ops_workflow.wedata_ops_workflow", "data.#", "1"),
+				),
+			},
+		},
+	})
+}
+
+const testAccWedataOpsWorkflowDataSource = `
+
+data "tencentcloud_wedata_ops_workflow" "wedata_ops_workflow" {
+  project_id  = "2905622749543821312"
+  workflow_id = "f328ab83-62e1-4b0a-9a18-a79b42722792"
+}
+`
diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.go
new file mode 100644
index 0000000000..83fd80b1cc
--- /dev/null
+++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.go
@@ -0,0 +1,324 @@
+// Code generated by iacg; DO NOT EDIT.
+package wedata
+
+import (
+	"context"
+	"strings"
+
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+	wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806"
+	tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
+	"github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
+)
+
+func DataSourceTencentCloudWedataOpsWorkflows() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceTencentCloudWedataOpsWorkflowsRead,
+		Schema: map[string]*schema.Schema{
+			"project_id": {
+				Type:        schema.TypeString,
+				Required:    true,
+				Description: "Project ID.",
+			},
+
+			"folder_id": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Folder ID.",
+			},
+
+			"status": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Workflow status filter: `ALL_RUNNING`: All scheduled, `ALL_FREEZED`: All paused, `ALL_STOPPTED`: All offline, `PART_RUNNING`: Partially scheduled, `ALL_NO_RUNNING`: All unscheduled, `ALL_INVALID`: All invalid.",
+			},
+
+			"owner_uin": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Responsible person ID.",
+			},
+
+			"workflow_type": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Workflow type filter, supported values: `Cycle` or `Manual`. By default, only `Cycle` is queried.",
+			},
+
+			"key_word": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Workflow keyword filter, supports fuzzy matching by workflow ID/name.",
+			},
+
+			"sort_item": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Sorting field, optional values: `CreateTime`, `TaskCount`.",
+			},
+
+			"sort_type": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Sorting order, `DESC` or `ASC`, uppercase.",
+			},
+
+			"create_user_uin": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Creator ID.",
+			},
+
+			"modify_time": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Update time, format yyyy-MM-dd HH:mm:ss.",
+			},
+
+			"create_time": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Creation time, format yyyy-MM-dd HH:mm:ss.",
+			},
+
+			"data": {
+				Type:        schema.TypeList,
+				Computed:    true,
+				Description: "Record list.",
+				Elem: &schema.Resource{
+					Schema: map[string]*schema.Schema{
+						"task_count": {
+							Type:        schema.TypeInt,
+							Required:    true,
+							Description: "Task count.",
+						},
+						"folder_name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Folder name.",
+						},
+						"folder_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow folder ID.",
+						},
+						"workflow_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow ID.",
+						},
+						"workflow_name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow name.",
+						},
+						"workflow_type": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow type: - `Cycle`: Periodic; - `Manual`: Manual.",
+						},
+						"workflow_desc": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow description.",
+						},
+						"owner_uin": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Responsible person's user ID, multiple IDs separated by ';'.",
+						},
+						"project_id": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Project ID.",
+						},
+						"project_name": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Project name.",
+						},
+						"status": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow status: `ALL_RUNNING`: All scheduled; `ALL_FREEZED`: All paused; `ALL_STOPPTED`: All offline; `PART_RUNNING`: Partially scheduled; `ALL_NO_RUNNING`: All unscheduled; `ALL_INVALID`: All invalid.",
+						},
+						"create_time": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Workflow creation time.",
+						},
+						"update_time": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Last update time, including development and production changes.",
+						},
+						"update_user_uin": {
+							Type:        schema.TypeString,
+							Required:    true,
+							Description: "Last updated by, including development and production changes.",
+						},
+					},
+				},
+			},
+
+			"result_output_file": {
+				Type:        schema.TypeString,
+				Optional:    true,
+				Description: "Used to save results.",
+			},
+		},
+	}
+}
+
+func dataSourceTencentCloudWedataOpsWorkflowsRead(d *schema.ResourceData, meta interface{}) error {
+	defer tccommon.LogElapsed("data_source.tencentcloud_wedata_ops_workflows.read")()
+	defer tccommon.InconsistentCheck(d, meta)()
+
+	logId := tccommon.GetLogId(nil)
+	ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
+
+	service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
+
+	paramMap := make(map[string]interface{})
+	if v, ok := d.GetOk("project_id"); ok {
+		paramMap["ProjectId"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("folder_id"); ok {
+		paramMap["FolderId"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("status"); ok {
+		paramMap["Status"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("owner_uin"); ok {
+		paramMap["OwnerUin"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("workflow_type"); ok {
+		paramMap["WorkflowType"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("key_word"); ok {
+		paramMap["KeyWord"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("sort_item"); ok {
+		paramMap["SortItem"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("sort_type"); ok {
+		paramMap["SortType"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("create_user_uin"); ok {
+		paramMap["CreateUserUin"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("modify_time"); ok {
+		paramMap["ModifyTime"] = helper.String(v.(string))
+	}
+
+	if v, ok := d.GetOk("create_time"); ok {
+		paramMap["CreateTime"] = helper.String(v.(string))
+	}
+
+	var respData []*wedatav20250806.OpsWorkflow
+	reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError {
+		result, e := service.DescribeWedataOpsWorkflowsByFilter(ctx, paramMap)
+		if e != nil {
+			return tccommon.RetryError(e)
+		}
+		respData = result
+		return nil
+	})
+	if reqErr != nil {
+		return reqErr
+	}
+
+	var projectId string
+	var workflowId string
+
+	ids := make([]string, 0, len(respData))
+	dataList := make([]map[string]interface{}, 0, len(respData))
+	if respData != nil {
+		for _, data := range respData {
+			dataMap := map[string]interface{}{}
+
+			if data.TaskCount != nil {
+				dataMap["task_count"] = data.TaskCount
+			}
+
+			if data.FolderName != nil {
+				dataMap["folder_name"] = data.FolderName
+			}
+
+			if data.FolderId != nil {
+				dataMap["folder_id"] = data.FolderId
+			}
+
+			if data.WorkflowId != nil {
+				dataMap["workflow_id"] = data.WorkflowId
+				workflowId = *data.WorkflowId
+			}
+
+			if data.WorkflowName != nil {
+				dataMap["workflow_name"] = data.WorkflowName
+			}
+
+			if data.WorkflowType != nil {
+				dataMap["workflow_type"] = data.WorkflowType
+			}
+
+			if data.WorkflowDesc != nil {
+				dataMap["workflow_desc"] = data.WorkflowDesc
+			}
+
+			if data.OwnerUin != nil {
+				dataMap["owner_uin"] = data.OwnerUin
+			}
+
+			if data.ProjectId != nil {
+				dataMap["project_id"] = data.ProjectId
+				projectId = *data.ProjectId
+			}
+
+			if data.ProjectName != nil {
+				dataMap["project_name"] = data.ProjectName
+			}
+
+			if data.Status != nil {
+				dataMap["status"] = data.Status
+			}
+
+			if data.CreateTime != nil {
+				dataMap["create_time"] = data.CreateTime
+			}
+
+			if data.UpdateTime != nil {
+				dataMap["update_time"] = data.UpdateTime
+			}
+
+			if data.UpdateUserUin != nil {
+				dataMap["update_user_uin"] = data.UpdateUserUin
+			}
+
+			ids = append(ids, strings.Join([]string{projectId, workflowId}, tccommon.FILED_SP))
+			dataList = append(dataList, dataMap)
+		}
+
+		_ = d.Set("data", dataList)
+	}
+
+	d.SetId(helper.DataResourceIdsHash(ids))
+
+	output, ok := d.GetOk("result_output_file")
+	if ok && output.(string) != "" {
+		if e := tccommon.WriteToFile(output.(string), dataList); e != nil {
+			return e
+		}
+	}
+
+	return nil
+}
diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.md b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.md
new file mode 100644
index 0000000000..5f1b895003
--- /dev/null
+++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows.md
@@ -0,0 +1,14 @@
+Use this data source to query detailed information of wedata ops workflows
+
+Example Usage
+
+```hcl
+data 
"tencentcloud_wedata_ops_workflows" "wedata_ops_workflows" { + project_id = "2905622749543821312" + folder_id = "720ecbfb-7e5a-11f0-ba36-b8cef6a5af5c" + status = "ALL_RUNNING" + owner_uin = "100044349576" + workflow_type = "Cycle" + sort_type = "ASC" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_test.go new file mode 100644 index 0000000000..9b9849e4f4 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_ops_workflows_test.go @@ -0,0 +1,40 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsWorkflowsDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataOpsWorkflowsDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_ops_workflows.wedata_ops_workflows"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_ops_workflows.wedata_ops_workflows", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_ops_workflows.wedata_ops_workflows", "data.#", "1"), + ), + }, + }, + }) +} + +const testAccWedataOpsWorkflowsDataSource = ` + +data "tencentcloud_wedata_ops_workflows" "wedata_ops_workflows" { + project_id = "2905622749543821312" + folder_id = "720ecbfb-7e5a-11f0-ba36-b8cef6a5af5c" + status = "ALL_RUNNING" + owner_uin = "100044349576" + workflow_type = "Cycle" + sort_type = "ASC" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.go new file mode 100644 index 0000000000..0b5477b000 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.go @@ -0,0 +1,348 @@ +// Code generated by iacg; DO NOT EDIT. 
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataTaskInstance() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataTaskInstanceRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier, can be obtained via ListInstances.", + }, + + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "Time zone, the time zone of the input time string, default UTC+8.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Instance details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + "folder_id": { + Type: schema.TypeString, + Required: true, + Description: "Folder ID.", + }, + "folder_name": { + Type: schema.TypeString, + Required: true, + Description: "Folder name.", + }, + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + "workflow_name": { + Type: schema.TypeString, + Required: true, + Description: "Workflow name.", + }, + "task_id": { + Type: schema.TypeString, + Required: true, + Description: "Task ID.", + }, + "task_name": { + Type: schema.TypeString, + Required: true, + Description: "Task name.", + }, + "task_type_id": { + Type: schema.TypeInt, + Required: true, + Description: "ID corresponding to taskType.", + }, + "task_type": { + Type: schema.TypeString, + Required: true, + Description: "Task type.", + }, + "cycle_type": { + Type: schema.TypeString, + Required: true, + Description: "**Task cycle type**.\n* ONEOFF_CYCLE: One-time\n* YEAR_CYCLE: Year\n* MONTH_CYCLE: Month\n* WEEK_CYCLE: Week\n* DAY_CYCLE: Day\n* HOUR_CYCLE: Hour\n* MINUTE_CYCLE: Minute\n* CRONTAB_CYCLE: Crontab expression type.", + }, + "cur_run_date": { + Type: schema.TypeString, + Required: true, + Description: "Instance data time.", + }, + "instance_state": { + Type: schema.TypeString, + Required: true, + Description: "**Instance status**.\n- WAIT_EVENT: Waiting for event\n- WAIT_UPSTREAM: Waiting for upstream\n- WAIT_RUN: Waiting to run\n- RUNNING: Running\n- SKIP_RUNNING: Skipped running\n- FAILED_RETRY: Failed retry\n- EXPIRED: Failed\n- COMPLETED: Success.", + }, + "instance_type": { + Type: schema.TypeInt, + Required: true, + Description: "Instance type.\n\n- 0: Backfill type\n- 1: Periodic instance\n- 2: Non-periodic instance.", + }, + "owner_uin_list": { + Type: schema.TypeSet, + Required: true, + Description: "Owner list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "total_run_num": { + Type: schema.TypeInt, + Required: true, + Description: "Total run count.", + }, + "try_limit": { + Type: schema.TypeInt, + Required: true, + Description: "Retry limit per run failure.", + }, + "tries": { + Type: schema.TypeInt, 
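+							// tries counts the automatic retries already consumed, while
+							// try_limit above is the configured ceiling per failed run.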
+ Required: true, + Description: "Failed retry count.\nReset to 0 when manually rerun or backfilled.", + }, + "cost_time": { + Type: schema.TypeInt, + Required: true, + Description: "Execution duration, in ms.", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution start time.", + }, + "end_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution end time.", + }, + "scheduler_time": { + Type: schema.TypeString, + Required: true, + Description: "Scheduled time.", + }, + "last_update_time": { + Type: schema.TypeString, + Required: true, + Description: "Last update time, format yyyy-MM-dd HH:mm:ss.", + }, + "executor_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Executor resource group ID.", + }, + "executor_group_name": { + Type: schema.TypeString, + Required: true, + Description: "Resource group name.", + }, + "job_error_msg": { + Type: schema.TypeString, + Required: true, + Description: "Brief task failure message.", + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataTaskInstanceRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_task_instance.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("instance_key"); ok { + paramMap["InstanceKey"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("time_zone"); ok { + paramMap["TimeZone"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.GetTaskInstanceResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataTaskInstanceByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var instanceKey string + dataMap := map[string]interface{}{} + + if respData.Data != nil { + if respData.Data.ProjectId != nil { + dataMap["project_id"] = respData.Data.ProjectId + projectId = *respData.Data.ProjectId + } + + if respData.Data.InstanceKey != nil { + dataMap["instance_key"] = respData.Data.InstanceKey + instanceKey = *respData.Data.InstanceKey + } + + if respData.Data.FolderId != nil { + dataMap["folder_id"] = respData.Data.FolderId + } + + if respData.Data.FolderName != nil { + dataMap["folder_name"] = respData.Data.FolderName + } + + if respData.Data.WorkflowId != nil { + dataMap["workflow_id"] = respData.Data.WorkflowId + } + + if respData.Data.WorkflowName != nil { + dataMap["workflow_name"] = respData.Data.WorkflowName + } + + if respData.Data.TaskId != nil { + dataMap["task_id"] = respData.Data.TaskId + } + + if respData.Data.TaskName != nil { + dataMap["task_name"] = respData.Data.TaskName + } + + if respData.Data.TaskTypeId != nil { + dataMap["task_type_id"] = respData.Data.TaskTypeId + } + + if respData.Data.TaskType != nil { + dataMap["task_type"] = respData.Data.TaskType + } + + if respData.Data.CycleType != nil { + dataMap["cycle_type"] = respData.Data.CycleType + } + 
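+	// Every response member is nil-guarded: the API omits unset fields, and a
+	// partially populated instance must still flatten into a valid `data` element.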
+ if respData.Data.CurRunDate != nil { + dataMap["cur_run_date"] = respData.Data.CurRunDate + } + + if respData.Data.InstanceState != nil { + dataMap["instance_state"] = respData.Data.InstanceState + } + + if respData.Data.InstanceType != nil { + dataMap["instance_type"] = respData.Data.InstanceType + } + + if respData.Data.OwnerUinList != nil { + dataMap["owner_uin_list"] = respData.Data.OwnerUinList + } + + if respData.Data.TotalRunNum != nil { + dataMap["total_run_num"] = respData.Data.TotalRunNum + } + + if respData.Data.TryLimit != nil { + dataMap["try_limit"] = respData.Data.TryLimit + } + + if respData.Data.Tries != nil { + dataMap["tries"] = respData.Data.Tries + } + + if respData.Data.CostTime != nil { + dataMap["cost_time"] = respData.Data.CostTime + } + + if respData.Data.StartTime != nil { + dataMap["start_time"] = respData.Data.StartTime + } + + if respData.Data.EndTime != nil { + dataMap["end_time"] = respData.Data.EndTime + } + + if respData.Data.SchedulerTime != nil { + dataMap["scheduler_time"] = respData.Data.SchedulerTime + } + + if respData.Data.LastUpdateTime != nil { + dataMap["last_update_time"] = respData.Data.LastUpdateTime + } + + if respData.Data.ExecutorGroupId != nil { + dataMap["executor_group_id"] = respData.Data.ExecutorGroupId + } + + if respData.Data.ExecutorGroupName != nil { + dataMap["executor_group_name"] = respData.Data.ExecutorGroupName + } + + if respData.Data.JobErrorMsg != nil { + dataMap["job_error_msg"] = respData.Data.JobErrorMsg + } + + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(strings.Join([]string{projectId, instanceKey}, tccommon.FILED_SP)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.md b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.md new file mode 100644 index 0000000000..a28e397964 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance.md @@ -0,0 +1,10 @@ +Use this data source to query detailed information of wedata task instance + +Example Usage + +```hcl +data "tencentcloud_wedata_task_instance" "wedata_task_instance" { + project_id = "1859317240494305280" + instance_key = "20250324192240178_2025-10-13 11:50:00" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.go new file mode 100644 index 0000000000..26dc682a70 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.go @@ -0,0 +1,266 @@ +// Code generated by iacg; DO NOT EDIT. 
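+//
+// Usage sketch (placeholder IDs, assuming the instance exists): each rerun of
+// an instance appends one item with an incremented life_round_num, so the
+// items list is effectively the execution history of a single instance:
+//
+//	data "tencentcloud_wedata_task_instance_executions" "example" {
+//	  project_id   = "1859317240494305280"
+//	  instance_key = "20250731151633120_2025-10-13 17:00:00"
+//	}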
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataTaskInstanceExecutions() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataTaskInstanceExecutionsRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID to which it belongs.", + }, + + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Instance unique identifier, can be obtained via ListInstances.", + }, + + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "**Time zone** timeZone, the time zone of the input time string, default UTC+8.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Instance details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_count": { + Type: schema.TypeInt, + Required: true, + Description: "Total count of results.", + }, + "total_page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Total number of pages.", + }, + "items": { + Type: schema.TypeList, + Required: true, + Description: "Record list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Instance unique identifier.", + }, + "life_round_num": { + Type: schema.TypeInt, + Required: true, + Description: "Instance lifecycle round number, identifying a specific execution of the instance. 
For example: The first run of a periodic instance has a round number of 0; if the user reruns the instance later, the second execution will have a round number of 1.", + }, + "instance_state": { + Type: schema.TypeString, + Required: true, + Description: "Instance state, - WAIT_EVENT: Waiting for event, - WAIT_UPSTREAM: Waiting for upstream, - WAIT_RUN: Waiting to run, - RUNNING: Running, - SKIP_RUNNING: Skip running, - FAILED_RETRY: Failed and retrying, - EXPIRED: Failed, - COMPLETED: Completed.", + }, + "run_type": { + Type: schema.TypeString, + Required: true, + Description: "Instance run trigger type, - RERUN: Rerun, - ADDITION: Backfill, - PERIODIC: Periodic, - APERIODIC: Non-periodic, - RERUN_SKIP_RUN: Rerun - Skip run, - ADDITION_SKIP_RUN: Backfill - Skip run, - PERIODIC_SKIP_RUN: Periodic - Skip run, - APERIODIC_SKIP_RUN: Non-periodic - Skip run, - MANUAL_TRIGGER: Manual trigger, - RERUN_MANUAL_TRIGGER: Manual trigger - Rerun.", + }, + "tries": { + Type: schema.TypeInt, + Required: true, + Description: "Failure retry count.", + }, + "execution_phase_list": { + Type: schema.TypeList, + Required: true, + Description: "Instance execution lifecycle list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: "Start time of this state.", + }, + "detail_state": { + Type: schema.TypeString, + Optional: true, + Description: "Instance lifecycle phase state, - WAIT_UPSTREAM: Waiting for event/upstream, - WAIT_RUN: Waiting to run, - RUNNING: Running, - COMPLETE: Final state - Completed, - FAILED: Final state - Failed and retrying, - EXPIRED: Final state - Failed, - SKIP_RUNNING: Final state - Branch skipped by upstream branch node, - HISTORY: For compatibility with historical instances before 2024-03-30, instances after that date do not need to pay attention to this enum type.", + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: "End time of this state.", + }, + }, + }, + }, + "cost_time": { + Type: schema.TypeInt, + Required: true, + Description: "Cost time, in milliseconds.", + }, + }, + }, + }, + "page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Page number.", + }, + "page_size": { + Type: schema.TypeInt, + Required: true, + Description: "Page size.", + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataTaskInstanceExecutionsRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_task_instance_executions.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("instance_key"); ok { + paramMap["InstanceKey"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("time_zone"); ok { + paramMap["TimeZone"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.ListTaskInstanceExecutionsResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataTaskInstanceExecutionsByFilter(ctx, paramMap) + if e != nil { + 
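+			// tccommon.RetryError decides whether the SDK error is retryable; the
+			// surrounding resource.Retry loop keeps polling until ReadRetryTimeout.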
return tccommon.RetryError(e)
+		}
+		respData = result
+		return nil
+	})
+	if reqErr != nil {
+		return reqErr
+	}
+
+	projectId := d.Get("project_id").(string)
+	instanceKey := d.Get("instance_key").(string)
+	dataMap := map[string]interface{}{}
+	ids := []string{}
+
+	if respData.Data != nil {
+		if respData.Data.TotalCount != nil {
+			dataMap["total_count"] = respData.Data.TotalCount
+		}
+
+		if respData.Data.TotalPageNumber != nil {
+			dataMap["total_page_number"] = respData.Data.TotalPageNumber
+		}
+
+		itemsList := make([]map[string]interface{}, 0, len(respData.Data.Items))
+		if respData.Data.Items != nil {
+			for _, items := range respData.Data.Items {
+				itemsMap := map[string]interface{}{}
+
+				if items.InstanceKey != nil {
+					itemsMap["instance_key"] = items.InstanceKey
+				}
+
+				if items.LifeRoundNum != nil {
+					itemsMap["life_round_num"] = items.LifeRoundNum
+				}
+
+				if items.InstanceState != nil {
+					itemsMap["instance_state"] = items.InstanceState
+				}
+
+				if items.RunType != nil {
+					itemsMap["run_type"] = items.RunType
+				}
+
+				if items.Tries != nil {
+					itemsMap["tries"] = items.Tries
+				}
+
+				executionPhaseListList := make([]map[string]interface{}, 0, len(items.ExecutionPhaseList))
+				if items.ExecutionPhaseList != nil {
+					for _, executionPhaseList := range items.ExecutionPhaseList {
+						executionPhaseListMap := map[string]interface{}{}
+
+						if executionPhaseList.StartTime != nil {
+							executionPhaseListMap["start_time"] = executionPhaseList.StartTime
+						}
+
+						if executionPhaseList.DetailState != nil {
+							executionPhaseListMap["detail_state"] = executionPhaseList.DetailState
+						}
+
+						if executionPhaseList.EndTime != nil {
+							executionPhaseListMap["end_time"] = executionPhaseList.EndTime
+						}
+
+						executionPhaseListList = append(executionPhaseListList, executionPhaseListMap)
+					}
+
+					itemsMap["execution_phase_list"] = executionPhaseListList
+				}
+
+				if items.CostTime != nil {
+					itemsMap["cost_time"] = items.CostTime
+				}
+
+				ids = append(ids, strings.Join([]string{projectId, instanceKey}, tccommon.FILED_SP))
+				itemsList = append(itemsList, itemsMap)
+			}
+
+			dataMap["items"] = itemsList
+		}
+
+		if respData.Data.PageNumber != nil {
+			dataMap["page_number"] = respData.Data.PageNumber
+		}
+
+		if respData.Data.PageSize != nil {
+			dataMap["page_size"] = respData.Data.PageSize
+		}
+
+		_ = d.Set("data", []interface{}{dataMap})
+	}
+
+	d.SetId(helper.DataResourceIdsHash(ids))
+
+	output, ok := d.GetOk("result_output_file")
+	if ok && output.(string) != "" {
+		if e := tccommon.WriteToFile(output.(string), dataMap); e != nil {
+			return e
+		}
+	}
+
+	return nil
+}
diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.md b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.md
new file mode 100644
index 0000000000..268cd4bf58
--- /dev/null
+++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions.md
@@ -0,0 +1,10 @@
+Use this data source to query detailed information of wedata task instance executions
+
+Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instance_executions" "wedata_task_instance_executions" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250731151633120_2025-10-13 17:00:00"
+}
+```
\ No newline at end of file
diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_extension.go
new file mode 100644
index 0000000000..4a61b9ede3
--- /dev/null
+++ 
b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_test.go new file mode 100644 index 0000000000..b921bbcb01 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_executions_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskInstanceExecutionsDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskInstanceExecutionsDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_task_instance_executions.wedata_task_instance_executions"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_task_instance_executions.wedata_task_instance_executions", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_task_instance_executions.wedata_task_instance_executions", "data.#", "1"), + ), + }, + }, + }) +} + +const testAccWedataTaskInstanceExecutionsDataSource = ` + +data "tencentcloud_wedata_task_instance_executions" "wedata_task_instance_executions" { + project_id = "1859317240494305280" + instance_key = "20250731151633120_2025-10-13 17:00:00" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.go new file mode 100644 index 0000000000..73c8cd595e --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.go @@ -0,0 +1,176 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataTaskInstanceLog() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataTaskInstanceLogRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + + "life_round_num": { + Type: schema.TypeInt, + Optional: true, + Description: "Instance lifecycle number, identifying a specific execution of the instance. 
For example: the first run of a periodic instance is 0, if manually rerun the second execution is 1; defaults to the latest execution.", + }, + + "log_level": { + Type: schema.TypeString, + Optional: true, + Description: "Log level, default All - Info - Debug - Warn - Error - All.", + }, + + "next_cursor": { + Type: schema.TypeString, + Optional: true, + Description: "Pagination cursor for log queries, no business meaning. First query uses null, subsequent queries use NextCursor from previous response.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Scheduled instance details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + "code_content": { + Type: schema.TypeString, + Required: true, + Description: "Execution code content.", + }, + "log_info": { + Type: schema.TypeString, + Required: true, + Description: "Log content.", + }, + "next_cursor": { + Type: schema.TypeString, + Required: true, + Description: "Pagination cursor for log queries, no business meaning. First query uses null, subsequent queries use NextCursor from previous response.", + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataTaskInstanceLogRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_task_instance_log.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("instance_key"); ok { + paramMap["InstanceKey"] = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("life_round_num"); ok { + paramMap["LifeRoundNum"] = helper.IntUint64(v.(int)) + } + + if v, ok := d.GetOk("log_level"); ok { + paramMap["LogLevel"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("next_cursor"); ok { + paramMap["NextCursor"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.GetTaskInstanceLogResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataTaskInstanceLogByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var instanceKey string + dataMap := map[string]interface{}{} + + if respData.Data != nil { + if respData.Data.InstanceKey != nil { + dataMap["instance_key"] = respData.Data.InstanceKey + instanceKey = *respData.Data.InstanceKey + } + + if respData.Data.ProjectId != nil { + dataMap["project_id"] = respData.Data.ProjectId + projectId = *respData.Data.ProjectId + } + + if respData.Data.CodeContent != nil { + dataMap["code_content"] = respData.Data.CodeContent + } + + if respData.Data.LogInfo != nil { + dataMap["log_info"] = respData.Data.LogInfo + } + + if respData.Data.NextCursor != nil { + dataMap["next_cursor"] = respData.Data.NextCursor + } + + _ = d.Set("data", 
[]interface{}{dataMap}) + } + + d.SetId(strings.Join([]string{projectId, instanceKey}, tccommon.FILED_SP)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.md b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.md new file mode 100644 index 0000000000..0a56ba5196 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log.md @@ -0,0 +1,10 @@ +Use this data source to query detailed information of wedata task instance log + +Example Usage + +```hcl +data "tencentcloud_wedata_task_instance_log" "wedata_task_instance_log" { + project_id = "1859317240494305280" + instance_key = "20250324192240178_2025-10-13 11:50:00" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_test.go new file mode 100644 index 0000000000..4ff259fad9 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_log_test.go @@ -0,0 +1,37 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskInstanceLogDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskInstanceLogDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_task_instance_log.wedata_task_instance_log"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_task_instance_log.wedata_task_instance_log", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_task_instance_log.wedata_task_instance_log", "data.#", "1"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_task_instance_log.wedata_task_instance_log", "data.0.log_info"), + ), + }, + }, + }) +} + +const testAccWedataTaskInstanceLogDataSource = ` + +data "tencentcloud_wedata_task_instance_log" "wedata_task_instance_log" { + project_id = "1859317240494305280" + instance_key = "20250324192240178_2025-10-13 11:50:00" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_test.go new file mode 100644 index 0000000000..88f880aee1 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instance_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskInstanceDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, 
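+	// Standard acceptance-test harness; it is gated by tcacctest.AccPreCheck and
+	// assumes the pre-existing project/instance referenced in the config constant.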
resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskInstanceDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_task_instance.wedata_task_instance"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_task_instance.wedata_task_instance", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_task_instance.wedata_task_instance", "data.#", "1"), + ), + }, + }, + }) +} + +const testAccWedataTaskInstanceDataSource = ` + +data "tencentcloud_wedata_task_instance" "wedata_task_instance" { + project_id = "1859317240494305280" + instance_key = "20250324192240178_2025-10-13 11:50:00" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.go new file mode 100644 index 0000000000..33e455498c --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.go @@ -0,0 +1,556 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataTaskInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataTaskInstancesRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "keyword": { + Type: schema.TypeString, + Optional: true, + Description: "Task name or Task ID. Supports fuzzy search filtering. Multiple values separated by commas.", + }, + + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "Time zone. 
The time zone of the input time string, default UTC+8.", + }, + + "instance_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Instance type - 0: Backfill type - 1: Periodic instance - 2: Non-periodic instance.", + }, + + "instance_state": { + Type: schema.TypeString, + Optional: true, + Description: "Instance status - WAIT_EVENT: Waiting for event - WAIT_UPSTREAM: Waiting for upstream - WAIT_RUN: Waiting to run - RUNNING: Running - SKIP_RUNNING: Skipped running - FAILED_RETRY: Failed retry - EXPIRED: Failed - COMPLETED: Success.", + }, + + "task_type_id": { + Type: schema.TypeInt, + Optional: true, + Description: "Task type ID.", + }, + + "cycle_type": { + Type: schema.TypeString, + Optional: true, + Description: "Task cycle type * ONEOFF_CYCLE: One-time * YEAR_CYCLE: Year * MONTH_CYCLE: Month * WEEK_CYCLE: Week * DAY_CYCLE: Day * HOUR_CYCLE: Hour * MINUTE_CYCLE: Minute * CRONTAB_CYCLE: Crontab expression type.", + }, + + "owner_uin": { + Type: schema.TypeString, + Optional: true, + Description: "Task owner ID.", + }, + + "folder_id": { + Type: schema.TypeString, + Optional: true, + Description: "Task folder ID.", + }, + + "workflow_id": { + Type: schema.TypeString, + Optional: true, + Description: "Task workflow ID.", + }, + + "executor_group_id": { + Type: schema.TypeString, + Optional: true, + Description: "Executor resource group ID.", + }, + + "schedule_time_from": { + Type: schema.TypeString, + Optional: true, + Description: "Instance scheduled time filter condition Start time, format yyyy-MM-dd HH:mm:ss.", + }, + + "schedule_time_to": { + Type: schema.TypeString, + Optional: true, + Description: "Instance scheduled time filter condition End time, format yyyy-MM-dd HH:mm:ss.", + }, + + "start_time_from": { + Type: schema.TypeString, + Optional: true, + Description: "Instance execution start time filter condition Start time, format yyyy-MM-dd HH:mm:ss.", + }, + + "start_time_to": { + Type: schema.TypeString, + Optional: true, + Description: "Instance execution start time filter condition.End time, format yyyy-MM-dd HH:mm:ss.", + }, + + "last_update_time_from": { + Type: schema.TypeString, + Optional: true, + Description: "Instance last update time filter condition.Start time, format yyyy-MM-dd HH:mm:ss.", + }, + + "last_update_time_to": { + Type: schema.TypeString, + Optional: true, + Description: "Instance last update time filter condition.End time, format yyyy-MM-dd HH:mm:ss.", + }, + + "sort_column": { + Type: schema.TypeString, + Optional: true, + Description: "Result sorting field- SCHEDULE_DATE: Sort by scheduled time- START_TIME: Sort by execution start time- END_TIME: Sort by execution end time- COST_TIME: Sort by execution duration.", + }, + + "sort_type": { + Type: schema.TypeString, + Optional: true, + Description: "Sorting order: - ASC; - DESC.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Instance result set.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_count": { + Type: schema.TypeInt, + Required: true, + Description: "Total count.", + }, + "total_page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Total pages.", + }, + "page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Page number.", + }, + "page_size": { + Type: schema.TypeInt, + Required: true, + Description: "Page size.", + }, + "items": { + Type: schema.TypeList, + Required: true, + Description: "Data list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + 
Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + "folder_id": { + Type: schema.TypeString, + Required: true, + Description: "Folder ID.", + }, + "folder_name": { + Type: schema.TypeString, + Required: true, + Description: "Folder name.", + }, + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + "workflow_name": { + Type: schema.TypeString, + Required: true, + Description: "Workflow name.", + }, + "task_id": { + Type: schema.TypeString, + Required: true, + Description: "Task ID.", + }, + "task_name": { + Type: schema.TypeString, + Required: true, + Description: "Task name.", + }, + "cur_run_date": { + Type: schema.TypeString, + Required: true, + Description: "Instance data time.", + }, + "instance_state": { + Type: schema.TypeString, + Required: true, + Description: "Instance status. - WAIT_EVENT: Waiting for event; - WAIT_UPSTREAM: Waiting for upstream; - WAIT_RUN: Waiting to run; - RUNNING: Running; - SKIP_RUNNING: Skipped running; - FAILED_RETRY: Failed retry; - EXPIRED: Failed; - COMPLETED: Success.", + }, + "instance_type": { + Type: schema.TypeInt, + Required: true, + Description: "Instance type. - 0: Backfill type; - 1: Periodic instance; - 2: Non-periodic instance.", + }, + "owner_uin_list": { + Type: schema.TypeSet, + Required: true, + Description: "Owner list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "total_run_num": { + Type: schema.TypeInt, + Required: true, + Description: "Total run count.", + }, + "task_type": { + Type: schema.TypeString, + Required: true, + Description: "Task type description.", + }, + "task_type_id": { + Type: schema.TypeInt, + Required: true, + Description: "Task type ID.", + }, + "cycle_type": { + Type: schema.TypeString, + Required: true, + Description: "Task cycle type. Supports filtering multiple types with OR relationship. O: ONEOFF_CYCLE; Y: YEAR_CYCLE; M: MONTH_CYCLE; W: WEEK_CYCLE; D: DAY_CYCLE; H: HOUR_CYCLE; I: MINUTE_CYCLE; C: CRONTAB_CYCLE.", + }, + "try_limit": { + Type: schema.TypeInt, + Required: true, + Description: "Retry limit per run failure.", + }, + "tries": { + Type: schema.TypeInt, + Required: true, + Description: "Failed retry count.
Reset to 0 when manually rerun or backfilled.", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution start time.", + }, + "end_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution end time.", + }, + "cost_time": { + Type: schema.TypeInt, + Required: true, + Description: "Execution duration, in ms.", + }, + "scheduler_time": { + Type: schema.TypeString, + Required: true, + Description: "Scheduled time.", + }, + "last_update_time": { + Type: schema.TypeString, + Required: true, + Description: "Last update time, format yyyy-MM-dd HH:mm:ss.", + }, + "executor_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Executor resource group ID.", + }, + "executor_group_name": { + Type: schema.TypeString, + Required: true, + Description: "Resource group name.", + }, + }, + }, + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataTaskInstancesRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_task_instances.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("keyword"); ok { + paramMap["Keyword"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("time_zone"); ok { + paramMap["TimeZone"] = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("instance_type"); ok { + paramMap["InstanceType"] = helper.IntUint64(v.(int)) + } + + if v, ok := d.GetOk("instance_state"); ok { + paramMap["InstanceState"] = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("task_type_id"); ok { + paramMap["TaskTypeId"] = helper.IntUint64(v.(int)) + } + + if v, ok := d.GetOk("cycle_type"); ok { + paramMap["CycleType"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("owner_uin"); ok { + paramMap["OwnerUin"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("folder_id"); ok { + paramMap["FolderId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("workflow_id"); ok { + paramMap["WorkflowId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("executor_group_id"); ok { + paramMap["ExecutorGroupId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("schedule_time_from"); ok { + paramMap["ScheduleTimeFrom"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("schedule_time_to"); ok { + paramMap["ScheduleTimeTo"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("start_time_from"); ok { + paramMap["StartTimeFrom"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("start_time_to"); ok { + paramMap["StartTimeTo"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("last_update_time_from"); ok { + paramMap["LastUpdateTimeFrom"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("last_update_time_to"); ok { + paramMap["LastUpdateTimeTo"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("sort_column"); ok { + paramMap["SortColumn"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("sort_type"); ok { + paramMap["SortType"] = helper.String(v.(string)) + } + + var respData 
*wedatav20250806.ListTaskInstancesResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataTaskInstancesByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var instanceKey string + dataMap := map[string]interface{}{} + ids := []string{} + + if respData.Data != nil { + if respData.Data.TotalCount != nil { + dataMap["total_count"] = respData.Data.TotalCount + } + + if respData.Data.TotalPageNumber != nil { + dataMap["total_page_number"] = respData.Data.TotalPageNumber + } + + if respData.Data.PageNumber != nil { + dataMap["page_number"] = respData.Data.PageNumber + } + + if respData.Data.PageSize != nil { + dataMap["page_size"] = respData.Data.PageSize + } + + itemsList := make([]map[string]interface{}, 0, len(respData.Data.Items)) + if respData.Data.Items != nil { + for _, items := range respData.Data.Items { + itemsMap := map[string]interface{}{} + + if items.ProjectId != nil { + itemsMap["project_id"] = items.ProjectId + projectId = *items.ProjectId + } + + if items.InstanceKey != nil { + itemsMap["instance_key"] = items.InstanceKey + instanceKey = *items.InstanceKey + } + + if items.FolderId != nil { + itemsMap["folder_id"] = items.FolderId + } + + if items.FolderName != nil { + itemsMap["folder_name"] = items.FolderName + } + + if items.WorkflowId != nil { + itemsMap["workflow_id"] = items.WorkflowId + } + + if items.WorkflowName != nil { + itemsMap["workflow_name"] = items.WorkflowName + } + + if items.TaskId != nil { + itemsMap["task_id"] = items.TaskId + } + + if items.TaskName != nil { + itemsMap["task_name"] = items.TaskName + } + + if items.CurRunDate != nil { + itemsMap["cur_run_date"] = items.CurRunDate + } + + if items.InstanceState != nil { + itemsMap["instance_state"] = items.InstanceState + } + + if items.InstanceType != nil { + itemsMap["instance_type"] = items.InstanceType + } + + if items.OwnerUinList != nil { + itemsMap["owner_uin_list"] = items.OwnerUinList + } + + if items.TotalRunNum != nil { + itemsMap["total_run_num"] = items.TotalRunNum + } + + if items.TaskType != nil { + itemsMap["task_type"] = items.TaskType + } + + if items.TaskTypeId != nil { + itemsMap["task_type_id"] = items.TaskTypeId + } + + if items.CycleType != nil { + itemsMap["cycle_type"] = items.CycleType + } + + if items.TryLimit != nil { + itemsMap["try_limit"] = items.TryLimit + } + + if items.Tries != nil { + itemsMap["tries"] = items.Tries + } + + if items.StartTime != nil { + itemsMap["start_time"] = items.StartTime + } + + if items.EndTime != nil { + itemsMap["end_time"] = items.EndTime + } + + if items.CostTime != nil { + itemsMap["cost_time"] = items.CostTime + } + + if items.SchedulerTime != nil { + itemsMap["scheduler_time"] = items.SchedulerTime + } + + if items.LastUpdateTime != nil { + itemsMap["last_update_time"] = items.LastUpdateTime + } + + if items.ExecutorGroupId != nil { + itemsMap["executor_group_id"] = items.ExecutorGroupId + } + + if items.ExecutorGroupName != nil { + itemsMap["executor_group_name"] = items.ExecutorGroupName + } + + ids = append(ids, strings.Join([]string{projectId, instanceKey}, tccommon.FILED_SP)) + itemsList = append(itemsList, itemsMap) + } + + dataMap["items"] = itemsList + } + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(helper.DataResourceIdsHash(ids)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + 
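// Persist the assembled result map to the local file named by result_output_file. +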
if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.md b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.md new file mode 100644 index 0000000000..83225685bd --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances.md @@ -0,0 +1,9 @@ +Use this data source to query detailed information of wedata task instances + +Example Usage + +```hcl +data "tencentcloud_wedata_task_instances" "wedata_task_instances" { + project_id = "1859317240494305280" +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_extension.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_test.go b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_test.go new file mode 100644 index 0000000000..360288f6bb --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_task_instances_test.go @@ -0,0 +1,36 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskInstancesDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskInstancesDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_task_instances.wedata_task_instances"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_task_instances.wedata_task_instances", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_task_instances.wedata_task_instances", "data.#", "1"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_task_instances.wedata_task_instances", "data.0.items.#", "10"), + ), + }, + }, + }) +} + +const testAccWedataTaskInstancesDataSource = ` + +data "tencentcloud_wedata_task_instances" "wedata_task_instances" { + project_id = "1859317240494305280" +} +` diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.go b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.go new file mode 100644 index 0000000000..a8df69fb03 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.go @@ -0,0 +1,396 @@ +// Code generated by iacg; DO NOT EDIT. 
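+// Implements the tencentcloud_wedata_upstream_task_instances data source, which lists the upstream task instances of a given instance_key.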
+package wedata + +import ( + "context" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func DataSourceTencentCloudWedataUpstreamTaskInstances() *schema.Resource { + return &schema.Resource{ + Read: dataSourceTencentCloudWedataUpstreamTaskInstancesRead, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + + "time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "Time zone, default UTC+8.", + }, + + "data": { + Type: schema.TypeList, + Computed: true, + Description: "Upstream instance list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "total_count": { + Type: schema.TypeInt, + Required: true, + Description: "Total count.", + }, + "total_page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Total pages.", + }, + "page_number": { + Type: schema.TypeInt, + Required: true, + Description: "Page number.", + }, + "page_size": { + Type: schema.TypeInt, + Required: true, + Description: "Page size.", + }, + "items": { + Type: schema.TypeList, + Required: true, + Description: "Data list.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + Description: "Project ID.", + }, + "instance_key": { + Type: schema.TypeString, + Required: true, + Description: "Unique instance identifier.", + }, + "folder_id": { + Type: schema.TypeString, + Required: true, + Description: "Folder ID.", + }, + "folder_name": { + Type: schema.TypeString, + Required: true, + Description: "Folder name.", + }, + "workflow_id": { + Type: schema.TypeString, + Required: true, + Description: "Workflow ID.", + }, + "workflow_name": { + Type: schema.TypeString, + Required: true, + Description: "Workflow name.", + }, + "task_id": { + Type: schema.TypeString, + Required: true, + Description: "Task ID.", + }, + "task_name": { + Type: schema.TypeString, + Required: true, + Description: "Task name.", + }, + "cur_run_date": { + Type: schema.TypeString, + Required: true, + Description: "Instance data time.", + }, + "instance_state": { + Type: schema.TypeString, + Required: true, + Description: "Instance status.\n- WAIT_EVENT: Waiting for event\n- WAIT_UPSTREAM: Waiting for upstream\n- WAIT_RUN: Waiting to run\n- RUNNING: Running\n- SKIP_RUNNING: Skipped running\n- FAILED_RETRY: Failed retry\n- EXPIRED: Failed\n- COMPLETED: Success.", + }, + "instance_type": { + Type: schema.TypeInt, + Required: true, + Description: "Instance type.\n\n- 0: Backfill type\n- 1: Periodic instance\n- 2: Non-periodic instance.", + }, + "owner_uin_list": { + Type: schema.TypeSet, + Required: true, + Description: "Owner list.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "total_run_num": { + Type: schema.TypeInt, + Required: true, + Description: "Total run count.", + }, + "task_type": { + Type: schema.TypeString, + Required: true, + Description: "Task type description.", + }, + "task_type_id": { + Type: schema.TypeInt, + Required: true, + 
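// Numeric ID of the task type; task_type above holds its human-readable description. +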
Description: "Task type ID.", + }, + "cycle_type": { + Type: schema.TypeString, + Required: true, + Description: "Task cycle type.\nSupports filtering multiple types with OR relationship.\n* O: ONEOFF_CYCLE\n* Y: YEAR_CYCLE\n* M: MONTH_CYCLE\n* W: WEEK_CYCLE\n* D: DAY_CYCLE\n* H: HOUR_CYCLE\n* I: MINUTE_CYCLE\n* C: CRONTAB_CYCLE.", + }, + "try_limit": { + Type: schema.TypeInt, + Required: true, + Description: "Retry limit per run failure.", + }, + "tries": { + Type: schema.TypeInt, + Required: true, + Description: "Failed retry count.\nReset to 0 when manually rerun or backfilled.", + }, + "start_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution start time.", + }, + "end_time": { + Type: schema.TypeString, + Required: true, + Description: "Execution end time.", + }, + "cost_time": { + Type: schema.TypeInt, + Required: true, + Description: "Execution duration, in ms.", + }, + "scheduler_time": { + Type: schema.TypeString, + Required: true, + Description: "Scheduled time.", + }, + "last_update_time": { + Type: schema.TypeString, + Required: true, + Description: "Last update time, format yyyy-MM-dd HH:mm:ss.", + }, + "executor_group_id": { + Type: schema.TypeString, + Required: true, + Description: "Executor resource group ID.", + }, + "executor_group_name": { + Type: schema.TypeString, + Required: true, + Description: "Resource group name.", + }, + }, + }, + }, + }, + }, + }, + + "result_output_file": { + Type: schema.TypeString, + Optional: true, + Description: "Used to save results.", + }, + }, + } +} + +func dataSourceTencentCloudWedataUpstreamTaskInstancesRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("data_source.tencentcloud_wedata_upstream_task_instances.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(nil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + paramMap := make(map[string]interface{}) + if v, ok := d.GetOk("project_id"); ok { + paramMap["ProjectId"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("instance_key"); ok { + paramMap["InstanceKey"] = helper.String(v.(string)) + } + + if v, ok := d.GetOk("time_zone"); ok { + paramMap["TimeZone"] = helper.String(v.(string)) + } + + var respData *wedatav20250806.ListUpstreamTaskInstancesResponseParams + reqErr := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + result, e := service.DescribeWedataUpstreamTaskInstancesByFilter(ctx, paramMap) + if e != nil { + return tccommon.RetryError(e) + } + respData = result + return nil + }) + if reqErr != nil { + return reqErr + } + + var projectId string + var instanceKey string + dataMap := map[string]interface{}{} + ids := []string{} + + if respData.Data != nil { + if respData.Data.TotalCount != nil { + dataMap["total_count"] = respData.Data.TotalCount + } + + if respData.Data.TotalPageNumber != nil { + dataMap["total_page_number"] = respData.Data.TotalPageNumber + } + + if respData.Data.PageNumber != nil { + dataMap["page_number"] = respData.Data.PageNumber + } + + if respData.Data.PageSize != nil { + dataMap["page_size"] = respData.Data.PageSize + } + + itemsList := make([]map[string]interface{}, 0, len(respData.Data.Items)) + if respData.Data.Items != nil { + for _, items := range respData.Data.Items { + itemsMap := map[string]interface{}{} + + if items.ProjectId != nil { + itemsMap["project_id"] = items.ProjectId + projectId 
= *items.ProjectId + } + + if items.InstanceKey != nil { + itemsMap["instance_key"] = items.InstanceKey + instanceKey = *items.InstanceKey + } + + if items.FolderId != nil { + itemsMap["folder_id"] = items.FolderId + } + + if items.FolderName != nil { + itemsMap["folder_name"] = items.FolderName + } + + if items.WorkflowId != nil { + itemsMap["workflow_id"] = items.WorkflowId + } + + if items.WorkflowName != nil { + itemsMap["workflow_name"] = items.WorkflowName + } + + if items.TaskId != nil { + itemsMap["task_id"] = items.TaskId + } + + if items.TaskName != nil { + itemsMap["task_name"] = items.TaskName + } + + if items.CurRunDate != nil { + itemsMap["cur_run_date"] = items.CurRunDate + } + + if items.InstanceState != nil { + itemsMap["instance_state"] = items.InstanceState + } + + if items.InstanceType != nil { + itemsMap["instance_type"] = items.InstanceType + } + + if items.OwnerUinList != nil { + itemsMap["owner_uin_list"] = items.OwnerUinList + } + + if items.TotalRunNum != nil { + itemsMap["total_run_num"] = items.TotalRunNum + } + + if items.TaskType != nil { + itemsMap["task_type"] = items.TaskType + } + + if items.TaskTypeId != nil { + itemsMap["task_type_id"] = items.TaskTypeId + } + + if items.CycleType != nil { + itemsMap["cycle_type"] = items.CycleType + } + + if items.TryLimit != nil { + itemsMap["try_limit"] = items.TryLimit + } + + if items.Tries != nil { + itemsMap["tries"] = items.Tries + } + + if items.StartTime != nil { + itemsMap["start_time"] = items.StartTime + } + + if items.EndTime != nil { + itemsMap["end_time"] = items.EndTime + } + + if items.CostTime != nil { + itemsMap["cost_time"] = items.CostTime + } + + if items.SchedulerTime != nil { + itemsMap["scheduler_time"] = items.SchedulerTime + } + + if items.LastUpdateTime != nil { + itemsMap["last_update_time"] = items.LastUpdateTime + } + + if items.ExecutorGroupId != nil { + itemsMap["executor_group_id"] = items.ExecutorGroupId + } + + if items.ExecutorGroupName != nil { + itemsMap["executor_group_name"] = items.ExecutorGroupName + } + + ids = append(ids, strings.Join([]string{projectId, instanceKey}, tccommon.FILED_SP)) + itemsList = append(itemsList, itemsMap) + } + + dataMap["items"] = itemsList + } + _ = d.Set("data", []interface{}{dataMap}) + } + + d.SetId(helper.DataResourceIdsHash(ids)) + + output, ok := d.GetOk("result_output_file") + if ok && output.(string) != "" { + if e := tccommon.WriteToFile(output.(string), dataMap); e != nil { + return e + } + } + + return nil +} diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.md b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.md new file mode 100644 index 0000000000..e63d0a479d --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances.md @@ -0,0 +1,20 @@ +Use this data source to query detailed information of wedata upstream task instances + +Example Usage + +```hcl +data "tencentcloud_wedata_task_instances" "wedata_task_instances" { + project_id = "1859317240494305280" +} + +locals { + instance_keys = data.tencentcloud_wedata_task_instances.wedata_task_instances.data[0].items[*].instance_key +} + +data "tencentcloud_wedata_upstream_task_instances" "wedata_upstream_task_instances" { + for_each = toset(local.instance_keys) + + project_id = "1859317240494305280" + instance_key = each.value +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances_test.go 
b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances_test.go new file mode 100644 index 0000000000..ac209e4b57 --- /dev/null +++ b/tencentcloud/services/wedata/data_source_tc_wedata_upstream_task_instances_test.go @@ -0,0 +1,37 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataUpstreamTaskInstancesDataSource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataUpstreamTaskInstancesDataSource, + Check: resource.ComposeTestCheckFunc( + tcacctest.AccCheckTencentCloudDataSourceID("data.tencentcloud_wedata_upstream_task_instances.wedata_upstream_task_instances"), + resource.TestCheckResourceAttrSet("data.tencentcloud_wedata_upstream_task_instances.wedata_upstream_task_instances", "id"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_upstream_task_instances.wedata_upstream_task_instances", "data.#", "1"), + resource.TestCheckResourceAttr("data.tencentcloud_wedata_upstream_task_instances.wedata_upstream_task_instances", "data.0.items.#", "2"), + ), + }, + }, + }) +} + +const testAccWedataUpstreamTaskInstancesDataSource = ` + +data "tencentcloud_wedata_upstream_task_instances" "wedata_upstream_task_instances" { + project_id = "1859317240494305280" + instance_key = "20250820150153594_2025-10-13 17:00:00" +} +` diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.go new file mode 100644 index 0000000000..df997cd046 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.go @@ -0,0 +1,1207 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "fmt" + "log" + "strings" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudWedataOpsAlarmRule() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudWedataOpsAlarmRuleCreate, + Read: resourceTencentCloudWedataOpsAlarmRuleRead, + Update: resourceTencentCloudWedataOpsAlarmRuleUpdate, + Delete: resourceTencentCloudWedataOpsAlarmRuleDelete, + Importer: &schema.ResourceImporter{ + State: schema.ImportStatePassthrough, + }, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Project ID.", + }, + + "alarm_rule_name": { + Type: schema.TypeString, + Required: true, + Description: "Alert rule name.", + }, + + "monitor_object_ids": { + Type: schema.TypeSet, + Required: true, + Description: "A list of monitored object business IDs. Different business IDs are passed in based on the MonitorType setting. For example, 1 (Task) - MonitorObjectIds is a list of task IDs; 2 (Workflow) - MonitorObjectIds is a list of workflow IDs (workflow IDs can be obtained from the ListWorkflows interface); 3 (Project) - MonitorObjectIds is a list of project IDs.
Example value: [\"ddc\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "alarm_types": { + Type: schema.TypeSet, + Required: true, + Description: "Alarm Rule Monitoring Types: failure: failure alarm; overtime: timeout alarm; success: success alarm; backTrackingOrRerunSuccess: backTrackingOrRerunSuccess: backTrackingOrRerunFailure: backTrackingOrRerunFailure. Project Fluctuation Alarms: projectFailureInstanceUpwardFluctuationAlarm: alarm if the upward fluctuation rate of failed instances exceeds the threshold. projectSuccessInstanceDownwardFluctuationAlarm: alarm if the downward fluctuation rate of successful instances exceeds the threshold. Offline Integration Task Reconciliation Alarms: reconciliationFailure: offline reconciliation task failure alarm; reconciliationOvertime: offline reconciliation task timeout alarm; reconciliationMismatch: alarm if the number of inconsistent entries in a data reconciliation task exceeds the threshold. Example value: [\"failure\"].", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "alarm_groups": { + Type: schema.TypeList, + Required: true, + Description: "Alarm receiver configuration information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_escalation_recipient_ids": { + Type: schema.TypeSet, + Optional: true, + Description: "Alarm escalator ID list. If the alarm receiver or the upper escalator does not confirm the alarm within the alarm interval, the alarm will be sent to the next level escalator.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "alarm_escalation_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm escalation interval.", + }, + "notification_fatigue": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Alarm notification fatigue configuration.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "notify_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Number of alarms.", + }, + "notify_interval": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm interval, in minutes.", + }, + "quiet_intervals": { + Type: schema.TypeList, + Optional: true, + Description: "Do not disturb time, for example, the example value [{DaysOfWeek: [1, 2], StartTime: \"00:00:00\", EndTime: \"09:00:00\"}] means do not disturb from 00:00 to 09:00 every Monday and Tuesday.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "days_of_week": { + Type: schema.TypeSet, + Optional: true, + Description: "According to the ISO standard, 1 represents Monday and 7 represents Sunday.", + Elem: &schema.Schema{ + Type: schema.TypeInt, + }, + }, + "start_time": { + Type: schema.TypeString, + Optional: true, + Description: "Start time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss.", + }, + "end_time": { + Type: schema.TypeString, + Optional: true, + Description: "End time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss.", + }, + }, + }, + }, + }, + }, + }, + "alarm_ways": { + Type: schema.TypeSet, + Optional: true, + Description: "Alert Channels: 1: Email, 2: SMS, 3: WeChat, 4: Voice, 5: WeChat Enterprise, 6: Http, 7: WeChat Enterprise Group, 8: Lark Group, 9: DingTalk Group, 10: Slack Group, 11: Teams Group (Default: Email), Only one channel can be selected.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "web_hooks": { + Type: schema.TypeList, + Optional: true, + Description: "List of webhook addresses for corporate 
WeChat groups, Feishu groups, DingTalk groups, Slack groups, and Teams groups.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_way": { + Type: schema.TypeString, + Optional: true, + Description: "Alert channel value: 7. Enterprise WeChat group, 8. Feishu group, 9. DingTalk group, 10. Slack group, 11. Teams group.", + }, + "web_hooks": { + Type: schema.TypeSet, + Optional: true, + Description: "List of webhook addresses for the alarm group.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "alarm_recipient_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm Recipient Type: 1. Designated Personnel, 2. Task Responsible Personnel, 3. Duty Roster (Default: 1. Designated Personnel).", + }, + "alarm_recipient_ids": { + Type: schema.TypeSet, + Optional: true, + Description: "Depending on the type of AlarmRecipientType, this list has different business IDs: 1 (Specified Person): Alarm Recipient ID List; 2 (Task Responsible Person): No configuration required; 3 (Duty Roster): Duty Roster ID List.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + + "monitor_object_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Monitoring object type. Task-based monitoring: configurable by task/workflow/project: 1. Task, 2. Workflow, 3. Project (default is 1. Task). Project-based monitoring: alerts on overall project task fluctuations, 7: Project fluctuation monitoring alerts.", + }, + + "alarm_rule_detail": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Alarm rule configuration information: Success alarms do not require configuration. Failure alarms can be configured as either first-failure alarms or all retry failure alarms. Timeout configuration requires the timeout type and timeout threshold. Project fluctuation alarms require the fluctuation rate and anti-shake period.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "trigger": { + Type: schema.TypeInt, + Optional: true, + Description: "Failure trigger timing: 1 - Triggered on first failure; 2 - Triggered when all retries complete (default).", + }, + "data_backfill_or_rerun_trigger": { + Type: schema.TypeInt, + Optional: true, + Description: "Backfill or rerun trigger timing: 1 - Triggered on first failure; 2 - Triggered when all retries complete.", + }, + "time_out_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Periodic instance timeout configuration details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. Estimated completion within the period but not completed.", + }, + "type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout value configuration type: 1-Specified value; 2-Average value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in hours. The default value is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in minutes.
The default value is 1.", + }, + "schedule_time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "The time zone configuration corresponding to the timeout period, such as UTC+7; the default is UTC+8.", + }, + }, + }, + }, + "data_backfill_or_rerun_time_out_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Timeout configuration details for backfill and rerun instances.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. Estimated completion within the period but not completed.", + }, + "type": { + Type: schema.TypeInt, + Optional: true, + Description: "Timeout value configuration type: 1-Specified value; 2-Average value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in hours. The default value is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Specify the timeout value in minutes. The default value is 1.", + }, + "schedule_time_zone": { + Type: schema.TypeString, + Optional: true, + Description: "The time zone configuration corresponding to the timeout period, such as UTC+7; the default is UTC+8.", + }, + }, + }, + }, + "project_instance_statistics_alarm_info_list": { + Type: schema.TypeList, + Optional: true, + Description: "Project fluctuation alarm configuration details.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "alarm_type": { + Type: schema.TypeString, + Required: true, + Description: "Alarm type: projectFailureInstanceUpwardFluctuationAlarm: Failure instance upward fluctuation alarm; projectSuccessInstanceDownwardFluctuationAlarm: Success instance downward fluctuation alarm.", + }, + "instance_threshold_count_percent": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm threshold for the downward fluctuation rate of successful instances, or the upward fluctuation rate of failed instances.", + }, + "instance_threshold_count": { + Type: schema.TypeInt, + Optional: true, + Description: "The cumulative instance number fluctuation threshold.", + }, + "stabilize_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: "Stability threshold (number of statistical cycles for anti-shake configuration).", + }, + "stabilize_statistics_cycle": { + Type: schema.TypeInt, + Optional: true, + Description: "Stability statistics period (number of anti-shake configuration statistics periods).", + }, + "is_cumulant": { + Type: schema.TypeBool, + Optional: true, + Description: "Whether to count cumulatively. false: continuous; true: cumulative.", + }, + "instance_count": { + Type: schema.TypeInt, + Optional: true, + Description: "The cumulative number of instances on the day; the downward fluctuation of the number of failed instances on the day.", + }, + }, + }, + }, + "reconciliation_ext_info": { + Type: schema.TypeList, + Optional: true, + Description: "Offline integrated reconciliation alarm configuration information.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "rule_type": { + Type: schema.TypeString, + Optional: true, + Description: "Offline alarm rule types: reconciliationFailure: Offline reconciliation failure alarm; reconciliationOvertime: Offline reconciliation task timeout alarm (timeout must be
configured); reconciliationMismatch: Offline reconciliation mismatch alarm (mismatch threshold must be configured).", + }, + "mismatch_count": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation inconsistency threshold, RuleType=reconciliationMismatch. This field needs to be configured and has no default value.", + }, + "hour": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation task timeout threshold: hours, default is 0.", + }, + "min": { + Type: schema.TypeInt, + Optional: true, + Description: "Reconciliation task timeout threshold: minutes, default is 1.", + }, + }, + }, + }, + }, + }, + }, + + "alarm_level": { + Type: schema.TypeInt, + Optional: true, + Description: "Alarm level: 1. Normal, 2. Major, 3. Urgent (default 1. Normal).", + }, + + "description": { + Type: schema.TypeString, + Optional: true, + Description: "Alarm rule description.", + }, + }, + } +} + +func resourceTencentCloudWedataOpsAlarmRuleCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_alarm_rule.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + projectId string + alarmRuleId string + ) + var ( + request = wedatav20250806.NewCreateOpsAlarmRuleRequest() + response = wedatav20250806.NewCreateOpsAlarmRuleResponse() + ) + + if v, ok := d.GetOk("project_id"); ok { + projectId = v.(string) + request.ProjectId = helper.String(v.(string)) + } + + if v, ok := d.GetOk("alarm_rule_name"); ok { + request.AlarmRuleName = helper.String(v.(string)) + } + + if v, ok := d.GetOk("monitor_object_ids"); ok { + monitorObjectIdsSet := v.(*schema.Set).List() + for i := range monitorObjectIdsSet { + monitorObjectIds := monitorObjectIdsSet[i].(string) + request.MonitorObjectIds = append(request.MonitorObjectIds, helper.String(monitorObjectIds)) + } + } + + if v, ok := d.GetOk("alarm_types"); ok { + alarmTypesSet := v.(*schema.Set).List() + for i := range alarmTypesSet { + alarmTypes := alarmTypesSet[i].(string) + request.AlarmTypes = append(request.AlarmTypes, helper.String(alarmTypes)) + } + } + + if v, ok := d.GetOk("alarm_groups"); ok { + for _, item := range v.([]interface{}) { + alarmGroupsMap := item.(map[string]interface{}) + alarmGroup := wedatav20250806.AlarmGroup{} + if v, ok := alarmGroupsMap["alarm_escalation_recipient_ids"]; ok { + alarmEscalationRecipientIdsSet := v.(*schema.Set).List() + for i := range alarmEscalationRecipientIdsSet { + alarmEscalationRecipientIds := alarmEscalationRecipientIdsSet[i].(string) + alarmGroup.AlarmEscalationRecipientIds = append(alarmGroup.AlarmEscalationRecipientIds, helper.String(alarmEscalationRecipientIds)) + } + } + if v, ok := alarmGroupsMap["alarm_escalation_interval"].(int); ok { + alarmGroup.AlarmEscalationInterval = helper.IntInt64(v) + } + if notificationFatigueMap, ok := helper.ConvertInterfacesHeadToMap(alarmGroupsMap["notification_fatigue"]); ok { + notificationFatigue := wedatav20250806.NotificationFatigue{} + if v, ok := notificationFatigueMap["notify_count"].(int); ok { + notificationFatigue.NotifyCount = helper.IntUint64(v) + } + if v, ok := notificationFatigueMap["notify_interval"].(int); ok { + notificationFatigue.NotifyInterval = helper.IntUint64(v) + } + if v, ok := notificationFatigueMap["quiet_intervals"]; ok { + for _, item := range v.([]interface{}) { + quietIntervalsMap := 
item.(map[string]interface{}) + alarmQuietInterval := wedatav20250806.AlarmQuietInterval{} + if v, ok := quietIntervalsMap["days_of_week"]; ok { + daysOfWeekSet := v.(*schema.Set).List() + for i := range daysOfWeekSet { + daysOfWeek := daysOfWeekSet[i].(int) + alarmQuietInterval.DaysOfWeek = append(alarmQuietInterval.DaysOfWeek, helper.IntUint64(daysOfWeek)) + } + } + if v, ok := quietIntervalsMap["start_time"].(string); ok && v != "" { + alarmQuietInterval.StartTime = helper.String(v) + } + if v, ok := quietIntervalsMap["end_time"].(string); ok && v != "" { + alarmQuietInterval.EndTime = helper.String(v) + } + notificationFatigue.QuietIntervals = append(notificationFatigue.QuietIntervals, &alarmQuietInterval) + } + } + alarmGroup.NotificationFatigue = ¬ificationFatigue + } + if v, ok := alarmGroupsMap["alarm_ways"]; ok { + alarmWaysSet := v.(*schema.Set).List() + for i := range alarmWaysSet { + alarmWays := alarmWaysSet[i].(string) + alarmGroup.AlarmWays = append(alarmGroup.AlarmWays, helper.String(alarmWays)) + } + } + if v, ok := alarmGroupsMap["web_hooks"]; ok { + for _, item := range v.([]interface{}) { + webHooksMap := item.(map[string]interface{}) + alarmWayWebHook := wedatav20250806.AlarmWayWebHook{} + if v, ok := webHooksMap["alarm_way"].(string); ok && v != "" { + alarmWayWebHook.AlarmWay = helper.String(v) + } + if v, ok := webHooksMap["web_hooks"]; ok { + webHooksSet := v.(*schema.Set).List() + for i := range webHooksSet { + webHooks := webHooksSet[i].(string) + alarmWayWebHook.WebHooks = append(alarmWayWebHook.WebHooks, helper.String(webHooks)) + } + } + alarmGroup.WebHooks = append(alarmGroup.WebHooks, &alarmWayWebHook) + } + } + if v, ok := alarmGroupsMap["alarm_recipient_type"].(int); ok { + alarmGroup.AlarmRecipientType = helper.IntInt64(v) + } + if v, ok := alarmGroupsMap["alarm_recipient_ids"]; ok { + alarmRecipientIdsSet := v.(*schema.Set).List() + for i := range alarmRecipientIdsSet { + alarmRecipientIds := alarmRecipientIdsSet[i].(string) + alarmGroup.AlarmRecipientIds = append(alarmGroup.AlarmRecipientIds, helper.String(alarmRecipientIds)) + } + } + request.AlarmGroups = append(request.AlarmGroups, &alarmGroup) + } + } + + if v, ok := d.GetOkExists("monitor_object_type"); ok { + request.MonitorObjectType = helper.IntUint64(v.(int)) + } + + if alarmRuleDetailMap, ok := helper.InterfacesHeadMap(d, "alarm_rule_detail"); ok { + alarmRuleDetail := wedatav20250806.AlarmRuleDetail{} + if v, ok := alarmRuleDetailMap["trigger"].(int); ok { + alarmRuleDetail.Trigger = helper.IntInt64(v) + } + if v, ok := alarmRuleDetailMap["data_backfill_or_rerun_trigger"].(int); ok { + alarmRuleDetail.DataBackfillOrRerunTrigger = helper.IntInt64(v) + } + if v, ok := alarmRuleDetailMap["time_out_ext_info"]; ok { + for _, item := range v.([]interface{}) { + timeOutExtInfoMap := item.(map[string]interface{}) + timeOutStrategyInfo := wedatav20250806.TimeOutStrategyInfo{} + if v, ok := timeOutExtInfoMap["rule_type"].(int); ok { + timeOutStrategyInfo.RuleType = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["type"].(int); ok { + timeOutStrategyInfo.Type = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["hour"].(int); ok { + timeOutStrategyInfo.Hour = helper.IntUint64(v) + } + if v, ok := timeOutExtInfoMap["min"].(int); ok { + timeOutStrategyInfo.Min = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["schedule_time_zone"].(string); ok && v != "" { + timeOutStrategyInfo.ScheduleTimeZone = helper.String(v) + } + alarmRuleDetail.TimeOutExtInfo = 
append(alarmRuleDetail.TimeOutExtInfo, &timeOutStrategyInfo) + } + } + if v, ok := alarmRuleDetailMap["data_backfill_or_rerun_time_out_ext_info"]; ok { + for _, item := range v.([]interface{}) { + dataBackfillOrRerunTimeOutExtInfoMap := item.(map[string]interface{}) + timeOutStrategyInfo := wedatav20250806.TimeOutStrategyInfo{} + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["rule_type"].(int); ok { + timeOutStrategyInfo.RuleType = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["type"].(int); ok { + timeOutStrategyInfo.Type = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["hour"].(int); ok { + timeOutStrategyInfo.Hour = helper.IntUint64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["min"].(int); ok { + timeOutStrategyInfo.Min = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["schedule_time_zone"].(string); ok && v != "" { + timeOutStrategyInfo.ScheduleTimeZone = helper.String(v) + } + alarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo = append(alarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo, &timeOutStrategyInfo) + } + } + if v, ok := alarmRuleDetailMap["project_instance_statistics_alarm_info_list"]; ok { + for _, item := range v.([]interface{}) { + projectInstanceStatisticsAlarmInfoListMap := item.(map[string]interface{}) + projectInstanceStatisticsAlarmInfo := wedatav20250806.ProjectInstanceStatisticsAlarmInfo{} + if v, ok := projectInstanceStatisticsAlarmInfoListMap["alarm_type"].(string); ok && v != "" { + projectInstanceStatisticsAlarmInfo.AlarmType = helper.String(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count_percent"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceThresholdCountPercent = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceThresholdCount = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["stabilize_threshold"].(int); ok { + projectInstanceStatisticsAlarmInfo.StabilizeThreshold = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["stabilize_statistics_cycle"].(int); ok { + projectInstanceStatisticsAlarmInfo.StabilizeStatisticsCycle = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["is_cumulant"].(bool); ok { + projectInstanceStatisticsAlarmInfo.IsCumulant = helper.Bool(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_count"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceCount = helper.IntUint64(v) + } + alarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList = append(alarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList, &projectInstanceStatisticsAlarmInfo) + } + } + if v, ok := alarmRuleDetailMap["reconciliation_ext_info"]; ok { + for _, item := range v.([]interface{}) { + reconciliationExtInfoMap := item.(map[string]interface{}) + reconciliationStrategyInfo := wedatav20250806.ReconciliationStrategyInfo{} + if v, ok := reconciliationExtInfoMap["rule_type"].(string); ok && v != "" { + reconciliationStrategyInfo.RuleType = helper.String(v) + } + if v, ok := reconciliationExtInfoMap["mismatch_count"].(int); ok { + reconciliationStrategyInfo.MismatchCount = helper.IntUint64(v) + } + if v, ok := reconciliationExtInfoMap["hour"].(int); ok { + reconciliationStrategyInfo.Hour = helper.IntInt64(v) + } + if v, ok := reconciliationExtInfoMap["min"].(int); ok { + reconciliationStrategyInfo.Min = 
helper.IntInt64(v) + } + alarmRuleDetail.ReconciliationExtInfo = append(alarmRuleDetail.ReconciliationExtInfo, &reconciliationStrategyInfo) + } + } + request.AlarmRuleDetail = &alarmRuleDetail + } + + if v, ok := d.GetOkExists("alarm_level"); ok { + request.AlarmLevel = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("description"); ok { + request.Description = helper.String(v.(string)) + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().CreateOpsAlarmRuleWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s create wedata ops alarm rule failed, reason:%+v", logId, reqErr) + return reqErr + } + + if response == nil || response.Response == nil || response.Response.Data == nil || response.Response.Data.AlarmRuleId == nil { + return fmt.Errorf("Create an alarm rule. AlarmRuleId is empty.") + } + + alarmRuleId = *response.Response.Data.AlarmRuleId + + d.SetId(strings.Join([]string{projectId, alarmRuleId}, tccommon.FILED_SP)) + + return resourceTencentCloudWedataOpsAlarmRuleRead(d, meta) +} + +func resourceTencentCloudWedataOpsAlarmRuleRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_alarm_rule.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + projectId := idSplit[0] + alarmRuleId := idSplit[1] + + respData, err := service.DescribeWedataOpsAlarmRuleById(ctx, projectId, alarmRuleId) + if err != nil { + return err + } + + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `wedata_ops_alarm_rule` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + + _ = d.Set("project_id", projectId) + + if respData.AlarmRuleName != nil { + _ = d.Set("alarm_rule_name", respData.AlarmRuleName) + } + + if respData.Description != nil { + _ = d.Set("description", respData.Description) + } + + if respData.MonitorObjectType != nil { + _ = d.Set("monitor_object_type", respData.MonitorObjectType) + } + + if respData.MonitorObjectIds != nil { + _ = d.Set("monitor_object_ids", respData.MonitorObjectIds) + } + + if respData.AlarmTypes != nil { + _ = d.Set("alarm_types", respData.AlarmTypes) + } + + alarmRuleDetailMap := map[string]interface{}{} + + if respData.AlarmRuleDetail != nil { + if respData.AlarmRuleDetail.Trigger != nil { + alarmRuleDetailMap["trigger"] = respData.AlarmRuleDetail.Trigger + } + + if respData.AlarmRuleDetail.DataBackfillOrRerunTrigger != nil { + alarmRuleDetailMap["data_backfill_or_rerun_trigger"] = respData.AlarmRuleDetail.DataBackfillOrRerunTrigger + } + + timeOutExtInfoList := make([]map[string]interface{}, 0, len(respData.AlarmRuleDetail.TimeOutExtInfo)) + if respData.AlarmRuleDetail.TimeOutExtInfo != nil { + for _, timeOutExtInfo := range respData.AlarmRuleDetail.TimeOutExtInfo { + timeOutExtInfoMap := 
map[string]interface{}{} + + if timeOutExtInfo.RuleType != nil { + timeOutExtInfoMap["rule_type"] = timeOutExtInfo.RuleType + } + + if timeOutExtInfo.Type != nil { + timeOutExtInfoMap["type"] = timeOutExtInfo.Type + } + + if timeOutExtInfo.Hour != nil { + timeOutExtInfoMap["hour"] = timeOutExtInfo.Hour + } + + if timeOutExtInfo.Min != nil { + timeOutExtInfoMap["min"] = timeOutExtInfo.Min + } + + if timeOutExtInfo.ScheduleTimeZone != nil { + timeOutExtInfoMap["schedule_time_zone"] = timeOutExtInfo.ScheduleTimeZone + } + + timeOutExtInfoList = append(timeOutExtInfoList, timeOutExtInfoMap) + } + + alarmRuleDetailMap["time_out_ext_info"] = timeOutExtInfoList + } + dataBackfillOrRerunTimeOutExtInfoList := make([]map[string]interface{}, 0, len(respData.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo)) + if respData.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo != nil { + for _, dataBackfillOrRerunTimeOutExtInfo := range respData.AlarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo { + dataBackfillOrRerunTimeOutExtInfoMap := map[string]interface{}{} + + if dataBackfillOrRerunTimeOutExtInfo.RuleType != nil { + dataBackfillOrRerunTimeOutExtInfoMap["rule_type"] = dataBackfillOrRerunTimeOutExtInfo.RuleType + } + + if dataBackfillOrRerunTimeOutExtInfo.Type != nil { + dataBackfillOrRerunTimeOutExtInfoMap["type"] = dataBackfillOrRerunTimeOutExtInfo.Type + } + + if dataBackfillOrRerunTimeOutExtInfo.Hour != nil { + dataBackfillOrRerunTimeOutExtInfoMap["hour"] = dataBackfillOrRerunTimeOutExtInfo.Hour + } + + if dataBackfillOrRerunTimeOutExtInfo.Min != nil { + dataBackfillOrRerunTimeOutExtInfoMap["min"] = dataBackfillOrRerunTimeOutExtInfo.Min + } + + if dataBackfillOrRerunTimeOutExtInfo.ScheduleTimeZone != nil { + dataBackfillOrRerunTimeOutExtInfoMap["schedule_time_zone"] = dataBackfillOrRerunTimeOutExtInfo.ScheduleTimeZone + } + + dataBackfillOrRerunTimeOutExtInfoList = append(dataBackfillOrRerunTimeOutExtInfoList, dataBackfillOrRerunTimeOutExtInfoMap) + } + + alarmRuleDetailMap["data_backfill_or_rerun_time_out_ext_info"] = dataBackfillOrRerunTimeOutExtInfoList + } + projectInstanceStatisticsAlarmInfoListList := make([]map[string]interface{}, 0, len(respData.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList)) + if respData.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList != nil { + for _, projectInstanceStatisticsAlarmInfoList := range respData.AlarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList { + projectInstanceStatisticsAlarmInfoListMap := map[string]interface{}{} + + if projectInstanceStatisticsAlarmInfoList.AlarmType != nil { + projectInstanceStatisticsAlarmInfoListMap["alarm_type"] = projectInstanceStatisticsAlarmInfoList.AlarmType + } + + if projectInstanceStatisticsAlarmInfoList.InstanceThresholdCountPercent != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count_percent"] = projectInstanceStatisticsAlarmInfoList.InstanceThresholdCountPercent + } + + if projectInstanceStatisticsAlarmInfoList.InstanceThresholdCount != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count"] = projectInstanceStatisticsAlarmInfoList.InstanceThresholdCount + } + + if projectInstanceStatisticsAlarmInfoList.StabilizeThreshold != nil { + projectInstanceStatisticsAlarmInfoListMap["stabilize_threshold"] = projectInstanceStatisticsAlarmInfoList.StabilizeThreshold + } + + if projectInstanceStatisticsAlarmInfoList.StabilizeStatisticsCycle != nil { + projectInstanceStatisticsAlarmInfoListMap["stabilize_statistics_cycle"] = 
projectInstanceStatisticsAlarmInfoList.StabilizeStatisticsCycle + } + + if projectInstanceStatisticsAlarmInfoList.IsCumulant != nil { + projectInstanceStatisticsAlarmInfoListMap["is_cumulant"] = projectInstanceStatisticsAlarmInfoList.IsCumulant + } + + if projectInstanceStatisticsAlarmInfoList.InstanceCount != nil { + projectInstanceStatisticsAlarmInfoListMap["instance_count"] = projectInstanceStatisticsAlarmInfoList.InstanceCount + } + + projectInstanceStatisticsAlarmInfoListList = append(projectInstanceStatisticsAlarmInfoListList, projectInstanceStatisticsAlarmInfoListMap) + } + + alarmRuleDetailMap["project_instance_statistics_alarm_info_list"] = projectInstanceStatisticsAlarmInfoListList + } + reconciliationExtInfoList := make([]map[string]interface{}, 0, len(respData.AlarmRuleDetail.ReconciliationExtInfo)) + if respData.AlarmRuleDetail.ReconciliationExtInfo != nil { + for _, reconciliationExtInfo := range respData.AlarmRuleDetail.ReconciliationExtInfo { + reconciliationExtInfoMap := map[string]interface{}{} + + if reconciliationExtInfo.RuleType != nil { + reconciliationExtInfoMap["rule_type"] = reconciliationExtInfo.RuleType + } + + if reconciliationExtInfo.MismatchCount != nil { + reconciliationExtInfoMap["mismatch_count"] = reconciliationExtInfo.MismatchCount + } + + if reconciliationExtInfo.Hour != nil { + reconciliationExtInfoMap["hour"] = reconciliationExtInfo.Hour + } + + if reconciliationExtInfo.Min != nil { + reconciliationExtInfoMap["min"] = reconciliationExtInfo.Min + } + + reconciliationExtInfoList = append(reconciliationExtInfoList, reconciliationExtInfoMap) + } + + alarmRuleDetailMap["reconciliation_ext_info"] = reconciliationExtInfoList + } + _ = d.Set("alarm_rule_detail", []interface{}{alarmRuleDetailMap}) + } + + if respData.AlarmLevel != nil { + _ = d.Set("alarm_level", respData.AlarmLevel) + } + + alarmGroupsList := make([]map[string]interface{}, 0, len(respData.AlarmGroups)) + if respData.AlarmGroups != nil { + for _, alarmGroups := range respData.AlarmGroups { + alarmGroupsMap := map[string]interface{}{} + + if alarmGroups.AlarmEscalationRecipientIds != nil { + alarmGroupsMap["alarm_escalation_recipient_ids"] = alarmGroups.AlarmEscalationRecipientIds + } + + if alarmGroups.AlarmEscalationInterval != nil { + alarmGroupsMap["alarm_escalation_interval"] = alarmGroups.AlarmEscalationInterval + } + + notificationFatigueMap := map[string]interface{}{} + + if alarmGroups.NotificationFatigue != nil { + if alarmGroups.NotificationFatigue.NotifyCount != nil { + notificationFatigueMap["notify_count"] = alarmGroups.NotificationFatigue.NotifyCount + } + + if alarmGroups.NotificationFatigue.NotifyInterval != nil { + notificationFatigueMap["notify_interval"] = alarmGroups.NotificationFatigue.NotifyInterval + } + + quietIntervalsList := make([]map[string]interface{}, 0, len(alarmGroups.NotificationFatigue.QuietIntervals)) + if alarmGroups.NotificationFatigue.QuietIntervals != nil { + for _, quietIntervals := range alarmGroups.NotificationFatigue.QuietIntervals { + quietIntervalsMap := map[string]interface{}{} + + if quietIntervals.DaysOfWeek != nil { + quietIntervalsMap["days_of_week"] = quietIntervals.DaysOfWeek + } + + if quietIntervals.StartTime != nil { + quietIntervalsMap["start_time"] = quietIntervals.StartTime + } + + if quietIntervals.EndTime != nil { + quietIntervalsMap["end_time"] = quietIntervals.EndTime + } + + quietIntervalsList = append(quietIntervalsList, quietIntervalsMap) + } + + notificationFatigueMap["quiet_intervals"] = quietIntervalsList + } + 
alarmGroupsMap["notification_fatigue"] = []interface{}{notificationFatigueMap} + } + + if alarmGroups.AlarmWays != nil { + alarmGroupsMap["alarm_ways"] = alarmGroups.AlarmWays + } + + webHooksList := make([]map[string]interface{}, 0, len(alarmGroups.WebHooks)) + if alarmGroups.WebHooks != nil { + for _, webHooks := range alarmGroups.WebHooks { + webHooksMap := map[string]interface{}{} + + if webHooks.AlarmWay != nil { + webHooksMap["alarm_way"] = webHooks.AlarmWay + } + + if webHooks.WebHooks != nil { + webHooksMap["web_hooks"] = webHooks.WebHooks + } + + webHooksList = append(webHooksList, webHooksMap) + } + + alarmGroupsMap["web_hooks"] = webHooksList + } + if alarmGroups.AlarmRecipientType != nil { + alarmGroupsMap["alarm_recipient_type"] = alarmGroups.AlarmRecipientType + } + + if alarmGroups.AlarmRecipientIds != nil { + alarmGroupsMap["alarm_recipient_ids"] = alarmGroups.AlarmRecipientIds + } + + alarmGroupsList = append(alarmGroupsList, alarmGroupsMap) + } + + _ = d.Set("alarm_groups", alarmGroupsList) + } + + return nil +} + +func resourceTencentCloudWedataOpsAlarmRuleUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_alarm_rule.update")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + projectId := idSplit[0] + alarmRuleId := idSplit[1] + + needChange := false + mutableArgs := []string{"alarm_rule_name", "monitor_object_type", "monitor_object_ids", "alarm_types", "alarm_rule_detail", "status", "alarm_level", "alarm_groups", "description"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } + } + + if needChange { + request := wedatav20250806.NewUpdateOpsAlarmRuleRequest() + request.ProjectId = helper.String(projectId) + request.AlarmRuleId = helper.String(alarmRuleId) + + if v, ok := d.GetOk("alarm_rule_name"); ok { + request.AlarmRuleName = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("monitor_object_type"); ok { + request.MonitorObjectType = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("monitor_object_ids"); ok { + monitorObjectIdsSet := v.(*schema.Set).List() + for i := range monitorObjectIdsSet { + monitorObjectIds := monitorObjectIdsSet[i].(string) + request.MonitorObjectIds = append(request.MonitorObjectIds, helper.String(monitorObjectIds)) + } + } + + if v, ok := d.GetOk("alarm_types"); ok { + alarmTypesSet := v.(*schema.Set).List() + for i := range alarmTypesSet { + alarmTypes := alarmTypesSet[i].(string) + request.AlarmTypes = append(request.AlarmTypes, helper.String(alarmTypes)) + } + } + + if alarmRuleDetailMap, ok := helper.InterfacesHeadMap(d, "alarm_rule_detail"); ok { + alarmRuleDetail := wedatav20250806.AlarmRuleDetail{} + if v, ok := alarmRuleDetailMap["trigger"].(int); ok { + alarmRuleDetail.Trigger = helper.IntInt64(v) + } + if v, ok := alarmRuleDetailMap["data_backfill_or_rerun_trigger"].(int); ok { + alarmRuleDetail.DataBackfillOrRerunTrigger = helper.IntInt64(v) + } + if v, ok := alarmRuleDetailMap["time_out_ext_info"]; ok { + for _, item := range v.([]interface{}) { + timeOutExtInfoMap := item.(map[string]interface{}) + timeOutStrategyInfo := wedatav20250806.TimeOutStrategyInfo{} + if v, ok := timeOutExtInfoMap["rule_type"].(int); ok { + 
timeOutStrategyInfo.RuleType = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["type"].(int); ok { + timeOutStrategyInfo.Type = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["hour"].(int); ok { + timeOutStrategyInfo.Hour = helper.IntUint64(v) + } + if v, ok := timeOutExtInfoMap["min"].(int); ok { + timeOutStrategyInfo.Min = helper.IntInt64(v) + } + if v, ok := timeOutExtInfoMap["schedule_time_zone"].(string); ok && v != "" { + timeOutStrategyInfo.ScheduleTimeZone = helper.String(v) + } + alarmRuleDetail.TimeOutExtInfo = append(alarmRuleDetail.TimeOutExtInfo, &timeOutStrategyInfo) + } + } + if v, ok := alarmRuleDetailMap["data_backfill_or_rerun_time_out_ext_info"]; ok { + for _, item := range v.([]interface{}) { + dataBackfillOrRerunTimeOutExtInfoMap := item.(map[string]interface{}) + timeOutStrategyInfo := wedatav20250806.TimeOutStrategyInfo{} + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["rule_type"].(int); ok { + timeOutStrategyInfo.RuleType = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["type"].(int); ok { + timeOutStrategyInfo.Type = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["hour"].(int); ok { + timeOutStrategyInfo.Hour = helper.IntUint64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["min"].(int); ok { + timeOutStrategyInfo.Min = helper.IntInt64(v) + } + if v, ok := dataBackfillOrRerunTimeOutExtInfoMap["schedule_time_zone"].(string); ok && v != "" { + timeOutStrategyInfo.ScheduleTimeZone = helper.String(v) + } + alarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo = append(alarmRuleDetail.DataBackfillOrRerunTimeOutExtInfo, &timeOutStrategyInfo) + } + } + if v, ok := alarmRuleDetailMap["project_instance_statistics_alarm_info_list"]; ok { + for _, item := range v.([]interface{}) { + projectInstanceStatisticsAlarmInfoListMap := item.(map[string]interface{}) + projectInstanceStatisticsAlarmInfo := wedatav20250806.ProjectInstanceStatisticsAlarmInfo{} + if v, ok := projectInstanceStatisticsAlarmInfoListMap["alarm_type"].(string); ok && v != "" { + projectInstanceStatisticsAlarmInfo.AlarmType = helper.String(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count_percent"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceThresholdCountPercent = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_threshold_count"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceThresholdCount = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["stabilize_threshold"].(int); ok { + projectInstanceStatisticsAlarmInfo.StabilizeThreshold = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["stabilize_statistics_cycle"].(int); ok { + projectInstanceStatisticsAlarmInfo.StabilizeStatisticsCycle = helper.IntUint64(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["is_cumulant"].(bool); ok { + projectInstanceStatisticsAlarmInfo.IsCumulant = helper.Bool(v) + } + if v, ok := projectInstanceStatisticsAlarmInfoListMap["instance_count"].(int); ok { + projectInstanceStatisticsAlarmInfo.InstanceCount = helper.IntUint64(v) + } + alarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList = append(alarmRuleDetail.ProjectInstanceStatisticsAlarmInfoList, &projectInstanceStatisticsAlarmInfo) + } + } + if v, ok := alarmRuleDetailMap["reconciliation_ext_info"]; ok { + for _, item := range v.([]interface{}) { + reconciliationExtInfoMap := item.(map[string]interface{}) + 
reconciliationStrategyInfo := wedatav20250806.ReconciliationStrategyInfo{}
+ if v, ok := reconciliationExtInfoMap["rule_type"].(string); ok && v != "" {
+ reconciliationStrategyInfo.RuleType = helper.String(v)
+ }
+ if v, ok := reconciliationExtInfoMap["mismatch_count"].(int); ok {
+ reconciliationStrategyInfo.MismatchCount = helper.IntUint64(v)
+ }
+ if v, ok := reconciliationExtInfoMap["hour"].(int); ok {
+ reconciliationStrategyInfo.Hour = helper.IntInt64(v)
+ }
+ if v, ok := reconciliationExtInfoMap["min"].(int); ok {
+ reconciliationStrategyInfo.Min = helper.IntInt64(v)
+ }
+ alarmRuleDetail.ReconciliationExtInfo = append(alarmRuleDetail.ReconciliationExtInfo, &reconciliationStrategyInfo)
+ }
+ }
+ request.AlarmRuleDetail = &alarmRuleDetail
+ }
+
+ if v, ok := d.GetOkExists("status"); ok {
+ request.Status = helper.IntInt64(v.(int))
+ }
+
+ if v, ok := d.GetOkExists("alarm_level"); ok {
+ request.AlarmLevel = helper.IntInt64(v.(int))
+ }
+
+ if v, ok := d.GetOk("alarm_groups"); ok {
+ for _, item := range v.([]interface{}) {
+ alarmGroupsMap := item.(map[string]interface{})
+ alarmGroup := wedatav20250806.AlarmGroup{}
+ if v, ok := alarmGroupsMap["alarm_escalation_recipient_ids"]; ok {
+ alarmEscalationRecipientIdsSet := v.(*schema.Set).List()
+ for i := range alarmEscalationRecipientIdsSet {
+ alarmEscalationRecipientIds := alarmEscalationRecipientIdsSet[i].(string)
+ alarmGroup.AlarmEscalationRecipientIds = append(alarmGroup.AlarmEscalationRecipientIds, helper.String(alarmEscalationRecipientIds))
+ }
+ }
+ if v, ok := alarmGroupsMap["alarm_escalation_interval"].(int); ok {
+ alarmGroup.AlarmEscalationInterval = helper.IntInt64(v)
+ }
+ if notificationFatigueMap, ok := helper.ConvertInterfacesHeadToMap(alarmGroupsMap["notification_fatigue"]); ok {
+ notificationFatigue := wedatav20250806.NotificationFatigue{}
+ if v, ok := notificationFatigueMap["notify_count"].(int); ok {
+ notificationFatigue.NotifyCount = helper.IntUint64(v)
+ }
+ if v, ok := notificationFatigueMap["notify_interval"].(int); ok {
+ notificationFatigue.NotifyInterval = helper.IntUint64(v)
+ }
+ if v, ok := notificationFatigueMap["quiet_intervals"]; ok {
+ for _, item := range v.([]interface{}) {
+ quietIntervalsMap := item.(map[string]interface{})
+ alarmQuietInterval := wedatav20250806.AlarmQuietInterval{}
+ if v, ok := quietIntervalsMap["days_of_week"]; ok {
+ daysOfWeekSet := v.(*schema.Set).List()
+ for i := range daysOfWeekSet {
+ daysOfWeek := daysOfWeekSet[i].(int)
+ alarmQuietInterval.DaysOfWeek = append(alarmQuietInterval.DaysOfWeek, helper.IntUint64(daysOfWeek))
+ }
+ }
+ if v, ok := quietIntervalsMap["start_time"].(string); ok && v != "" {
+ alarmQuietInterval.StartTime = helper.String(v)
+ }
+ if v, ok := quietIntervalsMap["end_time"].(string); ok && v != "" {
+ alarmQuietInterval.EndTime = helper.String(v)
+ }
+ notificationFatigue.QuietIntervals = append(notificationFatigue.QuietIntervals, &alarmQuietInterval)
+ }
+ }
+ alarmGroup.NotificationFatigue = &notificationFatigue
+ }
+ if v, ok := alarmGroupsMap["alarm_ways"]; ok {
+ alarmWaysSet := v.(*schema.Set).List()
+ for i := range alarmWaysSet {
+ alarmWays := alarmWaysSet[i].(string)
+ alarmGroup.AlarmWays = append(alarmGroup.AlarmWays, helper.String(alarmWays))
+ }
+ }
+ if v, ok := alarmGroupsMap["web_hooks"]; ok {
+ for _, item := range v.([]interface{}) {
+ webHooksMap := item.(map[string]interface{})
+ alarmWayWebHook := wedatav20250806.AlarmWayWebHook{}
+ if v, ok := webHooksMap["alarm_way"].(string); ok && v != "" {
+ alarmWayWebHook.AlarmWay = 
helper.String(v) + } + if v, ok := webHooksMap["web_hooks"]; ok { + webHooksSet := v.(*schema.Set).List() + for i := range webHooksSet { + webHooks := webHooksSet[i].(string) + alarmWayWebHook.WebHooks = append(alarmWayWebHook.WebHooks, helper.String(webHooks)) + } + } + alarmGroup.WebHooks = append(alarmGroup.WebHooks, &alarmWayWebHook) + } + } + if v, ok := alarmGroupsMap["alarm_recipient_type"].(int); ok { + alarmGroup.AlarmRecipientType = helper.IntInt64(v) + } + if v, ok := alarmGroupsMap["alarm_recipient_ids"]; ok { + alarmRecipientIdsSet := v.(*schema.Set).List() + for i := range alarmRecipientIdsSet { + alarmRecipientIds := alarmRecipientIdsSet[i].(string) + alarmGroup.AlarmRecipientIds = append(alarmGroup.AlarmRecipientIds, helper.String(alarmRecipientIds)) + } + } + request.AlarmGroups = append(request.AlarmGroups, &alarmGroup) + } + } + + if v, ok := d.GetOk("description"); ok { + request.Description = helper.String(v.(string)) + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().UpdateOpsAlarmRuleWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s update wedata ops alarm rule failed, reason:%+v", logId, reqErr) + return reqErr + } + } + + _ = projectId + _ = alarmRuleId + return resourceTencentCloudWedataOpsAlarmRuleRead(d, meta) +} + +func resourceTencentCloudWedataOpsAlarmRuleDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_alarm_rule.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + projectId := idSplit[0] + alarmRuleId := idSplit[1] + + var ( + request = wedatav20250806.NewDeleteOpsAlarmRuleRequest() + response = wedatav20250806.NewDeleteOpsAlarmRuleResponse() + ) + request.ProjectId = helper.String(projectId) + request.AlarmRuleId = helper.String(alarmRuleId) + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().DeleteOpsAlarmRuleWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s delete wedata ops alarm rule failed, reason:%+v", logId, reqErr) + return reqErr + } + + _ = response + _ = projectId + _ = alarmRuleId + return nil +} diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.md b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.md new file mode 100644 index 0000000000..06ddfba784 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule.md @@ -0,0 +1,59 @@ +Provides a resource to create a wedata ops alarm rule + +Example Usage + +```hcl +resource "tencentcloud_wedata_ops_alarm_rule" 
"wedata_ops_alarm_rule" { + alarm_level = 1 + alarm_rule_name = "tf_test" + alarm_types = [ + "failure", + ] + description = "ccc" + monitor_object_ids = [ + "20230906105118824", + ] + monitor_object_type = 1 + project_id = "1859317240494305280" + + alarm_groups { + alarm_escalation_interval = 15 + alarm_escalation_recipient_ids = [] + alarm_recipient_ids = [ + "100029411056", + ] + alarm_recipient_type = 1 + alarm_ways = [ + "1", + ] + + notification_fatigue { + notify_count = 1 + notify_interval = 5 + + quiet_intervals { + days_of_week = [ + 6, + 7, + ] + end_time = "21:00:00" + start_time = "10:00:00" + } + } + } + + alarm_rule_detail { + data_backfill_or_rerun_trigger = 1 + trigger = 2 + } +} +``` + + +Import + +wedata ops alarm rule can be imported using the id, e.g. + +``` +terraform import tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule projectId#askId +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_test.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_test.go new file mode 100644 index 0000000000..5b47cb442a --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_alarm_rule_test.go @@ -0,0 +1,208 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsAlarmRuleResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataOpsAlarmRule, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "id"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_level", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_name", "tf_test"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_types.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_types.0", "failure"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "description", "ccc"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_ids.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_ids.0", "20230906105118824"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_type", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "project_id", "1859317240494305280"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", 
"alarm_groups.0.alarm_escalation_interval", "15"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_ids.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_ids.0", "100029411056"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_type", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.0", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.notify_count", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.notify_interval", "5"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.days_of_week.#", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.days_of_week.0", "6"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.days_of_week.1", "7"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.end_time", "21:00:00"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.start_time", "10:00:00"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.data_backfill_or_rerun_trigger", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.trigger", "2"), + ), + }, + { + ResourceName: "tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", + ImportState: true, + ImportStateVerify: true, + }, + { + Config: testAccWedataOpsAlarmRuleUp, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "id"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_level", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_name", "tf_test_up"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_types.#", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_types.0", "failure"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_types.1", 
"reconciliationFailure"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "description", "qqq"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_ids.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_ids.0", "3bec54e4-cd0a-4163-9318-65f0fe115ee9"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "monitor_object_type", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "project_id", "1859317240494305280"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_escalation_interval", "15"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_ids.#", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_ids.0", "100028448903"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_ids.1", "100029411056"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_recipient_type", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.#", "4"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.0", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.1", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.2", "3"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.alarm_ways.3", "4"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.notify_count", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.notify_interval", "5"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.days_of_week.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.days_of_week.0", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.end_time", "01:00:00"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_groups.0.notification_fatigue.0.quiet_intervals.0.start_time", "00:00:00"), + 
resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.data_backfill_or_rerun_trigger", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.trigger", "2"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.reconciliation_ext_info.#", "1"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.reconciliation_ext_info.0.hour", "0"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.reconciliation_ext_info.0.min", "0"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.reconciliation_ext_info.0.mismatch_count", "0"), + resource.TestCheckResourceAttr("tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule", "alarm_rule_detail.0.reconciliation_ext_info.0.rule_type", "reconciliationFailure"), + ), + }, + }, + }) +} + +const testAccWedataOpsAlarmRule = ` + +resource "tencentcloud_wedata_ops_alarm_rule" "wedata_ops_alarm_rule" { + alarm_level = 1 + alarm_rule_name = "tf_test" + alarm_types = [ + "failure", + ] + description = "ccc" + monitor_object_ids = [ + "20230906105118824", + ] + monitor_object_type = 1 + project_id = "1859317240494305280" + + alarm_groups { + alarm_escalation_interval = 15 + alarm_escalation_recipient_ids = [] + alarm_recipient_ids = [ + "100029411056", + ] + alarm_recipient_type = 1 + alarm_ways = [ + "1", + ] + + notification_fatigue { + notify_count = 1 + notify_interval = 5 + + quiet_intervals { + days_of_week = [ + 6, + 7, + ] + end_time = "21:00:00" + start_time = "10:00:00" + } + } + } + + alarm_rule_detail { + data_backfill_or_rerun_trigger = 1 + trigger = 2 + } +} +` + +const testAccWedataOpsAlarmRuleUp = ` +resource "tencentcloud_wedata_ops_alarm_rule" "wedata_ops_alarm_rule" { + alarm_level = 2 + alarm_rule_name = "tf_test_up" + alarm_types = [ + "failure", + "reconciliationFailure", + ] + description = "qqq" + monitor_object_ids = [ + "3bec54e4-cd0a-4163-9318-65f0fe115ee9", + ] + monitor_object_type = 2 + project_id = "1859317240494305280" + + alarm_groups { + alarm_escalation_interval = 15 + alarm_escalation_recipient_ids = [] + alarm_recipient_ids = [ + "100028448903", + "100029411056", + ] + alarm_recipient_type = 1 + alarm_ways = [ + "1", + "2", + "3", + "4", + ] + + notification_fatigue { + notify_count = 1 + notify_interval = 5 + + quiet_intervals { + days_of_week = [ + 1, + ] + end_time = "01:00:00" + start_time = "00:00:00" + } + } + } + + alarm_rule_detail { + data_backfill_or_rerun_trigger = 1 + trigger = 2 + + reconciliation_ext_info { + hour = 0 + min = 0 + mismatch_count = 0 + rule_type = "reconciliationFailure" + } + } +} +` diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.go new file mode 100644 index 0000000000..7deff68923 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.go @@ -0,0 +1,165 @@ +// Code generated by iacg; DO NOT EDIT. 
+package wedata
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+ "time"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806"
+ tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
+ "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
+)
+
+func ResourceTencentCloudWedataOpsStopTaskAsync() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceTencentCloudWedataOpsStopTaskAsyncCreate,
+ Read: resourceTencentCloudWedataOpsStopTaskAsyncRead,
+ Delete: resourceTencentCloudWedataOpsStopTaskAsyncDelete,
+ Schema: map[string]*schema.Schema{
+ "project_id": {
+ Type: schema.TypeString,
+ Required: true,
+ ForceNew: true,
+ Description: "Project id.",
+ },
+
+ "task_ids": {
+ Type: schema.TypeSet,
+ Required: true,
+ ForceNew: true,
+ Description: "Task id list.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+
+ "kill_instance": {
+ Type: schema.TypeBool,
+ Optional: true,
+ ForceNew: true,
+ Description: "Whether to also terminate the instances generated by the tasks. Defaults to false; if true, the operation waits until all instances have been forcibly terminated.",
+ },
+ },
+ }
+}
+
+func resourceTencentCloudWedataOpsStopTaskAsyncCreate(d *schema.ResourceData, meta interface{}) error {
+ defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_stop_task_async.create")()
+ defer tccommon.InconsistentCheck(d, meta)()
+
+ logId := tccommon.GetLogId(tccommon.ContextNil)
+
+ ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
+
+ var (
+ projectId string
+ )
+ var (
+ request = wedatav20250806.NewStopOpsTasksAsyncRequest()
+ response = wedatav20250806.NewStopOpsTasksAsyncResponse()
+ )
+
+ if v, ok := d.GetOk("project_id"); ok {
+ projectId = v.(string)
+ request.ProjectId = helper.String(v.(string))
+ }
+
+ if v, ok := d.GetOk("task_ids"); ok {
+ taskIdsSet := v.(*schema.Set).List()
+ for i := range taskIdsSet {
+ taskIds := taskIdsSet[i].(string)
+ request.TaskIds = append(request.TaskIds, helper.String(taskIds))
+ }
+ }
+
+ timeout := 180 * time.Second
+ if v, ok := d.GetOkExists("kill_instance"); ok {
+ request.KillInstance = helper.Bool(v.(bool))
+ if v.(bool) {
+ // The maximum waiting time is set to 4 hours
+ timeout = 4 * time.Hour
+ }
+ }
+
+ reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
+ result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().StopOpsTasksAsyncWithContext(ctx, request)
+ if e != nil {
+ return tccommon.RetryError(e)
+ } else {
+ log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
+ }
+ response = result
+ return nil
+ })
+ if reqErr != nil {
+ log.Printf("[CRITAL]%s create wedata stop ops task async failed, reason:%+v", logId, reqErr)
+ return reqErr
+ }
+
+ if response == nil || response.Response == nil || response.Response.Data == nil || response.Response.Data.AsyncId == nil {
+ return fmt.Errorf("The AsyncId returned by StopOpsTasksAsync is empty")
+ }
+ asyncId := *response.Response.Data.AsyncId
+
+ if _, err := (&resource.StateChangeConf{
+ Delay: 1 * time.Second,
+ MinTimeout: 3 * time.Second,
+ Pending: []string{"INIT", "RUNNING"},
+ Refresh: resourceWedataOpsStopTaskAsyncCreateStateRefreshFunc_0_0(ctx, projectId, asyncId),
+ 
Target: []string{"SUCCESS"}, + Timeout: timeout, + }).WaitForStateContext(ctx); err != nil { + return err + } + d.SetId(strings.Join([]string{projectId, asyncId}, tccommon.FILED_SP)) + + return resourceTencentCloudWedataOpsStopTaskAsyncRead(d, meta) +} + +func resourceTencentCloudWedataOpsStopTaskAsyncRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_stop_task_async.read")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceTencentCloudWedataOpsStopTaskAsyncDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_stop_task_async.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceWedataOpsStopTaskAsyncCreateStateRefreshFunc_0_0(ctx context.Context, projectId string, asyncId string) resource.StateRefreshFunc { + var req *wedatav20250806.GetOpsAsyncJobRequest + return func() (interface{}, string, error) { + meta := tccommon.ProviderMetaFromContext(ctx) + if meta == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + if req == nil { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + _ = d + req = wedatav20250806.NewGetOpsAsyncJobRequest() + req.ProjectId = helper.String(projectId) + req.AsyncId = helper.String(asyncId) + } + resp, err := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().GetOpsAsyncJobWithContext(ctx, req) + if err != nil { + return nil, "", err + } + if resp == nil || resp.Response == nil || resp.Response.Data == nil || resp.Response.Data.Status == nil { + return nil, "", nil + } + state := fmt.Sprintf("%v", *resp.Response.Data.Status) + return resp.Response.Data, state, nil + } +} diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.md b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.md new file mode 100644 index 0000000000..41ea6fc217 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async.md @@ -0,0 +1,10 @@ +Provides a resource to create a wedata ops stop task async + +Example Usage + +```hcl +resource "tencentcloud_wedata_ops_stop_task_async" "wedata_ops_stop_task_async" { + project_id = "1859317240494305280" + task_ids = ["20251013154418424"] +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_test.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_test.go new file mode 100644 index 0000000000..b381644f0a --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_stop_task_async_test.go @@ -0,0 +1,32 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataOpsStopTaskAsyncResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + 
Steps: []resource.TestStep{{
+ Config: testAccWedataOpsStopTaskAsync,
+ Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_wedata_ops_stop_task_async.wedata_stop_ops_task_async", "id")),
+ }},
+ })
+}
+
+const testAccWedataOpsStopTaskAsync = `
+
+resource "tencentcloud_wedata_ops_stop_task_async" "wedata_stop_ops_task_async" {
+ project_id = "1859317240494305280"
+ task_ids = ["20251013154418424"]
+}
+`
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.go
new file mode 100644
index 0000000000..221b2bf02a
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.go
@@ -0,0 +1,167 @@
+// Code generated by iacg; DO NOT EDIT.
+package wedata
+
+import (
+ "context"
+ "fmt"
+ "log"
+ "strings"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806"
+ tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common"
+ "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper"
+)
+
+func ResourceTencentCloudWedataOpsTaskOwner() *schema.Resource {
+ return &schema.Resource{
+ Create: resourceTencentCloudWedataOpsTaskOwnerCreate,
+ Read: resourceTencentCloudWedataOpsTaskOwnerRead,
+ Update: resourceTencentCloudWedataOpsTaskOwnerUpdate,
+ Delete: resourceTencentCloudWedataOpsTaskOwnerDelete,
+ Importer: &schema.ResourceImporter{
+ State: schema.ImportStatePassthrough,
+ },
+ Schema: map[string]*schema.Schema{
+ "project_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Project id.",
+ },
+
+ "task_id": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Task id.",
+ },
+
+ "owner_uin": {
+ Type: schema.TypeString,
+ Required: true,
+ Description: "Task Owner ID. 
For multiple owners, separate them with `;`, for example: `100029411056;100042282926`.", + }, + }, + } +} + +func resourceTencentCloudWedataOpsTaskOwnerCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_task_owner.create")() + defer tccommon.InconsistentCheck(d, meta)() + + var ( + projectId string + taskId string + ) + + if v, ok := d.GetOk("project_id"); ok { + projectId = v.(string) + } + + if v, ok := d.GetOk("task_id"); ok { + taskId = v.(string) + } + + d.SetId(strings.Join([]string{projectId, taskId}, tccommon.FILED_SP)) + + return resourceTencentCloudWedataOpsTaskOwnerUpdate(d, meta) +} + +func resourceTencentCloudWedataOpsTaskOwnerRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_task_owner.read")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + service := WedataService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + projectId := idSplit[0] + taskId := idSplit[1] + + respData, err := service.DescribeWedataOpsTaskOwnerById(ctx, projectId, taskId) + if err != nil { + return err + } + + if respData == nil { + d.SetId("") + log.Printf("[WARN]%s resource `wedata_ops_task_owner` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + return nil + } + _ = d.Set("project_id", projectId) + _ = d.Set("task_id", taskId) + + if respData.TaskBaseAttribute != nil { + if respData.TaskBaseAttribute.OwnerUin != nil { + _ = d.Set("owner_uin", respData.TaskBaseAttribute.OwnerUin) + } + } + + return nil +} + +func resourceTencentCloudWedataOpsTaskOwnerUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_task_owner.update")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 2 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + projectId := idSplit[0] + taskId := idSplit[1] + + needChange := false + mutableArgs := []string{"owner_uin"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } + } + + if needChange { + request := wedatav20250806.NewUpdateOpsTasksOwnerRequest() + request.ProjectId = helper.String(projectId) + request.TaskIds = append(request.TaskIds, helper.String(taskId)) + + if v, ok := d.GetOk("owner_uin"); ok { + request.OwnerUin = helper.String(v.(string)) + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().UpdateOpsTasksOwnerWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s update wedata ops task owner failed, reason:%+v", logId, reqErr) + return reqErr + } + } + + _ = projectId + _ = taskId + return resourceTencentCloudWedataOpsTaskOwnerRead(d, meta) +} 
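+
+// Editorial note (not generated): the resource id is the project id and the task id
+// joined with tccommon.FILED_SP ("#"), and Create only composes this id before
+// delegating the actual UpdateOpsTasksOwner call to Update. An existing owner
+// binding can therefore be imported with, e.g.:
+//
+// terraform import tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner 2430455587205529600#20251009144419600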
+
+func resourceTencentCloudWedataOpsTaskOwnerDelete(d *schema.ResourceData, meta interface{}) error {
+ defer tccommon.LogElapsed("resource.tencentcloud_wedata_ops_task_owner.delete")()
+ defer tccommon.InconsistentCheck(d, meta)()
+
+ return nil
+}
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.md b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.md
new file mode 100644
index 0000000000..33db32007b
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner.md
@@ -0,0 +1,19 @@
+Provides a resource to create a wedata ops task owner
+
+Example Usage
+
+```hcl
+resource "tencentcloud_wedata_ops_task_owner" "wedata_ops_task_owner" {
+ owner_uin = "100029411056;100042282926"
+ project_id = "2430455587205529600"
+ task_id = "20251009144419600"
+}
+```
+
+Import
+
+wedata ops task owner can be imported using the id, e.g.
+
+```
+terraform import tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner projectId#taskId
+```
\ No newline at end of file
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_extension.go
new file mode 100644
index 0000000000..4a61b9ede3
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_extension.go
@@ -0,0 +1 @@
+package wedata
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_test.go b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_test.go
new file mode 100644
index 0000000000..6c14508372
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_ops_task_owner_test.go
@@ -0,0 +1,61 @@
+package wedata_test
+
+import (
+ "testing"
+
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
+ tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest"
+)
+
+func TestAccTencentCloudWedataOpsTaskOwnerResource_basic(t *testing.T) {
+ t.Parallel()
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() {
+ tcacctest.AccPreCheck(t)
+ },
+ Providers: tcacctest.AccProviders,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccWedataOpsTaskOwner,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttrSet("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "id"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "owner_uin", "100029411056;100042282926"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "project_id", "2430455587205529600"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "task_id", "20251009144419600"),
+ ),
+ },
+ {
+ ResourceName: "tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner",
+ ImportState: true,
+ ImportStateVerify: true,
+ },
+ {
+ Config: testAccWedataOpsTaskOwnerUp,
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttrSet("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "id"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "owner_uin", "100029411056"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "project_id", "2430455587205529600"),
+ resource.TestCheckResourceAttr("tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner", "task_id", "20251009144419600"),
+ ),
+ },
+ },
+ })
+}
+
+const testAccWedataOpsTaskOwner = `
+
+resource "tencentcloud_wedata_ops_task_owner" 
"wedata_ops_task_owner" { + owner_uin = "100029411056;100042282926" + project_id = "2430455587205529600" + task_id = "20251009144419600" +} +` + +const testAccWedataOpsTaskOwnerUp = ` + +resource "tencentcloud_wedata_ops_task_owner" "wedata_ops_task_owner" { + owner_uin = "100029411056" + project_id = "2430455587205529600" + task_id = "20251009144419600" +} +` diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.go b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.go new file mode 100644 index 0000000000..923113f1be --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.go @@ -0,0 +1,150 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudWedataTaskKillInstanceAsync() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudWedataTaskKillInstanceAsyncCreate, + Read: resourceTencentCloudWedataTaskKillInstanceAsyncRead, + Delete: resourceTencentCloudWedataTaskKillInstanceAsyncDelete, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Project Id.", + }, + + "instance_key_list": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: "Instance id list, which can be obtained from ListInstances.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceTencentCloudWedataTaskKillInstanceAsyncCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_kill_instance_async.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + projectId string + asyncId string + ) + var ( + request = wedatav20250806.NewKillTaskInstancesAsyncRequest() + response = wedatav20250806.NewKillTaskInstancesAsyncResponse() + ) + + if v, ok := d.GetOk("project_id"); ok { + projectId = v.(string) + request.ProjectId = helper.String(projectId) + } + + if v, ok := d.GetOk("instance_key_list"); ok { + instanceKeyListSet := v.(*schema.Set).List() + for i := range instanceKeyListSet { + instanceKeyList := instanceKeyListSet[i].(string) + request.InstanceKeyList = append(request.InstanceKeyList, helper.String(instanceKeyList)) + } + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().KillTaskInstancesAsyncWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s create wedata task kill instance async failed, reason:%+v", logId, reqErr) + return reqErr + } + + if response == nil 
|| response.Response == nil || response.Response.Data == nil || response.Response.Data.AsyncId == nil {
+ return fmt.Errorf("The returned AsyncId is empty.")
+ }
+ asyncId = *response.Response.Data.AsyncId
+
+ if _, err := (&resource.StateChangeConf{
+ Delay: 1 * time.Second,
+ MinTimeout: 3 * time.Second,
+ Pending: []string{"INIT", "RUNNING"},
+ Refresh: resourceWedataTaskKillInstanceAsyncCreateStateRefreshFunc_0_0(ctx, projectId, asyncId),
+ Target: []string{"SUCCESS"},
+ Timeout: 3600 * time.Second,
+ }).WaitForStateContext(ctx); err != nil {
+ return err
+ }
+ d.SetId(strings.Join([]string{projectId, asyncId}, tccommon.FILED_SP))
+
+ return resourceTencentCloudWedataTaskKillInstanceAsyncRead(d, meta)
+}
+
+func resourceTencentCloudWedataTaskKillInstanceAsyncRead(d *schema.ResourceData, meta interface{}) error {
+ defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_kill_instance_async.read")()
+ defer tccommon.InconsistentCheck(d, meta)()
+
+ return nil
+}
+
+func resourceTencentCloudWedataTaskKillInstanceAsyncDelete(d *schema.ResourceData, meta interface{}) error {
+ defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_kill_instance_async.delete")()
+ defer tccommon.InconsistentCheck(d, meta)()
+
+ return nil
+}
+
+func resourceWedataTaskKillInstanceAsyncCreateStateRefreshFunc_0_0(ctx context.Context, projectId string, asyncId string) resource.StateRefreshFunc {
+ var req *wedatav20250806.GetOpsAsyncJobRequest
+ return func() (interface{}, string, error) {
+ meta := tccommon.ProviderMetaFromContext(ctx)
+ if meta == nil {
+ return nil, "", fmt.Errorf("resource data can not be nil")
+ }
+ if req == nil {
+ d := tccommon.ResourceDataFromContext(ctx)
+ if d == nil {
+ return nil, "", fmt.Errorf("resource data can not be nil")
+ }
+ _ = d
+ req = wedatav20250806.NewGetOpsAsyncJobRequest()
+ req.ProjectId = helper.String(projectId)
+ req.AsyncId = helper.String(asyncId)
+ }
+ resp, err := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().GetOpsAsyncJobWithContext(ctx, req)
+ if err != nil {
+ return nil, "", err
+ }
+ if resp == nil || resp.Response == nil || resp.Response.Data == nil || resp.Response.Data.Status == nil {
+ return nil, "", nil
+ }
+ state := fmt.Sprintf("%v", *resp.Response.Data.Status)
+ return resp.Response.Data, state, nil
+ }
+}
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.md b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.md
new file mode 100644
index 0000000000..7738edb7af
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async.md
@@ -0,0 +1,10 @@
+Provides a resource to create a wedata task kill instance
+
+Example Usage
+
+```hcl
+resource "tencentcloud_wedata_task_kill_instance_async" "wedata_task_kill_instance_async" {
+ project_id = "1859317240494305280"
+ instance_key_list = ["20250324192240178_2025-10-13 17:00:00"]
+}
+```
\ No newline at end of file
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_extension.go
new file mode 100644
index 0000000000..4a61b9ede3
--- /dev/null
+++ b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_extension.go
@@ -0,0 +1 @@
+package wedata
diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_test.go b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_test.go
new file mode 100644
index 0000000000..c8fd75a8f1
--- /dev/null
+++ 
b/tencentcloud/services/wedata/resource_tc_wedata_task_kill_instance_async_test.go @@ -0,0 +1,32 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskKillInstanceAsyncResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskKillInstanceAsync, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_wedata_task_kill_instance_async.wedata_task_kill_instance_async", "id")), + }, + }, + }) +} + +const testAccWedataTaskKillInstanceAsync = ` + +resource "tencentcloud_wedata_task_kill_instance_async" "wedata_task_kill_instance_async" { + project_id = "1859317240494305280" + instance_key_list = ["20251013154418424_2025-10-13 18:10:00"] +} +` diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.go b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.go new file mode 100644 index 0000000000..d5ba8b91f0 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.go @@ -0,0 +1,251 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudWedataTaskRerunInstanceAsync() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudWedataTaskRerunInstanceAsyncCreate, + Read: resourceTencentCloudWedataTaskRerunInstanceAsyncRead, + Delete: resourceTencentCloudWedataTaskRerunInstanceAsyncDelete, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Project Id.", + }, + + "instance_key_list": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: "Instance id list, which can be obtained from ListInstances.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + + "rerun_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Rerun type, 1: self; 3: children; 2: self and children, default 1.", + }, + + "check_parent_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Whether to check upstream tasks: ALL, MAKE_SCOPE (selected), NONE (do not check anything), default is NONE.", + }, + + "son_range_type": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Downstream instance scope WORKFLOW: workflow PROJECT: project ALL: all cross-workflow dependent projects, default WORKFLOW.", + }, + + "skip_event_listening": { + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + Description: "Whether to ignore event monitoring when rerunning.", + }, + + "redefine_parallel_num": { + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + Description: "Customize the instance running concurrency. 
If not configured, the original self-dependency of the task will be used.", + }, + + "redefine_self_workflow_dependency": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: "Customized workflow self-dependence: yes to enable, no to disable. If not configured, the original workflow self-dependence will be used.", + }, + + "redefine_param_list": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + MaxItems: 1, + Description: "Re-run instance custom parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "k": { + Type: schema.TypeString, + Optional: true, + Description: "Key.", + }, + "v": { + Type: schema.TypeString, + Optional: true, + Description: "Value.", + }, + }, + }, + }, + }, + } +} + +func resourceTencentCloudWedataTaskRerunInstanceAsyncCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_rerun_instance_async.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + projectId string + asyncId string + ) + var ( + request = wedatav20250806.NewRerunTaskInstancesAsyncRequest() + response = wedatav20250806.NewRerunTaskInstancesAsyncResponse() + ) + + if v, ok := d.GetOk("project_id"); ok { + projectId = v.(string) + request.ProjectId = helper.String(projectId) + } + + if v, ok := d.GetOk("instance_key_list"); ok { + instanceKeyListSet := v.(*schema.Set).List() + for i := range instanceKeyListSet { + instanceKeyList := instanceKeyListSet[i].(string) + request.InstanceKeyList = append(request.InstanceKeyList, helper.String(instanceKeyList)) + } + } + + if v, ok := d.GetOk("rerun_type"); ok { + request.RerunType = helper.String(v.(string)) + } + + if v, ok := d.GetOk("check_parent_type"); ok { + request.CheckParentType = helper.String(v.(string)) + } + + if v, ok := d.GetOk("son_range_type"); ok { + request.SonRangeType = helper.String(v.(string)) + } + + if v, ok := d.GetOkExists("skip_event_listening"); ok { + request.SkipEventListening = helper.Bool(v.(bool)) + } + + if v, ok := d.GetOkExists("redefine_parallel_num"); ok { + request.RedefineParallelNum = helper.IntInt64(v.(int)) + } + + if v, ok := d.GetOk("redefine_self_workflow_dependency"); ok { + request.RedefineSelfWorkflowDependency = helper.String(v.(string)) + } + + if redefineParamListMap, ok := helper.InterfacesHeadMap(d, "redefine_param_list"); ok { + kVMap := wedatav20250806.KVMap{} + if v, ok := redefineParamListMap["k"].(string); ok && v != "" { + kVMap.K = helper.String(v) + } + if v, ok := redefineParamListMap["v"].(string); ok && v != "" { + kVMap.V = helper.String(v) + } + request.RedefineParamList = &kVMap + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().RerunTaskInstancesAsyncWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s create wedata task rerun instance async failed, reason:%+v", logId, reqErr) + return reqErr + } + + if response == nil || response.Response == nil || response.Response.Data == nil || 
response.Response.Data.AsyncId == nil { + return fmt.Errorf("The returned AsyncId is empty.") + } + asyncId = *response.Response.Data.AsyncId + + if _, err := (&resource.StateChangeConf{ + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + Pending: []string{"INIT", "RUNNING"}, + Refresh: resourceWedataTaskRerunInstanceAsyncCreateStateRefreshFunc_0_0(ctx, projectId, asyncId), + Target: []string{"SUCCESS"}, + Timeout: 3600 * time.Second, + }).WaitForStateContext(ctx); err != nil { + return err + } + d.SetId(strings.Join([]string{projectId, asyncId}, tccommon.FILED_SP)) + + return resourceTencentCloudWedataTaskRerunInstanceAsyncRead(d, meta) +} + +func resourceTencentCloudWedataTaskRerunInstanceAsyncRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_rerun_instance_async.read")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceTencentCloudWedataTaskRerunInstanceAsyncDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_rerun_instance_async.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceWedataTaskRerunInstanceAsyncCreateStateRefreshFunc_0_0(ctx context.Context, projectId string, asyncId string) resource.StateRefreshFunc { + var req *wedatav20250806.GetOpsAsyncJobRequest + return func() (interface{}, string, error) { + meta := tccommon.ProviderMetaFromContext(ctx) + if meta == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + if req == nil { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + _ = d + req = wedatav20250806.NewGetOpsAsyncJobRequest() + req.ProjectId = helper.String(projectId) + + req.AsyncId = helper.String(asyncId) + + } + resp, err := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().GetOpsAsyncJobWithContext(ctx, req) + if err != nil { + return nil, "", err + } + if resp == nil || resp.Response == nil { + return nil, "", nil + } + state := fmt.Sprintf("%v", *resp.Response.Data.Status) + return resp.Response, state, nil + } +} diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.md b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.md new file mode 100644 index 0000000000..55432e603a --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async.md @@ -0,0 +1,10 @@ +Provides a resource to create a wedata task re-run instance + +Example Usage + +```hcl +resource "tencentcloud_wedata_task_rerun_instance_async" "wedata_task_rerun_instance_async" { + project_id = "1859317240494305280" + instance_key_list = ["20250324192240178_2025-10-13 16:20:00"] +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_test.go b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_test.go new file mode 100644 index 0000000000..d7492fed65 --- /dev/null +++ 
b/tencentcloud/services/wedata/resource_tc_wedata_task_rerun_instance_async_test.go @@ -0,0 +1,32 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskRerunInstanceAsyncResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskRerunInstanceAsync, + Check: resource.ComposeTestCheckFunc(resource.TestCheckResourceAttrSet("tencentcloud_wedata_task_rerun_instance_async.wedata_task_rerun_instance_async", "id")), + }, + }, + }) +} + +const testAccWedataTaskRerunInstanceAsync = ` + +resource "tencentcloud_wedata_task_rerun_instance_async" "wedata_task_rerun_instance_async" { + project_id = "1859317240494305280" + instance_key_list = ["20251013154418424_2025-10-13 18:10:00"] +} +` diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.go b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.go new file mode 100644 index 0000000000..c0d964429c --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.go @@ -0,0 +1,152 @@ +// Code generated by iacg; DO NOT EDIT. +package wedata + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudWedataTaskSetSuccessInstanceAsync() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudWedataTaskSetSuccessInstanceAsyncCreate, + Read: resourceTencentCloudWedataTaskSetSuccessInstanceAsyncRead, + Delete: resourceTencentCloudWedataTaskSetSuccessInstanceAsyncDelete, + Schema: map[string]*schema.Schema{ + "project_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Project Id.", + }, + + "instance_key_list": { + Type: schema.TypeSet, + Required: true, + ForceNew: true, + Description: "Instance id list, which can be obtained from ListInstances.", + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + } +} + +func resourceTencentCloudWedataTaskSetSuccessInstanceAsyncCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_set_success_instance_async.create")() + defer tccommon.InconsistentCheck(d, meta)() + + logId := tccommon.GetLogId(tccommon.ContextNil) + + ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + + var ( + projectId string + asyncId string + ) + var ( + request = wedatav20250806.NewSetSuccessTaskInstancesAsyncRequest() + response = wedatav20250806.NewSetSuccessTaskInstancesAsyncResponse() + ) + + if v, ok := d.GetOk("project_id"); ok { + projectId = v.(string) + request.ProjectId = helper.String(projectId) + } + + if v, ok := d.GetOk("instance_key_list"); ok { + instanceKeyListSet := v.(*schema.Set).List() + for i := range instanceKeyListSet { + instanceKeyList := 
instanceKeyListSet[i].(string) + request.InstanceKeyList = append(request.InstanceKeyList, helper.String(instanceKeyList)) + } + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().SetSuccessTaskInstancesAsyncWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + response = result + return nil + }) + if reqErr != nil { + log.Printf("[CRITAL]%s create wedata task set success instance async failed, reason:%+v", logId, reqErr) + return reqErr + } + + if response == nil || response.Response == nil || response.Response.Data == nil || response.Response.Data.AsyncId == nil { + return fmt.Errorf("The returned AsyncId is empty.") + } + asyncId = *response.Response.Data.AsyncId + + if _, err := (&resource.StateChangeConf{ + Delay: 1 * time.Second, + MinTimeout: 3 * time.Second, + Pending: []string{"INIT", "RUNNING"}, + Refresh: resourceWedataTaskSetSuccessInstanceAsyncCreateStateRefreshFunc_0_0(ctx, projectId, asyncId), + Target: []string{"SUCCESS"}, + Timeout: 3600 * time.Second, + }).WaitForStateContext(ctx); err != nil { + return err + } + d.SetId(strings.Join([]string{projectId, asyncId}, tccommon.FILED_SP)) + + return resourceTencentCloudWedataTaskSetSuccessInstanceAsyncRead(d, meta) +} + +func resourceTencentCloudWedataTaskSetSuccessInstanceAsyncRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_set_success_instance_async.read")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceTencentCloudWedataTaskSetSuccessInstanceAsyncDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_wedata_task_set_success_instance_async.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + return nil +} + +func resourceWedataTaskSetSuccessInstanceAsyncCreateStateRefreshFunc_0_0(ctx context.Context, projectId string, asyncId string) resource.StateRefreshFunc { + var req *wedatav20250806.GetOpsAsyncJobRequest + return func() (interface{}, string, error) { + meta := tccommon.ProviderMetaFromContext(ctx) + if meta == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + if req == nil { + d := tccommon.ResourceDataFromContext(ctx) + if d == nil { + return nil, "", fmt.Errorf("resource data can not be nil") + } + _ = d + req = wedatav20250806.NewGetOpsAsyncJobRequest() + req.ProjectId = helper.String(projectId) + + req.AsyncId = helper.String(asyncId) + + } + resp, err := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseWedataV20250806Client().GetOpsAsyncJobWithContext(ctx, req) + if err != nil { + return nil, "", err + } + if resp == nil || resp.Response == nil { + return nil, "", nil + } + state := fmt.Sprintf("%v", *resp.Response.Data.Status) + return resp.Response, state, nil + } +} diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.md b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.md new file mode 100644 index 0000000000..99bfee1419 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async.md @@ -0,0 +1,10 @@ +Provides a resource to create a wedata task set success instance + +Example Usage + +```hcl +resource 
"tencentcloud_wedata_task_set_success_instance_async" "wedata_task_set_success_instance_async" { + project_id = "1859317240494305280" + instance_key_list = ["20250324192240178_2025-10-13 17:00:00"] +} +``` \ No newline at end of file diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_extension.go b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_extension.go new file mode 100644 index 0000000000..4a61b9ede3 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_extension.go @@ -0,0 +1 @@ +package wedata diff --git a/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_test.go b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_test.go new file mode 100644 index 0000000000..05b3bdcb81 --- /dev/null +++ b/tencentcloud/services/wedata/resource_tc_wedata_task_set_success_instance_async_test.go @@ -0,0 +1,34 @@ +package wedata_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudWedataTaskSetSuccessInstanceAsyncResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccWedataTaskSetSuccessInstanceAsync, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_wedata_task_set_success_instance_async.wedata_task_set_success_instance_async", "id"), + ), + }, + }, + }) +} + +const testAccWedataTaskSetSuccessInstanceAsync = ` + +resource "tencentcloud_wedata_task_set_success_instance_async" "wedata_task_set_success_instance_async" { + project_id = "1859317240494305280" + instance_key_list = ["20251013154418424_2025-10-13 18:10:00"] +} +` diff --git a/tencentcloud/services/wedata/service_tencentcloud_wedata.go b/tencentcloud/services/wedata/service_tencentcloud_wedata.go index ac14f28899..9b1323036f 100644 --- a/tencentcloud/services/wedata/service_tencentcloud_wedata.go +++ b/tencentcloud/services/wedata/service_tencentcloud_wedata.go @@ -9,6 +9,7 @@ import ( "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common" wedata "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20210820" + wedatav20250806 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/wedata/v20250806" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/connectivity" "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" @@ -115,6 +116,588 @@ func (me *WedataService) DescribeWedataRuleTemplatesByFilter(ctx context.Context return } +func (me *WedataService) DescribeWedataOpsWorkflowsByFilter(ctx context.Context, param map[string]interface{}) (ret []*wedatav20250806.OpsWorkflow, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewListOpsWorkflowsRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "FolderId" { + request.FolderId = v.(*string) + } + if k == "Status" { + request.Status = v.(*string) + } + if k == "OwnerUin" { + 
request.OwnerUin = v.(*string)
+		}
+		if k == "WorkflowType" {
+			request.WorkflowType = v.(*string)
+		}
+		if k == "KeyWord" {
+			request.KeyWord = v.(*string)
+		}
+		if k == "SortItem" {
+			request.SortItem = v.(*string)
+		}
+		if k == "SortType" {
+			request.SortType = v.(*string)
+		}
+		if k == "CreateUserUin" {
+			request.CreateUserUin = v.(*string)
+		}
+		if k == "ModifyTime" {
+			request.ModifyTime = v.(*string)
+		}
+		if k == "CreateTime" {
+			request.CreateTime = v.(*string)
+		}
+	}
+
+	ratelimit.Check(request.GetAction())
+
+	var (
+		offset uint64 = 1 // page number starts from 1
+		limit  uint64 = 100
+	)
+	for {
+		request.PageNumber = &offset
+		request.PageSize = &limit
+		response, err := me.client.UseWedataV20250806Client().ListOpsWorkflows(request)
+		if err != nil {
+			errRet = err
+			return
+		}
+		log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+		if response == nil || response.Response == nil || response.Response.Data == nil || len(response.Response.Data.Items) < 1 {
+			break
+		}
+		ret = append(ret, response.Response.Data.Items...)
+		if len(response.Response.Data.Items) < int(limit) {
+			break
+		}
+
+		offset++ // advance one page; PageNumber is a 1-based page index, not a record offset
+	}
+
+	return
+}
+
+func (me *WedataService) DescribeWedataOpsWorkflowByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.GetOpsWorkflowResponseParams, errRet error) {
+	var (
+		logId   = tccommon.GetLogId(ctx)
+		request = wedatav20250806.NewGetOpsWorkflowRequest()
+	)
+
+	defer func() {
+		if errRet != nil {
+			log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
+		}
+	}()
+
+	for k, v := range param {
+		if k == "ProjectId" {
+			request.ProjectId = v.(*string)
+		}
+		if k == "WorkflowId" {
+			request.WorkflowId = v.(*string)
+		}
+	}
+
+	ratelimit.Check(request.GetAction())
+
+	response, err := me.client.UseWedataV20250806Client().GetOpsWorkflow(request)
+	if err != nil {
+		errRet = err
+		return
+	}
+	log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+	if response == nil || response.Response == nil {
+		return
+	}
+
+	ret = response.Response
+	return
+}
+
+func (me *WedataService) DescribeWedataTaskInstancesByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.ListTaskInstancesResponseParams, errRet error) {
+	var (
+		logId   = tccommon.GetLogId(ctx)
+		request = wedatav20250806.NewListTaskInstancesRequest()
+	)
+
+	defer func() {
+		if errRet != nil {
+			log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error())
+		}
+	}()
+
+	for k, v := range param {
+		if k == "ProjectId" {
+			request.ProjectId = v.(*string)
+		}
+		if k == "Keyword" {
+			request.Keyword = v.(*string)
+		}
+		if k == "TimeZone" {
+			request.TimeZone = v.(*string)
+		}
+		if k == "InstanceType" {
+			request.InstanceType = v.(*uint64)
+		}
+		if k == "InstanceState" {
+			request.InstanceState = v.(*string)
+		}
+		if k == "TaskTypeId" {
+			request.TaskTypeId = v.(*uint64)
+		}
+		if k == "CycleType" {
+			request.CycleType = v.(*string)
+		}
+		if k == "OwnerUin" {
+			request.OwnerUin = v.(*string)
+		}
+		if k == "FolderId" {
+			request.FolderId = v.(*string)
+		}
+		if k == "WorkflowId" {
+			request.WorkflowId = v.(*string)
+		}
+		if k == "ExecutorGroupId" {
+			request.ExecutorGroupId = v.(*string)
+		}
+		if k == "ScheduleTimeFrom" {
+
request.ScheduleTimeFrom = v.(*string) + } + if k == "ScheduleTimeTo" { + request.ScheduleTimeTo = v.(*string) + } + if k == "StartTimeFrom" { + request.StartTimeFrom = v.(*string) + } + if k == "StartTimeTo" { + request.StartTimeTo = v.(*string) + } + if k == "LastUpdateTimeFrom" { + request.LastUpdateTimeFrom = v.(*string) + } + if k == "LastUpdateTimeTo" { + request.LastUpdateTimeTo = v.(*string) + } + if k == "SortColumn" { + request.SortColumn = v.(*string) + } + if k == "SortType" { + request.SortType = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().ListTaskInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataTaskInstanceByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.GetTaskInstanceResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewGetTaskInstanceRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "InstanceKey" { + request.InstanceKey = v.(*string) + } + if k == "TimeZone" { + request.TimeZone = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().GetTaskInstance(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataTaskInstanceLogByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.GetTaskInstanceLogResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewGetTaskInstanceLogRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "InstanceKey" { + request.InstanceKey = v.(*string) + } + if k == "LifeRoundNum" { + request.LifeRoundNum = v.(*uint64) + } + if k == "LogLevel" { + request.LogLevel = v.(*string) + } + if k == "NextCursor" { + request.NextCursor = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().GetTaskInstanceLog(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataTaskInstanceExecutionsByFilter(ctx context.Context, param map[string]interface{}) (ret 
*wedatav20250806.ListTaskInstanceExecutionsResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewListTaskInstanceExecutionsRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "InstanceKey" { + request.InstanceKey = v.(*string) + } + if k == "TimeZone" { + request.TimeZone = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().ListTaskInstanceExecutions(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataUpstreamTaskInstancesByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.ListUpstreamTaskInstancesResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewListUpstreamTaskInstancesRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "InstanceKey" { + request.InstanceKey = v.(*string) + } + if k == "TimeZone" { + request.TimeZone = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().ListUpstreamTaskInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataDownstreamTaskInstancesByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.ListDownstreamTaskInstancesResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewListDownstreamTaskInstancesRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "InstanceKey" { + request.InstanceKey = v.(*string) + } + if k == "TimeZone" { + request.TimeZone = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().ListDownstreamTaskInstances(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataOpsTaskOwnerById(ctx context.Context, projectId, taskId string) (ret *wedatav20250806.Task, errRet error) { + 
logId := tccommon.GetLogId(ctx) + + request := wedatav20250806.NewGetOpsTaskRequest() + request.ProjectId = &projectId + request.TaskId = &taskId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().GetOpsTask(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response.Response == nil { + return + } + + ret = response.Response.Data + return +} + +func (me *WedataService) DescribeWedataOpsAsyncJobByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.GetOpsAsyncJobResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewGetOpsAsyncJobRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "AsyncId" { + request.AsyncId = v.(*string) + } + } + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().GetOpsAsyncJob(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response == nil || response.Response == nil { + return + } + + ret = response.Response + return +} + +func (me *WedataService) DescribeWedataOpsAlarmRuleById(ctx context.Context, projectId, alarmRuleId string) (ret *wedatav20250806.AlarmRuleData, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := wedatav20250806.NewGetOpsAlarmRuleRequest() + request.ProjectId = &projectId + request.AlarmRuleId = &alarmRuleId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + ratelimit.Check(request.GetAction()) + + response, err := me.client.UseWedataV20250806Client().GetOpsAlarmRule(request) + if err != nil { + errRet = err + return + } + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString()) + + if response.Response == nil { + return + } + + ret = response.Response.Data + return +} + +func (me *WedataService) DescribeWedataOpsAlarmRulesByFilter(ctx context.Context, param map[string]interface{}) (ret *wedatav20250806.ListOpsAlarmRulesResponseParams, errRet error) { + var ( + logId = tccommon.GetLogId(ctx) + request = wedatav20250806.NewListOpsAlarmRulesRequest() + ) + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + for k, v := range param { + if k == "ProjectId" { + request.ProjectId = v.(*string) + } + if k == "MonitorObjectType" { + request.MonitorObjectType = v.(*int64) + } + if k == "TaskId" { + request.TaskId = v.(*string) + } + if k == "AlarmType" { + request.AlarmType = v.(*string) + } + if k == "AlarmLevel" { + 
request.AlarmLevel = v.(*int64)
+		}
+		if k == "AlarmRecipientId" {
+			request.AlarmRecipientId = v.(*string)
+		}
+		if k == "Keyword" {
+			request.Keyword = v.(*string)
+		}
+		if k == "CreateUserUin" {
+			request.CreateUserUin = v.(*string)
+		}
+		if k == "CreateTimeFrom" {
+			request.CreateTimeFrom = v.(*string)
+		}
+		if k == "CreateTimeTo" {
+			request.CreateTimeTo = v.(*string)
+		}
+		if k == "UpdateTimeFrom" {
+			request.UpdateTimeFrom = v.(*string)
+		}
+		if k == "UpdateTimeTo" {
+			request.UpdateTimeTo = v.(*string)
+		}
+	}
+
+	ratelimit.Check(request.GetAction())
+
+	response, err := me.client.UseWedataV20250806Client().ListOpsAlarmRules(request)
+	if err != nil {
+		errRet = err
+		return
+	}
+	log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())
+
+	if response == nil || response.Response == nil {
+		return
+	}
+
+	ret = response.Response
+	return
+}
+
 func (me *WedataService) DescribeWedataDataSourceListByFilter(ctx context.Context, param map[string]interface{}) (dataSourceList []*wedata.DataSourceInfo, errRet error) {
 	var (
 		logId = tccommon.GetLogId(ctx)
diff --git a/website/docs/d/wedata_downstream_task_instances.html.markdown b/website/docs/d/wedata_downstream_task_instances.html.markdown
new file mode 100644
index 0000000000..237119f6e5
--- /dev/null
+++ b/website/docs/d/wedata_downstream_task_instances.html.markdown
@@ -0,0 +1,38 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_downstream_task_instances"
+sidebar_current: "docs-tencentcloud-datasource-wedata_downstream_task_instances"
+description: |-
+  Use this data source to query detailed information of wedata downstream task instances
+---
+
+# tencentcloud_wedata_downstream_task_instances
+
+Use this data source to query detailed information of wedata downstream task instances
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_downstream_task_instances" "wedata_down_task_instances" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250731151633120_2025-10-13 17:00:00"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key` - (Required, String) Instance unique identifier.
+* `project_id` - (Required, String) Project ID.
+* `result_output_file` - (Optional, String) Used to save results.
+* `time_zone` - (Optional, String) Time zone of the input time string, default UTC+8.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Direct downstream task instances list.
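+
+As with the upstream variant documented further below, the instance keys fed to this data source can come from `tencentcloud_wedata_task_instances`. The sketch below assumes the same project as the example above and the `data[0].items[*].instance_key` attribute layout used in the upstream example.
+
+```hcl
+data "tencentcloud_wedata_task_instances" "all" {
+  project_id = "1859317240494305280"
+}
+
+data "tencentcloud_wedata_downstream_task_instances" "per_instance" {
+  for_each = toset(data.tencentcloud_wedata_task_instances.all.data[0].items[*].instance_key)
+
+  project_id   = "1859317240494305280"
+  instance_key = each.value
+}
+```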
+
+
diff --git a/website/docs/d/wedata_ops_alarm_rules.html.markdown b/website/docs/d/wedata_ops_alarm_rules.html.markdown
new file mode 100644
index 0000000000..7483f57448
--- /dev/null
+++ b/website/docs/d/wedata_ops_alarm_rules.html.markdown
@@ -0,0 +1,46 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_alarm_rules"
+sidebar_current: "docs-tencentcloud-datasource-wedata_ops_alarm_rules"
+description: |-
+  Use this data source to query detailed information of wedata ops alarm rules
+---
+
+# tencentcloud_wedata_ops_alarm_rules
+
+Use this data source to query detailed information of wedata ops alarm rules
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_ops_alarm_rules" "wedata_ops_alarm_rules" {
+  project_id = "1859317240494305280"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `project_id` - (Required, String) Project id.
+* `alarm_level` - (Optional, Int) Alarm level: 1. Normal, 2. Major, 3. Urgent.
+* `alarm_recipient_id` - (Optional, String) Query the alarm rules configured for the corresponding alarm recipient.
+* `alarm_type` - (Optional, String) Alarm rule monitoring types. Task alarms: failure: failure alarm; overtime: timeout alarm; success: success alarm; backTrackingOrRerunSuccess: backtracking or rerun success alarm; backTrackingOrRerunFailure: backtracking or rerun failure alarm. Project fluctuation alarms: projectFailureInstanceUpwardFluctuationAlarm: alarm if the upward fluctuation rate of failed instances exceeds the threshold; projectSuccessInstanceDownwardFluctuationAlarm: alarm if the downward fluctuation rate of successful instances exceeds the threshold. Offline integration task reconciliation alarms: reconciliationFailure: offline reconciliation task failure alarm; reconciliationOvertime: offline reconciliation task timeout alarm; reconciliationMismatch: alarm if the number of inconsistent entries in a data reconciliation task exceeds the threshold. Example value: ["failure"].
+* `create_time_from` - (Optional, String) The start time of the alarm rule creation time range, in the format of "2025-08-17 00:00:00".
+* `create_time_to` - (Optional, String) The end time of the alarm rule creation time range, in the format of "2025-08-26 23:59:59".
+* `create_user_uin` - (Optional, String) Alarm rule creator filtering.
+* `keyword` - (Optional, String) Query the corresponding alarm rule based on the alarm rule ID/rule name.
+* `monitor_object_type` - (Optional, Int) Monitoring object type. Task-dimension monitoring can be configured by task/workflow/project: 1. Task, 2. Workflow, 3. Project (default is 1. Task). Project-dimension monitoring covers overall project task fluctuation alarms: 7. Project fluctuation monitoring alarm.
+* `result_output_file` - (Optional, String) Used to save results.
+* `task_id` - (Optional, String) Query alarm rules based on task ID.
+* `update_time_from` - (Optional, String) The start of the last-update time range used to filter alarm rules, in the format of "2025-08-26 00:00:00".
+* `update_time_to` - (Optional, String) The end of the last-update time range used to filter alarm rules, in the format of "2025-08-26 23:59:59".
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Alarm information response.
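+
+A filtered query is sketched below; the alarm type, level, and creation-time window are illustrative values that follow the formats documented above.
+
+```hcl
+data "tencentcloud_wedata_ops_alarm_rules" "failure_rules" {
+  project_id       = "1859317240494305280"
+  alarm_type       = "failure"
+  alarm_level      = 2
+  create_time_from = "2025-08-17 00:00:00"
+  create_time_to   = "2025-08-26 23:59:59"
+}
+```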
+
+
diff --git a/website/docs/d/wedata_ops_workflow.html.markdown b/website/docs/d/wedata_ops_workflow.html.markdown
new file mode 100644
index 0000000000..6a8031aefb
--- /dev/null
+++ b/website/docs/d/wedata_ops_workflow.html.markdown
@@ -0,0 +1,37 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_workflow"
+sidebar_current: "docs-tencentcloud-datasource-wedata_ops_workflow"
+description: |-
+  Use this data source to query detailed information of wedata ops workflow
+---
+
+# tencentcloud_wedata_ops_workflow
+
+Use this data source to query detailed information of wedata ops workflow
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_ops_workflow" "wedata_ops_workflow" {
+  project_id  = "2905622749543821312"
+  workflow_id = "f328ab83-62e1-4b0a-9a18-a79b42722792"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `project_id` - (Required, String) Project ID.
+* `workflow_id` - (Required, String) Workflow ID.
+* `result_output_file` - (Optional, String) Used to save results.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Workflow scheduling details.
+
+
diff --git a/website/docs/d/wedata_ops_workflows.html.markdown b/website/docs/d/wedata_ops_workflows.html.markdown
new file mode 100644
index 0000000000..cca327ac67
--- /dev/null
+++ b/website/docs/d/wedata_ops_workflows.html.markdown
@@ -0,0 +1,50 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_workflows"
+sidebar_current: "docs-tencentcloud-datasource-wedata_ops_workflows"
+description: |-
+  Use this data source to query detailed information of wedata ops workflows
+---
+
+# tencentcloud_wedata_ops_workflows
+
+Use this data source to query detailed information of wedata ops workflows
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_ops_workflows" "wedata_ops_workflows" {
+  project_id    = "2905622749543821312"
+  folder_id     = "720ecbfb-7e5a-11f0-ba36-b8cef6a5af5c"
+  status        = "ALL_RUNNING"
+  owner_uin     = "100044349576"
+  workflow_type = "Cycle"
+  sort_type     = "ASC"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `project_id` - (Required, String) Project ID.
+* `create_time` - (Optional, String) Creation time, format yyyy-MM-dd HH:mm:ss.
+* `create_user_uin` - (Optional, String) Creator ID.
+* `folder_id` - (Optional, String) Folder ID.
+* `key_word` - (Optional, String) Workflow keyword filter, supports fuzzy matching by workflow ID/name.
+* `modify_time` - (Optional, String) Update time, format yyyy-MM-dd HH:mm:ss.
+* `owner_uin` - (Optional, String) Responsible person ID.
+* `result_output_file` - (Optional, String) Used to save results.
+* `sort_item` - (Optional, String) Sorting field, optional values: `CreateTime`, `TaskCount`.
+* `sort_type` - (Optional, String) Sorting order, `DESC` or `ASC`, uppercase.
+* `status` - (Optional, String) Workflow status filter: `ALL_RUNNING`: All scheduled, `ALL_FREEZED`: All paused, `ALL_STOPPTED`: All offline, `PART_RUNNING`: Partially scheduled, `ALL_NO_RUNNING`: All unscheduled, `ALL_INVALID`: All invalid.
+* `workflow_type` - (Optional, String) Workflow type filter, supported values: `Cycle` or `Manual`. By default, only `Cycle` is queried.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Record list.
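+
+A narrower query is sketched below; the keyword is a hypothetical placeholder, while the sort settings use the `TaskCount`/`DESC` values documented above.
+
+```hcl
+data "tencentcloud_wedata_ops_workflows" "by_keyword" {
+  project_id = "2905622749543821312"
+
+  # Fuzzy match on workflow ID/name; "demo" is an illustrative keyword.
+  key_word  = "demo"
+  sort_item = "TaskCount"
+  sort_type = "DESC"
+}
+```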
+
+
diff --git a/website/docs/d/wedata_task_instance.html.markdown b/website/docs/d/wedata_task_instance.html.markdown
new file mode 100644
index 0000000000..66c186ae24
--- /dev/null
+++ b/website/docs/d/wedata_task_instance.html.markdown
@@ -0,0 +1,38 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_instance"
+sidebar_current: "docs-tencentcloud-datasource-wedata_task_instance"
+description: |-
+  Use this data source to query detailed information of wedata task instance
+---
+
+# tencentcloud_wedata_task_instance
+
+Use this data source to query detailed information of wedata task instance
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instance" "wedata_task_instance" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250324192240178_2025-10-13 11:50:00"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key` - (Required, String) Unique instance identifier, can be obtained via ListInstances.
+* `project_id` - (Required, String) Project ID.
+* `result_output_file` - (Optional, String) Used to save results.
+* `time_zone` - (Optional, String) Time zone, the time zone of the input time string, default UTC+8.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Instance details.
+
+
diff --git a/website/docs/d/wedata_task_instance_executions.html.markdown b/website/docs/d/wedata_task_instance_executions.html.markdown
new file mode 100644
index 0000000000..ecde0a03a2
--- /dev/null
+++ b/website/docs/d/wedata_task_instance_executions.html.markdown
@@ -0,0 +1,38 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_instance_executions"
+sidebar_current: "docs-tencentcloud-datasource-wedata_task_instance_executions"
+description: |-
+  Use this data source to query detailed information of wedata task instance executions
+---
+
+# tencentcloud_wedata_task_instance_executions
+
+Use this data source to query detailed information of wedata task instance executions
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instance_executions" "wedata_task_instance_executions" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250731151633120_2025-10-13 17:00:00"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key` - (Required, String) Instance unique identifier, can be obtained via ListInstances.
+* `project_id` - (Required, String) Project ID to which it belongs.
+* `result_output_file` - (Optional, String) Used to save results.
+* `time_zone` - (Optional, String) Time zone of the input time string, default UTC+8.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Instance details.
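+
+When the timestamp embedded in an instance key should be interpreted in a different zone, `time_zone` can be set explicitly; the sketch below assumes `UTC+0` is an accepted value of the `UTC+N` form mentioned elsewhere in these docs.
+
+```hcl
+data "tencentcloud_wedata_task_instance_executions" "utc" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250731151633120_2025-10-13 17:00:00"
+  time_zone    = "UTC+0"
+}
+```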
+
+
diff --git a/website/docs/d/wedata_task_instance_log.html.markdown b/website/docs/d/wedata_task_instance_log.html.markdown
new file mode 100644
index 0000000000..06e87a2259
--- /dev/null
+++ b/website/docs/d/wedata_task_instance_log.html.markdown
@@ -0,0 +1,40 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_instance_log"
+sidebar_current: "docs-tencentcloud-datasource-wedata_task_instance_log"
+description: |-
+  Use this data source to query detailed information of wedata task instance log
+---
+
+# tencentcloud_wedata_task_instance_log
+
+Use this data source to query detailed information of wedata task instance log
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instance_log" "wedata_task_instance_log" {
+  project_id   = "1859317240494305280"
+  instance_key = "20250324192240178_2025-10-13 11:50:00"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key` - (Required, String) Unique instance identifier.
+* `project_id` - (Required, String) Project ID.
+* `life_round_num` - (Optional, Int) Instance lifecycle number, identifying a specific execution of the instance. For example: the first run of a periodic instance is 0, if manually rerun the second execution is 1; defaults to the latest execution.
+* `log_level` - (Optional, String) Log level. Valid values: Info, Debug, Warn, Error, All; defaults to All.
+* `next_cursor` - (Optional, String) Pagination cursor for log queries, no business meaning. First query uses null, subsequent queries use NextCursor from previous response.
+* `result_output_file` - (Optional, String) Used to save results.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Scheduled instance details.
+
+
diff --git a/website/docs/d/wedata_task_instances.html.markdown b/website/docs/d/wedata_task_instances.html.markdown
new file mode 100644
index 0000000000..c313f478b5
--- /dev/null
+++ b/website/docs/d/wedata_task_instances.html.markdown
@@ -0,0 +1,53 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_instances"
+sidebar_current: "docs-tencentcloud-datasource-wedata_task_instances"
+description: |-
+  Use this data source to query detailed information of wedata task instances
+---
+
+# tencentcloud_wedata_task_instances
+
+Use this data source to query detailed information of wedata task instances
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instances" "wedata_task_instances" {
+  project_id = "1859317240494305280"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `project_id` - (Required, String) Project ID.
+* `cycle_type` - (Optional, String) Task cycle type * ONEOFF_CYCLE: One-time * YEAR_CYCLE: Year * MONTH_CYCLE: Month * WEEK_CYCLE: Week * DAY_CYCLE: Day * HOUR_CYCLE: Hour * MINUTE_CYCLE: Minute * CRONTAB_CYCLE: Crontab expression type.
+* `executor_group_id` - (Optional, String) Executor resource group ID.
+* `folder_id` - (Optional, String) Task folder ID.
+* `instance_state` - (Optional, String) Instance status - WAIT_EVENT: Waiting for event - WAIT_UPSTREAM: Waiting for upstream - WAIT_RUN: Waiting to run - RUNNING: Running - SKIP_RUNNING: Skipped running - FAILED_RETRY: Failed retry - EXPIRED: Failed - COMPLETED: Success.
+* `instance_type` - (Optional, Int) Instance type - 0: Backfill type - 1: Periodic instance - 2: Non-periodic instance.
+* `keyword` - (Optional, String) Task name or Task ID. Supports fuzzy search filtering. Multiple values separated by commas.
+* `last_update_time_from` - (Optional, String) Instance last update time filter condition. Start time, format yyyy-MM-dd HH:mm:ss.
+* `last_update_time_to` - (Optional, String) Instance last update time filter condition. End time, format yyyy-MM-dd HH:mm:ss.
+* `owner_uin` - (Optional, String) Task owner ID.
+* `result_output_file` - (Optional, String) Used to save results.
+* `schedule_time_from` - (Optional, String) Instance scheduled time filter condition. Start time, format yyyy-MM-dd HH:mm:ss.
+* `schedule_time_to` - (Optional, String) Instance scheduled time filter condition. End time, format yyyy-MM-dd HH:mm:ss.
+* `sort_column` - (Optional, String) Result sorting field - SCHEDULE_DATE: Sort by scheduled time - START_TIME: Sort by execution start time - END_TIME: Sort by execution end time - COST_TIME: Sort by execution duration.
+* `sort_type` - (Optional, String) Sorting order: ASC or DESC.
+* `start_time_from` - (Optional, String) Instance execution start time filter condition. Start time, format yyyy-MM-dd HH:mm:ss.
+* `start_time_to` - (Optional, String) Instance execution start time filter condition. End time, format yyyy-MM-dd HH:mm:ss.
+* `task_type_id` - (Optional, Int) Task type ID.
+* `time_zone` - (Optional, String) Time zone. The time zone of the input time string, default UTC+8.
+* `workflow_id` - (Optional, String) Task workflow ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Instance result set.
+
+
diff --git a/website/docs/d/wedata_upstream_task_instances.html.markdown b/website/docs/d/wedata_upstream_task_instances.html.markdown
new file mode 100644
index 0000000000..9c7b51b22f
--- /dev/null
+++ b/website/docs/d/wedata_upstream_task_instances.html.markdown
@@ -0,0 +1,48 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_upstream_task_instances"
+sidebar_current: "docs-tencentcloud-datasource-wedata_upstream_task_instances"
+description: |-
+  Use this data source to query detailed information of wedata upstream task instances
+---
+
+# tencentcloud_wedata_upstream_task_instances
+
+Use this data source to query detailed information of wedata upstream task instances
+
+## Example Usage
+
+```hcl
+data "tencentcloud_wedata_task_instances" "wedata_task_instances" {
+  project_id = "1859317240494305280"
+}
+
+locals {
+  instance_keys = data.tencentcloud_wedata_task_instances.wedata_task_instances.data[0].items[*].instance_key
+}
+
+data "tencentcloud_wedata_upstream_task_instances" "wedata_upstream_task_instances" {
+  for_each = toset(local.instance_keys)
+
+  project_id   = "1859317240494305280"
+  instance_key = each.value
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key` - (Required, String) Unique instance identifier.
+* `project_id` - (Required, String) Project ID.
+* `result_output_file` - (Optional, String) Used to save results.
+* `time_zone` - (Optional, String) Time zone, default UTC+8.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `data` - Upstream instance list.
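+
+One way to surface the per-instance results of the `for_each` example above is a map-valued output keyed by instance key; `data` is the exported attribute listed above.
+
+```hcl
+output "upstream_by_instance" {
+  value = {
+    for key, ds in data.tencentcloud_wedata_upstream_task_instances.wedata_upstream_task_instances :
+    key => ds.data
+  }
+}
+```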
+
+
diff --git a/website/docs/r/wedata_ops_alarm_rule.html.markdown b/website/docs/r/wedata_ops_alarm_rule.html.markdown
new file mode 100644
index 0000000000..913e5fd7ef
--- /dev/null
+++ b/website/docs/r/wedata_ops_alarm_rule.html.markdown
@@ -0,0 +1,161 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_alarm_rule"
+sidebar_current: "docs-tencentcloud-resource-wedata_ops_alarm_rule"
+description: |-
+  Provides a resource to create a wedata ops alarm rule
+---
+
+# tencentcloud_wedata_ops_alarm_rule
+
+Provides a resource to create a wedata ops alarm rule
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_ops_alarm_rule" "wedata_ops_alarm_rule" {
+  alarm_level     = 1
+  alarm_rule_name = "tf_test"
+  alarm_types = [
+    "failure",
+  ]
+  description = "ccc"
+  monitor_object_ids = [
+    "20230906105118824",
+  ]
+  monitor_object_type = 1
+  project_id          = "1859317240494305280"
+
+  alarm_groups {
+    alarm_escalation_interval      = 15
+    alarm_escalation_recipient_ids = []
+    alarm_recipient_ids = [
+      "100029411056",
+    ]
+    alarm_recipient_type = 1
+    alarm_ways = [
+      "1",
+    ]
+
+    notification_fatigue {
+      notify_count    = 1
+      notify_interval = 5
+
+      quiet_intervals {
+        days_of_week = [
+          6,
+          7,
+        ]
+        end_time   = "21:00:00"
+        start_time = "10:00:00"
+      }
+    }
+  }
+
+  alarm_rule_detail {
+    data_backfill_or_rerun_trigger = 1
+    trigger                        = 2
+  }
+}
```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `alarm_groups` - (Required, List) Alarm receiver configuration information.
+* `alarm_rule_name` - (Required, String) Alert rule name.
+* `alarm_types` - (Required, Set: [`String`]) Alarm rule monitoring types. Task alarms: failure: failure alarm; overtime: timeout alarm; success: success alarm; backTrackingOrRerunSuccess: backtracking or rerun success alarm; backTrackingOrRerunFailure: backtracking or rerun failure alarm. Project fluctuation alarms: projectFailureInstanceUpwardFluctuationAlarm: alarm if the upward fluctuation rate of failed instances exceeds the threshold; projectSuccessInstanceDownwardFluctuationAlarm: alarm if the downward fluctuation rate of successful instances exceeds the threshold. Offline integration task reconciliation alarms: reconciliationFailure: offline reconciliation task failure alarm; reconciliationOvertime: offline reconciliation task timeout alarm; reconciliationMismatch: alarm if the number of inconsistent entries in a data reconciliation task exceeds the threshold. Example value: ["failure"].
+* `monitor_object_ids` - (Required, Set: [`String`]) A list of monitored object business IDs. Different business IDs are passed in based on the `monitor_object_type` setting. For example, 1 (Task) - MonitorObjectIds is a list of task IDs; 2 (Workflow) - MonitorObjectIds is a list of workflow IDs (workflow IDs can be obtained from the ListWorkflows interface); 3 (Project) - MonitorObjectIds is a list of project IDs. Example value: ["ddc"].
+* `project_id` - (Required, String, ForceNew) Project id.
+* `alarm_level` - (Optional, Int) Alarm level: 1. Normal, 2. Major, 3. Urgent (default 1. Normal).
+* `alarm_rule_detail` - (Optional, List) Alarm rule configuration information: Success alarms do not require configuration. Failure alarms can be configured as either first-failure alarms or all retry failure alarms. Timeout configuration requires the timeout type and timeout threshold. Project fluctuation alarms require the fluctuation rate and anti-shake period.
+* `description` - (Optional, String) Alarm rule description.
+* `monitor_object_type` - (Optional, Int) Monitoring object type, Task-based monitoring: Configurable by task/workflow/project: 1. Task, 2. Workflow, 3. Project (default is 1. Task). Project-based monitoring: Alerts for overall project task fluctuations, 7: Project fluctuation monitoring alerts. + +The `alarm_groups` object supports the following: + +* `alarm_escalation_interval` - (Optional, Int) Alarm escalation interval. +* `alarm_escalation_recipient_ids` - (Optional, Set) Alarm escalator ID list. If the alarm receiver or the upper escalator does not confirm the alarm within the alarm interval, the alarm will be sent to the next level escalator. +* `alarm_recipient_ids` - (Optional, Set) Depending on the type of AlarmRecipientType, this list has different business IDs: 1 (Specified Person): Alarm Recipient ID List; 2 (Task Responsible Person): No configuration required; 3 (Duty Roster): Duty Roster ID List. +* `alarm_recipient_type` - (Optional, Int) Alarm Recipient Type: 1. Designated Personnel, 2. Task Responsible Personnel, 3. Duty Roster (Default: 1. Designated Personnel). +* `alarm_ways` - (Optional, Set) Alert Channels: 1: Email, 2: SMS, 3: WeChat, 4: Voice, 5: WeChat Enterprise, 6: Http, 7: WeChat Enterprise Group, 8: Lark Group, 9: DingTalk Group, 10: Slack Group, 11: Teams Group (Default: Email), Only one channel can be selected. +* `notification_fatigue` - (Optional, List) Alarm notification fatigue configuration. +* `web_hooks` - (Optional, List) List of webhook addresses for corporate WeChat groups, Feishu groups, DingTalk groups, Slack groups, and Teams groups. + +The `alarm_rule_detail` object supports the following: + +* `data_backfill_or_rerun_time_out_ext_info` - (Optional, List) Detailed configuration of re-running and re-recording instance timeout. +* `data_backfill_or_rerun_trigger` - (Optional, Int) Re-recording trigger timing: 1 - Triggered by the first failure; 2 - Triggered by completion of all retries. +* `project_instance_statistics_alarm_info_list` - (Optional, List) Project fluctuation alarm configuration details. +* `reconciliation_ext_info` - (Optional, List) Offline integrated reconciliation alarm configuration information. +* `time_out_ext_info` - (Optional, List) Periodic instance timeout configuration details. +* `trigger` - (Optional, Int) Failure trigger timing: 1 - Triggered on first failure; 2 -- Triggered when all retries complete (default). + +The `data_backfill_or_rerun_time_out_ext_info` object of `alarm_rule_detail` supports the following: + +* `hour` - (Optional, Int) Specify the timeout value in hours. The default value is 0. +* `min` - (Optional, Int) The timeout value is specified in minutes. The default value is 1. +* `rule_type` - (Optional, Int) Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. Estimated completion within the period but not completed. +* `schedule_time_zone` - (Optional, String) The time zone configuration corresponding to the timeout period, such as UTC+7, the default is UTC+8. +* `type` - (Optional, Int) Timeout value configuration type: 1-Specified value; 2-Average value. + +The `notification_fatigue` object of `alarm_groups` supports the following: + +* `notify_count` - (Optional, Int) Number of alarms. +* `notify_interval` - (Optional, Int) Alarm interval, in minutes. 
+* `quiet_intervals` - (Optional, List) Do not disturb time, for example, the example value [{DaysOfWeek: [1, 2], StartTime: "00:00:00", EndTime: "09:00:00"}] means do not disturb from 00:00 to 09:00 every Monday and Tuesday. + +The `project_instance_statistics_alarm_info_list` object of `alarm_rule_detail` supports the following: + +* `alarm_type` - (Required, String) Alarm type: projectFailureInstanceUpwardFluctuationAlarm: Failure instance upward fluctuation alarm; projectSuccessInstanceDownwardFluctuationAlarm: Success instance downward fluctuation alarm. +* `instance_count` - (Optional, Int) The cumulative number of instances on the day; the downward fluctuation of the number of failed instances on the day. +* `instance_threshold_count_percent` - (Optional, Int) The alarm threshold for the proportion of instance successes fluctuating downwards; the alarm threshold for the proportion of instance failures fluctuating upwards. +* `instance_threshold_count` - (Optional, Int) The cumulative instance number fluctuation threshold. +* `is_cumulant` - (Optional, Bool) Whether to calculate cumulatively, false: continuous, true: cumulative. +* `stabilize_statistics_cycle` - (Optional, Int) Stability statistics period (number of anti-shake configuration statistics periods). +* `stabilize_threshold` - (Optional, Int) Stability threshold (number of statistical cycles for anti-shake configuration). + +The `quiet_intervals` object of `notification_fatigue` supports the following: + +* `days_of_week` - (Optional, Set) According to the ISO standard, 1 represents Monday and 7 represents Sunday. +* `end_time` - (Optional, String) End time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss. +* `start_time` - (Optional, String) Start time, with precision of hours, minutes, and seconds, in the format of HH:mm:ss. + +The `reconciliation_ext_info` object of `alarm_rule_detail` supports the following: + +* `hour` - (Optional, Int) Reconciliation task timeout threshold: hours, default is 0. +* `min` - (Optional, Int) Reconciliation task timeout threshold: minutes, default is 1. +* `mismatch_count` - (Optional, Int) Reconciliation inconsistency threshold, RuleType=reconciliationMismatch. This field needs to be configured and has no default value. +* `rule_type` - (Optional, String) Offline alarm rule types: reconciliationFailure: Offline reconciliation failure alarm; reconciliationOvertime: Offline reconciliation task timeout alarm (timeout must be configured); reconciliationMismatch: Offline reconciliation mismatch alarm (mismatch threshold must be configured). + +The `time_out_ext_info` object of `alarm_rule_detail` supports the following: + +* `hour` - (Optional, Int) Specify the timeout value in hours. The default value is 0. +* `min` - (Optional, Int) The timeout value is specified in minutes. The default value is 1. +* `rule_type` - (Optional, Int) Timeout alarm configuration: 1. Estimated running time exceeded, 2. Estimated completion time exceeded, 3. Estimated waiting time for scheduling exceeded, 4. Estimated completion within the period but not completed. +* `schedule_time_zone` - (Optional, String) The time zone configuration corresponding to the timeout period, such as UTC+7, the default is UTC+8. +* `type` - (Optional, Int) Timeout value configuration type: 1-Specified value; 2-Average value. + +The `web_hooks` object of `alarm_groups` supports the following: + +* `alarm_way` - (Optional, String) Alert channel value: 7. Enterprise WeChat group, 8. Feishu group, 9. 
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
+
+## Import
+
+wedata ops alarm rule can be imported using the id, e.g.
+
+```
+terraform import tencentcloud_wedata_ops_alarm_rule.wedata_ops_alarm_rule projectId#alarmRuleId
+```
diff --git a/website/docs/r/wedata_ops_stop_task_async.html.markdown b/website/docs/r/wedata_ops_stop_task_async.html.markdown
new file mode 100644
index 0000000000..9bb5e14839
--- /dev/null
+++ b/website/docs/r/wedata_ops_stop_task_async.html.markdown
@@ -0,0 +1,38 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_stop_task_async"
+sidebar_current: "docs-tencentcloud-resource-wedata_ops_stop_task_async"
+description: |-
+  Provides a resource to create a wedata ops stop task async
+---
+
+# tencentcloud_wedata_ops_stop_task_async
+
+Provides a resource to create a wedata ops stop task async
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_ops_stop_task_async" "wedata_ops_stop_task_async" {
+  project_id = "1859317240494305280"
+  task_ids   = ["20251013154418424"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `project_id` - (Required, String, ForceNew) Project ID.
+* `task_ids` - (Required, Set: [`String`], ForceNew) Task ID list.
+* `kill_instance` - (Optional, Bool, ForceNew) Whether to terminate generated instances. The default is false; if true, the operation waits for all forced terminations to complete.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
diff --git a/website/docs/r/wedata_ops_task_owner.html.markdown b/website/docs/r/wedata_ops_task_owner.html.markdown
new file mode 100644
index 0000000000..23a62234a8
--- /dev/null
+++ b/website/docs/r/wedata_ops_task_owner.html.markdown
@@ -0,0 +1,47 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_ops_task_owner"
+sidebar_current: "docs-tencentcloud-resource-wedata_ops_task_owner"
+description: |-
+  Provides a resource to create a wedata ops task owner
+---
+
+# tencentcloud_wedata_ops_task_owner
+
+Provides a resource to create a wedata ops task owner
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_ops_task_owner" "wedata_ops_task_owner" {
+  owner_uin  = "100029411056;100042282926"
+  project_id = "2430455587205529600"
+  task_id    = "20251009144419600"
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `owner_uin` - (Required, String) Task owner ID. For multiple owners, separate them with `;`, for example: `100029411056;100042282926`.
+* `project_id` - (Required, String) Project ID.
+* `task_id` - (Required, String) Task ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
+
+## Import
+
+wedata ops task owner can be imported using the id, e.g.
+
+```
+terraform import tencentcloud_wedata_ops_task_owner.wedata_ops_task_owner projectId#taskId
+```
diff --git a/website/docs/r/wedata_task_kill_instance_async.html.markdown b/website/docs/r/wedata_task_kill_instance_async.html.markdown
new file mode 100644
index 0000000000..41885a08ac
--- /dev/null
+++ b/website/docs/r/wedata_task_kill_instance_async.html.markdown
@@ -0,0 +1,37 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_kill_instance_async"
+sidebar_current: "docs-tencentcloud-resource-wedata_task_kill_instance_async"
+description: |-
+  Provides a resource to create a wedata task kill instance
+---
+
+# tencentcloud_wedata_task_kill_instance_async
+
+Provides a resource to create a wedata task kill instance
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_task_kill_instance_async" "wedata_task_kill_instance_async" {
+  project_id        = "1859317240494305280"
+  instance_key_list = ["20250324192240178_2025-10-13 17:00:00"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key_list` - (Required, Set: [`String`], ForceNew) Instance key list, which can be obtained from ListInstances.
+* `project_id` - (Required, String, ForceNew) Project ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
diff --git a/website/docs/r/wedata_task_rerun_instance_async.html.markdown b/website/docs/r/wedata_task_rerun_instance_async.html.markdown
new file mode 100644
index 0000000000..639e7ddba2
--- /dev/null
+++ b/website/docs/r/wedata_task_rerun_instance_async.html.markdown
@@ -0,0 +1,49 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_rerun_instance_async"
+sidebar_current: "docs-tencentcloud-resource-wedata_task_rerun_instance_async"
+description: |-
+  Provides a resource to create a wedata task re-run instance
+---
+
+# tencentcloud_wedata_task_rerun_instance_async
+
+Provides a resource to create a wedata task re-run instance
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_task_rerun_instance_async" "wedata_task_rerun_instance_async" {
+  project_id        = "1859317240494305280"
+  instance_key_list = ["20250324192240178_2025-10-13 16:20:00"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key_list` - (Required, Set: [`String`], ForceNew) Instance key list, which can be obtained from ListInstances.
+* `project_id` - (Required, String, ForceNew) Project ID.
+* `check_parent_type` - (Optional, String, ForceNew) Whether to check upstream tasks: ALL, MAKE_SCOPE (selected), or NONE (do not check anything). Default is NONE.
+* `redefine_parallel_num` - (Optional, Int, ForceNew) Custom concurrency for the rerun instances. If not configured, the task's original self-dependency settings are used.
+* `redefine_param_list` - (Optional, List, ForceNew) Custom parameters for the rerun instances.
+* `redefine_self_workflow_dependency` - (Optional, String, ForceNew) Custom workflow self-dependency: yes to enable, no to disable. If not configured, the original workflow self-dependency is used.
+* `rerun_type` - (Optional, String, ForceNew) Rerun type: 1 - self; 2 - self and children; 3 - children. Default is 1.
+* `skip_event_listening` - (Optional, Bool, ForceNew) Whether to ignore event listening when rerunning.
+* `son_range_type` - (Optional, String, ForceNew) Downstream instance scope: WORKFLOW - same workflow; PROJECT - same project; ALL - all cross-workflow dependent projects. Default is WORKFLOW.
+
+The `redefine_param_list` object supports the following:
+
+* `k` - (Optional, String) Parameter key.
+* `v` - (Optional, String) Parameter value.
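+
+For example, a rerun that also reruns downstream instances within the same workflow and overrides one scheduling parameter could look like the following sketch (the project ID and instance key are placeholders reused from the example above; the parameter key `dt` is hypothetical):
+
+```hcl
+resource "tencentcloud_wedata_task_rerun_instance_async" "example" {
+  project_id        = "1859317240494305280"                     # placeholder
+  instance_key_list = ["20250324192240178_2025-10-13 16:20:00"] # placeholder instance key
+
+  check_parent_type = "NONE"     # do not check upstream tasks
+  rerun_type        = "2"        # 2: self and children
+  son_range_type    = "WORKFLOW" # limit downstream reruns to the same workflow
+
+  redefine_param_list {
+    k = "dt"         # hypothetical parameter key
+    v = "2025-10-13" # hypothetical override value
+  }
+}
+```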
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
diff --git a/website/docs/r/wedata_task_set_success_instance_async.html.markdown b/website/docs/r/wedata_task_set_success_instance_async.html.markdown
new file mode 100644
index 0000000000..08f736bbd1
--- /dev/null
+++ b/website/docs/r/wedata_task_set_success_instance_async.html.markdown
@@ -0,0 +1,37 @@
+---
+subcategory: "Wedata"
+layout: "tencentcloud"
+page_title: "TencentCloud: tencentcloud_wedata_task_set_success_instance_async"
+sidebar_current: "docs-tencentcloud-resource-wedata_task_set_success_instance_async"
+description: |-
+  Provides a resource to create a wedata task set success instance
+---
+
+# tencentcloud_wedata_task_set_success_instance_async
+
+Provides a resource to create a wedata task set success instance
+
+## Example Usage
+
+```hcl
+resource "tencentcloud_wedata_task_set_success_instance_async" "wedata_task_set_success_instance_async" {
+  project_id        = "1859317240494305280"
+  instance_key_list = ["20250324192240178_2025-10-13 17:00:00"]
+}
+```
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `instance_key_list` - (Required, Set: [`String`], ForceNew) Instance key list, which can be obtained from ListInstances.
+* `project_id` - (Required, String, ForceNew) Project ID.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+
+
diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb
index 1032d09e99..a1e303c8af 100644
--- a/website/tencentcloud.erb
+++ b/website/tencentcloud.erb
@@ -7009,9 +7009,36 @@
  • tencentcloud_wedata_data_source_list
  • + tencentcloud_wedata_downstream_task_instances
  • + tencentcloud_wedata_ops_alarm_rules
  • + tencentcloud_wedata_ops_workflow
  • + tencentcloud_wedata_ops_workflows
  • tencentcloud_wedata_rule_templates
  • + tencentcloud_wedata_task_instance
  • + tencentcloud_wedata_task_instance_executions
  • + tencentcloud_wedata_task_instance_log
  • + tencentcloud_wedata_task_instances
  • + tencentcloud_wedata_upstream_task_instances
@@ -7035,12 +7062,30 @@
  • tencentcloud_wedata_integration_task_node
  • + tencentcloud_wedata_ops_alarm_rule
  • + tencentcloud_wedata_ops_stop_task_async
  • + tencentcloud_wedata_ops_task_owner
  • tencentcloud_wedata_rule_template
  • tencentcloud_wedata_script
  • + tencentcloud_wedata_task_kill_instance_async
  • + tencentcloud_wedata_task_rerun_instance_async
  • + tencentcloud_wedata_task_set_success_instance_async