3 changes: 3 additions & 0 deletions .changelog/3559.txt
@@ -0,0 +1,3 @@
```release-note:enhancement
resource/tencentcloud_kubernetes_health_check_policy: update params property
```
@@ -1,4 +1,3 @@
// Code generated by iacg; DO NOT EDIT.
package tke

import (
@@ -28,12 +27,14 @@ func ResourceTencentCloudKubernetesHealthCheckPolicy() *schema.Resource {
"cluster_id": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "ID of the cluster.",
},

"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: "Health Check Policy Name.",
},

@@ -69,115 +70,115 @@ func resourceTencentCloudKubernetesHealthCheckPolicyCreate(d *schema.ResourceDat
defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_health_check_policy.create")()
defer tccommon.InconsistentCheck(d, meta)()

logId := tccommon.GetLogId(tccommon.ContextNil)

ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)

var (
logId = tccommon.GetLogId(tccommon.ContextNil)
ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
request = tkev20220501.NewCreateHealthCheckPolicyRequest()
response = tkev20220501.NewCreateHealthCheckPolicyResponse()
clusterId string
name string
)
var (
request = tkev20220501.NewCreateHealthCheckPolicyRequest()
response = tkev20220501.NewCreateHealthCheckPolicyResponse()
)

if v, ok := d.GetOk("cluster_id"); ok {
clusterId = v.(string)
}
if v, ok := d.GetOk("name"); ok {
name = v.(string)
}

if v, ok := d.GetOk("cluster_id"); ok {
request.ClusterId = helper.String(v.(string))
clusterId = v.(string)
}

healthCheckPolicy := tkev20220501.HealthCheckPolicy{}
if v, ok := d.GetOk("name"); ok {
healthCheckPolicy.Name = helper.String(v.(string))
}

if v, ok := d.GetOk("rules"); ok {
for _, item := range v.([]interface{}) {
rulesMap := item.(map[string]interface{})
healthCheckPolicyRule := tkev20220501.HealthCheckPolicyRule{}
if v, ok := rulesMap["auto_repair_enabled"]; ok {
healthCheckPolicyRule.AutoRepairEnabled = helper.Bool(v.(bool))
}

if v, ok := rulesMap["enabled"]; ok {
healthCheckPolicyRule.Enabled = helper.Bool(v.(bool))
}

if v, ok := rulesMap["name"]; ok {
healthCheckPolicyRule.Name = helper.String(v.(string))
}

healthCheckPolicy.Rules = append(healthCheckPolicy.Rules, &healthCheckPolicyRule)
}
}
request.HealthCheckPolicy = &healthCheckPolicy

request.HealthCheckPolicy = &healthCheckPolicy
err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().CreateHealthCheckPolicyWithContext(ctx, request)
if e != nil {
return tccommon.RetryError(e)
} else {
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
}

if result == nil || result.Response == nil {
return resource.NonRetryableError(fmt.Errorf("Create kubernetes health check policy failed, Response is nil."))
}

response = result
return nil
})

if err != nil {
log.Printf("[CRITAL]%s create kubernetes health check policy failed, reason:%+v", logId, err)
return err
}

name = *response.Response.HealthCheckPolicyName
if response.Response.HealthCheckPolicyName == nil {
return fmt.Errorf("HealthCheckPolicyName is nil.")
}

name = *response.Response.HealthCheckPolicyName
d.SetId(strings.Join([]string{clusterId, name}, tccommon.FILED_SP))

return resourceTencentCloudKubernetesHealthCheckPolicyRead(d, meta)
}

func resourceTencentCloudKubernetesHealthCheckPolicyRead(d *schema.ResourceData, meta interface{}) error {
defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_health_check_policy.read")()
defer tccommon.InconsistentCheck(d, meta)()

logId := tccommon.GetLogId(tccommon.ContextNil)

ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)

service := TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
var (
logId = tccommon.GetLogId(tccommon.ContextNil)
ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
service = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()}
)

idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
if len(idSplit) != 2 {
return fmt.Errorf("id is broken,%s", d.Id())
}

clusterId := idSplit[0]
name := idSplit[1]

_ = d.Set("cluster_id", clusterId)

_ = d.Set("name", name)

respData, err := service.DescribeKubernetesHealthCheckPolicyById(ctx, clusterId, name)
if err != nil {
return err
}

if respData == nil {
log.Printf("[WARN]%s resource `tencentcloud_kubernetes_health_check_policy` [%s] not found, please check if it has been deleted.\n", logId, d.Id())
d.SetId("")
log.Printf("[WARN]%s resource `kubernetes_health_check_policy` [%s] not found, please check if it has been deleted.\n", logId, d.Id())
return nil
}

_ = d.Set("cluster_id", clusterId)

if respData.Name != nil {
_ = d.Set("name", respData.Name)
name = *respData.Name
}

rulesList := make([]map[string]interface{}, 0, len(respData.Rules))
if respData.Rules != nil {
rulesList := make([]map[string]interface{}, 0, len(respData.Rules))
for _, rules := range respData.Rules {
rulesMap := map[string]interface{}{}

if rules.AutoRepairEnabled != nil {
rulesMap["auto_repair_enabled"] = rules.AutoRepairEnabled
}
@@ -203,37 +204,21 @@ func resourceTencentCloudKubernetesHealthCheckPolicyUpdate(d *schema.ResourceDat
defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_health_check_policy.update")()
defer tccommon.InconsistentCheck(d, meta)()

logId := tccommon.GetLogId(tccommon.ContextNil)

ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
var (
logId = tccommon.GetLogId(tccommon.ContextNil)
ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
)

immutableArgs := []string{"cluster_id", "name"}
for _, v := range immutableArgs {
if d.HasChange(v) {
return fmt.Errorf("argument `%s` cannot be changed", v)
}
}
idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
if len(idSplit) != 2 {
return fmt.Errorf("id is broken,%s", d.Id())
}

clusterId := idSplit[0]
name := idSplit[1]

needChange := false
mutableArgs := []string{"rules"}
for _, v := range mutableArgs {
if d.HasChange(v) {
needChange = true
break
}
}

if needChange {
if d.HasChange("rules") {
request := tkev20220501.NewModifyHealthCheckPolicyRequest()

request.ClusterId = helper.String(clusterId)

healthCheckPolicy := tkev20220501.HealthCheckPolicy{}
healthCheckPolicy.Name = helper.String(name)
if v, ok := d.GetOk("rules"); ok {
@@ -243,26 +228,32 @@ func resourceTencentCloudKubernetesHealthCheckPolicyUpdate(d *schema.ResourceDat
if v, ok := rulesMap["auto_repair_enabled"]; ok {
healthCheckPolicyRule.AutoRepairEnabled = helper.Bool(v.(bool))
}

if v, ok := rulesMap["enabled"]; ok {
healthCheckPolicyRule.Enabled = helper.Bool(v.(bool))
}

if v, ok := rulesMap["name"]; ok {
healthCheckPolicyRule.Name = helper.String(v.(string))
}

healthCheckPolicy.Rules = append(healthCheckPolicy.Rules, &healthCheckPolicyRule)
}
}
request.HealthCheckPolicy = &healthCheckPolicy

request.HealthCheckPolicy = &healthCheckPolicy
request.ClusterId = helper.String(clusterId)
err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().ModifyHealthCheckPolicyWithContext(ctx, request)
if e != nil {
return tccommon.RetryError(e)
} else {
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
}

return nil
})

if err != nil {
log.Printf("[CRITAL]%s update kubernetes health check policy failed, reason:%+v", logId, err)
return err
@@ -276,40 +267,37 @@ func resourceTencentCloudKubernetesHealthCheckPolicyDelete(d *schema.ResourceDat
defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_health_check_policy.delete")()
defer tccommon.InconsistentCheck(d, meta)()

logId := tccommon.GetLogId(tccommon.ContextNil)
ctx := tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
var (
logId = tccommon.GetLogId(tccommon.ContextNil)
ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta)
request = tkev20220501.NewDeleteHealthCheckPolicyRequest()
)

idSplit := strings.Split(d.Id(), tccommon.FILED_SP)
if len(idSplit) != 2 {
return fmt.Errorf("id is broken,%s", d.Id())
}

clusterId := idSplit[0]
name := idSplit[1]

var (
request = tkev20220501.NewDeleteHealthCheckPolicyRequest()
response = tkev20220501.NewDeleteHealthCheckPolicyResponse()
)

request.ClusterId = helper.String(clusterId)

request.HealthCheckPolicyName = helper.String(name)

err := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError {
result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20220501Client().DeleteHealthCheckPolicyWithContext(ctx, request)
if e != nil {
return tccommon.RetryError(e)
} else {
log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString())
}
response = result

return nil
})

if err != nil {
log.Printf("[CRITAL]%s delete kubernetes health check policy failed, reason:%+v", logId, err)
return err
}

_ = response
return nil
}
@@ -1,28 +1,29 @@
Provides a resource to create a tke kubernetes_health_check_policy
Provides a resource to create a TKE kubernetes health check policy

Example Usage

```hcl
resource "tencentcloud_kubernetes_health_check_policy" "kubernetes_health_check_policy" {
cluster_id = "cls-xxxxx"
name = "example"
rules {
name = "OOMKilling"
auto_repair_enabled = true
enabled = true
}
rules {
name = "KubeletUnhealthy"
auto_repair_enabled = true
enabled = true
}
resource "tencentcloud_kubernetes_health_check_policy" "example" {
cluster_id = "cls-fdy7hm1q"
name = "tf-example"
rules {
name = "OOMKilling"
auto_repair_enabled = true
enabled = true
}

rules {
name = "KubeletUnhealthy"
auto_repair_enabled = true
enabled = true
}
}
```

Import

tke kubernetes_health_check_policy can be imported using the id, e.g.
TKE kubernetes health check policy can be imported using the clusterId#name, e.g.

```
terraform import tencentcloud_kubernetes_health_check_policy.kubernetes_health_check_policy cls-xxxxx#healthcheckpolicyname
terraform import tencentcloud_kubernetes_health_check_policy.example cls-fdy7hm1q#tf-example
```
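
As an alternative sketch (assuming Terraform 1.5+ `import` blocks are available in your workflow; not part of this change), the same resource could be imported declaratively, using the `clusterId#name` ID format shown above:

```hcl
# Hypothetical declarative import, assuming Terraform >= 1.5.
# The ID uses the same `clusterId#name` format as the CLI command above.
import {
  to = tencentcloud_kubernetes_health_check_policy.example
  id = "cls-fdy7hm1q#tf-example"
}
```

Running `terraform plan` and then `terraform apply` would adopt the existing policy into state without recreating it.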
12 changes: 7 additions & 5 deletions tencentcloud/services/tke/service_tencentcloud_tke.go
@@ -3606,26 +3606,28 @@ func (me *TkeService) DescribeKubernetesHealthCheckPolicyById(ctx context.Contex
}
}()

ratelimit.Check(request.GetAction())

var (
offset int64 = 0
limit int64 = 20
offset int64 = 0
limit int64 = 100
instances []*tke2.HealthCheckPolicy
)
var instances []*tke2.HealthCheckPolicy

for {
request.Offset = &offset
request.Limit = &limit
ratelimit.Check(request.GetAction())
response, err := me.client.UseTkeV20220501Client().DescribeHealthCheckPolicies(request)
if err != nil {
errRet = err
return
}

log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), response.ToJsonString())

if response == nil || len(response.Response.HealthCheckPolicies) < 1 {
break
}

instances = append(instances, response.Response.HealthCheckPolicies...)
if len(response.Response.HealthCheckPolicies) < int(limit) {
break