From c2e7c9499cdc109540853c79b1920995fe03f7f0 Mon Sep 17 00:00:00 2001 From: SevenEarth <391613297@qq.com> Date: Wed, 12 Nov 2025 11:23:31 +0800 Subject: [PATCH] add --- .changelog/3593.txt | 3 + tencentcloud/provider.go | 1 + tencentcloud/provider.md | 1 + .../resource_tc_kubernetes_cluster_release.go | 487 ++++++++++++++++++ .../resource_tc_kubernetes_cluster_release.md | 24 + ...urce_tc_kubernetes_cluster_release_test.go | 46 ++ .../services/tke/service_tencentcloud_tke.go | 111 ++++ .../kubernetes_cluster_release.html.markdown | 73 +++ website/tencentcloud.erb | 3 + 9 files changed, 749 insertions(+) create mode 100644 .changelog/3593.txt create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.go create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.md create mode 100644 tencentcloud/services/tke/resource_tc_kubernetes_cluster_release_test.go create mode 100644 website/docs/r/kubernetes_cluster_release.html.markdown diff --git a/.changelog/3593.txt b/.changelog/3593.txt new file mode 100644 index 0000000000..ce13101f88 --- /dev/null +++ b/.changelog/3593.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +tencentcloud_kubernetes_cluster_release +``` diff --git a/tencentcloud/provider.go b/tencentcloud/provider.go index 1ddec33217..6aae2ad727 100644 --- a/tencentcloud/provider.go +++ b/tencentcloud/provider.go @@ -1469,6 +1469,7 @@ func Provider() *schema.Provider { "tencentcloud_kubernetes_serverless_node_pool": tke.ResourceTencentCloudKubernetesServerlessNodePool(), "tencentcloud_kubernetes_encryption_protection": tke.ResourceTencentCloudKubernetesEncryptionProtection(), "tencentcloud_kubernetes_cluster_master_attachment": tke.ResourceTencentCloudKubernetesClusterMasterAttachment(), + "tencentcloud_kubernetes_cluster_release": tke.ResourceTencentCloudKubernetesClusterRelease(), "tencentcloud_mysql_backup_policy": cdb.ResourceTencentCloudMysqlBackupPolicy(), "tencentcloud_mysql_account": 
cdb.ResourceTencentCloudMysqlAccount(), "tencentcloud_mysql_account_privilege": cdb.ResourceTencentCloudMysqlAccountPrivilege(), diff --git a/tencentcloud/provider.md b/tencentcloud/provider.md index b9ddc2f3e4..0e8ddc8ba5 100644 --- a/tencentcloud/provider.md +++ b/tencentcloud/provider.md @@ -705,6 +705,7 @@ tencentcloud_kubernetes_native_node_pool tencentcloud_kubernetes_health_check_policy tencentcloud_kubernetes_log_config tencentcloud_kubernetes_cluster_master_attachment +tencentcloud_kubernetes_cluster_release TDMQ for Pulsar(tpulsar) Data Source diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.go b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.go new file mode 100644 index 0000000000..8fab9bdb74 --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.go @@ -0,0 +1,487 @@ +package tke + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + tkev20180525 "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tke/v20180525" + + tccommon "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/common" + "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/internal/helper" +) + +func ResourceTencentCloudKubernetesClusterRelease() *schema.Resource { + return &schema.Resource{ + Create: resourceTencentCloudKubernetesClusterReleaseCreate, + Read: resourceTencentCloudKubernetesClusterReleaseRead, + Update: resourceTencentCloudKubernetesClusterReleaseUpdate, + Delete: resourceTencentCloudKubernetesClusterReleaseDelete, + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(30 * time.Minute), + Update: schema.DefaultTimeout(30 * time.Minute), + }, + Schema: map[string]*schema.Schema{ + "cluster_id": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Cluster ID.", + }, + + 
"name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Application name, maximum 63 characters, can only contain lowercase letters, numbers, and the separator \"-\", and must start with a lowercase letter and end with a number or lowercase letter.", + }, + + "namespace": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: "Application namespace, obtained from the cluster details namespace.", + }, + + "chart": { + Type: schema.TypeString, + Required: true, + Description: "Chart name (obtained from the application market) or the download URL of the chart package when installing from a third-party repo, redirect-type chart URLs are not supported, must end with *.tgz.", + }, + + "values": { + Type: schema.TypeList, + Optional: true, + MaxItems: 1, + Description: "Custom parameters.", + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "raw_original": { + Type: schema.TypeString, + Required: true, + Description: "Custom parameter original value.", + }, + "values_type": { + Type: schema.TypeString, + Required: true, + Description: "Custom parameter value type.", + }, + }, + }, + }, + + "chart_from": { + Type: schema.TypeString, + Optional: true, + Description: "Chart source, range: tke-market or other, default value: tke-market.", + }, + + "chart_version": { + Type: schema.TypeString, + Optional: true, + Description: "Chart version.", + }, + + "chart_repo_url": { + Type: schema.TypeString, + Optional: true, + Description: "Chart repository URL address.", + }, + + "username": { + Type: schema.TypeString, + Optional: true, + Description: "Chart access username.", + }, + + "password": { + Type: schema.TypeString, + Optional: true, + Sensitive: true, + Description: "Chart access password.", + }, + + "chart_namespace": { + Type: schema.TypeString, + Optional: true, + Description: "Chart namespace, when ChartFrom is tke-market, ChartNamespace is not empty, value is the Namespace returned by the 
DescribeProducts interface.", + }, + + "cluster_type": { + Type: schema.TypeString, + Optional: true, + Description: "Cluster type, supports tke, eks, tkeedge, external (registered cluster).", + }, + + // computed + "cluster_release_id": { + Type: schema.TypeString, + Computed: true, + Description: "Cluster release ID.", + }, + + "release_status": { + Type: schema.TypeString, + Computed: true, + Description: "Cluster release status.", + }, + }, + } +} + +func resourceTencentCloudKubernetesClusterReleaseCreate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_release.create")() + defer tccommon.InconsistentCheck(d, meta)() + + var ( + logId = tccommon.GetLogId(tccommon.ContextNil) + ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + service = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + request = tkev20180525.NewCreateClusterReleaseRequest() + response = tkev20180525.NewCreateClusterReleaseResponse() + clusterId string + name string + namespace string + clusterReleaseId string + ) + + if v, ok := d.GetOk("cluster_id"); ok { + request.ClusterId = helper.String(v.(string)) + clusterId = v.(string) + } + + if v, ok := d.GetOk("name"); ok { + request.Name = helper.String(v.(string)) + name = v.(string) + } + + if v, ok := d.GetOk("namespace"); ok { + request.Namespace = helper.String(v.(string)) + namespace = v.(string) + } + + if v, ok := d.GetOk("chart"); ok { + request.Chart = helper.String(v.(string)) + } + + if valuesMap, ok := helper.InterfacesHeadMap(d, "values"); ok { + releaseValues := tkev20180525.ReleaseValues{} + if v, ok := valuesMap["raw_original"].(string); ok && v != "" { + releaseValues.RawOriginal = helper.String(v) + } + + if v, ok := valuesMap["values_type"].(string); ok && v != "" { + releaseValues.ValuesType = helper.String(v) + } + + request.Values = &releaseValues + } + + if v, ok := d.GetOk("chart_from"); ok { + 
request.ChartFrom = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_version"); ok { + request.ChartVersion = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_repo_url"); ok { + request.ChartRepoURL = helper.String(v.(string)) + } + + if v, ok := d.GetOk("username"); ok { + request.Username = helper.String(v.(string)) + } + + if v, ok := d.GetOk("password"); ok { + request.Password = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_namespace"); ok { + request.ChartNamespace = helper.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_type"); ok { + request.ClusterType = helper.String(v.(string)) + } + + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().CreateClusterReleaseWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + + if result == nil || result.Response == nil || result.Response.Release == nil { + return resource.NonRetryableError(fmt.Errorf("Create kubernetes cluster release failed, Response is nil.")) + } + + response = result + return nil + }) + + if reqErr != nil { + log.Printf("[CRITAL]%s create kubernetes cluster release failed, reason:%+v", logId, reqErr) + return reqErr + } + + if response.Response.Release.ID == nil { + return fmt.Errorf("ID is nil.") + } + + clusterReleaseId = *response.Response.Release.ID + _ = d.Set("cluster_release_id", clusterReleaseId) + d.SetId(strings.Join([]string{clusterId, namespace, name}, tccommon.FILED_SP)) + + // wait + reqErr = resource.Retry(d.Timeout(schema.TimeoutCreate), func() *resource.RetryError { + result, e := service.DescribeKubernetesClusterPendingReleaseById(ctx, clusterId, clusterReleaseId) + if e != nil { + return tccommon.RetryError(e) + } + + if result == 
nil || result.Status == nil { + return resource.NonRetryableError(fmt.Errorf("Describe kubernetes cluster release failed, Response is nil.")) + } + + if *result.Status == "deployed" || *result.Status == "failed" { + return nil + } + + return resource.RetryableError(fmt.Errorf("cluster release is still install...Status is %s", *result.Status)) + }) + + if reqErr != nil { + log.Printf("[CRITAL]%s create kubernetes cluster release failed, reason:%+v", logId, reqErr) + return reqErr + } + + return resourceTencentCloudKubernetesClusterReleaseRead(d, meta) +} + +func resourceTencentCloudKubernetesClusterReleaseRead(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_release.read")() + defer tccommon.InconsistentCheck(d, meta)() + + var ( + logId = tccommon.GetLogId(tccommon.ContextNil) + ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + service = TkeService{client: meta.(tccommon.ProviderMeta).GetAPIV3Conn()} + ) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + + clusterId := idSplit[0] + namespace := idSplit[1] + name := idSplit[2] + + respData, err := service.DescribeKubernetesClusterReleaseById(ctx, clusterId, namespace, name) + if err != nil { + return err + } + + if respData == nil { + log.Printf("[WARN]%s resource `tencentcloud_kubernetes_cluster_release` [%s] not found, please check if it has been deleted.\n", logId, d.Id()) + d.SetId("") + return nil + } + + _ = d.Set("cluster_id", clusterId) + _ = d.Set("name", name) + _ = d.Set("namespace", namespace) + + if respData.ChartFrom != nil { + _ = d.Set("chart_from", respData.ChartFrom) + } + + if respData.ChartVersion != nil { + _ = d.Set("chart_version", respData.ChartVersion) + } + + if respData.Status != nil { + _ = d.Set("release_status", respData.Status) + } + + return nil +} + +func 
resourceTencentCloudKubernetesClusterReleaseUpdate(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_release.update")() + defer tccommon.InconsistentCheck(d, meta)() + + var ( + logId = tccommon.GetLogId(tccommon.ContextNil) + ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + ) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + + clusterId := idSplit[0] + namespace := idSplit[1] + name := idSplit[2] + + needChange := false + mutableArgs := []string{"chart", "values", "chart_from", "chart_version", "chart_repo_url", "username", "password", "chart_namespace", "cluster_type"} + for _, v := range mutableArgs { + if d.HasChange(v) { + needChange = true + break + } + } + + if needChange { + request := tkev20180525.NewUpgradeClusterReleaseRequest() + if v, ok := d.GetOk("chart"); ok { + request.Chart = helper.String(v.(string)) + } + + if valuesMap, ok := helper.InterfacesHeadMap(d, "values"); ok { + releaseValues := tkev20180525.ReleaseValues{} + if v, ok := valuesMap["raw_original"].(string); ok && v != "" { + releaseValues.RawOriginal = helper.String(v) + } + + if v, ok := valuesMap["values_type"].(string); ok && v != "" { + releaseValues.ValuesType = helper.String(v) + } + + request.Values = &releaseValues + } + + if v, ok := d.GetOk("chart_from"); ok { + request.ChartFrom = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_version"); ok { + request.ChartVersion = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_repo_url"); ok { + request.ChartRepoURL = helper.String(v.(string)) + } + + if v, ok := d.GetOk("username"); ok { + request.Username = helper.String(v.(string)) + } + + if v, ok := d.GetOk("password"); ok { + request.Password = helper.String(v.(string)) + } + + if v, ok := d.GetOk("chart_namespace"); ok { + request.ChartNamespace = 
helper.String(v.(string)) + } + + if v, ok := d.GetOk("cluster_type"); ok { + request.ClusterType = helper.String(v.(string)) + } + + request.ClusterId = &clusterId + request.Namespace = &namespace + request.Name = &name + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().UpgradeClusterReleaseWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + + return nil + }) + + if reqErr != nil { + log.Printf("[CRITAL]%s update kubernetes cluster release failed, reason:%+v", logId, reqErr) + return reqErr + } + + // wait + waitReq := tkev20180525.NewDescribeClusterReleasesRequest() + waitReq.ClusterId = &clusterId + waitReq.Namespace = &namespace + waitReq.ReleaseName = &name + reqErr = resource.Retry(d.Timeout(schema.TimeoutUpdate), func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().DescribeClusterReleasesWithContext(ctx, waitReq) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, waitReq.GetAction(), waitReq.ToJsonString(), result.ToJsonString()) + } + + if result == nil || result.Response == nil || result.Response.ReleaseSet == nil || len(result.Response.ReleaseSet) != 1 { + return resource.NonRetryableError(fmt.Errorf("Describe kubernetes cluster release failed, Response is nil.")) + } + + release := result.Response.ReleaseSet[0] + if release.Status == nil { + return resource.NonRetryableError(fmt.Errorf("Status is nil.")) + } + + if *release.Status == "deployed" || *release.Status == "failed" { + return nil + } + + return resource.RetryableError(fmt.Errorf("upgrade release is still running...status is %s", 
*release.Status)) + }) + + if reqErr != nil { + log.Printf("[CRITAL]%s upgrade kubernetes cluster release failed, reason:%+v", logId, reqErr) + return reqErr + } + } + + return resourceTencentCloudKubernetesClusterReleaseRead(d, meta) +} + +func resourceTencentCloudKubernetesClusterReleaseDelete(d *schema.ResourceData, meta interface{}) error { + defer tccommon.LogElapsed("resource.tencentcloud_kubernetes_cluster_release.delete")() + defer tccommon.InconsistentCheck(d, meta)() + + var ( + logId = tccommon.GetLogId(tccommon.ContextNil) + ctx = tccommon.NewResourceLifeCycleHandleFuncContext(context.Background(), logId, d, meta) + request = tkev20180525.NewUninstallClusterReleaseRequest() + ) + + idSplit := strings.Split(d.Id(), tccommon.FILED_SP) + if len(idSplit) != 3 { + return fmt.Errorf("id is broken,%s", d.Id()) + } + + clusterId := idSplit[0] + namespace := idSplit[1] + name := idSplit[2] + + request.ClusterId = &clusterId + request.Namespace = &namespace + request.Name = &name + reqErr := resource.Retry(tccommon.WriteRetryTimeout, func() *resource.RetryError { + result, e := meta.(tccommon.ProviderMeta).GetAPIV3Conn().UseTkeV20180525Client().UninstallClusterReleaseWithContext(ctx, request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + + return nil + }) + + if reqErr != nil { + log.Printf("[CRITAL]%s delete kubernetes cluster release failed, reason:%+v", logId, reqErr) + return reqErr + } + + // For now no need to perform asynchronous task progress queries + return nil +} diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.md b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.md new file mode 100644 index 0000000000..8c89334d4d --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release.md @@ -0,0 +1,24 @@ +Provides a 
resource to create a TKE kubernetes cluster release + +~> **NOTE:** Cluster cluster release version currently does not support downgrading. + +Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_release" "example" { + cluster_id = "cls-fdy7hm1q" + name = "tf-example" + namespace = "default" + chart = "nginx-ingress" + chart_from = "tke-market" + chart_version = "4.9.0" + chart_namespace = "opensource-stable" + cluster_type = "tke" + values { + raw_original = <<-EOF +## nginx configuration\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md\n##\n\n## Overrides for generated resource names\n# See templates/_helpers.tpl\n# nameOverride:\n# fullnameOverride:\n\n# -- Override the deployment namespace; defaults to .Release.Namespace\nnamespaceOverride: \"\"\n## Labels to apply to all resources\n##\ncommonLabels: {}\n# scmhash: abc123\n# myLabel: aakkmd\n\ncontroller:\n name: controller\n enableAnnotationValidations: false\n image:\n ## Keep false as default for now!\n chroot: false\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-controller\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"v1.9.5\"\n digest: \"\"\n digestChroot: \"\"\n pullPolicy: IfNotPresent\n runAsNonRoot: true\n # www-data -> uid 101\n runAsUser: 101\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n readOnlyRootFilesystem: false\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n # -- Configures the controller container name\n containerName: controller\n # -- Configures the ports that the nginx-controller listens on\n containerPort:\n http: 80\n https: 443\n # -- Will add custom configuration options to Nginx 
https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/\n config: {}\n # -- Annotations to be added to the controller config configuration configmap.\n configAnnotations: {}\n # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers\n proxySetHeaders: {}\n # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers\n addHeaders: {}\n # -- Optionally customize the pod dnsConfig.\n dnsConfig: {}\n # -- Optionally customize the pod hostAliases.\n hostAliases: []\n # - ip: 127.0.0.1\n # hostnames:\n # - foo.local\n # - bar.local\n # - ip: 10.1.2.3\n # hostnames:\n # - foo.remote\n # - bar.remote\n # -- Optionally customize the pod hostname.\n hostname: {}\n # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.\n # By default, while using host network, name resolution uses the host's DNS. 
If you wish nginx-controller\n # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.\n dnsPolicy: ClusterFirst\n # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network\n # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply\n reportNodeInternalIp: false\n # -- Process Ingress objects without ingressClass annotation/ingressClassName field\n # Overrides value for --watch-ingress-without-class flag of the controller binary\n # Defaults to false\n watchIngressWithoutClass: false\n # -- Process IngressClass per name (additionally as per spec.controller).\n ingressClassByName: false\n # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode=\"auto\"\n # Defaults to false\n enableTopologyAwareRouting: false\n # -- This configuration defines if Ingress Controller should allow users to set\n # their own *-snippet annotations, otherwise this is forbidden / dropped\n # when users add those annotations.\n # Global snippets in ConfigMap are still respected\n allowSnippetAnnotations: false\n # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),\n # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920\n # is merged\n hostNetwork: false\n ## Use host ports 80 and 443\n ## Disabled by default\n hostPort:\n # -- Enable 'hostPort' or not\n enabled: false\n ports:\n # -- 'hostPort' http port\n http: 80\n # -- 'hostPort' https port\n https: 443\n # NetworkPolicy for controller component.\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader'\n electionID: \"\"\n ## This section refers to the creation of the IngressClass resource\n ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19\n ingressClassResource:\n # -- Name of the ingressClass\n name: nginx\n # -- Is this ingressClass enabled or not\n enabled: true\n # -- Is this the default ingressClass for the cluster\n default: false\n # -- Controller-value of the controller that is processing this ingressClass\n controllerValue: \"k8s.io/ingress-nginx\"\n # -- Parameters is a link to a custom resource containing additional\n # configuration for the controller. 
This is optional if the controller\n # does not require extra parameters.\n parameters: {}\n # -- For backwards compatibility with ingress.class annotation, use ingressClass.\n # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation\n ingressClass: nginx\n # -- Labels to add to the pod container metadata\n podLabels: {}\n # key: value\n\n # -- Security context for controller pods\n podSecurityContext: {}\n # -- sysctls for controller pods\n ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/\n sysctls: {}\n # sysctls:\n # \"net.core.somaxconn\": \"8192\"\n # -- Security context for controller containers\n containerSecurityContext: {}\n # -- Allows customization of the source of the IP address or FQDN to report\n # in the ingress status field. By default, it reads the information provided\n # by the service. If disable, the status field reports the IP address of the\n # node or nodes where an ingress controller pod is running.\n publishService:\n # -- Enable 'publishService' or not\n enabled: true\n # -- Allows overriding of the publish service to bind to\n # Must be /\n pathOverride: \"\"\n # Limit the scope of the controller to a specific namespace\n scope:\n # -- Enable 'scope' or not\n enabled: false\n # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)\n namespace: \"\"\n # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels\n # only match with namespaceSelector. Format like foo=bar. 
Defaults to empty, means watching all namespaces.\n namespaceSelector: \"\"\n # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n tcp:\n # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n # -- Annotations to be added to the tcp config configmap\n annotations: {}\n udp:\n # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n # -- Annotations to be added to the udp config configmap\n annotations: {}\n # -- Maxmind license key to download GeoLite2 Databases.\n ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases\n maxmindLicenseKey: \"\"\n # -- Additional command line arguments to pass to Ingress-Nginx Controller\n # E.g. to specify the default SSL certificate you can use\n extraArgs: {}\n ## extraArgs:\n ## default-ssl-certificate: \"/\"\n ## time-buckets: \"0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10\"\n ## length-buckets: \"10,20,30,40,50,60,70,80,90,100\"\n ## size-buckets: \"10,100,1000,10000,100000,1e+06,1e+07\"\n\n # -- Additional environment variables to set\n # extraEnvs:\n # - name: FOO\n # valueFrom:\n # secretKeyRef:\n # key: FOO\n # name: secret-resource\n extraEnvs: \n - name: TZ\n value: Asia/Shanghai\n\n # -- Use a `DaemonSet` or `Deployment`\n kind: Deployment\n # -- Annotations to be added to the controller Deployment or DaemonSet\n ##\n annotations: {}\n # keel.sh/pollSchedule: \"@every 60m\"\n\n # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels\n ##\n labels: {}\n # keel.sh/policy: patch\n # keel.sh/trigger: poll\n\n # -- The update strategy to apply to the Deployment or DaemonSet\n ##\n updateStrategy: {}\n # rollingUpdate:\n # maxUnavailable: 1\n # type: RollingUpdate\n\n # -- `minReadySeconds` to avoid killing pods 
before we are ready\n ##\n minReadySeconds: 0\n # -- Node tolerations for server scheduling to nodes with taints\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n ##\n tolerations: []\n # - key: \"key\"\n # operator: \"Equal|Exists\"\n # value: \"value\"\n # effect: \"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)\"\n\n # -- Affinity and anti-affinity rules for server scheduling to nodes\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n ##\n affinity: {}\n # # An example of preferred pod anti-affinity, weight is in the range 1-100\n # podAntiAffinity:\n # preferredDuringSchedulingIgnoredDuringExecution:\n # - weight: 100\n # podAffinityTerm:\n # labelSelector:\n # matchExpressions:\n # - key: app.kubernetes.io/name\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/instance\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/component\n # operator: In\n # values:\n # - controller\n # topologyKey: kubernetes.io/hostname\n\n # # An example of required pod anti-affinity\n # podAntiAffinity:\n # requiredDuringSchedulingIgnoredDuringExecution:\n # - labelSelector:\n # matchExpressions:\n # - key: app.kubernetes.io/name\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/instance\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/component\n # operator: In\n # values:\n # - controller\n # topologyKey: \"kubernetes.io/hostname\"\n\n # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.\n ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/\n ##\n topologySpreadConstraints: []\n # - labelSelector:\n # matchLabels:\n # app.kubernetes.io/name: '{{ include \"ingress-nginx.name\" . 
}}'\n # app.kubernetes.io/instance: '{{ .Release.Name }}'\n # app.kubernetes.io/component: controller\n # topologyKey: topology.kubernetes.io/zone\n # maxSkew: 1\n # whenUnsatisfiable: ScheduleAnyway\n # - labelSelector:\n # matchLabels:\n # app.kubernetes.io/name: '{{ include \"ingress-nginx.name\" . }}'\n # app.kubernetes.io/instance: '{{ .Release.Name }}'\n # app.kubernetes.io/component: controller\n # topologyKey: kubernetes.io/hostname\n # maxSkew: 1\n # whenUnsatisfiable: ScheduleAnyway\n\n # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready\n ## wait up to five minutes for the drain of connections\n ##\n terminationGracePeriodSeconds: 300\n # -- Node labels for controller pod assignment\n ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/\n ##\n nodeSelector:\n kubernetes.io/os: linux\n ## Liveness and readiness probe values\n ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes\n ##\n ## startupProbe:\n ## httpGet:\n ## # should match container.healthCheckPath\n ## path: \"/healthz\"\n ## port: 10254\n ## scheme: HTTP\n ## initialDelaySeconds: 5\n ## periodSeconds: 5\n ## timeoutSeconds: 2\n ## successThreshold: 1\n ## failureThreshold: 5\n livenessProbe:\n httpGet:\n # should match container.healthCheckPath\n path: \"/healthz\"\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n timeoutSeconds: 1\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n # should match container.healthCheckPath\n path: \"/healthz\"\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n timeoutSeconds: 1\n successThreshold: 1\n failureThreshold: 3\n # -- Path of the health check endpoint. 
All requests received on the port defined by\n # the healthz-port parameter are forwarded internally to this path.\n healthCheckPath: \"/healthz\"\n # -- Address to bind the health check endpoint.\n # It is better to set this option to the internal node address\n # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode.\n healthCheckHost: \"\"\n # -- Annotations to be added to controller pods\n ##\n podAnnotations: {}\n replicaCount: 1\n # -- Minimum available pods set in PodDisruptionBudget.\n # Define either 'minAvailable' or 'maxUnavailable', never both.\n minAvailable: 1\n # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored.\n # maxUnavailable: 1\n\n ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes\n ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903\n ## Ideally, there should be no limits.\n ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/\n resources:\n ## limits:\n ## cpu: 100m\n ## memory: 90Mi\n requests:\n cpu: 100m\n memory: 90Mi\n # Mutually exclusive with keda autoscaling\n autoscaling:\n enabled: false\n annotations: {}\n minReplicas: 1\n maxReplicas: 11\n targetCPUUtilizationPercentage: 50\n targetMemoryUtilizationPercentage: 50\n behavior: {}\n # scaleDown:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 1\n # periodSeconds: 180\n # scaleUp:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 2\n # periodSeconds: 60\n autoscalingTemplate: []\n # Custom or additional autoscaling metrics\n # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics\n # - type: Pods\n # pods:\n # metric:\n # name: k8s_pod_rate_cpu_core_used_limit\n # target:\n # averageValue: \"80\"\n # type: AverageValue\n\n # Mutually exclusive with hpa autoscaling\n keda:\n apiVersion: \"keda.sh/v1alpha1\"\n ## 
apiVersion changes with keda 1.x vs 2.x\n ## 2.x = keda.sh/v1alpha1\n ## 1.x = keda.k8s.io/v1alpha1\n enabled: false\n minReplicas: 1\n maxReplicas: 11\n pollingInterval: 30\n cooldownPeriod: 300\n # fallback:\n # failureThreshold: 3\n # replicas: 11\n restoreToOriginalReplicaCount: false\n scaledObject:\n annotations: {}\n # Custom annotations for ScaledObject resource\n # annotations:\n # key: value\n triggers: []\n # - type: prometheus\n # metadata:\n # serverAddress: http://:9090\n # metricName: http_requests_total\n # threshold: '100'\n # query: sum(rate(http_requests_total{deployment=\"my-deployment\"}[2m]))\n\n behavior: {}\n # scaleDown:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 1\n # periodSeconds: 180\n # scaleUp:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 2\n # periodSeconds: 60\n # -- Enable mimalloc as a drop-in replacement for malloc.\n ## ref: https://github.com/microsoft/mimalloc\n ##\n enableMimalloc: true\n ## Override NGINX template\n customTemplate:\n configMapName: \"\"\n configMapKey: \"\"\n service:\n # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service.\n enabled: true\n external:\n # -- Enable the external controller service or not. Useful for internal-only deployments.\n enabled: true\n # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service.\n annotations: {}\n # -- Labels to be added to both controller services.\n labels: {}\n # -- Type of the external controller service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types\n type: LoadBalancer\n # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services.\n # This value is immutable. 
Set once, it can not be changed without deleting and re-creating the service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address\n clusterIP: \"\"\n # -- List of node IP addresses at which the external controller service is available.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n externalIPs: []\n # -- Deprecated: Pre-defined IP address of the external controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n loadBalancerIP: \"\"\n # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default.\n loadBalancerSourceRanges: []\n # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class\n loadBalancerClass: \"\"\n # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation\n # allocateLoadBalancerNodePorts: true\n\n # -- External traffic policy of the external controller service. Set to \"Local\" to preserve source IP on providers supporting it.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n externalTrafficPolicy: \"\"\n # -- Session affinity of the external controller service. Must be either \"None\" or \"ClientIP\" if set. 
Defaults to \"None\".\n # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity\n sessionAffinity: \"\"\n # -- Specifies the health check node port (numeric port number) for the external controller service.\n # If not specified, the service controller allocates a port from your cluster's node port range.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n # healthCheckNodePort: 0\n\n # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack.\n # Fields `ipFamilies` and `clusterIP` depend on the value of this field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilyPolicy: SingleStack\n # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilies:\n - IPv4\n # -- Enable the HTTP listener on both controller services or not.\n enableHttp: true\n # -- Enable the HTTPS listener on both controller services or not.\n enableHttps: true\n ports:\n # -- Port the external HTTP listener is published with.\n http: 80\n # -- Port the external HTTPS listener is published with.\n https: 443\n targetPorts:\n # -- Port of the ingress controller the external HTTP listener is mapped to.\n http: http\n # -- Port of the ingress controller the external HTTPS listener is mapped to.\n https: https\n # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. 
Supersedes provider-specific annotations for declaring the backend protocol.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol\n appProtocol: true\n nodePorts:\n # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range.\n http: \"\"\n # -- Node port allocated for the external HTTPS listener. If left empty, the service controller allocates one from the configured node port range.\n https: \"\"\n # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # tcp:\n # 8080: 30080\n tcp: {}\n # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # udp:\n # 53: 30053\n udp: {}\n internal:\n # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this.\n enabled: false\n # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer\n annotations: {}\n # -- Type of the internal controller service.\n # Defaults to the value of `controller.service.type`.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types\n type: \"\"\n # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services.\n # This value is immutable. 
Set once, it can not be changed without deleting and re-creating the service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address\n clusterIP: \"\"\n # -- List of node IP addresses at which the internal controller service is available.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n externalIPs: []\n # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n loadBalancerIP: \"\"\n # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default.\n loadBalancerSourceRanges: []\n # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class\n loadBalancerClass: \"\"\n # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation\n # allocateLoadBalancerNodePorts: true\n\n # -- External traffic policy of the internal controller service. Set to \"Local\" to preserve source IP on providers supporting it.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n externalTrafficPolicy: \"\"\n # -- Session affinity of the internal controller service. Must be either \"None\" or \"ClientIP\" if set. 
Defaults to \"None\".\n # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity\n sessionAffinity: \"\"\n # -- Specifies the health check node port (numeric port number) for the internal controller service.\n # If not specified, the service controller allocates a port from your cluster's node port range.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n # healthCheckNodePort: 0\n\n # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack.\n # Fields `ipFamilies` and `clusterIP` depend on the value of this field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilyPolicy: SingleStack\n # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilies:\n - IPv4\n ports: {}\n # -- Port the internal HTTP listener is published with.\n # Defaults to the value of `controller.service.ports.http`.\n # http: 80\n # -- Port the internal HTTPS listener is published with.\n # Defaults to the value of `controller.service.ports.https`.\n # https: 443\n\n targetPorts: {}\n # -- Port of the ingress controller the internal HTTP listener is mapped to.\n # Defaults to the value of `controller.service.targetPorts.http`.\n # http: http\n # -- Port of the ingress controller the internal HTTPS listener is mapped to.\n # Defaults to the value of `controller.service.targetPorts.https`.\n # https: https\n\n # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. 
Supersedes provider-specific annotations for declaring the backend protocol.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol\n appProtocol: true\n nodePorts:\n # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range.\n http: \"\"\n # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range.\n https: \"\"\n # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # tcp:\n # 8080: 30080\n tcp: {}\n # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # udp:\n # 53: 30053\n udp: {}\n # shareProcessNamespace enables process namespace sharing within the pod.\n # This can be used for example to signal log rotation using `kill -USR1` from a sidecar.\n shareProcessNamespace: false\n # -- Additional containers to be added to the controller pod.\n # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.\n extraContainers: []\n # - name: my-sidecar\n # image: nginx:latest\n\n # -- Additional volumeMounts to the controller main container.\n extraVolumeMounts: []\n # - name: copy-portal-skins\n # mountPath: /var/lib/lemonldap-ng/portal/skins\n\n # -- Additional volumes to the controller pod.\n extraVolumes: []\n # - name: copy-portal-skins\n # emptyDir: {}\n\n # -- Containers, which are run before the app containers are started.\n extraInitContainers: \n - command:\n - sh\n - -c\n - |-\n sysctl -w net.core.somaxconn=65535\n sysctl -w net.ipv4.ip_local_reserved_ports=9100\n sysctl -w net.ipv4.ip_local_port_range=\"1024 61999\"\n sysctl -w net.ipv4.tcp_tw_reuse=1\n sysctl -w fs.file-max=1048576\n image: 
ccr.ccs.tencentyun.com/tkeimages/busybox:latest\n imagePullPolicy: Always\n name: setsysctl\n securityContext:\n privileged: true\n\n # -- Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module\n extraModules: []\n # - name: mytestmodule\n # image:\n # registry: registry.k8s.io\n # image: ingress-nginx/mytestmodule\n # ## for backwards compatibility consider setting the full image url via the repository value below\n # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n # ## repository:\n # tag: \"v1.0.0\"\n # digest: \"\"\n # distroless: false\n # containerSecurityContext:\n # runAsNonRoot: true\n # runAsUser: \n # allowPrivilegeEscalation: false\n # seccompProfile:\n # type: RuntimeDefault\n # capabilities:\n # drop:\n # - ALL\n # readOnlyRootFilesystem: true\n # resources: {}\n #\n # The image must contain a `/usr/local/bin/init_module.sh` executable, which\n # will be executed as initContainers, to move its config files within the\n # mounted volume.\n\n opentelemetry:\n enabled: false\n name: opentelemetry\n image:\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-opentelemetry\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"v20230721-3e2062ee5\"\n digest: \"\"\n distroless: true\n containerSecurityContext:\n runAsNonRoot: true\n # -- The image's default user, inherited from its base image `cgr.dev/chainguard/static`.\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n admissionWebhooks:\n name: admission\n annotations: {}\n # ignore-check.kube-linter.io/no-read-only-rootfs: \"This 
deployment needs write access to root filesystem\".\n\n ## Additional annotations to the admission webhooks.\n ## These annotations will be added to the ValidatingWebhookConfiguration and\n ## the Jobs Spec of the admission webhooks.\n enabled: true\n # -- Additional environment variables to set\n extraEnvs: []\n # extraEnvs:\n # - name: FOO\n # valueFrom:\n # secretKeyRef:\n # key: FOO\n # name: secret-resource\n # -- Admission Webhook failure policy to use\n failurePolicy: Fail\n # timeoutSeconds: 10\n port: 8443\n certificate: \"/usr/local/certificates/cert\"\n key: \"/usr/local/certificates/key\"\n namespaceSelector: {}\n objectSelector: {}\n # -- Labels to be added to admission webhooks\n labels: {}\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n service:\n annotations: {}\n # clusterIP: \"\"\n externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 443\n type: ClusterIP\n createSecretJob:\n name: create\n # -- Security context for secret creation containers\n securityContext:\n runAsNonRoot: true\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n # limits:\n # cpu: 10m\n # memory: 20Mi\n # requests:\n # cpu: 10m\n # memory: 20Mi\n patchWebhookJob:\n name: patch\n # -- Security context for webhook patch containers\n securityContext:\n runAsNonRoot: true\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n patch:\n enabled: true\n image:\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-webhook-certgen\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n 
tag: v20231011-8b53cabe0\n digest: \"\"\n pullPolicy: IfNotPresent\n # -- Provide a priority class name to the webhook patching job\n ##\n priorityClassName: \"\"\n podAnnotations: {}\n # NetworkPolicy for webhook patch\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n nodeSelector:\n kubernetes.io/os: linux\n tolerations: []\n # -- Labels to be added to patch job resources\n labels: {}\n # -- Security context for secret creation & webhook patch pods\n securityContext: {}\n # Use certmanager to generate webhook certs\n certManager:\n enabled: false\n # self-signed root certificate\n rootCert:\n # default to be 5y\n duration: \"\"\n admissionCert:\n # default to be 1y\n duration: \"\"\n # issuerRef:\n # name: \"issuer\"\n # kind: \"ClusterIssuer\"\n metrics:\n port: 10254\n portName: metrics\n # if this port is changed, change healthz-port: in extraArgs: accordingly\n enabled: false\n service:\n annotations: {}\n # prometheus.io/scrape: \"true\"\n # prometheus.io/port: \"10254\"\n # -- Labels to be added to the metrics service resource\n labels: {}\n # clusterIP: \"\"\n\n # -- List of IP addresses at which the stats-exporter service is available\n ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n ##\n externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 10254\n type: ClusterIP\n # externalTrafficPolicy: \"\"\n # nodePort: \"\"\n serviceMonitor:\n enabled: false\n additionalLabels: {}\n annotations: {}\n ## The label to use to retrieve the job name from.\n ## jobLabel: \"app.kubernetes.io/name\"\n namespace: \"\"\n namespaceSelector: {}\n ## Default: scrape .Release.Namespace or namespaceOverride only\n ## To scrape all, use the following:\n ## namespaceSelector:\n ## any: true\n scrapeInterval: 30s\n # honorLabels: true\n targetLabels: []\n relabelings: []\n metricRelabelings: []\n prometheusRule:\n enabled: false\n additionalLabels: {}\n # namespace: \"\"\n rules: []\n # 
# These are just examples rules, please adapt them to your needs\n # - alert: NGINXConfigFailed\n # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0\n # for: 1s\n # labels:\n # severity: critical\n # annotations:\n # description: bad ingress config - nginx config test failed\n # summary: uninstall the latest ingress changes to allow config reloads to resume\n # # By default a fake self-signed certificate is generated as default and\n # # it is fine if it expires. If `--default-ssl-certificate` flag is used\n # # and a valid certificate passed please do not filter for `host` label!\n # # (i.e. delete `{host!=\"_\"}` so also the default SSL certificate is\n # # checked for expiration)\n # - alert: NGINXCertificateExpiry\n # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!=\"_\"}) by (host) - time()) < 604800\n # for: 1s\n # labels:\n # severity: critical\n # annotations:\n # description: ssl certificate(s) will expire in less then a week\n # summary: renew expiring certificates to avoid downtime\n # - alert: NGINXTooMany500s\n # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~\"5.+\"} ) / sum(nginx_ingress_controller_requests) ) > 5\n # for: 1m\n # labels:\n # severity: warning\n # annotations:\n # description: Too many 5XXs\n # summary: More than 5% of all requests returned 5XX, this requires your attention\n # - alert: NGINXTooMany400s\n # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~\"4.+\"} ) / sum(nginx_ingress_controller_requests) ) > 5\n # for: 1m\n # labels:\n # severity: warning\n # annotations:\n # description: Too many 4XXs\n # summary: More than 5% of all requests returned 4XX, this requires your attention\n # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:\n # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds\n # to 300, allowing the draining of connections up to five minutes.\n # If the active 
connections end before that, the pod will terminate gracefully at that time.\n # To effectively take advantage of this feature, the Configmap feature\n # worker-shutdown-timeout new value is 240s instead of 10s.\n ##\n lifecycle:\n preStop:\n exec:\n command:\n - /wait-shutdown\n priorityClassName: \"\"\n# -- Rollback limit\n##\nrevisionHistoryLimit: 10\n## Default 404 backend\n##\ndefaultBackend:\n ##\n enabled: false\n name: defaultbackend\n image:\n registry: ccr.ccs.tencentyun.com\n image: \ttke-market/ingress-nginx-defaultbackend-amd64\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"1.5\"\n pullPolicy: IfNotPresent\n runAsNonRoot: true\n # nobody user -> uid 65534\n runAsUser: 65534\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n readOnlyRootFilesystem: true\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n extraArgs: {}\n serviceAccount:\n create: true\n name: \"\"\n automountServiceAccountToken: true\n # -- Additional environment variables to set for defaultBackend pods\n extraEnvs: []\n port: 8080\n ## Readiness and liveness probes for default backend\n ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/\n ##\n livenessProbe:\n failureThreshold: 3\n initialDelaySeconds: 30\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 5\n readinessProbe:\n failureThreshold: 6\n initialDelaySeconds: 0\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 5\n # -- The update strategy to apply to the Deployment or DaemonSet\n ##\n updateStrategy: {}\n # rollingUpdate:\n # maxUnavailable: 1\n # type: RollingUpdate\n\n # -- `minReadySeconds` to avoid killing pods before we are ready\n ##\n minReadySeconds: 0\n # -- Node tolerations for server scheduling to 
nodes with taints\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n ##\n tolerations: []\n # - key: \"key\"\n # operator: \"Equal|Exists\"\n # value: \"value\"\n # effect: \"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)\"\n\n affinity: {}\n # -- Security context for default backend pods\n podSecurityContext: {}\n # -- Security context for default backend containers\n containerSecurityContext: {}\n # -- Labels to add to the pod container metadata\n podLabels: {}\n # key: value\n\n # -- Node labels for default backend pod assignment\n ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/\n ##\n nodeSelector:\n kubernetes.io/os: linux\n # -- Annotations to be added to default backend pods\n ##\n podAnnotations: {}\n replicaCount: 1\n minAvailable: 1\n resources: {}\n # limits:\n # cpu: 10m\n # memory: 20Mi\n # requests:\n # cpu: 10m\n # memory: 20Mi\n\n extraVolumeMounts: []\n ## Additional volumeMounts to the default backend container.\n # - name: copy-portal-skins\n # mountPath: /var/lib/lemonldap-ng/portal/skins\n\n extraVolumes: []\n ## Additional volumes to the default backend pod.\n # - name: copy-portal-skins\n # emptyDir: {}\n\n extraConfigMaps: []\n ## Additional configmaps to the default backend pod.\n # - name: my-extra-configmap-1\n # labels:\n # type: config-1\n # data:\n # extra_file_1.html: |\n # \n # - name: my-extra-configmap-2\n # labels:\n # type: config-2\n # data:\n # extra_file_2.html: |\n # \n\n autoscaling:\n annotations: {}\n enabled: false\n minReplicas: 1\n maxReplicas: 2\n targetCPUUtilizationPercentage: 50\n targetMemoryUtilizationPercentage: 50\n # NetworkPolicy for default backend component.\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n service:\n annotations: {}\n # clusterIP: \"\"\n\n # -- List of IP addresses at which the default backend service is available\n ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n ##\n 
externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 80\n type: ClusterIP\n priorityClassName: \"\"\n # -- Labels to be added to the default backend resources\n labels: {}\n## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266\nrbac:\n create: true\n scope: false\n## If true, create & use Pod Security Policy resources\n## https://kubernetes.io/docs/concepts/policy/pod-security-policy/\npodSecurityPolicy:\n enabled: false\nserviceAccount:\n create: true\n name: \"\"\n automountServiceAccountToken: true\n # -- Annotations for the controller service account\n annotations: {}\n# -- Optional array of imagePullSecrets containing private registry credentials\n## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\nimagePullSecrets: []\n# - name: secretName\n\n# -- TCP service key-value pairs\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md\n##\ntcp: {}\n# 8080: \"default/example-tcp-svc:9000\"\n\n# -- UDP service key-value pairs\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md\n##\nudp: {}\n# 53: \"kube-system/kube-dns:53\"\n\n# -- Prefix for TCP and UDP ports names in ingress controller service\n## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration\nportNamePrefix: \"\"\n# -- (string) A base64-encoded Diffie-Hellman parameter.\n# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`\n## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param\ndhParam: \"\"\n +EOF + values_type = "yaml" + } +} +``` diff --git a/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release_test.go 
b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release_test.go new file mode 100644 index 0000000000..a2470bf95f --- /dev/null +++ b/tencentcloud/services/tke/resource_tc_kubernetes_cluster_release_test.go @@ -0,0 +1,46 @@ +package tke_test + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + tcacctest "github.com/tencentcloudstack/terraform-provider-tencentcloud/tencentcloud/acctest" +) + +func TestAccTencentCloudKubernetesClusterReleaseResource_basic(t *testing.T) { + t.Parallel() + resource.Test(t, resource.TestCase{ + PreCheck: func() { + tcacctest.AccPreCheck(t) + }, + Providers: tcacctest.AccProviders, + Steps: []resource.TestStep{ + { + Config: testAccKubernetesClusterRelease, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttrSet("tencentcloud_kubernetes_cluster_release.example", "id"), + ), + }, + }, + }) +} + +const testAccKubernetesClusterRelease = ` +resource "tencentcloud_kubernetes_cluster_release" "example" { + cluster_id = "cls-fdy7hm1q" + name = "tf-example" + namespace = "default" + chart = "nginx-ingress" + chart_from = "tke-market" + chart_version = "4.9.0" + chart_namespace = "opensource-stable" + cluster_type = "tke" + values { + raw_original = <<-EOF +## nginx configuration\n##......ndhParam: \"\"\n +EOF + values_type = "yaml" + } +} +` diff --git a/tencentcloud/services/tke/service_tencentcloud_tke.go b/tencentcloud/services/tke/service_tencentcloud_tke.go index bec39b856b..f84d6eaf23 100644 --- a/tencentcloud/services/tke/service_tencentcloud_tke.go +++ b/tencentcloud/services/tke/service_tencentcloud_tke.go @@ -3782,3 +3782,114 @@ func (me *TkeService) DescribeKubernetesClusterMasterAttachmentByIds(ctx context ret = response.Response return } + +func (me *TkeService) DescribeKubernetesClusterPendingReleaseById(ctx context.Context, clusterId, clusterReleaseId string) (ret *tke.PendingRelease, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := 
tke.NewDescribeClusterPendingReleasesRequest() + response := tke.NewDescribeClusterPendingReleasesResponse() + request.ClusterId = &clusterId + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + var ( + offset int64 = 0 + limit int64 = 100 + instances []*tke.PendingRelease + ) + + for { + request.Offset = &offset + request.Limit = &limit + err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + ratelimit.Check(request.GetAction()) + result, e := me.client.UseTkeV20180525Client().DescribeClusterPendingReleases(request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + + if result == nil || result.Response == nil { + return resource.NonRetryableError(fmt.Errorf("Describe kubernetes pending releases failed, Response is nil.")) + } + + response = result + return nil + }) + + if err != nil { + errRet = err + return + } + + if len(response.Response.ReleaseSet) < 1 { + break + } + + instances = append(instances, response.Response.ReleaseSet...) 
+ if len(response.Response.ReleaseSet) < int(limit) { + break + } + + offset += limit + } + + if len(instances) < 1 { + return + } + + for _, item := range instances { + if item.ID != nil && *item.ID == clusterReleaseId { + ret = item + return + } + } + + return +} + +func (me *TkeService) DescribeKubernetesClusterReleaseById(ctx context.Context, clusterId, namespace, name string) (ret *tke.ReleaseDetails, errRet error) { + logId := tccommon.GetLogId(ctx) + + request := tke.NewDescribeClusterReleaseDetailsRequest() + response := tke.NewDescribeClusterReleaseDetailsResponse() + request.ClusterId = &clusterId + request.Namespace = &namespace + request.Name = &name + + defer func() { + if errRet != nil { + log.Printf("[CRITAL]%s api[%s] fail, request body [%s], reason[%s]\n", logId, request.GetAction(), request.ToJsonString(), errRet.Error()) + } + }() + + err := resource.Retry(tccommon.ReadRetryTimeout, func() *resource.RetryError { + ratelimit.Check(request.GetAction()) + result, e := me.client.UseTkeV20180525Client().DescribeClusterReleaseDetails(request) + if e != nil { + return tccommon.RetryError(e) + } else { + log.Printf("[DEBUG]%s api[%s] success, request body [%s], response body [%s]\n", logId, request.GetAction(), request.ToJsonString(), result.ToJsonString()) + } + + if result == nil || result.Response == nil || result.Response.Release == nil { + return resource.NonRetryableError(fmt.Errorf("Describe kubernetes releases failed, Response is nil.")) + } + + response = result + return nil + }) + + if err != nil { + errRet = err + return + } + + ret = response.Response.Release + return +} diff --git a/website/docs/r/kubernetes_cluster_release.html.markdown b/website/docs/r/kubernetes_cluster_release.html.markdown new file mode 100644 index 0000000000..61b798c9f7 --- /dev/null +++ b/website/docs/r/kubernetes_cluster_release.html.markdown @@ -0,0 +1,73 @@ +--- +subcategory: "Tencent Kubernetes Engine(TKE)" +layout: "tencentcloud" +page_title: "TencentCloud: 
tencentcloud_kubernetes_cluster_release" +sidebar_current: "docs-tencentcloud-resource-kubernetes_cluster_release" +description: |- + Provides a resource to create a TKE kubernetes cluster release +--- + +# tencentcloud_kubernetes_cluster_release + +Provides a resource to create a TKE kubernetes cluster release + +~> **NOTE:** Cluster release version currently does not support downgrading. + +## Example Usage + +```hcl +resource "tencentcloud_kubernetes_cluster_release" "example" { + cluster_id = "cls-fdy7hm1q" + name = "tf-example" + namespace = "default" + chart = "nginx-ingress" + chart_from = "tke-market" + chart_version = "4.9.0" + chart_namespace = "opensource-stable" + cluster_type = "tke" + values { + raw_original = <<-EOF +## nginx configuration\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md\n##\n\n## Overrides for generated resource names\n# See templates/_helpers.tpl\n# nameOverride:\n# fullnameOverride:\n\n# -- Override the deployment namespace; defaults to .Release.Namespace\nnamespaceOverride: \"\"\n## Labels to apply to all resources\n##\ncommonLabels: {}\n# scmhash: abc123\n# myLabel: aakkmd\n\ncontroller:\n name: controller\n enableAnnotationValidations: false\n image:\n ## Keep false as default for now!\n chroot: false\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-controller\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"v1.9.5\"\n digest: \"\"\n digestChroot: \"\"\n pullPolicy: IfNotPresent\n runAsNonRoot: true\n # www-data -> uid 101\n runAsUser: 101\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n readOnlyRootFilesystem: false\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n # -- Configures the 
controller container name\n containerName: controller\n # -- Configures the ports that the nginx-controller listens on\n containerPort:\n http: 80\n https: 443\n # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/\n config: {}\n # -- Annotations to be added to the controller config configuration configmap.\n configAnnotations: {}\n # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers\n proxySetHeaders: {}\n # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers\n addHeaders: {}\n # -- Optionally customize the pod dnsConfig.\n dnsConfig: {}\n # -- Optionally customize the pod hostAliases.\n hostAliases: []\n # - ip: 127.0.0.1\n # hostnames:\n # - foo.local\n # - bar.local\n # - ip: 10.1.2.3\n # hostnames:\n # - foo.remote\n # - bar.remote\n # -- Optionally customize the pod hostname.\n hostname: {}\n # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'.\n # By default, while using host network, name resolution uses the host's DNS. 
If you wish nginx-controller\n # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet.\n dnsPolicy: ClusterFirst\n # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network\n # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply\n reportNodeInternalIp: false\n # -- Process Ingress objects without ingressClass annotation/ingressClassName field\n # Overrides value for --watch-ingress-without-class flag of the controller binary\n # Defaults to false\n watchIngressWithoutClass: false\n # -- Process IngressClass per name (additionally as per spec.controller).\n ingressClassByName: false\n # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode=\"auto\"\n # Defaults to false\n enableTopologyAwareRouting: false\n # -- This configuration defines if Ingress Controller should allow users to set\n # their own *-snippet annotations, otherwise this is forbidden / dropped\n # when users add those annotations.\n # Global snippets in ConfigMap are still respected\n allowSnippetAnnotations: false\n # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm),\n # since CNI and hostport don't mix yet. 
Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920\n # is merged\n hostNetwork: false\n ## Use host ports 80 and 443\n ## Disabled by default\n hostPort:\n # -- Enable 'hostPort' or not\n enabled: false\n ports:\n # -- 'hostPort' http port\n http: 80\n # -- 'hostPort' https port\n https: 443\n # NetworkPolicy for controller component.\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader'\n electionID: \"\"\n ## This section refers to the creation of the IngressClass resource\n ## IngressClass resources are supported since k8s >= 1.18 and required since k8s >= 1.19\n ingressClassResource:\n # -- Name of the ingressClass\n name: nginx\n # -- Is this ingressClass enabled or not\n enabled: true\n # -- Is this the default ingressClass for the cluster\n default: false\n # -- Controller-value of the controller that is processing this ingressClass\n controllerValue: \"k8s.io/ingress-nginx\"\n # -- Parameters is a link to a custom resource containing additional\n # configuration for the controller. 
This is optional if the controller\n # does not require extra parameters.\n parameters: {}\n # -- For backwards compatibility with ingress.class annotation, use ingressClass.\n # Algorithm is as follows, first ingressClassName is considered, if not present, controller looks for ingress.class annotation\n ingressClass: nginx\n # -- Labels to add to the pod container metadata\n podLabels: {}\n # key: value\n\n # -- Security context for controller pods\n podSecurityContext: {}\n # -- sysctls for controller pods\n ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/\n sysctls: {}\n # sysctls:\n # \"net.core.somaxconn\": \"8192\"\n # -- Security context for controller containers\n containerSecurityContext: {}\n # -- Allows customization of the source of the IP address or FQDN to report\n # in the ingress status field. By default, it reads the information provided\n # by the service. If disable, the status field reports the IP address of the\n # node or nodes where an ingress controller pod is running.\n publishService:\n # -- Enable 'publishService' or not\n enabled: true\n # -- Allows overriding of the publish service to bind to\n # Must be /\n pathOverride: \"\"\n # Limit the scope of the controller to a specific namespace\n scope:\n # -- Enable 'scope' or not\n enabled: false\n # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)\n namespace: \"\"\n # -- When scope.enabled == false, instead of watching all namespaces, we watching namespaces whose labels\n # only match with namespaceSelector. Format like foo=bar. 
Defaults to empty, means watching all namespaces.\n namespaceSelector: \"\"\n # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n tcp:\n # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n # -- Annotations to be added to the tcp config configmap\n annotations: {}\n udp:\n # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)\n configMapNamespace: \"\"\n # -- Annotations to be added to the udp config configmap\n annotations: {}\n # -- Maxmind license key to download GeoLite2 Databases.\n ## https://blog.maxmind.com/2019/12/18/significant-changes-to-accessing-and-using-geolite2-databases\n maxmindLicenseKey: \"\"\n # -- Additional command line arguments to pass to Ingress-Nginx Controller\n # E.g. to specify the default SSL certificate you can use\n extraArgs: {}\n ## extraArgs:\n ## default-ssl-certificate: \"/\"\n ## time-buckets: \"0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10\"\n ## length-buckets: \"10,20,30,40,50,60,70,80,90,100\"\n ## size-buckets: \"10,100,1000,10000,100000,1e+06,1e+07\"\n\n # -- Additional environment variables to set\n # extraEnvs:\n # - name: FOO\n # valueFrom:\n # secretKeyRef:\n # key: FOO\n # name: secret-resource\n extraEnvs: \n - name: TZ\n value: Asia/Shanghai\n\n # -- Use a `DaemonSet` or `Deployment`\n kind: Deployment\n # -- Annotations to be added to the controller Deployment or DaemonSet\n ##\n annotations: {}\n # keel.sh/pollSchedule: \"@every 60m\"\n\n # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels\n ##\n labels: {}\n # keel.sh/policy: patch\n # keel.sh/trigger: poll\n\n # -- The update strategy to apply to the Deployment or DaemonSet\n ##\n updateStrategy: {}\n # rollingUpdate:\n # maxUnavailable: 1\n # type: RollingUpdate\n\n # -- `minReadySeconds` to avoid killing pods 
before we are ready\n ##\n minReadySeconds: 0\n # -- Node tolerations for server scheduling to nodes with taints\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n ##\n tolerations: []\n # - key: \"key\"\n # operator: \"Equal|Exists\"\n # value: \"value\"\n # effect: \"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)\"\n\n # -- Affinity and anti-affinity rules for server scheduling to nodes\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity\n ##\n affinity: {}\n # # An example of preferred pod anti-affinity, weight is in the range 1-100\n # podAntiAffinity:\n # preferredDuringSchedulingIgnoredDuringExecution:\n # - weight: 100\n # podAffinityTerm:\n # labelSelector:\n # matchExpressions:\n # - key: app.kubernetes.io/name\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/instance\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/component\n # operator: In\n # values:\n # - controller\n # topologyKey: kubernetes.io/hostname\n\n # # An example of required pod anti-affinity\n # podAntiAffinity:\n # requiredDuringSchedulingIgnoredDuringExecution:\n # - labelSelector:\n # matchExpressions:\n # - key: app.kubernetes.io/name\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/instance\n # operator: In\n # values:\n # - ingress-nginx\n # - key: app.kubernetes.io/component\n # operator: In\n # values:\n # - controller\n # topologyKey: \"kubernetes.io/hostname\"\n\n # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in.\n ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/\n ##\n topologySpreadConstraints: []\n # - labelSelector:\n # matchLabels:\n # app.kubernetes.io/name: '{{ include \"ingress-nginx.name\" . 
}}'\n # app.kubernetes.io/instance: '{{ .Release.Name }}'\n # app.kubernetes.io/component: controller\n # topologyKey: topology.kubernetes.io/zone\n # maxSkew: 1\n # whenUnsatisfiable: ScheduleAnyway\n # - labelSelector:\n # matchLabels:\n # app.kubernetes.io/name: '{{ include \"ingress-nginx.name\" . }}'\n # app.kubernetes.io/instance: '{{ .Release.Name }}'\n # app.kubernetes.io/component: controller\n # topologyKey: kubernetes.io/hostname\n # maxSkew: 1\n # whenUnsatisfiable: ScheduleAnyway\n\n # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready\n ## wait up to five minutes for the drain of connections\n ##\n terminationGracePeriodSeconds: 300\n # -- Node labels for controller pod assignment\n ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/\n ##\n nodeSelector:\n kubernetes.io/os: linux\n ## Liveness and readiness probe values\n ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes\n ##\n ## startupProbe:\n ## httpGet:\n ## # should match container.healthCheckPath\n ## path: \"/healthz\"\n ## port: 10254\n ## scheme: HTTP\n ## initialDelaySeconds: 5\n ## periodSeconds: 5\n ## timeoutSeconds: 2\n ## successThreshold: 1\n ## failureThreshold: 5\n livenessProbe:\n httpGet:\n # should match container.healthCheckPath\n path: \"/healthz\"\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n timeoutSeconds: 1\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n # should match container.healthCheckPath\n path: \"/healthz\"\n port: 10254\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 10\n timeoutSeconds: 1\n successThreshold: 1\n failureThreshold: 3\n # -- Path of the health check endpoint. 
All requests received on the port defined by\n # the healthz-port parameter are forwarded internally to this path.\n healthCheckPath: \"/healthz\"\n # -- Address to bind the health check endpoint.\n # It is better to set this option to the internal node address\n # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode.\n healthCheckHost: \"\"\n # -- Annotations to be added to controller pods\n ##\n podAnnotations: {}\n replicaCount: 1\n # -- Minimum available pods set in PodDisruptionBudget.\n # Define either 'minAvailable' or 'maxUnavailable', never both.\n minAvailable: 1\n # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored.\n # maxUnavailable: 1\n\n ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes\n ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903\n ## Ideally, there should be no limits.\n ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/\n resources:\n ## limits:\n ## cpu: 100m\n ## memory: 90Mi\n requests:\n cpu: 100m\n memory: 90Mi\n # Mutually exclusive with keda autoscaling\n autoscaling:\n enabled: false\n annotations: {}\n minReplicas: 1\n maxReplicas: 11\n targetCPUUtilizationPercentage: 50\n targetMemoryUtilizationPercentage: 50\n behavior: {}\n # scaleDown:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 1\n # periodSeconds: 180\n # scaleUp:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 2\n # periodSeconds: 60\n autoscalingTemplate: []\n # Custom or additional autoscaling metrics\n # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics\n # - type: Pods\n # pods:\n # metric:\n # name: k8s_pod_rate_cpu_core_used_limit\n # target:\n # averageValue: \"80\"\n # type: AverageValue\n\n # Mutually exclusive with hpa autoscaling\n keda:\n apiVersion: \"keda.sh/v1alpha1\"\n ## 
apiVersion changes with keda 1.x vs 2.x\n ## 2.x = keda.sh/v1alpha1\n ## 1.x = keda.k8s.io/v1alpha1\n enabled: false\n minReplicas: 1\n maxReplicas: 11\n pollingInterval: 30\n cooldownPeriod: 300\n # fallback:\n # failureThreshold: 3\n # replicas: 11\n restoreToOriginalReplicaCount: false\n scaledObject:\n annotations: {}\n # Custom annotations for ScaledObject resource\n # annotations:\n # key: value\n triggers: []\n # - type: prometheus\n # metadata:\n # serverAddress: http://:9090\n # metricName: http_requests_total\n # threshold: '100'\n # query: sum(rate(http_requests_total{deployment=\"my-deployment\"}[2m]))\n\n behavior: {}\n # scaleDown:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 1\n # periodSeconds: 180\n # scaleUp:\n # stabilizationWindowSeconds: 300\n # policies:\n # - type: Pods\n # value: 2\n # periodSeconds: 60\n # -- Enable mimalloc as a drop-in replacement for malloc.\n ## ref: https://github.com/microsoft/mimalloc\n ##\n enableMimalloc: true\n ## Override NGINX template\n customTemplate:\n configMapName: \"\"\n configMapKey: \"\"\n service:\n # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service.\n enabled: true\n external:\n # -- Enable the external controller service or not. Useful for internal-only deployments.\n enabled: true\n # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service.\n annotations: {}\n # -- Labels to be added to both controller services.\n labels: {}\n # -- Type of the external controller service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types\n type: LoadBalancer\n # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services.\n # This value is immutable. 
Set once, it can not be changed without deleting and re-creating the service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address\n clusterIP: \"\"\n # -- List of node IP addresses at which the external controller service is available.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n externalIPs: []\n # -- Deprecated: Pre-defined IP address of the external controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n loadBalancerIP: \"\"\n # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default.\n loadBalancerSourceRanges: []\n # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class\n loadBalancerClass: \"\"\n # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation\n # allocateLoadBalancerNodePorts: true\n\n # -- External traffic policy of the external controller service. Set to \"Local\" to preserve source IP on providers supporting it.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n externalTrafficPolicy: \"\"\n # -- Session affinity of the external controller service. Must be either \"None\" or \"ClientIP\" if set. 
Defaults to \"None\".\n # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity\n sessionAffinity: \"\"\n # -- Specifies the health check node port (numeric port number) for the external controller service.\n # If not specified, the service controller allocates a port from your cluster's node port range.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n # healthCheckNodePort: 0\n\n # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack.\n # Fields `ipFamilies` and `clusterIP` depend on the value of this field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilyPolicy: SingleStack\n # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilies:\n - IPv4\n # -- Enable the HTTP listener on both controller services or not.\n enableHttp: true\n # -- Enable the HTTPS listener on both controller services or not.\n enableHttps: true\n ports:\n # -- Port the external HTTP listener is published with.\n http: 80\n # -- Port the external HTTPS listener is published with.\n https: 443\n targetPorts:\n # -- Port of the ingress controller the external HTTP listener is mapped to.\n http: http\n # -- Port of the ingress controller the external HTTPS listener is mapped to.\n https: https\n # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. 
Supersedes provider-specific annotations for declaring the backend protocol.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol\n appProtocol: true\n nodePorts:\n # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range.\n http: \"\"\n # -- Node port allocated for the external HTTPS listener. If left empty, the service controller allocates one from the configured node port range.\n https: \"\"\n # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # tcp:\n # 8080: 30080\n tcp: {}\n # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # udp:\n # 53: 30053\n udp: {}\n internal:\n # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this.\n enabled: false\n # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer\n annotations: {}\n # -- Type of the internal controller service.\n # Defaults to the value of `controller.service.type`.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types\n type: \"\"\n # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services.\n # This value is immutable. 
Set once, it can not be changed without deleting and re-creating the service.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address\n clusterIP: \"\"\n # -- List of node IP addresses at which the internal controller service is available.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n externalIPs: []\n # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer\n loadBalancerIP: \"\"\n # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default.\n loadBalancerSourceRanges: []\n # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class\n loadBalancerClass: \"\"\n # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation\n # allocateLoadBalancerNodePorts: true\n\n # -- External traffic policy of the internal controller service. Set to \"Local\" to preserve source IP on providers supporting it.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n externalTrafficPolicy: \"\"\n # -- Session affinity of the internal controller service. Must be either \"None\" or \"ClientIP\" if set. 
Defaults to \"None\".\n # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity\n sessionAffinity: \"\"\n # -- Specifies the health check node port (numeric port number) for the internal controller service.\n # If not specified, the service controller allocates a port from your cluster's node port range.\n # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip\n # healthCheckNodePort: 0\n\n # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack.\n # Fields `ipFamilies` and `clusterIP` depend on the value of this field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilyPolicy: SingleStack\n # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services\n ipFamilies:\n - IPv4\n ports: {}\n # -- Port the internal HTTP listener is published with.\n # Defaults to the value of `controller.service.ports.http`.\n # http: 80\n # -- Port the internal HTTPS listener is published with.\n # Defaults to the value of `controller.service.ports.https`.\n # https: 443\n\n targetPorts: {}\n # -- Port of the ingress controller the internal HTTP listener is mapped to.\n # Defaults to the value of `controller.service.targetPorts.http`.\n # http: http\n # -- Port of the ingress controller the internal HTTPS listener is mapped to.\n # Defaults to the value of `controller.service.targetPorts.https`.\n # https: https\n\n # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. 
Supersedes provider-specific annotations for declaring the backend protocol.\n # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol\n appProtocol: true\n nodePorts:\n # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range.\n http: \"\"\n # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range.\n https: \"\"\n # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # tcp:\n # 8080: 30080\n tcp: {}\n # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range.\n # Example:\n # udp:\n # 53: 30053\n udp: {}\n # shareProcessNamespace enables process namespace sharing within the pod.\n # This can be used for example to signal log rotation using `kill -USR1` from a sidecar.\n shareProcessNamespace: false\n # -- Additional containers to be added to the controller pod.\n # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example.\n extraContainers: []\n # - name: my-sidecar\n # image: nginx:latest\n\n # -- Additional volumeMounts to the controller main container.\n extraVolumeMounts: []\n # - name: copy-portal-skins\n # mountPath: /var/lib/lemonldap-ng/portal/skins\n\n # -- Additional volumes to the controller pod.\n extraVolumes: []\n # - name: copy-portal-skins\n # emptyDir: {}\n\n # -- Containers, which are run before the app containers are started.\n extraInitContainers: \n - command:\n - sh\n - -c\n - |-\n sysctl -w net.core.somaxconn=65535\n sysctl -w net.ipv4.ip_local_reserved_ports=9100\n sysctl -w net.ipv4.ip_local_port_range=\"1024 61999\"\n sysctl -w net.ipv4.tcp_tw_reuse=1\n sysctl -w fs.file-max=1048576\n image: 
ccr.ccs.tencentyun.com/tkeimages/busybox:latest\n imagePullPolicy: Always\n name: setsysctl\n securityContext:\n privileged: true\n\n # -- Modules, which are mounted into the core nginx image. See values.yaml for a sample to add opentelemetry module\n extraModules: []\n # - name: mytestmodule\n # image:\n # registry: registry.k8s.io\n # image: ingress-nginx/mytestmodule\n # ## for backwards compatibility consider setting the full image url via the repository value below\n # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n # ## repository:\n # tag: \"v1.0.0\"\n # digest: \"\"\n # distroless: false\n # containerSecurityContext:\n # runAsNonRoot: true\n # runAsUser: \n # allowPrivilegeEscalation: false\n # seccompProfile:\n # type: RuntimeDefault\n # capabilities:\n # drop:\n # - ALL\n # readOnlyRootFilesystem: true\n # resources: {}\n #\n # The image must contain a `/usr/local/bin/init_module.sh` executable, which\n # will be executed as initContainers, to move its config files within the\n # mounted volume.\n\n opentelemetry:\n enabled: false\n name: opentelemetry\n image:\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-opentelemetry\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"v20230721-3e2062ee5\"\n digest: \"\"\n distroless: true\n containerSecurityContext:\n runAsNonRoot: true\n # -- The image's default user, inherited from its base image `cgr.dev/chainguard/static`.\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n admissionWebhooks:\n name: admission\n annotations: {}\n # ignore-check.kube-linter.io/no-read-only-rootfs: \"This 
deployment needs write access to root filesystem\".\n\n ## Additional annotations to the admission webhooks.\n ## These annotations will be added to the ValidatingWebhookConfiguration and\n ## the Jobs Spec of the admission webhooks.\n enabled: true\n # -- Additional environment variables to set\n extraEnvs: []\n # extraEnvs:\n # - name: FOO\n # valueFrom:\n # secretKeyRef:\n # key: FOO\n # name: secret-resource\n # -- Admission Webhook failure policy to use\n failurePolicy: Fail\n # timeoutSeconds: 10\n port: 8443\n certificate: \"/usr/local/certificates/cert\"\n key: \"/usr/local/certificates/key\"\n namespaceSelector: {}\n objectSelector: {}\n # -- Labels to be added to admission webhooks\n labels: {}\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n service:\n annotations: {}\n # clusterIP: \"\"\n externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 443\n type: ClusterIP\n createSecretJob:\n name: create\n # -- Security context for secret creation containers\n securityContext:\n runAsNonRoot: true\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n # limits:\n # cpu: 10m\n # memory: 20Mi\n # requests:\n # cpu: 10m\n # memory: 20Mi\n patchWebhookJob:\n name: patch\n # -- Security context for webhook patch containers\n securityContext:\n runAsNonRoot: true\n runAsUser: 65532\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n capabilities:\n drop:\n - ALL\n readOnlyRootFilesystem: true\n resources: {}\n patch:\n enabled: true\n image:\n registry: ccr.ccs.tencentyun.com\n image: tke-market/ingress-nginx-webhook-certgen\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n 
tag: v20231011-8b53cabe0\n digest: \"\"\n pullPolicy: IfNotPresent\n # -- Provide a priority class name to the webhook patching job\n ##\n priorityClassName: \"\"\n podAnnotations: {}\n # NetworkPolicy for webhook patch\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n nodeSelector:\n kubernetes.io/os: linux\n tolerations: []\n # -- Labels to be added to patch job resources\n labels: {}\n # -- Security context for secret creation & webhook patch pods\n securityContext: {}\n # Use certmanager to generate webhook certs\n certManager:\n enabled: false\n # self-signed root certificate\n rootCert:\n # default to be 5y\n duration: \"\"\n admissionCert:\n # default to be 1y\n duration: \"\"\n # issuerRef:\n # name: \"issuer\"\n # kind: \"ClusterIssuer\"\n metrics:\n port: 10254\n portName: metrics\n # if this port is changed, change healthz-port: in extraArgs: accordingly\n enabled: false\n service:\n annotations: {}\n # prometheus.io/scrape: \"true\"\n # prometheus.io/port: \"10254\"\n # -- Labels to be added to the metrics service resource\n labels: {}\n # clusterIP: \"\"\n\n # -- List of IP addresses at which the stats-exporter service is available\n ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n ##\n externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 10254\n type: ClusterIP\n # externalTrafficPolicy: \"\"\n # nodePort: \"\"\n serviceMonitor:\n enabled: false\n additionalLabels: {}\n annotations: {}\n ## The label to use to retrieve the job name from.\n ## jobLabel: \"app.kubernetes.io/name\"\n namespace: \"\"\n namespaceSelector: {}\n ## Default: scrape .Release.Namespace or namespaceOverride only\n ## To scrape all, use the following:\n ## namespaceSelector:\n ## any: true\n scrapeInterval: 30s\n # honorLabels: true\n targetLabels: []\n relabelings: []\n metricRelabelings: []\n prometheusRule:\n enabled: false\n additionalLabels: {}\n # namespace: \"\"\n rules: []\n # 
# These are just examples rules, please adapt them to your needs\n # - alert: NGINXConfigFailed\n # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0\n # for: 1s\n # labels:\n # severity: critical\n # annotations:\n # description: bad ingress config - nginx config test failed\n # summary: uninstall the latest ingress changes to allow config reloads to resume\n # # By default a fake self-signed certificate is generated as default and\n # # it is fine if it expires. If `--default-ssl-certificate` flag is used\n # # and a valid certificate passed please do not filter for `host` label!\n # # (i.e. delete `{host!=\"_\"}` so also the default SSL certificate is\n # # checked for expiration)\n # - alert: NGINXCertificateExpiry\n # expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!=\"_\"}) by (host) - time()) < 604800\n # for: 1s\n # labels:\n # severity: critical\n # annotations:\n # description: ssl certificate(s) will expire in less then a week\n # summary: renew expiring certificates to avoid downtime\n # - alert: NGINXTooMany500s\n # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~\"5.+\"} ) / sum(nginx_ingress_controller_requests) ) > 5\n # for: 1m\n # labels:\n # severity: warning\n # annotations:\n # description: Too many 5XXs\n # summary: More than 5% of all requests returned 5XX, this requires your attention\n # - alert: NGINXTooMany400s\n # expr: 100 * ( sum( nginx_ingress_controller_requests{status=~\"4.+\"} ) / sum(nginx_ingress_controller_requests) ) > 5\n # for: 1m\n # labels:\n # severity: warning\n # annotations:\n # description: Too many 4XXs\n # summary: More than 5% of all requests returned 4XX, this requires your attention\n # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:\n # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds\n # to 300, allowing the draining of connections up to five minutes.\n # If the active 
connections end before that, the pod will terminate gracefully at that time.\n # To effectively take advantage of this feature, the Configmap feature\n # worker-shutdown-timeout new value is 240s instead of 10s.\n ##\n lifecycle:\n preStop:\n exec:\n command:\n - /wait-shutdown\n priorityClassName: \"\"\n# -- Rollback limit\n##\nrevisionHistoryLimit: 10\n## Default 404 backend\n##\ndefaultBackend:\n ##\n enabled: false\n name: defaultbackend\n image:\n registry: ccr.ccs.tencentyun.com\n image: \ttke-market/ingress-nginx-defaultbackend-amd64\n ## for backwards compatibility consider setting the full image url via the repository value below\n ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail\n ## repository:\n tag: \"1.5\"\n pullPolicy: IfNotPresent\n runAsNonRoot: true\n # nobody user -> uid 65534\n runAsUser: 65534\n allowPrivilegeEscalation: false\n seccompProfile:\n type: RuntimeDefault\n readOnlyRootFilesystem: true\n # -- Use an existing PSP instead of creating one\n existingPsp: \"\"\n extraArgs: {}\n serviceAccount:\n create: true\n name: \"\"\n automountServiceAccountToken: true\n # -- Additional environment variables to set for defaultBackend pods\n extraEnvs: []\n port: 8080\n ## Readiness and liveness probes for default backend\n ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/\n ##\n livenessProbe:\n failureThreshold: 3\n initialDelaySeconds: 30\n periodSeconds: 10\n successThreshold: 1\n timeoutSeconds: 5\n readinessProbe:\n failureThreshold: 6\n initialDelaySeconds: 0\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 5\n # -- The update strategy to apply to the Deployment or DaemonSet\n ##\n updateStrategy: {}\n # rollingUpdate:\n # maxUnavailable: 1\n # type: RollingUpdate\n\n # -- `minReadySeconds` to avoid killing pods before we are ready\n ##\n minReadySeconds: 0\n # -- Node tolerations for server scheduling to 
nodes with taints\n ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/\n ##\n tolerations: []\n # - key: \"key\"\n # operator: \"Equal|Exists\"\n # value: \"value\"\n # effect: \"NoSchedule|PreferNoSchedule|NoExecute(1.6 only)\"\n\n affinity: {}\n # -- Security context for default backend pods\n podSecurityContext: {}\n # -- Security context for default backend containers\n containerSecurityContext: {}\n # -- Labels to add to the pod container metadata\n podLabels: {}\n # key: value\n\n # -- Node labels for default backend pod assignment\n ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/\n ##\n nodeSelector:\n kubernetes.io/os: linux\n # -- Annotations to be added to default backend pods\n ##\n podAnnotations: {}\n replicaCount: 1\n minAvailable: 1\n resources: {}\n # limits:\n # cpu: 10m\n # memory: 20Mi\n # requests:\n # cpu: 10m\n # memory: 20Mi\n\n extraVolumeMounts: []\n ## Additional volumeMounts to the default backend container.\n # - name: copy-portal-skins\n # mountPath: /var/lib/lemonldap-ng/portal/skins\n\n extraVolumes: []\n ## Additional volumes to the default backend pod.\n # - name: copy-portal-skins\n # emptyDir: {}\n\n extraConfigMaps: []\n ## Additional configmaps to the default backend pod.\n # - name: my-extra-configmap-1\n # labels:\n # type: config-1\n # data:\n # extra_file_1.html: |\n # \n # - name: my-extra-configmap-2\n # labels:\n # type: config-2\n # data:\n # extra_file_2.html: |\n # \n\n autoscaling:\n annotations: {}\n enabled: false\n minReplicas: 1\n maxReplicas: 2\n targetCPUUtilizationPercentage: 50\n targetMemoryUtilizationPercentage: 50\n # NetworkPolicy for default backend component.\n networkPolicy:\n # -- Enable 'networkPolicy' or not\n enabled: false\n service:\n annotations: {}\n # clusterIP: \"\"\n\n # -- List of IP addresses at which the default backend service is available\n ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips\n ##\n 
externalIPs: []\n # loadBalancerIP: \"\"\n loadBalancerSourceRanges: []\n servicePort: 80\n type: ClusterIP\n priorityClassName: \"\"\n # -- Labels to be added to the default backend resources\n labels: {}\n## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266\nrbac:\n create: true\n scope: false\n## If true, create & use Pod Security Policy resources\n## https://kubernetes.io/docs/concepts/policy/pod-security-policy/\npodSecurityPolicy:\n enabled: false\nserviceAccount:\n create: true\n name: \"\"\n automountServiceAccountToken: true\n # -- Annotations for the controller service account\n annotations: {}\n# -- Optional array of imagePullSecrets containing private registry credentials\n## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\nimagePullSecrets: []\n# - name: secretName\n\n# -- TCP service key-value pairs\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md\n##\ntcp: {}\n# 8080: \"default/example-tcp-svc:9000\"\n\n# -- UDP service key-value pairs\n## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md\n##\nudp: {}\n# 53: \"kube-system/kube-dns:53\"\n\n# -- Prefix for TCP and UDP ports names in ingress controller service\n## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration\nportNamePrefix: \"\"\n# -- (string) A base64-encoded Diffie-Hellman parameter.\n# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64`\n## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param\ndhParam: \"\"\n +EOF + values_type = "yaml" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `chart` - (Required, String) Chart name (obtained from the application market) 
or the download URL of the chart package when installing from a third-party repo; redirect-type chart URLs are not supported, and the URL must end with `*.tgz`.
+* `cluster_id` - (Required, String, ForceNew) Cluster ID.
+* `name` - (Required, String, ForceNew) Application name, maximum 63 characters; can only contain lowercase letters, numbers, and the separator "-", and must start with a lowercase letter and end with a number or lowercase letter.
+* `namespace` - (Required, String, ForceNew) Application namespace, obtained from the namespace list in the cluster details.
+* `chart_from` - (Optional, String) Chart source. Valid values: `tke-market` or `other`. Default value: `tke-market`.
+* `chart_namespace` - (Optional, String) Chart namespace. When `chart_from` is `tke-market`, `chart_namespace` must not be empty; its value is the `Namespace` returned by the `DescribeProducts` API.
+* `chart_repo_url` - (Optional, String) Chart repository URL address.
+* `chart_version` - (Optional, String) Chart version.
+* `cluster_type` - (Optional, String) Cluster type. Supported values: `tke`, `eks`, `tkeedge`, `external` (registered cluster).
+* `password` - (Optional, String) Chart access password.
+* `username` - (Optional, String) Chart access username.
+* `values` - (Optional, List) Custom parameters.
+
+The `values` object supports the following:
+
+* `raw_original` - (Required, String) Custom parameter original value.
+* `values_type` - (Required, String) Custom parameter value type.
+
+## Attributes Reference
+
+In addition to all arguments above, the following attributes are exported:
+
+* `id` - ID of the resource.
+* `cluster_release_id` - Cluster release ID.
+* `release_status` - Cluster release status.
+
+## Timeouts
+
+The `timeouts` block allows you to specify [timeouts](https://developer.hashicorp.com/terraform/language/resources/syntax#operation-timeouts) for certain actions:
+
+* `create` - (Defaults to `30m`) Used when creating the resource.
+* `update` - (Defaults to `30m`) Used when updating the resource. 
+ diff --git a/website/tencentcloud.erb b/website/tencentcloud.erb index 5eb8f7bd5c..2ee6516d53 100644 --- a/website/tencentcloud.erb +++ b/website/tencentcloud.erb @@ -5145,6 +5145,9 @@
  • tencentcloud_kubernetes_cluster_master_attachment
  • +
  • + tencentcloud_kubernetes_cluster_release +
  • tencentcloud_kubernetes_encryption_protection