From c6c20266788e355bd3d33c848ec2a595d5b8f70b Mon Sep 17 00:00:00 2001 From: "docs-sourcer[bot]" <99042413+docs-sourcer[bot]@users.noreply.github.com> Date: Sat, 4 Feb 2023 10:08:43 +0000 Subject: [PATCH 1/2] Updated with the latest changes from the knowledge base discussions. --- docs/discussions/knowledge-base/635.mdx | 27 +++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 docs/discussions/knowledge-base/635.mdx diff --git a/docs/discussions/knowledge-base/635.mdx b/docs/discussions/knowledge-base/635.mdx new file mode 100644 index 0000000000..4b022eb012 --- /dev/null +++ b/docs/discussions/knowledge-base/635.mdx @@ -0,0 +1,27 @@ +--- +hide_table_of_contents: true +hide_title: true +custom_edit_url: null +--- + +import CenterLayout from "/src/components/CenterLayout" +import GitHub from "/src/components/GitHub" + +
+ + + +Hello all,
\nI ran into a problem during the EKS cluster upgrade: we recently deployed the ECS deploy runner and have not had much experience with it yet. When I upgraded the eks-core-service module, the CircleCI pipeline failed with these errors:
[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╷\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ Error: Kubernetes cluster unreachable: the server has asked for the client to provide credentials\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ with module.alb_ingress_controller[\"enable\"].helm_release.aws_alb_ingress_controller,\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ on .terraform/modules/alb_ingress_controller/modules/eks-alb-ingress-controller/main.tf line 48, in resource \"helm_release\" \"aws_alb_ingress_controller\":\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ 48: resource \"helm_release\" \"aws_alb_ingress_controller\" {\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╵\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╷\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ Error: Kubernetes cluster unreachable: the server has asked for the client to provide credentials\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ with module.aws_for_fluent_bit[\"enable\"].helm_release.aws_for_fluent_bit,\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ on .terraform/modules/aws_for_fluent_bit/modules/eks-container-logs/main.tf line 48, in resource \"helm_release\" \"aws_for_fluent_bit\":\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ 48: resource \"helm_release\" \"aws_for_fluent_bit\" {\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╵\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╷\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ Error: Kubernetes cluster unreachable: the server has asked for the client to provide credentials\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ with module.k8s_external_dns[\"enable\"].helm_release.k8s_external_dns,\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ on .terraform/modules/k8s_external_dns/modules/eks-k8s-external-dns/main.tf line 54, in resource \"helm_release\" \"k8s_external_dns\":\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ 54: resource \"helm_release\" \"k8s_external_dns\" {\n[ecs-deploy-runner][2023-01-16T16:42:43+0000] │ \n[ecs-deploy-runner][2023-01-16T16:42:43+0000] ╵\nMy understanding is that the ecs-deploy-runner ECS task does not perform Kubernetes authentication and does not have a Kubernetes configuration. Does anybody know how to work around this?
Without knowing the full details of your configuration, I'll try my best to explain...
\nFor the ecs-deploy-runner to be able to interact with the EKS cluster, the IAM Role the runner uses must be mapped in the aws-auth ConfigMap. Had the cluster been created with the IAM Role the ecs-deploy-runner is using, this would be unnecessary, as EKS implicitly grants admin RBAC to the IAM role that created the cluster. I'm assuming the cluster was created with a different role?
To fix the issue, the ECS Deploy Runner IAM Role has to be added to the aws-auth ConfigMap. If you're using the eks-aws-auth-merger, you can use the eks-k8s-role-mapping module to create an entry in the aws-auth ConfigMap, e.g.
module \"ecs_deploy_runner_eks_k8s_role_mapping\" {\n source = \"git::git@github.com:gruntwork-io/terraform-aws-eks.git//modules/eks-k8s-role-mapping?ref=v0.x.x\"\n\n name = \"ecs-deploy-runner\"\n namespace = \"whatever-namespace-you-use-for-auth-merger\"\n\n eks_worker_iam_role_arns = []\n eks_fargate_profile_executor_iam_role_arns = []\n\n iam_role_to_rbac_group_mappings = {\n # I'm assuming you want admin level permissions in the cluster, because you'll be deploying\n # RBAC resources, hence the system:masters\n \"your-ecs-deploy-runner-iam-role\" = [\"system:masters\"]\n }\n\n config_map_labels = {\n eks-cluster = module.eks_cluster.eks_cluster_name\n }\n}\nMake sure you're not overwriting the entire aws-auth ConfigMap aws-auth ConfigMap has been updated, applying with the ecs-deploy-runner should work.
Hope this helps!
"}}} /> + +I am trying to create an EC2 instance with an EBS volume attached to the said instance.
\nI have the code to create the EC2 instance using terragrunt, and it works fine.
However, to create the EBS volume and attach it to the instance I need to use some terraform code.
\ne.g.
\nLayout tree is:
\ndev
\n-ec2
\n--terragrunt.hcl
\n--ebs.tf
In the ebs.tf file we can have
\nresource \"aws_ebs_volume\" \"this\" {\n  availability_zone = \"ap-southeast-2a\"\n  size = 20\n}\n\nresource \"aws_volume_attachment\" \"this\" {\n  device_name = \"/dev/sdh\"\n  volume_id = aws_ebs_volume.this.id\n  instance_id = <instance.parameter.from.terragrunt>\n}
terragrunt.hcl
\nlocals {\n  environment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))\n  env = local.environment_vars.locals.environment\n\n  project_vars = read_terragrunt_config(find_in_parent_folders(\"project.hcl\"))\n  project = local.project_vars.locals.project_name\n  application = local.project_vars.locals.application_name\n}\n\ninclude {\n  path = find_in_parent_folders()\n}\n\nterraform {\n  source = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"\n}\n\ndependency \"sg\" {\n  config_path = \"../sg-ec2\"\n\n  mock_outputs = {\n    security_group_id = \"sg-xxxxxxxxxxxx\"\n  }\n}\n\ninputs = {\n  name = \"ui01-${local.project}-${local.application}-${local.env}\"\n  description = \"UI 01 ${local.project} ${local.application} Instance for ${local.env}\"\n\n  ami = \"ami-0bd2230cfb28832f7\" # Amazon Linux kernel 5.10\n  instance_type = \"c5.large\"\n  key_name = \"key-test\" # This key is manually created\n  monitoring = true\n  iam_instance_profile = \"AmazonSSMRoleForInstancesQuickSetup\"\n\n  vpc_id = \"vpc-xxxxxxx\"\n  subnet_id = \"subnet-xxxxxxxx\"\n\n  vpc_security_group_ids = [\"${dependency.sg.outputs.security_group_id}\"]\n}
Is it possible to use the output of the instance and pass this parameter/object to the ebs.tf file so that the ebs volume gets attached to the instance on the fly?
\nAnother question is, is it possible for the *.tf files to use the variables defined in the .hcl files?
\ne.g.
\nIf you call in terragrunt
\nlocals {\n  environment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))\n  env = local.environment_vars.locals.environment\n}
env.hcl is:
\nlocals {\n  environment = \"dev\"\n}
you can use the variable env as ${local.env} for your inputs
\nCan you call this variable in the .tf file in some way?
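One way to answer this second question (it is never addressed directly in the thread): Terragrunt passes everything in inputs to Terraform as TF_VAR_* environment variables, so a .tf file can consume a Terragrunt local by declaring a matching variable, which is the same pattern the follow-up below uses for instance_id. A minimal sketch, assuming a hypothetical env input:
\n# terragrunt.hcl\nlocals {\n  environment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))\n  env = local.environment_vars.locals.environment\n}\n\ninputs = {\n  # Passed to Terraform as TF_VAR_env\n  env = local.env\n}\n\n# variables.tf, next to the module's other *.tf files\nvariable \"env\" {\n  type = string\n}\n\n# Any .tf file can then reference var.env, e.g.\n# tags = { Environment = var.env }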
OK, so I have this almost working. In fact it does work: I can grab the instance ID and attach an EBS volume to the instance, but at the same time the ebs directory tries to create a new EC2 instance. This is not what I want, as I have an ec2 directory looking after the entire EC2 instance creation.
\n├── ebs
\n│ ├── ebs.tf
\n│ └── terragrunt.hcl
\n└── ec2-instance
\n└── terragrunt.hcl
ebs.tf
\nvariable \"instance_id\" {\n type = string\n}\n\nresource \"aws_ebs_volume\" \"this\" {\n availability_zone = \"ap-southeast-2a\"\n size = 20\n}\n\nresource \"aws_volume_attachment\" \"this\" {\n device_name = \"/dev/sdh\"\n volume_id = aws_ebs_volume.this.id\n instance_id = \"${var.instance_id}\"\n}\nterragrunt.hcl
\nlocals { }\n\ninclude {\n path = find_in_parent_folders()\n}\n\nterraform {\n source = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"\n}\n\ndependency \"ec2-linux-ui\" {\n config_path = \"../ec2-linux-ui\"\n mock_outputs = {\n id = \"12345\"\n }\n}\n\ninputs = {\n instance_id = dependency.ec2-linux-ui.outputs.id\n}\nterragrunt.hcl for the ec2 instance
\nlocals {\n environment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))\n env = local.environment_vars.locals.environment\n project_vars = read_terragrunt_config(find_in_parent_folders(\"project.hcl\"))\n project = local.project_vars.locals.project_name\n application = local.project_vars.locals.application_name\n}\n\ninclude {\n path = find_in_parent_folders()\n}\n\nterraform {\n source = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"\n}\n\n# Need the output of the correct Security Group ID to attach to the EC2 instance\ndependency \"sg\" {\n config_path = \"../sg-ec2\"\n\n mock_outputs = {\n security_group_id = \"sg-xxxxxxxxxx\"\n }\n}\n\ninputs = {\n\n # Naming\n name = \"ui01-${local.project}-${local.application}-${local.env}\"\n description = \"UI 01 ${local.project} ${local.application} Instance for ${local.env}\"\n\n # EC2 Config\n ami = \"ami-0bd2230cfb28832f7\" # Amazon Linux kernel 5.10\n instance_type = \"c5.large\"\n key_name = \"xxxxxxx\"\n monitoring = true\n\n # Networking\n vpc_id = \"xxxxxxx\"\n subnet_id = \"xxxxxxxx\"\n\n # Security Group\n vpc_security_group_ids = [\"${dependency.sg.outputs.security_group_id}\"]\n}\nNot sure why the ebs/terragrunt.hcl file wants to create a new instance when I can successfully get the instance ID returned from the ec2-linux-ui dependency? If I can fix that, we are done.
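The thread leaves this last question open, but the likely cause is visible in the config above: the terraform block in ebs/terragrunt.hcl points at the terraform-aws-ec2-instance module, so Terragrunt deploys that whole module (a second instance) in addition to the local ebs.tf. Dropping the terraform block makes Terragrunt run plain Terraform in the ebs directory itself, picking up only ebs.tf. A minimal sketch of the corrected ebs/terragrunt.hcl, under that assumption:
\ninclude {\n  path = find_in_parent_folders()\n}\n\n# No terraform { source = ... } block here: Terragrunt then runs Terraform in\n# this directory, so only the resources in the local ebs.tf are created.\n\ndependency \"ec2-linux-ui\" {\n  config_path = \"../ec2-linux-ui\"\n  mock_outputs = {\n    id = \"12345\" # mock for the module's id output consumed below\n  }\n}\n\ninputs = {\n  instance_id = dependency.ec2-linux-ui.outputs.id\n}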
"}}} /> +I am trying to create an EC2 instance with an EBS volume attached to the said instance.
\nI have the code to create the EC2 instance using terragrunt, and it works fine.
However, to create the EBS volume and attach it to the instance I need to use some terraform code.
\ne.g.
\nLayout tree is:
\ndev
\n-ec2
\n--terragrunt.hcl
\n--ebs.tf
In the ebs.tf file we can have
\n\n\nresource \"aws_ebs_volume\" \"this\" {
\n
\navailability_zone = \"ap-southeast-2a\"
\nsize = 20
\n}resource \"aws_volume_attachment\" \"this\" {
\n
\ndevice_name = \"/dev/sdh\"
\nvolume_id = aws_ebs_volume.this.id
\ninstance_id = <instance.parameter.from.terragrunt>
\n}
terragrunt.hcl
\n\n\nlocals {
\n
\nenvironment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))
\nenv = local.environment_vars.locals.environmentproject_vars = read_terragrunt_config(find_in_parent_folders(\"project.hcl\"))
\n
\nproject = local.project_vars.locals.project_name
\napplication = local.project_vars.locals.application_name}
\ninclude {
\n
\npath = find_in_parent_folders()
\n}terraform {
\n
\nsource = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"
\n}
\n``
\ndependency \"sg\" {
\nconfig_path = \"../sg-ec2\"mock_outputs = {
\n
\nsecurity_group_id = \"sg-xxxxxxxxxxxx\"
\n}
\n}inputs = {
\nname = \"ui01-${local.project}-${local.application}-${local.env}\"
\n
\ndescription = \"UI 01${local.project} $ {local.application} Instance for ${local.env}\"ami = \"ami-0bd2230cfb28832f7\" # Amazon Linux kernel 5.10
\n
\ninstance_type = \"c5.large\"
\nkey_name = \"key-test\" # This key is manually created
\nmonitoring = true
\niam_instance_profile = \"AmazonSSMRoleForInstancesQuickSetup\"vpc_id = \"vpc-xxxxxxx\"
\n
\nsubnet_id = \"subnet-xxxxxxxx\"vpc_security_group_ids = [\"${dependency.sg.outputs.security_group_id}\"]
\n}
\n
Is it possible to use the output of the instance and pass this parameter/object to the ebs.tf file so that the ebs volume gets attached to the instance on the fly?
\nAnother question is, is it possible for the *.tf files to use the variables defined in the .hcl files?
\ne.g.
\nIf you call in terragrunt
\n\nlocals {
\n
\nenvironment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))
\nenv = local.environment_vars.locals.environment
\n}env.hcl is:
\n
\nlocals {
\nenvironment = \"dev\"
\n}
you can use the variable env as ${local.env} for your inputs
\nCan you call this variable in the .tf file in some way?
OK so I have this almost working fully, well in fact it does work, I can grab the instance id and attach an ebs volume to this instance, but at the same time the ebs directory tries to create a new ec2 instance. This is not what I want as I have a ec2 directory looking after the entire ec2 instance creation.
\n├── ebs
\n│ ├── ebs.tf
\n│ └── terragrunt.hcl
\n└── ec2-instance
\n└── terragrunt.hcl
ebs.tf
\nvariable \"instance_id\" {\n type = string\n}\n\nresource \"aws_ebs_volume\" \"this\" {\n availability_zone = \"ap-southeast-2a\"\n size = 20\n}\n\nresource \"aws_volume_attachment\" \"this\" {\n device_name = \"/dev/sdh\"\n volume_id = aws_ebs_volume.this.id\n instance_id = \"${var.instance_id}\"\n}\nterragrunt.hcl
\nlocals { }\n\ninclude {\n path = find_in_parent_folders()\n}\n\nterraform {\n source = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"\n}\n\ndependency \"ec2-linux-ui\" {\n config_path = \"../ec2-linux-ui\"\n mock_outputs = {\n instance_id = \"12345\"\n }\n}\n\ninputs = {\n instance_id = dependency.ec2-linux-ui.outputs.id\n}\nterragrunt.hcl for the ec2 instance
\nlocals {\n environment_vars = read_terragrunt_config(find_in_parent_folders(\"env.hcl\"))\n env = local.environment_vars.locals.environment\n project_vars = read_terragrunt_config(find_in_parent_folders(\"project.hcl\"))\n project = local.project_vars.locals.project_name\n application = local.project_vars.locals.application_name\n}\n\ninclude {\n path = find_in_parent_folders()\n}\n\nterraform {\n source = \"git::git@github.com:terraform-aws-modules/terraform-aws-ec2-instance.git?ref=v3.3.0\"\n}\n\n# Need the output of the correct Security Group ID to attach to the RDS instance\ndependency \"sg\" {\n config_path = \"../sg-ec2\"\n\n mock_outputs = {\n security_group_id = \"sg-xxxxxxxxxx\"\n }\n}\n\ninputs = {\n\n # Naming\n name = \"ui01-${local.project}-${local.application}-${local.env}\"\n description = \"UI 01 ${local.project} ${local.application} Instance for ${local.env}\"\n\n # EC2 Config\n ami = \"ami-0bd2230cfb28832f7\" # Amazon Linux kernel 5.10\n instance_type = \"c5.large\"\n key_name = \"xxxxxxx\" \n monitoring = true\n\n\n # Networking\n vpc_id = \"xxxxxxx\" \n subnet_id = \"xxxxxxxx\"\n\n # Security Group\n vpc_security_group_ids = [\"${dependency.sg.outputs.security_group_id}\"]\n\n}\nNot sure why the ebs/terragrunt.hcl file wants to create a new instance when I can successfully get the instance id returned from the ec2-linux-ui dependency? If I can fix that, we are done.
"}}} />When I try to delete a VPC using cloud-nuke I see an error
\nInvalidParameterValue: Network interface 'eni-2ad435344fe31c' is currently in use.\nWe believe this is a problem with eventual consistency within AWS. After removing any ENI resources within the VPC, it takes a significant period of time before the VPC becomes eligible for deletion. As part of Gruntwork operations we typically run cloud-nuke multiple times to ensure VPCs are destroyed. We find that retrying after 30 minutes typically resolves the issue.
"}}} /> + +