Describe the bug
I followed this example and I am stuck with the following status:
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 2m59s fargate-scheduler Your AWS account is currently blocked and thus cannot launch any Fargate pods
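For reference, the state of the Fargate profile that the kube-system pods are meant to land on can be checked from the CLI. This is only a rough sketch: it assumes the AWS CLI is configured against the same account/profile, and the cluster and profile names are taken from the Terraform config below.
# Check whether the kube-system Fargate profile was created and is ACTIVE
# (names come from the locals and fargate_profiles blocks in the config below)
aws eks describe-fargate-profile \
  --region us-west-1 \
  --cluster-name syncify-dev-eks-cluster \
  --fargate-profile-name kube-system \
  --query 'fargateProfile.status'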
To Reproduce
Steps to reproduce the behavior:
Navigate to the serverless tutorial
Run it
See error
Code
terraform {
  required_version = "~> 1.2.4"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.21.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 2.12.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.5" # "~> 2.6.0"
    }
    null = {
      source  = "hashicorp/null"
      version = ">= 3.0" # "~> 3.1.0"
    }
  }
}
provider"aws" {
profile="syncifyEKS-terraform-admin"region=local.regiondefault_tags {
tags={
Environment ="Staging"
Owner ="BT-Compliance"
Terraform ="True"
}
}
}
## Housekeeping#locals {
project_name="syncify-dev"cluster_name="${local.project_name}-eks-cluster"cluster_version="1.22"region="us-west-1"
}
/*
The following 2 data resources are used to get around the fact that we have to wait
for the EKS cluster to be initialised before we can attempt to authenticate.
*/
data "aws_eks_cluster" "default" {
  name = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "default" {
  name = module.eks.cluster_id
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.default.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.default.token
}

provider "helm" {
  kubernetes {
    host                   = data.aws_eks_cluster.default.endpoint
    cluster_ca_certificate = base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)
    token                  = data.aws_eks_cluster_auth.default.token
  }
}
################################################################################
# Create EKS Cluster
################################################################################

# Create VPC for EKS Cluster
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "3.14.2"

  name = local.cluster_name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24"]     #, "10.0.3.0/24"]
  public_subnets  = ["10.0.101.0/24", "10.0.102.0/24"] #, "10.0.103.0/24"]

  enable_nat_gateway     = true
  single_nat_gateway     = true
  one_nat_gateway_per_az = false

  manage_default_network_acl    = true
  default_network_acl_tags      = { Name = "${local.cluster_name}-default" }
  manage_default_route_table    = true
  default_route_table_tags      = { Name = "${local.cluster_name}-default" }
  manage_default_security_group = true
  default_security_group_tags   = { Name = "${local.cluster_name}-default" }

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = 1
  }
}
module"eks" {
source="terraform-aws-modules/eks/aws"version="18.26.3"cluster_name=local.cluster_namecluster_version=local.cluster_versionvpc_id=module.vpc.vpc_idsubnet_ids=module.vpc.private_subnetscluster_addons={
kube-proxy = {
addon_version = data.aws_eks_addon_version.this["kube-proxy"].version
resolve_conflicts ="OVERWRITE"
}
vpc-cni = {
addon_version = data.aws_eks_addon_version.this["vpc-cni"].version
resolve_conflicts ="OVERWRITE"
}
}
# manage_aws_auth_configmap = truefargate_profiles={
default = {
name ="default"
selectors = [
{ namespace ="default" }
]
}
kube_system = {
name ="kube-system"
selectors = [
{ namespace ="kube-system" }
]
}
}
}
data"aws_eks_addon_version""this" {
for_each=toset(["coredns", "kube-proxy", "vpc-cni"])
addon_name=each.valuekubernetes_version=module.eks.cluster_versionmost_recent=true
}
################################################################################
# Modify EKS CoreDNS Deployment
################################################################################

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}
# Separate resource so that this is only ever executed once
resource "null_resource" "remove_default_coredns_deployment" {
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }
}
resource"null_resource""modify_kube_dns" {
triggers={}
provisioner"local-exec" {
interpreter=["/bin/bash", "-c"]
environment={
KUBECONFIG =base64encode(local.kubeconfig)
}
# We are maintaing the existing kube-dns service and annotating it for Helm to assume controlcommand=<<-EOT echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}" kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode) kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode) kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode) EOT
}
depends_on=[
null_resource.remove_default_coredns_deployment
]
}
################################################################################
# CoreDNS Helm Chart (self-managed)
################################################################################

resource "helm_release" "coredns" {
  name             = "coredns"
  namespace        = "kube-system"
  create_namespace = false
  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
  chart            = "coredns"
  version          = "1.19.4"
  repository       = "https://coredns.github.io/helm"

  force_update  = true
  recreate_pods = true

  # For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
  values = [
    <<-EOT
      image:
        repository: 602401143452.dkr.ecr.us-west-1.amazonaws.com/eks/coredns
        tag: ${data.aws_eks_addon_version.this["coredns"].version}
      deployment:
        name: coredns
        annotations:
          eks.amazonaws.com/compute-type: fargate
      service:
        name: kube-dns
        annotations:
          eks.amazonaws.com/compute-type: fargate
      podAnnotations:
        eks.amazonaws.com/compute-type: fargate
    EOT
  ]

  depends_on = [
    null_resource.modify_kube_dns
  ]
}
Expected behavior
coredns pods should have gotten scheduled.
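Once the account can launch Fargate pods again, this is roughly how I would confirm the scheduling (a sketch only; it assumes kubectl is pointed at this cluster and that the deployment keeps the coredns name set in the Helm values above):
# Deployment and pod placement for the self-managed CoreDNS release
kubectl --namespace kube-system get deployment coredns
kubectl --namespace kube-system get pods -o wide
# Recent scheduling events, in case anything is still pending
kubectl --namespace kube-system get events --sort-by=.lastTimestamp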
Any help would be greatly appreciated.
Hi @arnav13081994 - this appears to be an issue with your AWS account and is not related to the code provided here. Please reach out to AWS Support to resolve it.