From 6cf315a929860eb99f1861afd51735f03cd4929d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=BA=C5=A1=20Mrekaj?= Date: Fri, 3 May 2024 12:01:05 +0200 Subject: [PATCH] Fix/multiple providers (#1348) * update typo * update tf templates * update templates & docs * update docs * fix typo * Auto commit - update kustomization.yaml * update docs * Auto commit - update kustomization.yaml --------- Co-authored-by: CI/CD pipeline --- docs/contributing/local-testing.md | 15 +- docs/getting-started/detailed-guide.md | 20 +- docs/input-manifest/api-reference.md | 12 +- docs/input-manifest/example.md | 23 +- docs/input-manifest/gpu-example.md | 8 +- docs/input-manifest/providers/aws.md | 4 +- docs/input-manifest/providers/azure.md | 32 +-- docs/input-manifest/providers/cloudflare.md | 4 +- docs/input-manifest/providers/gcp.md | 4 +- docs/input-manifest/providers/genesiscloud.md | 8 +- docs/input-manifest/providers/hetzner.md | 28 +-- docs/input-manifest/providers/on-prem.md | 2 +- docs/storage/storage-solution.md | 18 +- internal/manifest/manifest.go | 24 +- internal/manifest/validate_node_pool.go | 5 + internal/utils/generic.go | 5 +- manifests/claudie/kustomization.yaml | 16 +- .../testing-framework/kustomization.yaml | 2 +- .../test-sets/test-set1/1.yaml | 8 +- .../test-sets/test-set1/2.yaml | 8 +- .../utils/cluster-builder/cluster_builder.go | 218 +++++++++++------- .../cluster-builder/cluster_builder_test.go | 2 +- .../terraformer/templates/aws/networking.tpl | 111 ++++----- services/terraformer/templates/aws/node.tpl | 57 ++--- .../templates/aws/node_networking.tpl | 25 ++ .../terraformer/templates/aws/provider.tpl | 10 +- .../templates/azure/networking.tpl | 88 ++----- services/terraformer/templates/azure/node.tpl | 50 ++-- .../templates/azure/node_networking.tpl | 58 +++++ .../terraformer/templates/azure/provider.tpl | 15 +- .../terraformer/templates/gcp/networking.tpl | 42 ++-- services/terraformer/templates/gcp/node.tpl | 48 ++-- .../templates/gcp/node_networking.tpl | 14 ++ .../terraformer/templates/gcp/provider.tpl | 6 +- .../templates/genesiscloud/networking.tpl | 32 ++- .../templates/genesiscloud/node.tpl | 60 ++--- .../templates/genesiscloud/provider.tpl | 6 +- .../templates/hetzner/networking.tpl | 28 ++- .../terraformer/templates/hetzner/node.tpl | 49 ++-- .../templates/hetzner/provider.tpl | 4 +- .../terraformer/templates/oci/networking.tpl | 75 +++--- services/terraformer/templates/oci/node.tpl | 53 ++--- .../templates/oci/node_networking.tpl | 23 ++ .../terraformer/templates/oci/provider.tpl | 10 +- 44 files changed, 724 insertions(+), 606 deletions(-) create mode 100644 services/terraformer/templates/aws/node_networking.tpl create mode 100644 services/terraformer/templates/azure/node_networking.tpl create mode 100644 services/terraformer/templates/gcp/node_networking.tpl create mode 100644 services/terraformer/templates/oci/node_networking.tpl diff --git a/docs/contributing/local-testing.md b/docs/contributing/local-testing.md index 75abd6da8..bfcc8a7a5 100644 --- a/docs/contributing/local-testing.md +++ b/docs/contributing/local-testing.md @@ -105,16 +105,7 @@ providers: nodePools: dynamic: - - name: hetzner-control - providerSpec: - name: hetzner-1 - region: nbg1 - zone: nbg1-dc3 - count: 1 - serverType: cpx11 - image: ubuntu-22.04 - - - name: hetzner-compute + - name: htz-compute providerSpec: name: hetzner-1 region: nbg1 @@ -137,11 +128,13 @@ nodePools: - name: static-pool nodes: - endpoint: "192.168.52.1" + username: root privateKey: | -----BEGIN RSA PRIVATE KEY----- ...... 
put the private key here ..... -----END RSA PRIVATE KEY----- - endpoint: "192.168.52.2" + username: root privateKey: | -----BEGIN RSA PRIVATE KEY----- ...... put the private key here ..... @@ -156,7 +149,7 @@ kubernetes: control: - static-pool compute: - - hetzner-compute + - htz-compute loadBalancers: roles: diff --git a/docs/getting-started/detailed-guide.md b/docs/getting-started/detailed-guide.md index 439165657..e9307ea2b 100644 --- a/docs/getting-started/detailed-guide.md +++ b/docs/getting-started/detailed-guide.md @@ -139,7 +139,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o namespace: mynamespace nodePools: dynamic: - - name: aws-controlplane + - name: aws-control providerSpec: name: aws-1 region: eu-central-1 @@ -156,7 +156,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o serverType: t3.medium image: ami-03df6dea56f8aa618 storageDiskSize: 200 - - name: aws-loadbalancer + - name: aws-lb providerSpec: name: aws-1 region: eu-central-2 @@ -167,11 +167,11 @@ This detailed guide for Claudie serves as a resource for providing an overview o kubernetes: clusters: - name: my-super-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: - - aws-controlplane + - aws-control compute: - aws-worker loadBalancers: @@ -181,7 +181,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o port: 6443 targetPort: 6443 targetPools: - - aws-controlplane + - aws-control clusters: - name: loadbalance-me roles: @@ -192,7 +192,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o hostname: supercluster # the sub domain of the new cluster targetedK8s: my-super-cluster pools: - - aws-loadbalancer + - aws-lb ``` !!! note "Tip!" @@ -354,7 +354,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o network: 192.168.2.0/24 pools: control: - - aws-controlplane + - aws-control compute: - aws-worker - hetzner-worker # add it to the compute list here @@ -388,8 +388,8 @@ This detailed guide for Claudie serves as a resource for providing an overview o protocol: tcp port: 6443 targetPort: 6443 - targetPools: # only loadbalances for port 6443 for the aws-controlplane nodepool - - aws-controlplane + targetPools: # only loadbalances for port 6443 for the aws-control nodepool + - aws-control - name: https protocol: tcp port: 443 @@ -408,7 +408,7 @@ This detailed guide for Claudie serves as a resource for providing an overview o hostname: supercluster targetedK8s: my-super-cluster pools: - - aws-loadbalancer + - aws-lb ``` !!! note Load balancing Please refer how our load balancing works by reading our [documentation](https://docs.claudie.io/latest/loadbalancing/loadbalancing-solution/). diff --git a/docs/input-manifest/api-reference.md b/docs/input-manifest/api-reference.md index 9061d38fb..a054f14de 100644 --- a/docs/input-manifest/api-reference.md +++ b/docs/input-manifest/api-reference.md @@ -33,7 +33,7 @@ needs to be defined. - `name` - The name of the provider specification. It has to be unique across all providers. + The name of the provider specification. The name is limited to 15 characters. It has to be unique across all providers. - `providerType` @@ -199,7 +199,7 @@ Dynamic nodepools are defined for cloud provider machines that Claudie is expect - `name` - Name of the nodepool. Each nodepool will have a random hash appended to the name, so the whole name will be of format `-`. + Name of the nodepool. 
The name is limited to 14 characters. Each nodepool will have a random hash appended to the name, so the whole name will be of format `<name>-<hash>`. - `provideSpec` [Provider spec](#provider-spec) @@ -207,7 +207,7 @@ Dynamic nodepools are defined for cloud provider machines that Claudie is expect - `count` - Number of the nodes in the nodepool. Mutually exclusive with `autoscaler`. + Number of the nodes in the nodepool. Maximum value of 255. Mutually exclusive with `autoscaler`. - `serverType` @@ -294,7 +294,7 @@ Static nodepools are defined for static machines which Claudie will not manage. - `name` - Name of the static nodepool. + Name of the static nodepool. The name is limited to 14 characters. - `nodes` [Static Node](#static-node) @@ -361,7 +361,7 @@ Collection of data used to define a Kubernetes cluster. - `name` - Name of the Kubernetes cluster. Each cluster will have a random hash appended to the name, so the whole name will be of format `<name>-<hash>`. + Name of the Kubernetes cluster. The name is limited to 28 characters. Each cluster will have a random hash appended to the name, so the whole name will be of format `<name>-<hash>`. - `version` @@ -435,7 +435,7 @@ Collection of data used to define a loadbalancer cluster. - `name` - Name of the loadbalancer. + Name of the loadbalancer. The name is limited to 28 characters. - `roles` diff --git a/docs/input-manifest/example.md b/docs/input-manifest/example.md index 8c9d7140d..24567d71f 100644 --- a/docs/input-manifest/example.md +++ b/docs/input-manifest/example.md @@ -94,7 +94,7 @@ spec: # # Example definitions for each provider dynamic: - - name: control-hetzner + - name: control-htz providerSpec: name: hetzner-1 region: hel1 @@ -112,7 +112,7 @@ spec: value: finland effect: NoSchedule - - name: compute-hetzner + - name: compute-htz providerSpec: name: hetzner-1 region: hel1 @@ -127,7 +127,7 @@ spec: annotations: node.longhorn.io/default-node-tags: '["finland"]' - - name: compute-hetzner-autoscaled + - name: htz-autoscaled providerSpec: name: hetzner-1 region: hel1 @@ -295,8 +295,8 @@ spec: annotations: node.longhorn.io/default-node-tags: '["datacenter-1"]' taints: - key: datacenter - effect: NoExecute + - key: datacenter + effect: NoExecute # Kubernetes field is used to define the kubernetes clusters. @@ -318,25 +318,26 @@ spec: network: 192.168.2.0/24 pools: control: - - control-hetzner + - control-htz - control-gcp compute: - - compute-hetzner + - compute-htz - compute-gcp - compute-azure + - htz-autoscaled - name: prod-cluster version: v1.26.13 network: 192.168.2.0/24 pools: control: - - control-hetzner + - control-htz - control-gcp - control-oci - control-aws - control-azure compute: - - compute-hetzner + - compute-htz - compute-gcp - compute-oci - compute-aws @@ -349,7 +350,7 @@ spec: control: - datacenter-1 compute: - - compute-hetzner + - compute-htz - compute-gcp - compute-azure @@ -383,7 +384,7 @@ spec: port: 6443 targetPort: 6443 targetPools: - - k8s-control-gcp # make sure that this nodepools is acutally used by the targeted `dev-cluster` cluster. + - control-htz # make sure that this nodepool is actually used by the targeted `dev-cluster` cluster.
clusters: - name: apiserver-lb-dev roles: diff --git a/docs/input-manifest/gpu-example.md b/docs/input-manifest/gpu-example.md index a93e7b241..0f3560013 100644 --- a/docs/input-manifest/gpu-example.md +++ b/docs/input-manifest/gpu-example.md @@ -22,7 +22,7 @@ spec: nodePools: dynamic: - - name: genesiscloud-cpu + - name: gencloud-cpu providerSpec: name: genesiscloud region: ARC-IS-HAF-1 @@ -31,7 +31,7 @@ spec: image: "Ubuntu 22.04" storageDiskSize: 50 - - name: genesiscloud-gpu + - name: gencloud-gpu providerSpec: name: genesiscloud region: ARC-IS-HAF-1 @@ -47,9 +47,9 @@ spec: network: 172.16.2.0/24 pools: control: - - genesiscloud-cpu + - gencloud-cpu compute: - - genesiscloud-gpu + - gencloud-gpu ``` After the `InputManifest` was successfully built by Claudie, we deploy the `gpu-operator` to the `gpu-example` Kubernetes cluster. diff --git a/docs/input-manifest/providers/aws.md b/docs/input-manifest/providers/aws.md index eecf2db17..ccba1de87 100644 --- a/docs/input-manifest/providers/aws.md +++ b/docs/input-manifest/providers/aws.md @@ -161,7 +161,7 @@ spec: kubernetes: clusters: - name: aws-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: @@ -266,7 +266,7 @@ spec: kubernetes: clusters: - name: aws-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: diff --git a/docs/input-manifest/providers/azure.md b/docs/input-manifest/providers/azure.md index 3c3a4a5e4..b934cc854 100644 --- a/docs/input-manifest/providers/azure.md +++ b/docs/input-manifest/providers/azure.md @@ -104,7 +104,7 @@ spec: namespace: mynamespace nodePools: dynamic: - - name: control-azure + - name: control-az providerSpec: # Name of the provider instance. name: azure-1 @@ -118,7 +118,7 @@ spec: # URN of the image. image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120 - - name: compute-1-azure + - name: compute-1-az providerSpec: # Name of the provider instance. name: azure-1 @@ -133,7 +133,7 @@ spec: # URN of the image. image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120 storageDiskSize: 50 - - name: compute-2-azure + - name: compute-2-az providerSpec: # Name of the provider instance. name: azure-1 @@ -151,14 +151,14 @@ spec: kubernetes: clusters: - name: azure-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: - - control-azure + - control-az compute: - - compute-2-azure - - compute-1-azure + - compute-2-az + - compute-1-az ``` ### Multi provider, multi region clusters example @@ -193,7 +193,7 @@ spec: nodePools: dynamic: - - name: control-azure-1 + - name: control-az-1 providerSpec: # Name of the provider instance. name: azure-1 @@ -207,7 +207,7 @@ spec: # URN of the image. image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120 - - name: control-azure-2 + - name: control-az-2 providerSpec: # Name of the provider instance. name: azure-2 @@ -221,7 +221,7 @@ spec: # URN of the image. image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120 - - name: compute-azure-1 + - name: compute-az-1 providerSpec: # Name of the provider instance. name: azure-1 @@ -236,7 +236,7 @@ spec: # URN of the image. image: Canonical:0001-com-ubuntu-minimal-jammy:minimal-22_04-lts:22.04.202212120 storageDiskSize: 50 - - name: compute-azure-2 + - name: compute-az-2 providerSpec: # Name of the provider instance.
name: azure-2 @@ -254,13 +254,13 @@ spec: kubernetes: clusters: - name: azure-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: - - control-azure-1 - - control-azure-2 + - control-az-1 + - control-az-2 compute: - - compute-azure-1 - - compute-azure-2 + - compute-az-1 + - compute-az-2 ``` diff --git a/docs/input-manifest/providers/cloudflare.md b/docs/input-manifest/providers/cloudflare.md index cae61d925..bceefc9e1 100644 --- a/docs/input-manifest/providers/cloudflare.md +++ b/docs/input-manifest/providers/cloudflare.md @@ -80,7 +80,7 @@ spec: kubernetes: clusters: - name: cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: [] @@ -103,6 +103,6 @@ spec: hostname: my.fancy.url targetedK8s: prod-cluster pools: - - loadbalancer-2 + - loadbalancer ``` \ No newline at end of file diff --git a/docs/input-manifest/providers/gcp.md b/docs/input-manifest/providers/gcp.md index f7fdd4424..ba0f3b7cd 100644 --- a/docs/input-manifest/providers/gcp.md +++ b/docs/input-manifest/providers/gcp.md @@ -144,7 +144,7 @@ spec: kubernetes: clusters: - name: gcp-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: @@ -247,7 +247,7 @@ spec: kubernetes: clusters: - name: gcp-cluster - version: v1.24.0 + version: v1.27.0 network: 192.168.2.0/24 pools: control: diff --git a/docs/input-manifest/providers/genesiscloud.md b/docs/input-manifest/providers/genesiscloud.md index 521a6a32b..e1764fdf2 100644 --- a/docs/input-manifest/providers/genesiscloud.md +++ b/docs/input-manifest/providers/genesiscloud.md @@ -45,7 +45,7 @@ spec: nodePools: dynamic: - - name: control-genesiscloud + - name: control providerSpec: name: genesiscloud region: ARC-IS-HAF-1 @@ -54,7 +54,7 @@ spec: image: "Ubuntu 22.04" storageDiskSize: 50 - - name: compute-genesiscloud + - name: compute providerSpec: name: genesiscloud region: ARC-IS-HAF-1 @@ -70,7 +70,7 @@ spec: network: 172.16.2.0/24 pools: control: - - control-genesiscloud + - control compute: - - compute-genesiscloud + - compute ``` \ No newline at end of file diff --git a/docs/input-manifest/providers/hetzner.md b/docs/input-manifest/providers/hetzner.md index 2eb55b308..d8230d172 100644 --- a/docs/input-manifest/providers/hetzner.md +++ b/docs/input-manifest/providers/hetzner.md @@ -70,7 +70,7 @@ spec: nodePools: dynamic: - - name: control-hetzner + - name: control-htz providerSpec: # Name of the provider instance. name: hetzner-1 @@ -84,7 +84,7 @@ spec: # OS image name. image: ubuntu-22.04 - - name: compute-1-hetzner + - name: compute-1-htz providerSpec: # Name of the provider instance. name: hetzner-1 @@ -99,7 +99,7 @@ spec: image: ubuntu-22.04 storageDiskSize: 50 - - name: compute-2-hetzner + - name: compute-2-htz providerSpec: # Name of the provider instance. name: hetzner-1 @@ -121,10 +121,10 @@ spec: network: 192.168.2.0/24 pools: control: - - control-hetzner + - control-htz compute: - - compute-1-hetzner - - compute-2-hetzner + - compute-1-htz + - compute-2-htz ``` ### Multi provider, multi region clusters example @@ -158,7 +158,7 @@ spec: nodePools: dynamic: - - name: control-hetzner-1 + - name: control-htz-1 providerSpec: # Name of the provider instance. name: hetzner-1 @@ -172,7 +172,7 @@ spec: # OS image name. image: ubuntu-22.04 - - name: control-hetzner-2 + - name: control-htz-2 providerSpec: # Name of the provider instance. name: hetzner-2 @@ -186,7 +186,7 @@ spec: # OS image name. 
image: ubuntu-22.04 - - name: compute-hetzner-1 + - name: compute-htz-1 providerSpec: # Name of the provider instance. name: hetzner-1 @@ -201,7 +201,7 @@ spec: image: ubuntu-22.04 storageDiskSize: 50 - - name: compute-hetzner-2 + - name: compute-htz-2 providerSpec: # Name of the provider instance. name: hetzner-2 @@ -223,9 +223,9 @@ spec: network: 192.168.2.0/24 pools: control: - - control-hetzner-1 - - control-hetzner-2 + - control-htz-1 + - control-htz-2 compute: - - compute-hetzner-1 - - compute-hetzner-2 + - compute-htz-1 + - compute-htz-2 ``` diff --git a/docs/input-manifest/providers/on-prem.md b/docs/input-manifest/providers/on-prem.md index 2a1a92d0a..16c5b1fd9 100644 --- a/docs/input-manifest/providers/on-prem.md +++ b/docs/input-manifest/providers/on-prem.md @@ -99,7 +99,7 @@ spec: nodePools: dynamic: - - name: control-hetzner + - name: control-htz providerSpec: name: hetzner-1 region: fsn1 diff --git a/docs/storage/storage-solution.md b/docs/storage/storage-solution.md index 227f95f8c..6f7e6f3d8 100644 --- a/docs/storage/storage-solution.md +++ b/docs/storage/storage-solution.md @@ -50,7 +50,7 @@ spec: nodePools: dynamic: - - name: control-nodepool + - name: control providerSpec: name: compute-provider region: hel1 @@ -59,7 +59,7 @@ spec: serverType: cpx21 image: ubuntu-22.04 - - name: datastore-nodepool + - name: datastore providerSpec: name: storage-provider region: hel1 @@ -73,7 +73,7 @@ spec: value: datastore effect: NoSchedule - - name: compute-nodepool + - name: compute providerSpec: name: compute-provider region: hel1 @@ -86,7 +86,7 @@ spec: value: compute effect: NoSchedule - - name: loadbalancer-nodepool + - name: loadbalancer providerSpec: name: compute-provider region: hel1 @@ -102,10 +102,10 @@ spec: network: 192.168.2.0/24 pools: control: - - control-nodepool + - control compute: - - datastore-nodepool - - compute-nodepool + - datastore + - compute loadBalancers: roles: @@ -114,7 +114,7 @@ spec: port: 6443 targetPort: 6443 targetPools: - - control-nodepool + - control clusters: - name: apiserver-lb @@ -125,7 +125,7 @@ spec: provider: dns-provider targetedK8s: my-awesome-claudie-cluster pools: - - loadbalancer-nodepool + - loadbalancer ``` When Claudie applies this input manifest, the following storage classes are installed: diff --git a/internal/manifest/manifest.go b/internal/manifest/manifest.go index 8ba3ddc7a..0b18815f7 100644 --- a/internal/manifest/manifest.go +++ b/internal/manifest/manifest.go @@ -24,17 +24,17 @@ type Provider struct { } type HetznerDNS struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` ApiToken string `validate:"required" yaml:"apiToken"` } type Cloudflare struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` ApiToken string `validate:"required" yaml:"apiToken"` } type GCP struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` // We can only validate that the supplied string is a // valid formatted JSON. 
Credentials string `validate:"required,json" yaml:"credentials" json:"credentials"` @@ -42,7 +42,7 @@ type GCP struct { } type Hetzner struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` // We can only validate the length of the token // as Hetzner doesn't specify the structure of the token, @@ -53,17 +53,17 @@ type Hetzner struct { } type GenesisCloud struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` ApiToken string `validate:"required,alphanum" yaml:"apiToken"` } type AWS struct { - Name string `validate:"required" yaml:"name" json:"name"` + Name string `validate:"required,max=15" yaml:"name" json:"name"` AccessKey string `validate:"required,alphanum,len=20" yaml:"accessKey" json:"accessKey"` SecretKey string `validate:"required,len=40" yaml:"secretKey" json:"secretKey"` } type OCI struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` PrivateKey string `validate:"required" yaml:"privateKey"` KeyFingerprint string `validate:"required" yaml:"keyFingerprint"` TenancyOCID string `validate:"required" yaml:"tenancyOcid"` @@ -72,7 +72,7 @@ type OCI struct { } type Azure struct { - Name string `validate:"required" yaml:"name"` + Name string `validate:"required,max=15" yaml:"name"` SubscriptionId string `validate:"required" yaml:"subscriptionId"` TenantId string `validate:"required" yaml:"tenantId"` ClientId string `validate:"required" yaml:"clientId"` @@ -122,7 +122,7 @@ type MachineSpec struct { // that would get instantiated N >= 0 times depending on which clusters reference it. type DynamicNodePool struct { // Name of the nodepool. Each nodepool will have a random hash appended to the name, so the whole name will be of format -. - Name string `validate:"required" yaml:"name" json:"name"` + Name string `validate:"required,max=14" yaml:"name" json:"name"` // Collection of provider data to be used while creating the nodepool. ProviderSpec ProviderSpec `validate:"required" yaml:"providerSpec" json:"providerSpec"` // Number of the nodes in the nodepool. Mutually exclusive with autoscaler. @@ -175,7 +175,7 @@ type ProviderSpec struct { // StaticNodePool List of static nodepools of already existing machines, not created by Claudie, used for Kubernetes or loadbalancer clusters. type StaticNodePool struct { // Name of the static nodepool. - Name string `validate:"required" yaml:"name" json:"name"` + Name string `validate:"required,max=14" yaml:"name" json:"name"` // List of static nodes assigned to a particular nodepool. Nodes []Node `validate:"dive" yaml:"nodes" json:"nodes"` // User defined labels for this nodepool. @@ -202,7 +202,7 @@ type Node struct { // Collection of data used to define a Kubernetes cluster. type Cluster struct { // Name of the Kubernetes cluster. Each cluster will have a random hash appended to the name, so the whole name will be of format -. - Name string `validate:"required" yaml:"name" json:"name"` + Name string `validate:"required,max=28" yaml:"name" json:"name"` // Version should be defined in format vX.Y. In terms of supported versions of Kubernetes, // Claudie follows kubeone releases and their supported versions. // The current kubeone version used in Claudie is 1.5. @@ -242,7 +242,7 @@ type Role struct { // Collection of data used to define a loadbalancer cluster. Defines loadbalancer clusters. type LoadBalancerCluster struct { // Name of the loadbalancer. 
- Name string `validate:"required" yaml:"name" json:"name"` + Name string `validate:"required,max=28" yaml:"name" json:"name"` // List of roles the loadbalancer uses. Roles []string `yaml:"roles" json:"roles"` // Specification of the loadbalancer's DNS record. diff --git a/internal/manifest/validate_node_pool.go b/internal/manifest/validate_node_pool.go index 3747a794c..3bf49fa6e 100644 --- a/internal/manifest/validate_node_pool.go +++ b/internal/manifest/validate_node_pool.go @@ -2,6 +2,7 @@ package manifest import ( "fmt" + "math" "strings" "github.com/berops/claudie/internal/utils" @@ -119,6 +120,10 @@ func (d *DynamicNodePool) Validate(m *Manifest) error { return fmt.Errorf("storageDiskSize size must be either 0 or >= 50") } + if d.Count > math.MaxUint8 { + return fmt.Errorf("max available count for a nodepool is 255") + } + validate := validator.New() validate.RegisterStructValidation(func(sl validator.StructLevel) { dnp := sl.Current().Interface().(DynamicNodePool) diff --git a/internal/utils/generic.go b/internal/utils/generic.go index 13b98b7b7..7126688c0 100644 --- a/internal/utils/generic.go +++ b/internal/utils/generic.go @@ -1,6 +1,7 @@ package utils import ( + "cmp" "golang.org/x/exp/constraints" "slices" ) @@ -16,7 +17,9 @@ func IterateInOrder[M ~map[K]V, K inorder, V any](m M, f func(k K, v V) error) e keys = append(keys, k) } - slices.Sort(keys) + slices.SortStableFunc(keys, func(first, second K) int { + return cmp.Compare(first, second) + }) for _, k := range keys { if err := f(k, m[k]); err != nil { diff --git a/manifests/claudie/kustomization.yaml b/manifests/claudie/kustomization.yaml index c5f83a33f..008b904ee 100644 --- a/manifests/claudie/kustomization.yaml +++ b/manifests/claudie/kustomization.yaml @@ -58,20 +58,20 @@ apiVersion: kustomize.config.k8s.io/v1beta1 kind: Kustomization images: - name: ghcr.io/berops/claudie/ansibler - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/autoscaler-adapter newTag: 688726a-1932 - name: ghcr.io/berops/claudie/builder - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/claudie-operator - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/context-box - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/kube-eleven - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/kuber - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/scheduler - newTag: 45e9b32-2753 + newTag: 46ec064-2755 - name: ghcr.io/berops/claudie/terraformer - newTag: 45e9b32-2753 + newTag: 46ec064-2755 diff --git a/manifests/testing-framework/kustomization.yaml b/manifests/testing-framework/kustomization.yaml index 947ed5e0c..c329bbf87 100644 --- a/manifests/testing-framework/kustomization.yaml +++ b/manifests/testing-framework/kustomization.yaml @@ -37,4 +37,4 @@ secretGenerator: images: - name: ghcr.io/berops/claudie/testing-framework - newTag: 45e9b32-2753 + newTag: 46ec064-2755 diff --git a/manifests/testing-framework/test-sets/test-set1/1.yaml b/manifests/testing-framework/test-sets/test-set1/1.yaml index 2e7c95dca..610183366 100644 --- a/manifests/testing-framework/test-sets/test-set1/1.yaml +++ b/manifests/testing-framework/test-sets/test-set1/1.yaml @@ -31,7 +31,7 @@ spec: namespace: e2e-secrets nodePools: dynamic: - - name: hetzner-control + - name: htz-control providerSpec: name: hetzner-1 region: nbg1 @@ -44,7 +44,7 @@ spec: annotations: claudie.io/example-annotation: > ["test-set1"] - - name: hetzner-compute
+ - name: htz-compute providerSpec: name: hetzner-1 region: nbg1 @@ -172,9 +172,9 @@ spec: network: 192.168.2.0/24 pools: control: - - hetzner-control + - htz-control compute: - - hetzner-compute + - htz-compute - name: ts1-gcp version: v1.26.13 network: 192.168.2.0/24 diff --git a/manifests/testing-framework/test-sets/test-set1/2.yaml b/manifests/testing-framework/test-sets/test-set1/2.yaml index 66216c1b1..b75d8ec93 100644 --- a/manifests/testing-framework/test-sets/test-set1/2.yaml +++ b/manifests/testing-framework/test-sets/test-set1/2.yaml @@ -31,7 +31,7 @@ spec: namespace: e2e-secrets nodePools: dynamic: - - name: hetzner-control + - name: htz-control providerSpec: name: hetzner-1 region: nbg1 @@ -44,7 +44,7 @@ spec: annotations: claudie.io/example-annotation: > ["test-set1-new"] - - name: hetzner-compute + - name: htz-compute providerSpec: name: hetzner-1 region: nbg1 @@ -171,9 +171,9 @@ spec: network: 192.168.2.0/24 pools: control: - - hetzner-control + - htz-control compute: - - hetzner-compute + - htz-compute - name: ts1-gcp version: v1.26.13 network: 192.168.2.0/24 diff --git a/services/terraformer/server/domain/utils/cluster-builder/cluster_builder.go b/services/terraformer/server/domain/utils/cluster-builder/cluster_builder.go index 778fe20f2..2e3c5490b 100644 --- a/services/terraformer/server/domain/utils/cluster-builder/cluster_builder.go +++ b/services/terraformer/server/domain/utils/cluster-builder/cluster_builder.go @@ -54,13 +54,23 @@ type ClusterBuilder struct { SpawnProcessLimit chan struct{} } -type NodepoolsData struct { +type ClusterData struct { ClusterName string ClusterHash string ClusterType string +} + +type ProviderData struct { + ClusterData ClusterData + Provider *pb.Provider + Regions []string + Metadata map[string]any +} + +type NodepoolsData struct { + ClusterData ClusterData NodePools []NodePoolInfo Metadata map[string]any - Regions []string } type NodePoolInfo struct { @@ -234,20 +244,31 @@ func (c *ClusterBuilder) generateFiles(clusterID, clusterDir string) error { } } - // generate providers.tpl for all nodepools (current, desired). 
if err := generateProviderTemplates(c.CurrentClusterInfo, c.DesiredClusterInfo, clusterID, clusterDir); err != nil { return fmt.Errorf("error while generating provider templates: %w", err) } - // sort nodepools by a provider - sortedNodePools := utils.GroupNodepoolsByProviderNames(clusterInfo) - for providerNames, nodepools := range sortedNodePools { - providerName := providerNames.CloudProviderName - - if providerName == pb.StaticNodepoolInfo_STATIC_PROVIDER.String() { + groupedNodepools := utils.GroupNodepoolsByProviderNames(clusterInfo) + for providerNames, nodepools := range groupedNodepools { + if providerNames.CloudProviderName == pb.StaticNodepoolInfo_STATIC_PROVIDER.String() { continue } + providerData := &ProviderData{ + ClusterData: ClusterData{ + ClusterName: clusterInfo.Name, + ClusterHash: clusterInfo.Hash, + ClusterType: c.ClusterType.String(), + }, + Provider: nodepools[0].GetDynamicNodePool().GetProvider(), + Regions: utils.GetRegions(utils.GetDynamicNodePools(nodepools)), + Metadata: c.Metadata, + } + + if err := generateNetworkingCommon(clusterID, clusterDir, providerData); err != nil { + return fmt.Errorf("failed to generate networking_common template files: %w", err) + } + nps := make([]NodePoolInfo, 0, len(nodepools)) for _, np := range nodepools { if np.GetDynamicNodePool() == nil { @@ -263,40 +284,19 @@ func (c *ClusterBuilder) generateFiles(clusterID, clusterDir string) error { // based on the cluster type fill out the nodepools data to be used nodepoolData := NodepoolsData{ - NodePools: nps, - ClusterName: clusterInfo.Name, - ClusterHash: clusterInfo.Hash, - ClusterType: c.ClusterType.String(), - Metadata: c.Metadata, - Regions: utils.GetRegions(utils.GetDynamicNodePools(nodepools)), + ClusterData: ClusterData{ + ClusterName: clusterInfo.Name, + ClusterHash: clusterInfo.Hash, + ClusterType: c.ClusterType.String(), + }, + NodePools: nps, + Metadata: c.Metadata, } copyCIDRsToMetadata(&nodepoolData) - targetDirectory := templateUtils.Templates{Directory: clusterDir} - - file, err := templates.CloudProviderTemplates.ReadFile(filepath.Join(providerName, "networking.tpl")) - if err != nil { - return fmt.Errorf("error while reading networking template file %s : %w", providerName, err) - } - networking, err := templateUtils.LoadTemplate(string(file)) - if err != nil { - return fmt.Errorf("error while parsing networking template file %s : %w", providerName, err) - } - if err := targetDirectory.Generate(networking, fmt.Sprintf("%s-%s-networking.tf", clusterID, providerNames.SpecName), nodepoolData); err != nil { - return fmt.Errorf("error while generating %s file : %w", fmt.Sprintf("%s-%s.tf", clusterID, providerNames.SpecName), err) - } - - file, err = templates.CloudProviderTemplates.ReadFile(filepath.Join(providerName, "node.tpl")) - if err != nil { - return fmt.Errorf("error while reading nodepool template file %s: %w", providerName, err) - } - nodepool, err := templateUtils.LoadTemplate(string(file)) - if err != nil { - return fmt.Errorf("error while parsing nodepool template file %s: %w", providerName, err) - } - if err := targetDirectory.Generate(nodepool, fmt.Sprintf("%s-%s-nodepool.tf", clusterID, providerNames.SpecName), nodepoolData); err != nil { - return fmt.Errorf("error while generating %s file: %w", fmt.Sprintf("%s-%s.tf", clusterID, providerNames.SpecName), err) + if err := generateNodes(clusterID, clusterDir, &nodepoolData, providerData); err != nil { + return fmt.Errorf("failed to generate nodepool specific template files: %w", err) } if err :=
utils.CreateKeyFile(clusterInfo.PublicKey, clusterDir, "public.pem"); err != nil { @@ -412,9 +412,9 @@ func readIPs(data string) (outputNodepools, error) { // getUniqueNodeName returns new node name, which is guaranteed to be unique, based on the provided existing names. func getUniqueNodeName(nodepoolID string, existingNames map[string]struct{}) string { - index := 1 + index := uint8(1) for { - candidate := fmt.Sprintf("%s-%d", nodepoolID, index) + candidate := fmt.Sprintf("%s-%02x", nodepoolID, index) if _, ok := existingNames[candidate]; !ok { return candidate } @@ -493,51 +493,115 @@ func generateProviderTemplates(current, desired *pb.ClusterInfo, clusterID, dire if providerName.CloudProviderName == pb.StaticNodepoolInfo_STATIC_PROVIDER.String() { continue } - nps := make([]NodePoolInfo, 0, len(nodepools)) - for _, np := range nodepools { - if np.GetDynamicNodePool() == nil { - continue - } - nps = append(nps, NodePoolInfo{ - Name: np.Name, - Nodes: np.Nodes, - NodePool: np.GetDynamicNodePool(), - IsControl: np.IsControl, - }) - } - providerSpecName := providerName.SpecName + providerData := ProviderData{ + ClusterData: ClusterData{ + ClusterName: info.Name, + ClusterHash: info.Hash, + ClusterType: "", // not needed. + }, + Provider: nodepools[0].GetDynamicNodePool().GetProvider(), + Regions: utils.GetRegions(utils.GetDynamicNodePools(nodepools)), + Metadata: nil, // not needed. + } - nodepoolData := NodepoolsData{ - NodePools: nps, - ClusterName: info.Name, - ClusterHash: info.Hash, - ClusterType: "", // not needed - Metadata: nil, // not needed - Regions: utils.GetRegions(utils.GetDynamicNodePools(nodepools)), + if err := generateProvider(clusterID, directory, &providerData); err != nil { + return fmt.Errorf("failed to generate provider templates: %w", err) } + } + + return nil +} + +func generateProvider(clusterID, directory string, data *ProviderData) error { + targetDirectory := templateUtils.Templates{Directory: directory} + + tplPath := filepath.Join(data.Provider.CloudProviderName, "provider.tpl") + + file, err := templates.CloudProviderTemplates.ReadFile(tplPath) + if err != nil { + return fmt.Errorf("error while reading template file %s : %w", tplPath, err) + } + + tpl, err := templateUtils.LoadTemplate(string(file)) + if err != nil { + return fmt.Errorf("error while parsing template file %s : %w", tplPath, err) + } + + providerSpecName := data.Provider.SpecName + + // Parse the templates and create Tf files + if err := targetDirectory.Generate(tpl, fmt.Sprintf("%s-%s-provider.tf", clusterID, providerSpecName), data); err != nil { + return fmt.Errorf("error while generating %s file : %w", fmt.Sprintf("%s-%s-provider.tf", clusterID, providerSpecName), err) + } + + // Save keys + if err = utils.CreateKeyFile(data.Provider.Credentials, directory, providerSpecName); err != nil { + return fmt.Errorf("error creating provider credential key file for provider %s in %s : %w", providerSpecName, directory, err) + } + + return nil +} + +func generateNetworkingCommon(clusterID, directory string, data *ProviderData) error { + var ( + targetDirectory = templateUtils.Templates{Directory: directory} + tplPath = filepath.Join(data.Provider.CloudProviderName, "networking.tpl") + provider = data.Provider.CloudProviderName + specName = data.Provider.SpecName + ) + + file, err := templates.CloudProviderTemplates.ReadFile(tplPath) + if err != nil { + return fmt.Errorf("error while reading networking template file %s: %w", provider, err) + } + + networking, err := 
templateUtils.LoadTemplate(string(file)) + if err != nil { + return fmt.Errorf("error while parsing networking_common template file %s : %w", provider, err) + } - // Load TF files of the specific cloud provider - targetDirectory := templateUtils.Templates{Directory: directory} - tplPath := filepath.Join(providerName.CloudProviderName, "provider.tpl") - file, err := templates.CloudProviderTemplates.ReadFile(tplPath) + err = targetDirectory.Generate(networking, fmt.Sprintf("%s-%s-networking.tf", clusterID, specName), data) + if err != nil { + return fmt.Errorf("error while generating %s file : %w", fmt.Sprintf("%s-%s-networking.tf", clusterID, specName), err) + } + + return nil +} + +func generateNodes(clusterID, directory string, data *NodepoolsData, providerData *ProviderData) error { + var ( + targetDirectory = templateUtils.Templates{Directory: directory} + networkingPath = filepath.Join(providerData.Provider.CloudProviderName, "node_networking.tpl") + nodesPath = filepath.Join(providerData.Provider.CloudProviderName, "node.tpl") + provider = providerData.Provider.CloudProviderName + specName = providerData.Provider.SpecName + ) + + file, err := templates.CloudProviderTemplates.ReadFile(networkingPath) + if err == nil { // the template file might not exist + networking, err := templateUtils.LoadTemplate(string(file)) if err != nil { - return fmt.Errorf("error while reading template file %s : %w", tplPath, err) + return fmt.Errorf("error while parsing node networking template file %s : %w", provider, err) } - tpl, err := templateUtils.LoadTemplate(string(file)) - if err != nil { - return fmt.Errorf("error while parsing template file %s : %w", tplPath, err) + if err := targetDirectory.Generate(networking, fmt.Sprintf("%s-%s-node-networking.tf", clusterID, specName), data); err != nil { + return fmt.Errorf("error while generating %s file : %w", fmt.Sprintf("%s-%s-node-networking.tf", clusterID, specName), err) + } } - // Parse the templates and create Tf files - if err := targetDirectory.Generate(tpl, fmt.Sprintf("%s-%s-provider.tf", clusterID, providerSpecName), nodepoolData); err != nil { - return fmt.Errorf("error while generating %s file : %w", fmt.Sprintf("%s-%s-provider.tf", clusterID, providerSpecName), err) - } + file, err = templates.CloudProviderTemplates.ReadFile(nodesPath) + if err != nil { + return fmt.Errorf("error while reading nodepool template file %s: %w", provider, err) + } - // Save keys - if err = utils.CreateKeyFile(nps[0].NodePool.Provider.Credentials, directory, providerSpecName); err != nil { - return fmt.Errorf("error creating provider credential key file for provider %s in %s : %w", providerSpecName, directory, err) - } + nodepool, err := templateUtils.LoadTemplate(string(file)) + if err != nil { + return fmt.Errorf("error while parsing nodepool template file %s: %w", provider, err) + } + + err = targetDirectory.Generate(nodepool, fmt.Sprintf("%s-%s-nodepool.tf", clusterID, specName), data) + if err != nil { + return fmt.Errorf("error while generating %s file: %w", fmt.Sprintf("%s-%s-nodepool.tf", clusterID, specName), err) } return nil diff --git a/services/terraformer/server/domain/utils/cluster-builder/cluster_builder_test.go b/services/terraformer/server/domain/utils/cluster-builder/cluster_builder_test.go index ad5ac6c40..b18ade1d7 100644 --- a/services/terraformer/server/domain/utils/cluster-builder/cluster_builder_test.go +++ b/services/terraformer/server/domain/utils/cluster-builder/cluster_builder_test.go @@ -46,7 +46,7 @@ func TestGenerateTf(t *testing.T) {
require.NoError(t, err) tpl, err := templateUtils.LoadTemplate(string(file)) require.NoError(t, err) - err = template.Generate(tpl, "az-acc-net.tf", &NodepoolsData{ClusterName: "test", ClusterHash: "abcdef", NodePools: []NodePoolInfo{{NodePool: testNp.GetDynamicNodePool()}}}) + err = template.Generate(tpl, "az-acc-net.tf", &NodepoolsData{ClusterData: ClusterData{ClusterName: "test", ClusterHash: "abcdef"}, NodePools: []NodePoolInfo{{NodePool: testNp.GetDynamicNodePool()}}}) require.NoError(t, err) } diff --git a/services/terraformer/templates/aws/networking.tpl b/services/terraformer/templates/aws/networking.tpl index e5ee7c061..eb194a08a 100644 --- a/services/terraformer/templates/aws/networking.tpl +++ b/services/terraformer/templates/aws/networking.tpl @@ -1,140 +1,131 @@ -{{- $clusterName := .ClusterName }} -{{- $clusterHash := .ClusterHash }} +{{- $clusterName := .ClusterData.ClusterName }} +{{- $clusterHash := .ClusterData.ClusterHash }} -{{- range $i, $region := .Regions }} -resource "aws_vpc" "claudie_vpc_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +{{- range $_, $region := .Regions }} +{{- $specName := $.Provider.SpecName }} + +resource "aws_key_pair" "claudie_pair_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} + key_name = "key-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" + public_key = file("./public.pem") + tags = { + Name = "key-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" + Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" + } +} + +resource "aws_vpc" "claudie_vpc_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} cidr_block = "10.0.0.0/16" tags = { - Name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $region }}-vpc" + Name = "vpc-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } } -resource "aws_internet_gateway" "claudie_gateway_{{ $region }}" { - provider = aws.nodepool_{{ $region }} - vpc_id = aws_vpc.claudie_vpc_{{ $region }}.id +resource "aws_internet_gateway" "claudie_gateway_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} + vpc_id = aws_vpc.claudie_vpc_{{ $region }}_{{ $specName }}.id tags = { - Name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $region }}-gateway" + Name = "gtw-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } } -resource "aws_route_table" "claudie_route_table_{{ $region }}" { - provider = aws.nodepool_{{ $region }} - vpc_id = aws_vpc.claudie_vpc_{{ $region }}.id +resource "aws_route_table" "claudie_route_table_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} + vpc_id = aws_vpc.claudie_vpc_{{ $region }}_{{ $specName }}.id route { cidr_block = "0.0.0.0/0" - gateway_id = aws_internet_gateway.claudie_gateway_{{ $region }}.id + gateway_id = aws_internet_gateway.claudie_gateway_{{ $region }}_{{ $specName }}.id } tags = { - Name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $region }}-rt" + Name = "rt-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } } -resource "aws_security_group" "claudie_sg_{{ $region }}" { - provider = aws.nodepool_{{ $region }} - vpc_id = aws_vpc.claudie_vpc_{{ $region }}.id +resource "aws_security_group" "claudie_sg_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} + vpc_id = aws_vpc.claudie_vpc_{{ $region }}_{{ $specName }}.id 
revoke_rules_on_delete = true tags = { - Name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $region }}-sg" + Name = "sg-{{ $clusterHash }}-{{ $region }}-{{ $specName }}" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } } -resource "aws_security_group_rule" "allow_egress_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_egress_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "egress" from_port = 0 to_port = 65535 protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id } -resource "aws_security_group_rule" "allow_ssh_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_ssh_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "ingress" from_port = 22 to_port = 22 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id } -{{- if eq $.ClusterType "K8s" }} +{{- if eq $.ClusterData.ClusterType "K8s" }} {{- if index $.Metadata "loadBalancers" | targetPorts | isMissing 6443 }} -resource "aws_security_group_rule" "allow_kube_api_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_kube_api_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "ingress" from_port = 6443 to_port = 6443 protocol = "tcp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id } {{- end }} {{- end }} -{{- if eq $.ClusterType "LB" }} +{{- if eq $.ClusterData.ClusterType "LB" }} {{- range $role := index $.Metadata "roles" }} -resource "aws_security_group_rule" "allow_{{ $role.Port }}_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_{{ $role.Port }}_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "ingress" from_port = {{ $role.Port }} to_port = {{ $role.Port }} protocol = "{{ $role.Protocol }}" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id } {{- end }} {{- end }} -resource "aws_security_group_rule" "allow_wireguard_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_wireguard_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "ingress" from_port = 51820 to_port = 51820 protocol = "udp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id } -resource "aws_security_group_rule" "allow_icmp_{{ $region }}" { - provider = aws.nodepool_{{ $region }} +resource "aws_security_group_rule" "allow_icmp_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} type = "ingress" from_port = 8 to_port = 0 protocol = "icmp" cidr_blocks = ["0.0.0.0/0"] - security_group_id = aws_security_group.claudie_sg_{{ $region }}.id + security_group_id = aws_security_group.claudie_sg_{{ $region 
}}_{{ $specName }}.id } {{- end }} - -{{- range $i, $nodepool := .NodePools }} -resource "aws_subnet" "{{ $nodepool.Name }}_subnet" { - provider = aws.nodepool_{{ $nodepool.NodePool.Region }} - vpc_id = aws_vpc.claudie_vpc_{{ $nodepool.NodePool.Region }}.id - cidr_block = "{{ index $.Metadata (printf "%s-subnet-cidr" $nodepool.Name) }}" - map_public_ip_on_launch = true - availability_zone = "{{ $nodepool.NodePool.Zone }}" - - tags = { - Name = "{{ $nodepool.Name }}-{{ $clusterHash }}-subnet" - Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" - } -} - -resource "aws_route_table_association" "{{ $nodepool.Name }}_rta" { - provider = aws.nodepool_{{ $nodepool.NodePool.Region }} - subnet_id = aws_subnet.{{ $nodepool.Name }}_subnet.id - route_table_id = aws_route_table.claudie_route_table_{{ $nodepool.NodePool.Region }}.id -} -{{- end }} \ No newline at end of file diff --git a/services/terraformer/templates/aws/node.tpl b/services/terraformer/templates/aws/node.tpl index ae5a005fd..18c8be9c7 100644 --- a/services/terraformer/templates/aws/node.tpl +++ b/services/terraformer/templates/aws/node.tpl @@ -1,37 +1,29 @@ -{{- $clusterName := .ClusterName }} -{{- $clusterHash := .ClusterHash }} +{{- $clusterName := .ClusterData.ClusterName }} +{{- $clusterHash := .ClusterData.ClusterHash }} -{{- range $i, $region := .Regions }} -resource "aws_key_pair" "claudie_pair_{{ $region }}" { - provider = aws.nodepool_{{ $region }} - key_name = "{{ $clusterName }}-{{ $clusterHash }}-key" - public_key = file("./public.pem") - tags = { - Name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $region }}-key" - Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" - } -} -{{- end }} +{{- range $_, $nodepool := .NodePools }} + +{{- $region := $nodepool.NodePool.Region }} +{{- $specName := $nodepool.NodePool.Provider.SpecName }} -{{- range $i, $nodepool := .NodePools }} {{- range $node := $nodepool.Nodes }} -resource "aws_instance" "{{ $node.Name }}" { - provider = aws.nodepool_{{ $nodepool.NodePool.Region }} +resource "aws_instance" "{{ $node.Name }}_{{ $region }}_{{ $specName }}" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} availability_zone = "{{ $nodepool.NodePool.Zone }}" instance_type = "{{ $nodepool.NodePool.ServerType }}" ami = "{{ $nodepool.NodePool.Image }}" associate_public_ip_address = true - key_name = aws_key_pair.claudie_pair_{{ $nodepool.NodePool.Region }}.key_name - subnet_id = aws_subnet.{{ $nodepool.Name }}_subnet.id - vpc_security_group_ids = [aws_security_group.claudie_sg_{{ $nodepool.NodePool.Region }}.id] + key_name = aws_key_pair.claudie_pair_{{ $region }}_{{ $specName }}.key_name + subnet_id = aws_subnet.{{ $nodepool.Name }}_{{ $region }}_{{ $specName }}_subnet.id + vpc_security_group_ids = [aws_security_group.claudie_sg_{{ $region }}_{{ $specName }}.id] tags = { Name = "{{ $node.Name }}" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } -{{- if eq $.ClusterType "LB" }} +{{- if eq $.ClusterData.ClusterType "LB" }} root_block_device { volume_size = 50 delete_on_termination = true @@ -49,7 +41,7 @@ EOF {{- end }} -{{- if eq $.ClusterType "K8s" }} +{{- if eq $.ClusterData.ClusterType "K8s" }} root_block_device { volume_size = 100 delete_on_termination = true @@ -68,7 +60,7 @@ mkdir -p /opt/claudie/data {{- if and (not $nodepool.IsControl) (gt $nodepool.NodePool.StorageDiskSize 0) }} # Mount EBS volume only when not mounted yet sleep 50 -disk=$(ls -l /dev/disk/by-id | grep "${replace("${aws_ebs_volume.{{ $node.Name }}_volume.id}", "-", "")}" | awk '{print $NF}') 
+disk=$(ls -l /dev/disk/by-id | grep "${replace("${aws_ebs_volume.{{ $node.Name }}_{{ $region }}_{{ $specName }}_volume.id}", "-", "")}" | awk '{print $NF}') disk=$(basename "$disk") if ! grep -qs "/dev/$disk" /proc/mounts; then if ! blkid /dev/$disk | grep -q "TYPE=\"xfs\""; then @@ -82,25 +74,25 @@ EOF {{- end }} } -{{- if eq $.ClusterType "K8s" }} +{{- if eq $.ClusterData.ClusterType "K8s" }} {{- if and (not $nodepool.IsControl) (gt $nodepool.NodePool.StorageDiskSize 0) }} -resource "aws_ebs_volume" "{{ $node.Name }}_volume" { - provider = aws.nodepool_{{ $nodepool.NodePool.Region }} +resource "aws_ebs_volume" "{{ $node.Name }}_{{ $region }}_{{ $specName }}_volume" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} availability_zone = "{{ $nodepool.NodePool.Zone }}" size = {{ $nodepool.NodePool.StorageDiskSize }} type = "gp2" tags = { - Name = "{{ $node.Name }}-storage" + Name = "{{ $node.Name }}d" Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } } -resource "aws_volume_attachment" "{{ $node.Name }}_volume_att" { - provider = aws.nodepool_{{ $nodepool.NodePool.Region }} +resource "aws_volume_attachment" "{{ $node.Name }}_{{ $region }}_{{ $specName }}_volume_att" { + provider = aws.nodepool_{{ $region }}_{{ $specName }} device_name = "/dev/sdh" - volume_id = aws_ebs_volume.{{ $node.Name }}_volume.id - instance_id = aws_instance.{{ $node.Name }}.id + volume_id = aws_ebs_volume.{{ $node.Name }}_{{ $region }}_{{ $specName }}_volume.id + instance_id = aws_instance.{{ $node.Name }}_{{ $region }}_{{ $specName }}.id } {{- end }} {{- end }} @@ -109,9 +101,8 @@ resource "aws_volume_attachment" "{{ $node.Name }}_volume_att" { output "{{ $nodepool.Name }}" { value = { - {{- range $j, $node := $nodepool.Nodes }} - {{- $name := (printf "%s-%s-%s-%d" $clusterName $clusterHash $nodepool.Name $j ) }} - "${aws_instance.{{ $node.Name }}.tags_all.Name}" = aws_instance.{{ $node.Name }}.public_ip + {{- range $_, $node := $nodepool.Nodes }} + "${aws_instance.{{ $node.Name }}_{{ $region }}_{{ $specName }}.tags_all.Name}" = aws_instance.{{ $node.Name }}_{{ $region }}_{{ $specName }}.public_ip {{- end }} } } diff --git a/services/terraformer/templates/aws/node_networking.tpl b/services/terraformer/templates/aws/node_networking.tpl new file mode 100644 index 000000000..06fb9d6cc --- /dev/null +++ b/services/terraformer/templates/aws/node_networking.tpl @@ -0,0 +1,25 @@ +{{- $clusterName := .ClusterData.ClusterName }} +{{- $clusterHash := .ClusterData.ClusterHash }} + +{{- range $i, $nodepool := .NodePools }} +{{- $region := $nodepool.NodePool.Region }} +{{- $specName := $nodepool.NodePool.Provider.SpecName }} +resource "aws_subnet" "{{ $nodepool.Name }}_{{ $region }}_{{ $specName }}_subnet" { + provider = aws.nodepool_{{ $region }}_{{ $specName}} + vpc_id = aws_vpc.claudie_vpc_{{ $region }}_{{ $specName}}.id + cidr_block = "{{ index $.Metadata (printf "%s-subnet-cidr" $nodepool.Name) }}" + map_public_ip_on_launch = true + availability_zone = "{{ $nodepool.NodePool.Zone }}" + + tags = { + Name = "snt-{{ $clusterHash }}-{{ $region }}-{{ $nodepool.Name }}" + Claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" + } +} + +resource "aws_route_table_association" "{{ $nodepool.Name }}_{{ $region }}_{{ $specName }}_rta" { + provider = aws.nodepool_{{ $region }}_{{ $specName}} + subnet_id = aws_subnet.{{ $nodepool.Name }}_{{ $region }}_{{ $specName }}_subnet.id + route_table_id = aws_route_table.claudie_route_table_{{ $region }}_{{ $specName }}.id +} +{{- end }} diff --git 
a/services/terraformer/templates/aws/provider.tpl b/services/terraformer/templates/aws/provider.tpl index eac3b859e..f0c53b082 100644 --- a/services/terraformer/templates/aws/provider.tpl +++ b/services/terraformer/templates/aws/provider.tpl @@ -1,13 +1,13 @@ -{{- range $i, $region := .Regions }} +{{- range $_, $region := .Regions }} provider "aws" { - access_key = "{{ (index $.NodePools 0).NodePool.Provider.AwsAccessKey }}" - secret_key = file("{{ (index $.NodePools 0).NodePool.Provider.SpecName }}") + access_key = "{{ $.Provider.AwsAccessKey }}" + secret_key = file("{{ $.Provider.SpecName }}") region = "{{ $region }}" - alias = "nodepool_{{ $region }}" + alias = "nodepool_{{ $region }}_{{ $.Provider.SpecName }}" default_tags { tags = { Managed-by = "Claudie" } } } -{{- end}} \ No newline at end of file +{{- end}} diff --git a/services/terraformer/templates/azure/networking.tpl b/services/terraformer/templates/azure/networking.tpl index f12957952..b778e8494 100644 --- a/services/terraformer/templates/azure/networking.tpl +++ b/services/terraformer/templates/azure/networking.tpl @@ -1,11 +1,13 @@ -{{- $clusterName := .ClusterName}} -{{- $clusterHash := .ClusterHash}} +{{- $clusterName := .ClusterData.ClusterName }} +{{- $clusterHash := .ClusterData.ClusterHash }} -{{- range $i, $region := .Regions }} +{{- range $_, $region := .Regions }} {{- $sanitisedRegion := replaceAll $region " " "_"}} -resource "azurerm_resource_group" "rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}" { - provider = azurerm.nodepool - name = "{{ $clusterName }}-{{ $clusterHash }}-{{ $sanitisedRegion }}" +{{- $specName := $.Provider.SpecName }} + +resource "azurerm_resource_group" "rg_{{ $sanitisedRegion }}_{{ $specName }}" { + provider = azurerm.nodepool_{{ $sanitisedRegion }}_{{ $specName }} + name = "rg-{{ $clusterHash }}-{{ $sanitisedRegion }}-{{ $specName }}" location = "{{ $region }}" tags = { @@ -14,12 +16,12 @@ resource "azurerm_resource_group" "rg_{{ $sanitisedRegion }}_{{ $clusterName }}_ } } -resource "azurerm_virtual_network" "claudie_vn_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}" { - provider = azurerm.nodepool - name = "{{ $clusterName }}-{{ $clusterHash }}-vn" +resource "azurerm_virtual_network" "claudie_vn_{{ $sanitisedRegion }}_{{ $specName }}" { + provider = azurerm.nodepool_{{ $sanitisedRegion }}_{{ $specName }} + name = "vn-{{ $clusterHash }}-{{ $sanitisedRegion }}-{{ $specName }}" address_space = ["10.0.0.0/16"] location = "{{ $region }}" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name + resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $specName }}.name tags = { managed-by = "Claudie" @@ -27,11 +29,11 @@ resource "azurerm_virtual_network" "claudie_vn_{{ $sanitisedRegion }}_{{ $cluste } } -resource "azurerm_network_security_group" "claudie_nsg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}" { - provider = azurerm.nodepool - name = "{{ $clusterName }}-{{ $clusterHash }}-nsg" +resource "azurerm_network_security_group" "claudie_nsg_{{ $sanitisedRegion }}_{{ $specName }}" { + provider = azurerm.nodepool_{{ $sanitisedRegion }}_{{ $specName }} + name = "nsg-{{ $clusterHash }}-{{ $sanitisedRegion }}-{{ $specName }}" location = "{{ $region }}" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name + resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $specName }}.name security_rule { 
name = "SSH" @@ -69,7 +71,7 @@ resource "azurerm_network_security_group" "claudie_nsg_{{ $sanitisedRegion }}_{{ destination_address_prefix = "*" } -{{- if eq $.ClusterType "LB" }} +{{- if eq $.ClusterData.ClusterType "LB" }} {{- range $i,$role := index $.Metadata "roles" }} security_rule { name = "Allow-{{ $role.Name }}" @@ -85,7 +87,7 @@ resource "azurerm_network_security_group" "claudie_nsg_{{ $sanitisedRegion }}_{{ {{- end }} {{- end }} -{{- if eq $.ClusterType "K8s" }} +{{- if eq $.ClusterData.ClusterType "K8s" }} {{- if index $.Metadata "loadBalancers" | targetPorts | isMissing 6443 }} security_rule { name = "KubeApi" @@ -107,57 +109,3 @@ resource "azurerm_network_security_group" "claudie_nsg_{{ $sanitisedRegion }}_{{ } } {{- end }} - -{{- range $i, $nodepool := .NodePools }} -{{- $sanitisedRegion := replaceAll $nodepool.NodePool.Region " " "_"}} -resource "azurerm_subnet" "{{ $nodepool.Name }}_{{ $clusterHash }}_subnet" { - provider = azurerm.nodepool - name = "{{ $nodepool.Name }}_{{ $clusterHash }}_subnet" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name - virtual_network_name = azurerm_virtual_network.claudie_vn_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name - address_prefixes = ["{{ index $.Metadata (printf "%s-subnet-cidr" $nodepool.Name) }}"] -} - -resource "azurerm_subnet_network_security_group_association" "{{ $nodepool.Name }}_associate_nsg" { - provider = azurerm.nodepool - subnet_id = azurerm_subnet.{{ $nodepool.Name }}_{{ $clusterHash }}_subnet.id - network_security_group_id = azurerm_network_security_group.claudie_nsg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.id -} - -{{- range $node := $nodepool.Nodes }} -resource "azurerm_public_ip" "{{ $node.Name }}_public_ip" { - provider = azurerm.nodepool - name = "{{ $node.Name }}-ip" - location = "{{ $nodepool.NodePool.Region }}" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name - allocation_method = "Static" - sku = "Standard" - - tags = { - managed-by = "Claudie" - claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" - } -} - -resource "azurerm_network_interface" "{{ $node.Name }}_ni" { - provider = azurerm.nodepool - name = "{{ $node.Name }}-ni" - location = "{{ $nodepool.NodePool.Region }}" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name - enable_accelerated_networking = {{ enableAccNet $nodepool.NodePool.ServerType }} - - ip_configuration { - name = "{{ $node.Name }}-ip-conf" - subnet_id = azurerm_subnet.{{ $nodepool.Name }}_{{ $clusterHash }}_subnet.id - private_ip_address_allocation = "Dynamic" - public_ip_address_id = azurerm_public_ip.{{ $node.Name }}_public_ip.id - primary = true - } - - tags = { - managed-by = "Claudie" - claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" - } -} -{{- end }} -{{- end }} diff --git a/services/terraformer/templates/azure/node.tpl b/services/terraformer/templates/azure/node.tpl index ff808b030..dae48190d 100644 --- a/services/terraformer/templates/azure/node.tpl +++ b/services/terraformer/templates/azure/node.tpl @@ -1,15 +1,17 @@ -{{- $clusterName := .ClusterName}} -{{- $clusterHash := .ClusterHash}} +{{- $clusterName := .ClusterData.ClusterName }} +{{- $clusterHash := .ClusterData.ClusterHash }} {{- range $i, $nodepool := .NodePools }} {{- $sanitisedRegion := replaceAll $nodepool.NodePool.Region " " "_"}} +{{- $specName := 
$nodepool.NodePool.Provider.SpecName }} + {{- range $node := $nodepool.Nodes }} -resource "azurerm_linux_virtual_machine" "{{ $node.Name }}" { - provider = azurerm.nodepool +resource "azurerm_linux_virtual_machine" "{{ $node.Name }}_{{ $sanitisedRegion }}_{{ $specName }}" { + provider = azurerm.nodepool_{{ $sanitisedRegion }}_{{ $specName }} name = "{{ $node.Name }}" location = "{{ $nodepool.NodePool.Region }}" - resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $clusterName }}_{{ $clusterHash }}.name + resource_group_name = azurerm_resource_group.rg_{{ $sanitisedRegion }}_{{ $specName }}.name network_interface_ids = [azurerm_network_interface.{{ $node.Name }}_ni.id] size = "{{$nodepool.NodePool.ServerType}}" zone = "{{$nodepool.NodePool.Zone}}" @@ -35,7 +37,7 @@ resource "azurerm_linux_virtual_machine" "{{ $node.Name }}" { claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } -{{- if eq $.ClusterType "LB" }} +{{- if eq $.ClusterData.ClusterType "LB" }} os_disk { name = "{{ $node.Name }}-osdisk" caching = "ReadWrite" @@ -44,7 +46,7 @@ resource "azurerm_linux_virtual_machine" "{{ $node.Name }}" { } {{- end }} -{{- if eq $.ClusterType "K8s" }} +{{- if eq $.ClusterData.ClusterType "K8s" }} os_disk { name = "{{ $node.Name }}-osdisk" caching = "ReadWrite" @@ -54,10 +56,10 @@ resource "azurerm_linux_virtual_machine" "{{ $node.Name }}" { {{- end }} } -resource "azurerm_virtual_machine_extension" "{{ $node.Name }}_{{ $clusterHash }}_postcreation_script" { - provider = azurerm.nodepool - name = "{{ $clusterName }}-{{ $clusterHash }}-postcreation-script" - virtual_machine_id = azurerm_linux_virtual_machine.{{ $node.Name }}.id +resource "azurerm_virtual_machine_extension" "{{ $node.Name }}_{{ $sanitisedRegion }}_{{ $specName }}_postcreation_script" { + provider = azurerm.nodepool_{{ $sanitisedRegion }}_{{ $specName }} + name = "vm-ext-{{ $node.Name }}" + virtual_machine_id = azurerm_linux_virtual_machine.{{ $node.Name }}_{{ $sanitisedRegion }}_{{ $specName }}.id publisher = "Microsoft.Azure.Extensions" type = "CustomScript" type_handler_version = "2.0" @@ -67,7 +69,7 @@ resource "azurerm_virtual_machine_extension" "{{ $node.Name }}_{{ $clusterHash } claudie-cluster = "{{ $clusterName }}-{{ $clusterHash }}" } -{{- if eq $.ClusterType "LB" }} +{{- if eq $.ClusterData.ClusterType "LB" }} protected_settings = <