diff --git a/.circleci/config.yml b/.circleci/config.yml
index 2ba8b2ce4..8ed2d0c63 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -2,7 +2,7 @@ version: 2.1
 executors:
   default:
     docker:
-      - image: cimg/go:1.17
+      - image: cimg/go:1.17.8

 aliases:
 - &restore_cache
@@ -71,7 +71,7 @@ jobs:
   test-postgres:
     docker:
-      - image: cimg/go:1.17
+      - image: cimg/go:1.17.8
         environment: CLOUD_DATABASE=postgres://cloud_test@localhost:5432/cloud_test?sslmode=disable
       - image: circleci/postgres:11.2-alpine
diff --git a/Makefile b/Makefile
index e69cf3103..5e009f85b 100644
--- a/Makefile
+++ b/Makefile
@@ -6,12 +6,12 @@
 ################################################################################

 ## Docker Build Versions
-DOCKER_BUILD_IMAGE = golang:1.17
-DOCKER_BASE_IMAGE = alpine:3.16
+DOCKER_BUILD_IMAGE = golang:1.17.4
+DOCKER_BASE_IMAGE = alpine:3.14

 ## Tool Versions
 TERRAFORM_VERSION=0.15.5
-KOPS_VERSION=v1.22.6
+KOPS_VERSION=v1.21.4
 HELM_VERSION=v3.5.3
 KUBECTL_VERSION=v1.21.2
diff --git a/README.md b/README.md
index e5c02cbf4..5c8c07cc8 100644
--- a/README.md
+++ b/README.md
@@ -29,7 +29,7 @@ The following is required to properly run the cloud server.
 1. Install [Go](https://golang.org/doc/install)
 2. Install [Terraform](https://learn.hashicorp.com/terraform/getting-started/install.html) version v0.15.5
    1. Try using [tfswitch](https://warrensbox.github.io/terraform-switcher/) for switching easily between versions
-3. Install [kops](https://github.com/kubernetes/kops/blob/master/docs/install.md) version 1.22.X
+3. Install [kops](https://github.com/kubernetes/kops/blob/master/docs/install.md) version 1.21.X
 4. Install [Helm](https://helm.sh/docs/intro/install/) version 3.5.X
 5. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/)
 6. Install [golang/mock](https://github.com/golang/mock#installation) version 1.4.x
diff --git a/build/Dockerfile b/build/Dockerfile
index f20054ff4..6e68af3be 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -3,7 +3,7 @@
 # Build the mattermost cloud

 ARG DOCKER_BUILD_IMAGE=golang:1.17
-ARG DOCKER_BASE_IMAGE=alpine:3.16
+ARG DOCKER_BASE_IMAGE=alpine:3.14

 FROM ${DOCKER_BUILD_IMAGE} AS build
 WORKDIR /mattermost-cloud/
diff --git a/cmd/cloud/table_printer.go b/cmd/cloud/table_printer.go
index 24e4f65cc..1f4a4470d 100644
--- a/cmd/cloud/table_printer.go
+++ b/cmd/cloud/table_printer.go
@@ -126,10 +126,10 @@ var jsonRegexp = regexp.MustCompile(`^\{\.?([^{}]+)\}$|^\.?([^{}]+)$`)

 // relaxedJSONPathExpression attempts to be flexible with JSONPath expressions,
 // it accepts following formats:
-//   - {.ID}
-//   - {ID}
-//   - .ID
-//   - ID
+// * {.ID}
+// * {ID}
+// * .ID
+// * ID
 func relaxedJSONPathExpression(pathExpression string) (string, error) {
 	if len(pathExpression) == 0 {
 		return pathExpression, nil
diff --git a/internal/api/cluster.go b/internal/api/cluster.go
index 2b568aacd..3de3dc726 100644
--- a/internal/api/cluster.go
+++ b/internal/api/cluster.go
@@ -90,15 +90,14 @@ func handleGetClusters(c *Context, w http.ResponseWriter, r *http.Request) {
 // handleCreateCluster responds to POST /api/clusters, beginning the process of creating a new
 // cluster.
 // sample body:
-//
-//	{
-//	  "provider": "aws",
-//	  "version": "1.15.0",
-//	  "kops-ami": "ami-xoxoxo",
-//	  "size": "SizeAlef1000",
-//	  "zones": "",
-//	  "allow-installations": true
-//	}
+// {
+//   "provider": "aws",
+//   "version": "1.15.0",
+//   "kops-ami": "ami-xoxoxo",
+//   "size": "SizeAlef1000",
+//   "zones": "",
+//   "allow-installations": true
+// }
 func handleCreateCluster(c *Context, w http.ResponseWriter, r *http.Request) {
 	createClusterRequest, err := model.NewCreateClusterRequestFromReader(r.Body)
 	if err != nil {
diff --git a/internal/provisioner/kops_provisioner.go b/internal/provisioner/kops_provisioner.go
index a5943da4a..dc6063f4a 100644
--- a/internal/provisioner/kops_provisioner.go
+++ b/internal/provisioner/kops_provisioner.go
@@ -132,8 +132,8 @@ type kopsCluster struct {

 // unmarshalKopsListClustersResponse unmarshals response from `kops get clusters -o json`.
 // Kops output from this command is not consistent, and it behaves in the following ways:
-//   - If there are multiple clusters an array of clusters is returned.
-//   - If there is only one cluster a single cluster object is returned (not as an array).
+// * If there are multiple clusters an array of clusters is returned.
+// * If there is only one cluster a single cluster object is returned (not as an array).
 func unmarshalKopsListClustersResponse(output string) ([]kopsCluster, error) {
 	trimmedOut := strings.TrimSpace(output)
 	if strings.HasPrefix(trimmedOut, "[") {
diff --git a/internal/provisioner/kops_provisioner_cluster.go b/internal/provisioner/kops_provisioner_cluster.go
index 4b996e4d0..b7c660139 100644
--- a/internal/provisioner/kops_provisioner_cluster.go
+++ b/internal/provisioner/kops_provisioner_cluster.go
@@ -191,6 +191,18 @@ func (provisioner *KopsProvisioner) CreateCluster(cluster *model.Cluster, awsCli

 	logger.WithField("name", kopsMetadata.Name).Info("Successfully deployed kubernetes")

+	logger.WithField("name", kopsMetadata.Name).Info("Updating VolumeBindingMode in default storage class")
+	k8sClient, err := k8s.NewFromFile(kops.GetKubeConfigPath(), logger)
+	if err != nil {
+		return err
+	}
+
+	_, err = k8sClient.UpdateStorageClassVolumeBindingMode("gp2")
+	if err != nil {
+		return err
+	}
+	logger.WithField("name", kopsMetadata.Name).Info("Successfully updated storage class")
+
 	iamRole := fmt.Sprintf("nodes.%s", kopsMetadata.Name)
 	err = awsClient.AttachPolicyToRole(iamRole, aws.CustomNodePolicyName, logger)
 	if err != nil {
@@ -505,7 +517,7 @@ func (provisioner *KopsProvisioner) ProvisionCluster(cluster *model.Cluster, aws
 	if err != nil {
 		return err
 	}
-	// Pods for k8s-spot-termination-handler do not mean to be schedule in every cluster so doesn't need to fail provision in this case
+	// Pods for k8s-spot-termination-handler are not meant to be scheduled in every cluster, so provisioning does not need to fail in this case
 	if len(pods.Items) == 0 && daemonSet != "k8s-spot-termination-handler" {
 		return fmt.Errorf("no pods found from %s/%s daemonSet", namespace, daemonSet)
 	}
diff --git a/internal/tools/aws/database_multitenant.go b/internal/tools/aws/database_multitenant.go
index c75f43c68..fb67710b0 100644
--- a/internal/tools/aws/database_multitenant.go
+++ b/internal/tools/aws/database_multitenant.go
@@ -604,9 +604,9 @@ func (d *RDSMultitenantDatabase) getAndLockAssignedMultitenantDatabase(store mod

 // This helper method finds a multitenant RDS cluster that is ready for receiving a database installation. The lookup
 // for multitenant databases will happen in order:
-//  1. fetch a multitenant database by installation ID.
-//  2. fetch all multitenant databases in the store which are under the max number of installations limit.
-//  3. fetch all multitenant databases in the RDS cluster that are under the max number of installations limit.
+// 1. fetch a multitenant database by installation ID.
+// 2. fetch all multitenant databases in the store which are under the max number of installations limit.
+// 3. fetch all multitenant databases in the RDS cluster that are under the max number of installations limit.
 func (d *RDSMultitenantDatabase) assignInstallationToMultitenantDatabaseAndLock(vpcID string, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*model.MultitenantDatabase, func(), error) {
 	multitenantDatabases, err := store.GetMultitenantDatabases(&model.MultitenantDatabaseFilter{
 		DatabaseType: d.databaseType,
diff --git a/internal/tools/aws/database_multitenant_pgbouncer.go b/internal/tools/aws/database_multitenant_pgbouncer.go
index a692684b2..5fe4e45d8 100644
--- a/internal/tools/aws/database_multitenant_pgbouncer.go
+++ b/internal/tools/aws/database_multitenant_pgbouncer.go
@@ -163,9 +163,9 @@ func (d *RDSMultitenantPGBouncerDatabase) Provision(store model.InstallationData

 // This helper method finds a multitenant RDS cluster that is ready for receiving a database installation. The lookup
 // for multitenant databases will happen in order:
-//  1. fetch a multitenant database by installation ID.
-//  2. fetch all multitenant databases in the store which are under the max number of installations limit.
-//  3. fetch all multitenant databases in the RDS cluster that are under the max number of installations limit.
+// 1. fetch a multitenant database by installation ID.
+// 2. fetch all multitenant databases in the store which are under the max number of installations limit.
+// 3. fetch all multitenant databases in the RDS cluster that are under the max number of installations limit.
 func (d *RDSMultitenantPGBouncerDatabase) assignInstallationToProxiedDatabaseAndLock(vpcID string, store model.InstallationDatabaseStoreInterface, logger log.FieldLogger) (*model.DatabaseResourceGrouping, func(), error) {
 	multitenantDatabases, err := store.GetMultitenantDatabases(&model.MultitenantDatabaseFilter{
 		DatabaseType: d.databaseType,
diff --git a/internal/tools/kops/cluster.go b/internal/tools/kops/cluster.go
index 0fd94e0f2..f1452ecbb 100644
--- a/internal/tools/kops/cluster.go
+++ b/internal/tools/kops/cluster.go
@@ -79,11 +79,11 @@ func (c *Cmd) CreateCluster(name, cloud string, kopsRequest *model.KopsMetadataR
 // Example setValue: spec.kubernetesVersion=1.10.0
 func (c *Cmd) SetCluster(name, setValue string) error {
 	_, _, err := c.run(
-		"edit",
+		"set",
 		"cluster",
 		arg("name", name),
 		arg("state", "s3://", c.s3StateStore),
-		arg("set", setValue),
+		setValue,
 	)
 	if err != nil {
 		return errors.Wrap(err, "failed to invoke kops set cluster")
diff --git a/internal/tools/kops/instance_groups.go b/internal/tools/kops/instance_groups.go
index 43a215af7..ac95b2144 100644
--- a/internal/tools/kops/instance_groups.go
+++ b/internal/tools/kops/instance_groups.go
@@ -220,12 +220,12 @@ func (c *Cmd) GetInstanceGroupYAML(clusterName, igName string) (string, error) {
 // SetInstanceGroup invokes kops set instancegroup, using the context of the created Cmd.
 func (c *Cmd) SetInstanceGroup(clusterName, instanceGroupName, setValue string) error {
 	_, _, err := c.run(
-		"edit",
+		"set",
 		"instancegroup",
 		arg("name", clusterName),
 		arg("state", "s3://", c.s3StateStore),
 		instanceGroupName,
-		arg("set", setValue),
+		setValue,
 	)
 	if err != nil {
 		return errors.Wrap(err, "failed to invoke kops set instancegroup")
diff --git a/model/env.go b/model/env.go
index 06e460253..48de552f2 100644
--- a/model/env.go
+++ b/model/env.go
@@ -49,8 +49,8 @@ func (em *EnvVarMap) Validate() error {

 // ClearOrPatch takes a new EnvVarMap and patches changes into the existing
 // EnvVarMap with the following logic:
-//   - If the new EnvVarMap is empty, clear the existing EnvVarMap completely.
-//   - If the new EnvVarMap is not empty, apply normal patch logic.
+// - If the new EnvVarMap is empty, clear the existing EnvVarMap completely.
+// - If the new EnvVarMap is not empty, apply normal patch logic.
 func (em *EnvVarMap) ClearOrPatch(new *EnvVarMap) bool {
 	if *em == nil {
 		if len(*new) == 0 {
@@ -73,10 +73,10 @@ func (em *EnvVarMap) ClearOrPatch(new *EnvVarMap) bool {

 // Patch takes a new EnvVarMap and patches changes into the existing EnvVarMap
 // with the following logic:
-//   - If the new EnvVar has the same key as an old EnvVar, update the value.
-//   - If the new EnvVar is a new key, add the EnvVar.
-//   - If the new EnvVar has no value(is blank), clear the old EnvVar if there
-//     was one.
+// - If the new EnvVar has the same key as an old EnvVar, update the value.
+// - If the new EnvVar is a new key, add the EnvVar.
+// - If the new EnvVar has no value (is blank), clear the old EnvVar if there
+// was one.
 func (em EnvVarMap) Patch(new EnvVarMap) bool {
 	if new == nil {
 		return false
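Note on the doc-comment hunk in `internal/provisioner/kops_provisioner.go`: the behavior it describes is real; `kops get clusters -o json` prints a JSON array when several clusters exist but a single bare object when there is only one. Only the function's opening lines appear as context in that hunk. The following sketch shows how the rest of such a function could look; it is illustrative only, and the `kopsCluster` field and error texts here are assumptions, not this repository's code:

```go
package provisioner

import (
	"encoding/json"
	"strings"

	"github.com/pkg/errors"
)

// kopsCluster is a stand-in for the struct defined in this file; the real
// field set is not shown in the diff.
type kopsCluster struct {
	Name string `json:"Name"`
}

// unmarshalKopsListClustersResponse tolerates both output shapes of
// `kops get clusters -o json`.
func unmarshalKopsListClustersResponse(output string) ([]kopsCluster, error) {
	trimmedOut := strings.TrimSpace(output)

	// Multiple clusters: kops prints a JSON array.
	if strings.HasPrefix(trimmedOut, "[") {
		var clusters []kopsCluster
		if err := json.Unmarshal([]byte(trimmedOut), &clusters); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal kops cluster list")
		}
		return clusters, nil
	}

	// Single cluster: kops prints one bare object, so wrap it in a slice.
	var cluster kopsCluster
	if err := json.Unmarshal([]byte(trimmedOut), &cluster); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal kops cluster")
	}
	return []kopsCluster{cluster}, nil
}
```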
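Note on the new `UpdateStorageClassVolumeBindingMode("gp2")` call in `CreateCluster`: the helper lives in this repo's internal `k8s` package and its body is not part of this diff. Because `volumeBindingMode` is immutable on an existing StorageClass, switching `gp2` to `WaitForFirstConsumer` (the usual goal for EBS-backed classes, so volumes are provisioned in the scheduled pod's availability zone) requires deleting and recreating the object. Below is a minimal client-go sketch of that approach; the function name, target mode, and overall structure are assumptions, not the repository's actual implementation:

```go
package k8s

import (
	"context"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// updateStorageClassVolumeBindingMode recreates the named StorageClass with
// VolumeBindingMode set to WaitForFirstConsumer. Delete-and-recreate is
// required because the field is immutable on an existing object.
func updateStorageClassVolumeBindingMode(ctx context.Context, clientset kubernetes.Interface, name string) (*storagev1.StorageClass, error) {
	sc, err := clientset.StorageV1().StorageClasses().Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	if err := clientset.StorageV1().StorageClasses().Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
		return nil, err
	}

	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc.VolumeBindingMode = &mode
	// The stored ResourceVersion refers to the deleted object and must be
	// cleared before recreating, or the API server rejects the Create.
	sc.ResourceVersion = ""

	return clientset.StorageV1().StorageClasses().Create(ctx, sc, metav1.CreateOptions{})
}
```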
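Note on the `edit` to `set` changes in `internal/tools/kops`: these track the kops downgrade to v1.21.4 in the Makefile. The code being reverted drove `kops edit ... --set <key=value>`, which matches the newer kops releases targeted before this change; on the 1.21 line the non-interactive path is the dedicated `set` subcommand, which takes the `key=value` pair as a positional argument (hence `setValue` replacing `arg("set", setValue)`). After this change, a call like `SetCluster(name, "spec.kubernetesVersion=1.21.2")` shells out to roughly `kops set cluster --name <name> --state s3://<bucket> spec.kubernetesVersion=1.21.2`, and `SetInstanceGroup` does the same via `kops set instancegroup`; the exact flag spelling depends on the `arg` helper, which is not shown in the diff.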