Commit

Merge branch 'master' into MM-48846
mirshahriar committed Dec 13, 2022
2 parents (fd7ccc7 + d692053), commit 37df78f
Showing 9 changed files with 89 additions and 40 deletions.
cmd/cloud/cluster.go: 22 additions, 0 deletions
@@ -535,6 +535,10 @@ func executeClusterListCmd(flags clusterListFlags) error {
 		keys, vals = defaultClustersTableData(clusters)
 	}
 
+	if flags.showTags {
+		keys, vals = enhanceTableWithAnnotations(clusters, keys, vals)
+	}
+
 	printTable(keys, vals)
 	return nil
 }
@@ -571,6 +575,24 @@ func defaultClustersTableData(clusters []*model.ClusterDTO) ([]string, [][]string) {
 	return keys, values
 }
 
+func enhanceTableWithAnnotations(clusters []*model.ClusterDTO, keys []string, vals [][]string) ([]string, [][]string) {
+	var tags [][]string
+	for _, cluster := range clusters {
+		var list []string
+		for _, annotation := range cluster.Annotations {
+			list = append(list, annotation.Name)
+		}
+		tags = append(tags, list)
+	}
+	keys = append(keys, "TAG")
+	for i, v := range vals {
+		v = append(v, strings.Join(tags[i], ","))
+		vals[i] = v
+	}
+
+	return keys, vals
+}
+
 func newCmdClusterUtilities() *cobra.Command {
 	var flags clusterUtilitiesFlags
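As a side note, the new TAG column is easiest to see with a concrete run of the same append logic. Below is a minimal, self-contained sketch; the annotation/clusterDTO types and the cluster IDs are simplified stand-ins for model.Annotation and model.ClusterDTO, not code from this repository:

    package main

    import (
        "fmt"
        "strings"
    )

    type annotation struct{ Name string }

    type clusterDTO struct {
        ID          string
        Annotations []annotation
    }

    func main() {
        clusters := []clusterDTO{
            {ID: "cluster-1", Annotations: []annotation{{Name: "multi-tenant"}, {Name: "us-east-1"}}},
            {ID: "cluster-2"}, // no annotations, so its TAG cell stays empty
        }

        keys := []string{"ID"}
        vals := [][]string{{"cluster-1"}, {"cluster-2"}}

        // Mirror enhanceTableWithAnnotations: append a TAG column whose cell
        // holds the comma-joined annotation names of that row's cluster.
        keys = append(keys, "TAG")
        for i, cluster := range clusters {
            var names []string
            for _, a := range cluster.Annotations {
                names = append(names, a.Name)
            }
            vals[i] = append(vals[i], strings.Join(names, ","))
        }

        fmt.Println(keys) // [ID TAG]
        fmt.Println(vals) // [[cluster-1 multi-tenant,us-east-1] [cluster-2 ]]
    }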
cmd/cloud/cluster_flag.go: 2 additions, 0 deletions
@@ -301,11 +301,13 @@ type clusterListFlags struct {
 	clusterFlags
 	pagingFlags
 	tableOptions
+	showTags bool
 }
 
 func (flags *clusterListFlags) addFlags(command *cobra.Command) {
 	flags.pagingFlags.addFlags(command)
 	flags.tableOptions.addFlags(command)
+	command.Flags().BoolVar(&flags.showTags, "show-tags", false, "When printing, show all tags as the last column")
 }
 
 type clusterUtilitiesFlags struct {
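With the flag registered here, the TAG column becomes opt-in from the command line, e.g. cloud cluster list --show-tags. The invocation name cloud is an assumption based on this cmd/cloud package, not something this diff states.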
internal/api/databases.go: 0 additions, 11 deletions
@@ -10,18 +10,7 @@ import (
 
 // initDatabases registers database endpoints on the given router.
 func initDatabases(apiRouter *mux.Router, context *Context) {
-	addContext := func(handler contextHandlerFunc) *contextHandler {
-		return newContextHandler(context, handler)
-	}
-
-	// TODO: retire these endpoints
 	databasesRouter := apiRouter.PathPrefix("/databases").Subrouter()
-	databasesRouter.Handle("", addContext(handleGetMultitenantDatabases)).Methods("GET")
-
-	databaseRouter := apiRouter.PathPrefix("/database/{multitenant_database:[A-Za-z0-9]{26}}").Subrouter()
-	databaseRouter.Handle("", addContext(handleUpdateMultitenantDatabase)).Methods("PUT")
-
-	// Begin new endpoints
 	initMultitenantDatabases(databasesRouter, context)
 	initLogicalDatabases(databasesRouter, context)
 	initDatabaseSchemas(databasesRouter, context)
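With the legacy flat routes removed, every database endpoint now hangs off the /databases prefix registered above. Judging from the updated tests in the next file, listing moved from GET /api/databases to GET /api/databases/multitenant_databases, and updates moved from PUT /api/database/{id} to PUT /api/databases/multitenant_database/{id}.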
internal/api/db_multitenant_database_test.go: 7 additions, 7 deletions
@@ -48,31 +48,31 @@ func TestGetMultitenantDatabases(t *testing.T) {
 
 	t.Run("parameter handling", func(t *testing.T) {
 		t.Run("invalid page", func(t *testing.T) {
-			resp, err := http.Get(fmt.Sprintf("%s/api/databases?page=invalid&per_page=100", ts.URL))
+			resp, err := http.Get(fmt.Sprintf("%s/api/databases/multitenant_databases?page=invalid&per_page=100", ts.URL))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 		})
 
 		t.Run("invalid perPage", func(t *testing.T) {
-			resp, err := http.Get(fmt.Sprintf("%s/api/databases?page=0&per_page=invalid", ts.URL))
+			resp, err := http.Get(fmt.Sprintf("%s/api/databases/multitenant_databases?page=0&per_page=invalid", ts.URL))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusBadRequest, resp.StatusCode)
 		})
 
 		t.Run("no paging parameters", func(t *testing.T) {
-			resp, err := http.Get(fmt.Sprintf("%s/api/databases", ts.URL))
+			resp, err := http.Get(fmt.Sprintf("%s/api/databases/multitenant_databases", ts.URL))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusOK, resp.StatusCode)
 		})
 
 		t.Run("missing page", func(t *testing.T) {
-			resp, err := http.Get(fmt.Sprintf("%s/api/databases?per_page=100", ts.URL))
+			resp, err := http.Get(fmt.Sprintf("%s/api/databases/multitenant_databases?per_page=100", ts.URL))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusOK, resp.StatusCode)
 		})
 
 		t.Run("missing perPage", func(t *testing.T) {
-			resp, err := http.Get(fmt.Sprintf("%s/api/databases?page=1", ts.URL))
+			resp, err := http.Get(fmt.Sprintf("%s/api/databases/multitenant_databases?page=1", ts.URL))
 			require.NoError(t, err)
 			require.Equal(t, http.StatusOK, resp.StatusCode)
 		})
@@ -231,7 +231,7 @@ func TestUpdateMultitenantDatabase(t *testing.T) {
 	require.NoError(t, err)
 
 	t.Run("invalid payload", func(t *testing.T) {
-		httpRequest, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/api/database/%s", ts.URL, database1.ID), bytes.NewReader([]byte("invalid")))
+		httpRequest, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/api/databases/multitenant_database/%s", ts.URL, database1.ID), bytes.NewReader([]byte("invalid")))
 		require.NoError(t, err)
 
 		resp, err := http.DefaultClient.Do(httpRequest)
@@ -240,7 +240,7 @@ func TestUpdateMultitenantDatabase(t *testing.T) {
 	})
 
 	t.Run("empty payload", func(t *testing.T) {
-		httpRequest, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/api/database/%s", ts.URL, database1.ID), bytes.NewReader([]byte("")))
+		httpRequest, err := http.NewRequest(http.MethodPut, fmt.Sprintf("%s/api/databases/multitenant_database/%s", ts.URL, database1.ID), bytes.NewReader([]byte("")))
 		require.NoError(t, err)
 
 		resp, err := http.DefaultClient.Do(httpRequest)
internal/provisioner/cluster_installation_provisioner.go: 24 additions, 17 deletions
@@ -450,26 +450,11 @@ func prepareClusterUtilities(
 	// TODO: Yeah, so this is definitely a bit of a race condition. We would
 	// need to lock a bunch of stuff to do this completely properly, but that
 	// isn't really feasible right now.
-	ini, err := generatePGBouncerIni(vpc, store, pgbouncerConfig)
-	if err != nil {
-		return errors.Wrap(err, "failed to generate updated pgbouncer ini contents")
-	}
-
 	ctx, cancel := context.WithTimeout(context.Background(), time.Duration(10)*time.Second)
 	defer cancel()
-
-	configMap, err := k8sClient.Clientset.CoreV1().ConfigMaps("pgbouncer").Get(ctx, "pgbouncer-configmap", metav1.GetOptions{})
+	err = updatePGBouncerConfigMap(ctx, vpc, store, pgbouncerConfig, k8sClient, logger)
 	if err != nil {
-		return errors.Wrap(err, "failed to get configmap for pgbouncer-configmap")
-	}
-	if configMap.Data["pgbouncer.ini"] != ini {
-		logger.Debug("Updating pgbouncer.ini with new database configuration")
-
-		configMap.Data["pgbouncer.ini"] = ini
-		_, err = k8sClient.Clientset.CoreV1().ConfigMaps("pgbouncer").Update(ctx, configMap, metav1.UpdateOptions{})
-		if err != nil {
-			return errors.Wrap(err, "failed to update configmap pgbouncer-configmap")
-		}
-	}
+		return errors.Wrap(err, "failed to update configmap for pgbouncer-configmap")
 	}
 
 	userlistSecret, err := k8sClient.Clientset.CoreV1().Secrets("pgbouncer").Get(ctx, "pgbouncer-userlist-secret", metav1.GetOptions{})
@@ -495,6 +480,28 @@ func prepareClusterUtilities(
 	return nil
 }
 
+func updatePGBouncerConfigMap(ctx context.Context, vpc string, store model.ClusterUtilityDatabaseStoreInterface, pgbouncerConfig *PGBouncerConfig, k8sClient *k8s.KubeClient, logger log.FieldLogger) error {
+	ini, err := generatePGBouncerIni(vpc, store, pgbouncerConfig)
+	if err != nil {
+		return errors.Wrap(err, "failed to generate updated pgbouncer ini contents")
+	}
+
+	configMap, err := k8sClient.Clientset.CoreV1().ConfigMaps("pgbouncer").Get(ctx, "pgbouncer-configmap", metav1.GetOptions{})
+	if err != nil {
+		return errors.Wrap(err, "failed to get configmap for pgbouncer-configmap")
+	}
+	if configMap.Data["pgbouncer.ini"] != ini {
+		logger.Debug("Updating pgbouncer.ini with new database configuration")
+
+		configMap.Data["pgbouncer.ini"] = ini
+		_, err = k8sClient.Clientset.CoreV1().ConfigMaps("pgbouncer").Update(ctx, configMap, metav1.UpdateOptions{})
+		if err != nil {
+			return errors.Wrap(err, "failed to update configmap pgbouncer-configmap")
+		}
+	}
+	return nil
+}
+
 func prepareClusterInstallationEnv(clusterInstallation *model.ClusterInstallation, k8sClient *k8s.KubeClient) (string, error) {
 	_, err := k8sClient.CreateOrUpdateNamespace(clusterInstallation.Namespace)
 	if err != nil {
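Note the shape of this refactor: the ini-generation and configmap-comparison logic moves verbatim out of prepareClusterUtilities into the new updatePGBouncerConfigMap helper, leaving cluster-utility behavior unchanged while letting provisionCluster (next file) trigger the same sync during provisioning.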
internal/provisioner/cluster_provisioning.go: 18 additions, 0 deletions
@@ -25,6 +25,7 @@ func provisionCluster(
 	kubeconfigPath string,
 	awsClient aws.AWS,
 	params ProvisioningParams,
+	store model.ClusterUtilityDatabaseStoreInterface,
 	logger logrus.FieldLogger) error {
 
 	// Start by gathering resources that will be needed later. If any of this
@@ -342,6 +343,23 @@
 		}
 	}
 
+	// Sync the PGBouncer configmap if there is any change.
+	var vpc string
+	if cluster.ProvisionerMetadataKops != nil {
+		vpc = cluster.ProvisionerMetadataKops.VPC
+	} else if cluster.ProvisionerMetadataEKS != nil {
+		vpc = cluster.ProvisionerMetadataEKS.VPC
+	} else {
+		return errors.New("cluster metadata is nil; cannot determine VPC")
+	}
+	ctx, cancel = context.WithTimeout(context.Background(), time.Duration(10)*time.Second)
+	defer cancel()
+	err = updatePGBouncerConfigMap(ctx, vpc, store, params.PGBouncerConfig, k8sClient, logger)
+	if err != nil {
+		return errors.Wrap(err, "failed to update configmap for pgbouncer-configmap")
+	}
+	logger.Info("pgbouncer configmap updated successfully")
+
 	clusterName := cluster.ID
 	if cluster.ProvisionerMetadataKops != nil {
 		clusterName = cluster.ProvisionerMetadataKops.Name
internal/provisioner/eks_provisioner_cluster.go: 1 addition, 1 deletion
@@ -195,7 +195,7 @@ func (provisioner *EKSProvisioner) ProvisionCluster(cluster *model.Cluster, awsClient aws.AWS) error {
 		return errors.Wrap(err, "failed to prepare kubeconfig file")
 	}
 
-	return provisionCluster(cluster, kubeconfigFile, awsClient, provisioner.params, logger)
+	return provisionCluster(cluster, kubeconfigFile, awsClient, provisioner.params, provisioner.store, logger)
 }
 
 // UpgradeCluster upgrades EKS cluster - not implemented.
internal/provisioner/kops_provisioner_cluster.go: 14 additions, 3 deletions
@@ -154,7 +154,8 @@ func (provisioner *KopsProvisioner) CreateCluster(cluster *model.Cluster, awsClient aws.AWS) error {
 			return errors.Wrapf(err, "failed to set %s", setValue)
 		}
 	}
-	if cluster.ProvisionerMetadataKops != nil && cluster.ProvisionerMetadataKops.Networking == "calico" {
+
+	if kopsMetadata.ChangeRequest.Networking == "calico" {
 		logger.Info("Updating calico options")
 		setValue = "spec.networking.calico.prometheusMetricsEnabled=true"
 		err = kops.SetCluster(kopsMetadata.Name, setValue)
@@ -166,6 +167,16 @@ func (provisioner *KopsProvisioner) CreateCluster(cluster *model.Cluster, awsClient aws.AWS) error {
 		if err != nil {
 			return errors.Wrapf(err, "failed to set %s", setValue)
 		}
+		setValue = "spec.networking.calico.prometheusGoMetricsEnabled=true"
+		err = kops.SetCluster(kopsMetadata.Name, setValue)
+		if err != nil {
+			return errors.Wrapf(err, "failed to set %s", setValue)
+		}
+		setValue = "spec.networking.calico.prometheusProcessMetricsEnabled=true"
+		err = kops.SetCluster(kopsMetadata.Name, setValue)
+		if err != nil {
+			return errors.Wrapf(err, "failed to set %s", setValue)
+		}
 		setValue = "spec.networking.calico.typhaPrometheusMetricsEnabled=true"
 		err = kops.SetCluster(kopsMetadata.Name, setValue)
 		if err != nil {
@@ -197,7 +208,7 @@ func (provisioner *KopsProvisioner) CreateCluster(cluster *model.Cluster, awsClient aws.AWS) error {
 		logger.Infof("Adding environment variables to etcd cluster manager")
 		err = kops.SetCluster(kopsMetadata.Name, strings.Join(override, ","))
 		if err != nil {
-			return errors.Wrapf(err, "failed to set %s", setValue)
+			return errors.Wrap(err, "failed to set etcd environment variables")
 		}
 	}
 
@@ -275,7 +286,7 @@ func (provisioner *KopsProvisioner) ProvisionCluster(cluster *model.Cluster, awsClient aws.AWS) error {
 	}
 	defer provisioner.invalidateCachedKopsClientOnError(err, cluster.ProvisionerMetadataKops.Name, logger)
 
-	return provisionCluster(cluster, kopsClient.GetKubeConfigPath(), awsClient, provisioner.params, logger)
+	return provisionCluster(cluster, kopsClient.GetKubeConfigPath(), awsClient, provisioner.params, provisioner.store, logger)
 }
 
 // UpgradeCluster upgrades a cluster to the latest recommended production ready k8s version.
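An aside on the calico section above: it now repeats the same set-then-check pattern for each of the four settings visible in this diff. If that list keeps growing, a table-driven loop is the usual consolidation. The sketch below runs on its own, with setCluster standing in for kops.SetCluster; it is only an illustration, not what this commit does:

    package main

    import "fmt"

    // setCluster is a stand-in for kops.SetCluster from this repository.
    func setCluster(name, setValue string) error {
        fmt.Printf("kops set cluster %s %s\n", name, setValue)
        return nil
    }

    func main() {
        // The four calico settings visible in this diff.
        calicoSettings := []string{
            "spec.networking.calico.prometheusMetricsEnabled=true",
            "spec.networking.calico.prometheusGoMetricsEnabled=true",
            "spec.networking.calico.prometheusProcessMetricsEnabled=true",
            "spec.networking.calico.typhaPrometheusMetricsEnabled=true",
        }
        for _, setValue := range calicoSettings {
            if err := setCluster("example-cluster", setValue); err != nil {
                fmt.Println("failed to set", setValue+":", err)
                return
            }
        }
    }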
internal/tools/aws/constants.go: 1 addition, 1 deletion
@@ -205,7 +205,7 @@ const (
 
 	// DefaultMySQLContextTimeSeconds is the number of seconds that a SQL
 	// client will take before canceling a call to the database.
-	DefaultMySQLContextTimeSeconds = 15
+	DefaultMySQLContextTimeSeconds = 30
 
 	// DefaultRDSMultitenantDatabaseMySQLCountLimit is the maximum number of
 	// schemas allowed in a MySQL multitenant RDS database cluster.
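For context on how a constant like this is typically consumed: because it is an untyped Go constant, it can multiply time.Second directly. The helper below is an illustrative sketch that compiles on its own; only the constant's name and new value come from this diff:

    package example

    import (
        "context"
        "database/sql"
        "time"
    )

    // Mirrors the constant above: the number of seconds a SQL client waits
    // before canceling a call to the database.
    const DefaultMySQLContextTimeSeconds = 30

    // pingMySQL bounds a single database call with the default deadline.
    func pingMySQL(db *sql.DB) error {
        ctx, cancel := context.WithTimeout(context.Background(), DefaultMySQLContextTimeSeconds*time.Second)
        defer cancel()
        return db.PingContext(ctx)
    }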
