From 6032e228bd91ce8ed0534c2df16246f506250607 Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Tue, 9 Oct 2018 16:47:19 +0200 Subject: [PATCH 1/4] Refactor helm package & rbac creation --- cmd/init.go | 4 + cmd/reset.go | 4 +- cmd/up.go | 62 +-- pkg/devspace/clients/helm/client.go | 586 ------------------------- pkg/devspace/clients/helm/install.go | 147 +++++++ pkg/devspace/clients/helm/rbac.go | 170 +++++++ pkg/devspace/clients/helm/search.go | 151 +++++++ pkg/devspace/clients/helm/tiller.go | 218 +++++++++ pkg/devspace/clients/kubectl/client.go | 20 +- pkg/devspace/cloud/config.go | 10 +- pkg/devspace/cloud/login.go | 16 +- 11 files changed, 729 insertions(+), 659 deletions(-) create mode 100644 pkg/devspace/clients/helm/install.go create mode 100644 pkg/devspace/clients/helm/rbac.go create mode 100644 pkg/devspace/clients/helm/search.go create mode 100644 pkg/devspace/clients/helm/tiller.go diff --git a/cmd/init.go b/cmd/init.go index c9c98ac002..a67ec19dc8 100644 --- a/cmd/init.go +++ b/cmd/init.go @@ -391,7 +391,9 @@ func (cmd *InitCmd) useCloudProvider() bool { cmd.config.Cluster.CloudProvider = &cloudProviderSelected cmd.config.Cluster.UseKubeConfig = &addToContext + log.StartWait("Logging into cloud provider " + providerConfig[cloudProviderSelected].Host + cloud.LoginEndpoint + "...") err := cloud.Update(providerConfig, cmd.config, true) + log.StopWait() if err != nil { log.Fatalf("Couldn't authenticate to devspace cloud: %v", err) } @@ -415,7 +417,9 @@ func (cmd *InitCmd) useCloudProvider() bool { cmd.config.Cluster.CloudProvider = configutil.String(cloud.DevSpaceCloudProviderName) cmd.config.Cluster.UseKubeConfig = &addToContext + log.StartWait("Logging into cloud provider " + providerConfig[cloud.DevSpaceCloudProviderName].Host + cloud.LoginEndpoint + "...") err := cloud.Update(providerConfig, cmd.config, true) + log.StopWait() if err != nil { log.Fatalf("Couldn't authenticate to devspace cloud: %v", err) } diff --git a/cmd/reset.go b/cmd/reset.go index 30e843cd31..35afcf7a8b 100644 --- a/cmd/reset.go +++ b/cmd/reset.go @@ -284,9 +284,7 @@ func (cmd *ResetCmd) deleteRegistry() error { } func (cmd *ResetCmd) deleteTiller() error { - config := configutil.GetConfig(false) - - return helmClient.DeleteTiller(cmd.kubectl, config.Services.Tiller) + return helmClient.DeleteTiller(cmd.kubectl) } func (cmd *ResetCmd) deleteDockerfile() error { diff --git a/cmd/up.go b/cmd/up.go index 5bb5acdfb1..a247d7a78a 100644 --- a/cmd/up.go +++ b/cmd/up.go @@ -140,10 +140,7 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) { log.Fatalf("Unable to create new kubectl client: %v", err) } - err = cmd.ensureNamespace() - if err != nil { - log.Fatalf("Unable to create release namespace: %v", err) - } + cmd.ensureNamespace() err = cmd.ensureClusterRoleBinding() if err != nil { @@ -198,56 +195,26 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) { enterTerminal(cmd.kubectl, cmd.pod, cmd.flags.container, args) } -func (cmd *UpCmd) ensureNamespace() error { +func (cmd *UpCmd) ensureNamespace() { config := configutil.GetConfig(false) releaseNamespace := *config.DevSpace.Release.Namespace - // Check if registry namespace exists - _, err := cmd.kubectl.CoreV1().Namespaces().Get(releaseNamespace, metav1.GetOptions{}) - if err != nil { - // Create registry namespace - _, err = cmd.kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: releaseNamespace, - }, - }) - - if err != nil { - return err - } - } - - return nil + // Create release 
namespace and ignore errors + _, _ = cmd.kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: releaseNamespace, + }, + }) } func (cmd *UpCmd) ensureClusterRoleBinding() error { - /* - config := configutil.GetConfig(false) - - accessReview := &k8sauthorizationv1.SelfSubjectAccessReview{ - Spec: k8sauthorizationv1.SelfSubjectAccessReviewSpec{ - ResourceAttributes: &k8sauthorizationv1.ResourceAttributes{ - Namespace: *config.DevSpace.Release.Namespace, - Verb: "create", - Group: "rbac.authorization.k8s.io", - Resource: "roles", - }, - }, - } - - resp, permErr := cmd.kubectl.Authorization().SelfSubjectAccessReviews().Create(accessReview) - - if permErr != nil {*/ - if kubectl.IsMinikube() { return nil } _, err := cmd.kubectl.RbacV1beta1().ClusterRoleBindings().Get(clusterRoleBindingName, metav1.GetOptions{}) - if err != nil { clusterConfig, _ := kubectl.GetClientConfig() - if clusterConfig.AuthProvider != nil && clusterConfig.AuthProvider.Name == "gcp" { createRoleBinding := stdinutil.GetFromStdin(&stdinutil.GetFromStdinParams{ Question: "Do you want the ClusterRoleBinding '" + clusterRoleBindingName + "' to be created automatically? (yes|no)", @@ -295,14 +262,19 @@ func (cmd *UpCmd) ensureClusterRoleBinding() error { }, } - _, roleBindingErr := cmd.kubectl.RbacV1beta1().ClusterRoleBindings().Create(rolebinding) - if roleBindingErr != nil { - return roleBindingErr + _, err = cmd.kubectl.RbacV1beta1().ClusterRoleBindings().Create(rolebinding) + if err != nil { + return err } } else { - log.Warn("Unable to check permissions: If you run into errors, please create the ClusterRoleBinding '" + clusterRoleBindingName + "' as described here: https://devspace.covexo.com/docs/advanced/rbac.html") + cfg := configutil.GetConfig(false) + + if cfg.Cluster.CloudProvider == nil || *cfg.Cluster.CloudProvider == "" { + log.Warn("Unable to check permissions: If you run into errors, please create the ClusterRoleBinding '" + clusterRoleBindingName + "' as described here: https://devspace.covexo.com/docs/advanced/rbac.html") + } } } + return nil } diff --git a/pkg/devspace/clients/helm/client.go b/pkg/devspace/clients/helm/client.go index 0c81b724b0..11f626df92 100644 --- a/pkg/devspace/clients/helm/client.go +++ b/pkg/devspace/clients/helm/client.go @@ -1,25 +1,16 @@ package helm import ( - "errors" - "fmt" - "io/ioutil" "os" "path/filepath" - "regexp" - "sort" "strconv" "strings" "sync" "time" - yaml "gopkg.in/yaml.v2" - "github.com/covexo/devspace/pkg/util/fsutil" "github.com/covexo/devspace/pkg/util/log" - helminstaller "k8s.io/helm/cmd/helm/installer" - "k8s.io/helm/pkg/downloader" "k8s.io/helm/pkg/getter" "k8s.io/helm/pkg/kube" "k8s.io/helm/pkg/repo" @@ -30,17 +21,10 @@ import ( "github.com/covexo/devspace/pkg/devspace/config/configutil" "github.com/covexo/devspace/pkg/devspace/config/v1" homedir "github.com/mitchellh/go-homedir" - k8sv1 "k8s.io/api/core/v1" - k8sv1beta1 "k8s.io/api/rbac/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - helmchartutil "k8s.io/helm/pkg/chartutil" - helmdownloader "k8s.io/helm/pkg/downloader" k8shelm "k8s.io/helm/pkg/helm" helmenvironment "k8s.io/helm/pkg/helm/environment" "k8s.io/helm/pkg/helm/helmpath" "k8s.io/helm/pkg/helm/portforwarder" - "k8s.io/helm/pkg/proto/hapi/chart" - hapi_release5 "k8s.io/helm/pkg/proto/hapi/release" rls "k8s.io/helm/pkg/proto/hapi/services" helmstoragedriver "k8s.io/helm/pkg/storage/driver" ) @@ -53,36 +37,6 @@ type HelmClientWrapper struct { kubectl *kubernetes.Clientset } -// TillerDeploymentName 
is the string identifier for the tiller deployment -const TillerDeploymentName = "tiller-deploy" -const tillerServiceAccountName = "devspace-tiller" -const tillerRoleName = "devspace-tiller" -const tillerRoleManagerName = "tiller-config-manager" -const stableRepoCachePath = "repository/cache/stable-index.yaml" -const defaultRepositories = `apiVersion: v1 -repositories: -- caFile: "" - cache: ` + stableRepoCachePath + ` - certFile: "" - keyFile: "" - name: stable - url: https://kubernetes-charts.storage.googleapis.com -` - -var alreadyExistsRegexp = regexp.MustCompile(".* already exists$") - -var defaultPolicyRules = []k8sv1beta1.PolicyRule{ - { - APIGroups: []string{ - k8sv1beta1.APIGroupAll, - "extensions", - "apps", - }, - Resources: []string{k8sv1beta1.ResourceAll}, - Verbs: []string{k8sv1beta1.ResourceAll}, - }, -} - // NewClient creates a new helm client func NewClient(kubectlClient *kubernetes.Clientset, upgradeTiller bool) (*HelmClientWrapper, error) { config := configutil.GetConfig(false) @@ -192,277 +146,6 @@ func NewClient(kubectlClient *kubernetes.Clientset, upgradeTiller bool) (*HelmCl return wrapper, nil } -func ensureTiller(kubectlClient *kubernetes.Clientset, config *v1.Config, upgrade bool) error { - tillerConfig := config.Services.Tiller - tillerNamespace := *tillerConfig.Release.Namespace - tillerSA := &k8sv1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Name: tillerServiceAccountName, - Namespace: tillerNamespace, - }, - } - tillerOptions := &helminstaller.Options{ - Namespace: tillerNamespace, - MaxHistory: 10, - ImageSpec: "gcr.io/kubernetes-helm/tiller:v2.10.0", - ServiceAccount: tillerSA.ObjectMeta.Name, - } - - // Check if tiller namespace exists - _, err := kubectlClient.CoreV1().Namespaces().Get(tillerNamespace, metav1.GetOptions{}) - if err != nil { - // Create tiller namespace - _, err := kubectlClient.CoreV1().Namespaces().Create(&k8sv1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: tillerNamespace, - }, - }) - - if err != nil { - return err - } - } - - _, tillerCheckErr := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) - - // Tiller is not there - if tillerCheckErr != nil { - log.StartWait("Installing Tiller server") - defer log.StopWait() - - _, err := kubectlClient.CoreV1().ServiceAccounts(tillerSA.Namespace).Get(tillerSA.Name, metav1.GetOptions{}) - if err != nil { - _, err := kubectlClient.CoreV1().ServiceAccounts(tillerSA.Namespace).Create(tillerSA) - if err != nil { - return err - } - } - serviceAccountSubject := []k8sv1beta1.Subject{ - { - Kind: k8sv1beta1.ServiceAccountKind, - Name: tillerServiceAccountName, - Namespace: tillerNamespace, - }, - } - - err = ensureRoleBinding(kubectlClient, tillerConfig, tillerRoleManagerName, tillerNamespace, []k8sv1beta1.PolicyRule{ - { - APIGroups: []string{ - k8sv1beta1.APIGroupAll, - "extensions", - "apps", - }, - Resources: []string{ - "configmaps", - }, - Verbs: []string{k8sv1beta1.ResourceAll}, - }, - }, serviceAccountSubject) - if err != nil { - return err - } - - err = helminstaller.Install(kubectlClient, tillerOptions) - if err != nil { - return err - } - - appNamespaces := []*string{ - config.DevSpace.Release.Namespace, - } - - if config.Services.InternalRegistry != nil && config.Services.InternalRegistry.Release.Namespace != nil { - appNamespaces = append(appNamespaces, config.Services.InternalRegistry.Release.Namespace) - } - - tillerConfig.AppNamespaces = &appNamespaces - for _, appNamespace := range *tillerConfig.AppNamespaces { - if 
*appNamespace == tillerRoleManagerName { - continue - } - - err = ensureRoleBinding(kubectlClient, tillerConfig, tillerRoleName, *appNamespace, defaultPolicyRules, serviceAccountSubject) - if err != nil { - return err - } - } - - log.StopWait() - log.Done("Tiller started") - - //Upgrade of tiller is necessary - } else if upgrade { - log.StartWait("Upgrading tiller") - - tillerOptions.ImageSpec = "" - err := helminstaller.Upgrade(kubectlClient, tillerOptions) - - log.StopWait() - - if err != nil { - return err - } - } - - tillerWaitingTime := 2 * 60 * time.Second - tillerCheckInterval := 5 * time.Second - - log.StartWait("Waiting for tiller to start") - - for tillerWaitingTime > 0 { - tillerDeployment, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) - - if err != nil { - continue - } - - if tillerDeployment.Status.ReadyReplicas == tillerDeployment.Status.Replicas { - break - } - - time.Sleep(tillerCheckInterval) - tillerWaitingTime = tillerWaitingTime - tillerCheckInterval - } - - log.StopWait() - - return nil -} - -func addAppNamespaces(appNamespaces *[]*string, namespaces []*string) { - newAppNamespaces := *appNamespaces - - for _, ns := range namespaces { - isExisting := false - - for _, existingNS := range newAppNamespaces { - if ns == existingNS { - isExisting = true - break - } - } - - if !isExisting { - newAppNamespaces = append(newAppNamespaces, ns) - } - } - - appNamespaces = &newAppNamespaces -} - -// IsTillerDeployed determines if we could connect to a tiller server -func IsTillerDeployed(kubectlClient *kubernetes.Clientset, tillerConfig *v1.TillerConfig) bool { - tillerNamespace := *tillerConfig.Release.Namespace - deployment, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) - - if err != nil { - return false - } - - if deployment == nil { - return false - } - - return true -} - -// DeleteTiller clears the tiller server, the service account and role binding -func DeleteTiller(kubectlClient *kubernetes.Clientset, tillerConfig *v1.TillerConfig) error { - tillerNamespace := *tillerConfig.Release.Namespace - errs := make([]error, 0, 1) - propagationPolicy := metav1.DeletePropagationForeground - - err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Delete(TillerDeploymentName, &metav1.DeleteOptions{ - PropagationPolicy: &propagationPolicy, - }) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - err = kubectlClient.CoreV1().Services(tillerNamespace).Delete(TillerDeploymentName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - err = kubectlClient.CoreV1().ServiceAccounts(tillerNamespace).Delete(tillerServiceAccountName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - roleNamespace := append(*tillerConfig.AppNamespaces, &tillerNamespace) - for _, appNamespace := range roleNamespace { - err = kubectlClient.RbacV1beta1().Roles(*appNamespace).Delete(tillerRoleName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - err = kubectlClient.RbacV1beta1().RoleBindings(*appNamespace).Delete(tillerRoleName+"-binding", 
&metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - err = kubectlClient.RbacV1beta1().Roles(*appNamespace).Delete(tillerRoleManagerName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - - err = kubectlClient.RbacV1beta1().RoleBindings(*appNamespace).Delete(tillerRoleManagerName+"-binding", &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) - if err != nil && strings.HasSuffix(err.Error(), "not found") == false { - errs = append(errs, err) - } - } - - // Merge errors - errorText := "" - - for _, value := range errs { - errorText += value.Error() + "\n" - } - - if errorText == "" { - return nil - } - return errors.New(errorText) -} - -// func (helmClientWrapper *HelmClientWrapper) ensureAuth(namespace string) error { -// return ensureRoleBinding(helmClientWrapper.kubectl, tillerRoleName, namespace, helmClientWrapper.Settings.TillerNamespace, defaultPolicyRules) -// } - -func ensureRoleBinding(kubectlClient *kubernetes.Clientset, tillerConfig *v1.TillerConfig, name, namespace string, rules []k8sv1beta1.PolicyRule, subjects []k8sv1beta1.Subject) error { - role := &k8sv1beta1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Rules: rules, - } - rolebinding := &k8sv1beta1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: name + "-binding", - Namespace: namespace, - }, - Subjects: subjects, - RoleRef: k8sv1beta1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: role.Name, - }, - } - - _, roleErr := kubectlClient.RbacV1beta1().Roles(namespace).Create(role) - if roleErr != nil && alreadyExistsRegexp.Match([]byte(roleErr.Error())) == false { - return roleErr - } - - _, roleBindingErr := kubectlClient.RbacV1beta1().RoleBindings(namespace).Create(rolebinding) - if roleBindingErr != nil && alreadyExistsRegexp.Match([]byte(roleBindingErr.Error())) == false { - return roleBindingErr - } - - return nil -} - func (helmClientWrapper *HelmClientWrapper) updateRepos() error { allRepos, err := repo.LoadRepositoriesFile(helmClientWrapper.Settings.Home.RepositoryFile()) if err != nil { @@ -516,275 +199,6 @@ func (helmClientWrapper *HelmClientWrapper) ReleaseExists(releaseName string) (b return true, nil } -func checkDependencies(ch *chart.Chart, reqs *helmchartutil.Requirements) error { - missing := []string{} - - deps := ch.GetDependencies() - for _, r := range reqs.Dependencies { - found := false - for _, d := range deps { - if d.Metadata.Name == r.Name { - found = true - break - } - } - if !found { - missing = append(missing, r.Name) - } - } - - if len(missing) > 0 { - return fmt.Errorf("found in requirements.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", ")) - } - return nil -} - -// InstallChartByPath installs the given chartpath und the releasename in the releasenamespace -func (helmClientWrapper *HelmClientWrapper) InstallChartByPath(releaseName string, releaseNamespace string, chartPath string, values *map[interface{}]interface{}) (*hapi_release5.Release, error) { - chart, err := helmchartutil.Load(chartPath) - if err != nil { - return nil, err - } - - if req, err := helmchartutil.LoadRequirements(chart); err == nil { - // If checkDependencies returns an error, we have unfulfilled dependencies. 
- // As of Helm 2.4.0, this is treated as a stopping condition: - // https://github.com/kubernetes/helm/issues/2209 - if err := checkDependencies(chart, req); err != nil { - man := &helmdownloader.Manager{ - Out: ioutil.Discard, - ChartPath: chartPath, - HelmHome: helmClientWrapper.Settings.Home, - Getters: getter.All(*helmClientWrapper.Settings), - } - if err := man.Update(); err != nil { - return nil, err - } - - // Update all dependencies which are present in /charts. - chart, err = helmchartutil.Load(chartPath) - if err != nil { - return nil, err - } - } - } - - releaseExists, err := helmClientWrapper.ReleaseExists(releaseName) - if err != nil { - return nil, err - } - - deploymentTimeout := int64(10 * 60) - overwriteValues := []byte("") - - if values != nil { - unmarshalledValues, err := yaml.Marshal(values) - - if err != nil { - return nil, err - } - overwriteValues = unmarshalledValues - } - - var release *hapi_release5.Release - - if releaseExists { - upgradeResponse, err := helmClientWrapper.Client.UpdateRelease( - releaseName, - chartPath, - k8shelm.UpgradeTimeout(deploymentTimeout), - k8shelm.UpdateValueOverrides(overwriteValues), - k8shelm.ReuseValues(false), - k8shelm.UpgradeWait(true), - ) - - if err != nil { - return nil, err - } - - release = upgradeResponse.GetRelease() - } else { - installResponse, err := helmClientWrapper.Client.InstallReleaseFromChart( - chart, - releaseNamespace, - k8shelm.InstallTimeout(deploymentTimeout), - k8shelm.ValueOverrides(overwriteValues), - k8shelm.ReleaseName(releaseName), - k8shelm.InstallReuseName(false), - k8shelm.InstallWait(true), - ) - - if err != nil { - return nil, err - } - - release = installResponse.GetRelease() - } - return release, nil -} - -// InstallChartByName installs the given chart by name under the releasename in the releasenamespace -func (helmClientWrapper *HelmClientWrapper) InstallChartByName(releaseName string, releaseNamespace string, chartName string, chartVersion string, values *map[interface{}]interface{}) (*hapi_release5.Release, error) { - if len(chartVersion) == 0 { - chartVersion = ">0.0.0-0" - } - - getter := getter.All(*helmClientWrapper.Settings) - chartDownloader := downloader.ChartDownloader{ - HelmHome: helmClientWrapper.Settings.Home, - Out: os.Stdout, - Getters: getter, - Verify: downloader.VerifyNever, - } - os.MkdirAll(helmClientWrapper.Settings.Home.Archive(), os.ModePerm) - - chartPath, _, err := chartDownloader.DownloadTo(chartName, chartVersion, helmClientWrapper.Settings.Home.Archive()) - if err != nil { - return nil, err - } - - return helmClientWrapper.InstallChartByPath(releaseName, releaseNamespace, chartPath, values) -} - -// stringArraySorter -type stringArraySorter [][]string - -// Len returns the length of this scoreSorter. -func (s stringArraySorter) Len() int { return len(s) } - -// Swap performs an in-place swap. -func (s stringArraySorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// Less compares a to b, and returns true if a is less than b. 
-func (s stringArraySorter) Less(a, b int) bool { return s[a][0] < s[b][0] } - -// PrintAllAvailableCharts prints all available charts -func (helmClientWrapper *HelmClientWrapper) PrintAllAvailableCharts() { - var values stringArraySorter - var header = []string{ - "NAME", - "CHART VERSION", - "APP VERSION", - "DESCRIPTION", - } - - allRepos, err := repo.LoadRepositoriesFile(helmClientWrapper.Settings.Home.RepositoryFile()) - if err != nil { - log.Fatal(err) - } - - for _, re := range allRepos.Repositories { - n := re.Name - f := helmClientWrapper.Settings.Home.CacheIndex(n) - - ind, err := repo.LoadIndexFile(f) - if err != nil { - continue - } - - // Sort versions - ind.SortEntries() - - for _, versions := range ind.Entries { - if len(versions) == 0 { - continue - } - - description := versions[0].Description - if len(description) > 45 { - description = description[:45] + "..." - } - - values = append(values, []string{ - versions[0].GetName(), - versions[0].GetVersion(), - versions[0].GetAppVersion(), - description, - }) - } - } - - sort.Sort(values) - log.PrintTable(header, values) -} - -// SearchChart searches the chart name in all repositories -func (helmClientWrapper *HelmClientWrapper) SearchChart(chartName, chartVersion, appVersion string) (*repo.Entry, *repo.ChartVersion, error) { - allRepos, err := repo.LoadRepositoriesFile(helmClientWrapper.Settings.Home.RepositoryFile()) - if err != nil { - return nil, nil, err - } - - for _, re := range allRepos.Repositories { - n := re.Name - f := helmClientWrapper.Settings.Home.CacheIndex(n) - - ind, err := repo.LoadIndexFile(f) - if err != nil { - continue - } - - // Sort versions - ind.SortEntries() - - // Check if chart exists - if versions, ok := ind.Entries[chartName]; ok { - if len(versions) == 0 { - // Skip chart names that have zero releases. 
- continue - } - - if chartVersion != "" { - for _, version := range versions { - if version.GetVersion() == chartVersion { - return re, version, nil - } - } - - return nil, nil, fmt.Errorf("Chart %s with chart version %s not found", chartName, chartVersion) - } - - if appVersion != "" { - for _, version := range versions { - if version.GetAppVersion() == appVersion { - return re, version, nil - } - } - - return nil, nil, fmt.Errorf("Chart %s with app version %s not found", chartName, appVersion) - } - - return re, versions[0], nil - } - } - - return nil, nil, fmt.Errorf("Chart %s not found", chartName) -} - -// BuildDependencies builds the dependencies -func (helmClientWrapper *HelmClientWrapper) BuildDependencies(chartPath string) error { - man := &helmdownloader.Manager{ - Out: ioutil.Discard, - ChartPath: chartPath, - HelmHome: helmClientWrapper.Settings.Home, - Getters: getter.All(*helmClientWrapper.Settings), - } - - return man.Build() -} - -// UpdateDependencies updates the dependencies -func (helmClientWrapper *HelmClientWrapper) UpdateDependencies(chartPath string) error { - man := &helmdownloader.Manager{ - Out: ioutil.Discard, - ChartPath: chartPath, - HelmHome: helmClientWrapper.Settings.Home, - Getters: getter.All(*helmClientWrapper.Settings), - } - - return man.Update() -} - // DeleteRelease deletes a helm release and optionally purges it func (helmClientWrapper *HelmClientWrapper) DeleteRelease(releaseName string, purge bool) (*rls.UninstallReleaseResponse, error) { return helmClientWrapper.Client.DeleteRelease(releaseName, k8shelm.DeletePurge(purge)) diff --git a/pkg/devspace/clients/helm/install.go b/pkg/devspace/clients/helm/install.go new file mode 100644 index 0000000000..b79fed62a6 --- /dev/null +++ b/pkg/devspace/clients/helm/install.go @@ -0,0 +1,147 @@ +package helm + +import ( + "fmt" + "io/ioutil" + "os" + "strings" + + yaml "gopkg.in/yaml.v2" + helmchartutil "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/downloader" + helmdownloader "k8s.io/helm/pkg/downloader" + "k8s.io/helm/pkg/getter" + k8shelm "k8s.io/helm/pkg/helm" + "k8s.io/helm/pkg/proto/hapi/chart" + hapi_release5 "k8s.io/helm/pkg/proto/hapi/release" +) + +func checkDependencies(ch *chart.Chart, reqs *helmchartutil.Requirements) error { + missing := []string{} + + deps := ch.GetDependencies() + for _, r := range reqs.Dependencies { + found := false + for _, d := range deps { + if d.Metadata.Name == r.Name { + found = true + break + } + } + if !found { + missing = append(missing, r.Name) + } + } + + if len(missing) > 0 { + return fmt.Errorf("found in requirements.yaml, but missing in charts/ directory: %s", strings.Join(missing, ", ")) + } + return nil +} + +// InstallChartByPath installs the given chartpath und the releasename in the releasenamespace +func (helmClientWrapper *HelmClientWrapper) InstallChartByPath(releaseName string, releaseNamespace string, chartPath string, values *map[interface{}]interface{}) (*hapi_release5.Release, error) { + chart, err := helmchartutil.Load(chartPath) + if err != nil { + return nil, err + } + + if req, err := helmchartutil.LoadRequirements(chart); err == nil { + // If checkDependencies returns an error, we have unfulfilled dependencies. 
+ // As of Helm 2.4.0, this is treated as a stopping condition: + // https://github.com/kubernetes/helm/issues/2209 + if err := checkDependencies(chart, req); err != nil { + man := &helmdownloader.Manager{ + Out: ioutil.Discard, + ChartPath: chartPath, + HelmHome: helmClientWrapper.Settings.Home, + Getters: getter.All(*helmClientWrapper.Settings), + } + if err := man.Update(); err != nil { + return nil, err + } + + // Update all dependencies which are present in /charts. + chart, err = helmchartutil.Load(chartPath) + if err != nil { + return nil, err + } + } + } + + releaseExists, err := helmClientWrapper.ReleaseExists(releaseName) + if err != nil { + return nil, err + } + + deploymentTimeout := int64(10 * 60) + overwriteValues := []byte("") + + if values != nil { + unmarshalledValues, err := yaml.Marshal(values) + + if err != nil { + return nil, err + } + overwriteValues = unmarshalledValues + } + + var release *hapi_release5.Release + + if releaseExists { + upgradeResponse, err := helmClientWrapper.Client.UpdateRelease( + releaseName, + chartPath, + k8shelm.UpgradeTimeout(deploymentTimeout), + k8shelm.UpdateValueOverrides(overwriteValues), + k8shelm.ReuseValues(false), + k8shelm.UpgradeWait(true), + ) + + if err != nil { + return nil, err + } + + release = upgradeResponse.GetRelease() + } else { + installResponse, err := helmClientWrapper.Client.InstallReleaseFromChart( + chart, + releaseNamespace, + k8shelm.InstallTimeout(deploymentTimeout), + k8shelm.ValueOverrides(overwriteValues), + k8shelm.ReleaseName(releaseName), + k8shelm.InstallReuseName(false), + k8shelm.InstallWait(true), + ) + + if err != nil { + return nil, err + } + + release = installResponse.GetRelease() + } + return release, nil +} + +// InstallChartByName installs the given chart by name under the releasename in the releasenamespace +func (helmClientWrapper *HelmClientWrapper) InstallChartByName(releaseName string, releaseNamespace string, chartName string, chartVersion string, values *map[interface{}]interface{}) (*hapi_release5.Release, error) { + if len(chartVersion) == 0 { + chartVersion = ">0.0.0-0" + } + + getter := getter.All(*helmClientWrapper.Settings) + chartDownloader := downloader.ChartDownloader{ + HelmHome: helmClientWrapper.Settings.Home, + Out: os.Stdout, + Getters: getter, + Verify: downloader.VerifyNever, + } + os.MkdirAll(helmClientWrapper.Settings.Home.Archive(), os.ModePerm) + + chartPath, _, err := chartDownloader.DownloadTo(chartName, chartVersion, helmClientWrapper.Settings.Home.Archive()) + if err != nil { + return nil, err + } + + return helmClientWrapper.InstallChartByPath(releaseName, releaseNamespace, chartPath, values) +} diff --git a/pkg/devspace/clients/helm/rbac.go b/pkg/devspace/clients/helm/rbac.go new file mode 100644 index 0000000000..f17f26b611 --- /dev/null +++ b/pkg/devspace/clients/helm/rbac.go @@ -0,0 +1,170 @@ +package helm + +import ( + "regexp" + + "github.com/covexo/devspace/pkg/devspace/config/v1" + k8sv1 "k8s.io/api/core/v1" + k8sv1beta1 "k8s.io/api/rbac/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// TillerServiceAccountName is the name of the service account tiller will use +const TillerServiceAccountName = "devspace-tiller" + +// TillerRoleName is the name of the role that is assigned to tiller to allow it to deploy to a certain namespace +const TillerRoleName = "devspace-tiller" + +// TillerRoleManagerName is the name of the role with minimal rights to allow tiller to manage itself +const TillerRoleManagerName = 
"tiller-config-manager" + +var alreadyExistsRegexp = regexp.MustCompile(".* already exists$") + +func createTillerRBAC(kubectlClient *kubernetes.Clientset, dsConfig *v1.Config) error { + tillerConfig := dsConfig.Services.Tiller + tillerNamespace := *dsConfig.Services.Tiller.Release.Namespace + + // Create service account + err := createTillerServiceAccount(kubectlClient, tillerNamespace) + if err != nil { + return err + } + + // If tiller server should not deploy in it's own namespace it does not need full access to the namespace + if tillerNamespace != *dsConfig.DevSpace.Release.Namespace { + err = addMinimalAccessToTiller(kubectlClient, tillerNamespace) + if err != nil { + return err + } + } + + // Tiller does need full access to all namespaces is should deploy to and therefore we create the roles & rolebindings + appNamespaces := []*string{ + dsConfig.DevSpace.Release.Namespace, + } + + // Check if there is an internal registry + if dsConfig.Services.InternalRegistry != nil && dsConfig.Services.InternalRegistry.Release.Namespace != nil { + // Tiller needs access to the internal registry namespace + appNamespaces = append(appNamespaces, dsConfig.Services.InternalRegistry.Release.Namespace) + } + + // Persist the app namespaces to the config + tillerConfig.AppNamespaces = &appNamespaces + for _, appNamespace := range *tillerConfig.AppNamespaces { + err = addDeployAccessToTiller(kubectlClient, tillerNamespace, *appNamespace) + if err != nil { + return err + } + } + + return nil +} + +func createTillerServiceAccount(kubectlClient *kubernetes.Clientset, tillerNamespace string) error { + _, err := kubectlClient.CoreV1().ServiceAccounts(tillerNamespace).Create(&k8sv1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: TillerServiceAccountName, + Namespace: tillerNamespace, + }, + }) + + return err +} + +func addMinimalAccessToTiller(kubectlClient *kubernetes.Clientset, tillerNamespace string) error { + _, err := kubectlClient.RbacV1beta1().Roles(tillerNamespace).Create(&k8sv1beta1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: TillerRoleManagerName, + Namespace: tillerNamespace, + }, + Rules: []k8sv1beta1.PolicyRule{ + { + APIGroups: []string{ + k8sv1beta1.APIGroupAll, + "extensions", + "apps", + }, + Resources: []string{ + "configmaps", + }, + Verbs: []string{k8sv1beta1.ResourceAll}, + }, + }, + }) + if err != nil && alreadyExistsRegexp.Match([]byte(err.Error())) == false { + return err + } + + _, err = kubectlClient.RbacV1beta1().RoleBindings(tillerNamespace).Create(&k8sv1beta1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: TillerRoleManagerName + "-binding", + Namespace: tillerNamespace, + }, + Subjects: []k8sv1beta1.Subject{ + { + Kind: k8sv1beta1.ServiceAccountKind, + Name: TillerServiceAccountName, + Namespace: tillerNamespace, + }, + }, + RoleRef: k8sv1beta1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: TillerRoleManagerName, + }, + }) + if err != nil && alreadyExistsRegexp.Match([]byte(err.Error())) == false { + return err + } + + return nil +} + +func addDeployAccessToTiller(kubectlClient *kubernetes.Clientset, tillerNamespace, namespace string) error { + _, err := kubectlClient.RbacV1beta1().Roles(namespace).Create(&k8sv1beta1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Name: TillerRoleName, + Namespace: namespace, + }, + Rules: []k8sv1beta1.PolicyRule{ + { + APIGroups: []string{ + k8sv1beta1.APIGroupAll, + "extensions", + "apps", + }, + Resources: []string{k8sv1beta1.ResourceAll}, + Verbs: []string{k8sv1beta1.ResourceAll}, + }, + }, + }) + if err 
!= nil && alreadyExistsRegexp.Match([]byte(err.Error())) == false { + return err + } + + _, err = kubectlClient.RbacV1beta1().RoleBindings(namespace).Create(&k8sv1beta1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: TillerRoleName + "-binding", + Namespace: namespace, + }, + Subjects: []k8sv1beta1.Subject{ + { + Kind: k8sv1beta1.ServiceAccountKind, + Name: TillerServiceAccountName, + Namespace: tillerNamespace, + }, + }, + RoleRef: k8sv1beta1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: TillerRoleName, + }, + }) + if err != nil && alreadyExistsRegexp.Match([]byte(err.Error())) == false { + return err + } + + return nil +} diff --git a/pkg/devspace/clients/helm/search.go b/pkg/devspace/clients/helm/search.go new file mode 100644 index 0000000000..814988a544 --- /dev/null +++ b/pkg/devspace/clients/helm/search.go @@ -0,0 +1,151 @@ +package helm + +import ( + "fmt" + "io/ioutil" + "sort" + + "github.com/covexo/devspace/pkg/util/log" + helmdownloader "k8s.io/helm/pkg/downloader" + "k8s.io/helm/pkg/getter" + "k8s.io/helm/pkg/repo" +) + +// stringArraySorter +type stringArraySorter [][]string + +// Len returns the length of this scoreSorter. +func (s stringArraySorter) Len() int { return len(s) } + +// Swap performs an in-place swap. +func (s stringArraySorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// Less compares a to b, and returns true if a is less than b. +func (s stringArraySorter) Less(a, b int) bool { return s[a][0] < s[b][0] } + +// PrintAllAvailableCharts prints all available charts +func (helmClientWrapper *HelmClientWrapper) PrintAllAvailableCharts() { + var values stringArraySorter + var header = []string{ + "NAME", + "CHART VERSION", + "APP VERSION", + "DESCRIPTION", + } + + allRepos, err := repo.LoadRepositoriesFile(helmClientWrapper.Settings.Home.RepositoryFile()) + if err != nil { + log.Fatal(err) + } + + for _, re := range allRepos.Repositories { + n := re.Name + f := helmClientWrapper.Settings.Home.CacheIndex(n) + + ind, err := repo.LoadIndexFile(f) + if err != nil { + continue + } + + // Sort versions + ind.SortEntries() + + for _, versions := range ind.Entries { + if len(versions) == 0 { + continue + } + + description := versions[0].Description + if len(description) > 45 { + description = description[:45] + "..." + } + + values = append(values, []string{ + versions[0].GetName(), + versions[0].GetVersion(), + versions[0].GetAppVersion(), + description, + }) + } + } + + sort.Sort(values) + log.PrintTable(header, values) +} + +// SearchChart searches the chart name in all repositories +func (helmClientWrapper *HelmClientWrapper) SearchChart(chartName, chartVersion, appVersion string) (*repo.Entry, *repo.ChartVersion, error) { + allRepos, err := repo.LoadRepositoriesFile(helmClientWrapper.Settings.Home.RepositoryFile()) + if err != nil { + return nil, nil, err + } + + for _, re := range allRepos.Repositories { + n := re.Name + f := helmClientWrapper.Settings.Home.CacheIndex(n) + + ind, err := repo.LoadIndexFile(f) + if err != nil { + continue + } + + // Sort versions + ind.SortEntries() + + // Check if chart exists + if versions, ok := ind.Entries[chartName]; ok { + if len(versions) == 0 { + // Skip chart names that have zero releases. 
+ continue + } + + if chartVersion != "" { + for _, version := range versions { + if version.GetVersion() == chartVersion { + return re, version, nil + } + } + + return nil, nil, fmt.Errorf("Chart %s with chart version %s not found", chartName, chartVersion) + } + + if appVersion != "" { + for _, version := range versions { + if version.GetAppVersion() == appVersion { + return re, version, nil + } + } + + return nil, nil, fmt.Errorf("Chart %s with app version %s not found", chartName, appVersion) + } + + return re, versions[0], nil + } + } + + return nil, nil, fmt.Errorf("Chart %s not found", chartName) +} + +// BuildDependencies builds the dependencies +func (helmClientWrapper *HelmClientWrapper) BuildDependencies(chartPath string) error { + man := &helmdownloader.Manager{ + Out: ioutil.Discard, + ChartPath: chartPath, + HelmHome: helmClientWrapper.Settings.Home, + Getters: getter.All(*helmClientWrapper.Settings), + } + + return man.Build() +} + +// UpdateDependencies updates the dependencies +func (helmClientWrapper *HelmClientWrapper) UpdateDependencies(chartPath string) error { + man := &helmdownloader.Manager{ + Out: ioutil.Discard, + ChartPath: chartPath, + HelmHome: helmClientWrapper.Settings.Home, + Getters: getter.All(*helmClientWrapper.Settings), + } + + return man.Update() +} diff --git a/pkg/devspace/clients/helm/tiller.go b/pkg/devspace/clients/helm/tiller.go new file mode 100644 index 0000000000..e698f49a10 --- /dev/null +++ b/pkg/devspace/clients/helm/tiller.go @@ -0,0 +1,218 @@ +package helm + +import ( + "errors" + "strings" + "time" + + "github.com/covexo/devspace/pkg/devspace/config/configutil" + "github.com/covexo/devspace/pkg/devspace/config/v1" + "github.com/covexo/devspace/pkg/util/log" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + helminstaller "k8s.io/helm/cmd/helm/installer" +) + +// TillerDeploymentName is the string identifier for the tiller deployment +const TillerDeploymentName = "tiller-deploy" +const stableRepoCachePath = "repository/cache/stable-index.yaml" +const defaultRepositories = `apiVersion: v1 +repositories: +- caFile: "" + cache: ` + stableRepoCachePath + ` + certFile: "" + keyFile: "" + name: stable + url: https://kubernetes-charts.storage.googleapis.com +` + +func ensureTiller(kubectlClient *kubernetes.Clientset, config *v1.Config, upgrade bool) error { + tillerNamespace := *config.Services.Tiller.Release.Namespace + tillerOptions := &helminstaller.Options{ + Namespace: tillerNamespace, + MaxHistory: 10, + ImageSpec: "gcr.io/kubernetes-helm/tiller:v2.10.0", + ServiceAccount: TillerServiceAccountName, + } + + // Create tiller namespace & ignore any errors + kubectlClient.CoreV1().Namespaces().Create(&k8sv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: tillerNamespace, + }, + }) + + _, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) + if err != nil { + // Create tiller server + err = createTiller(kubectlClient, config, tillerOptions) + if err != nil { + return err + } + + log.Done("Tiller started") + } else if upgrade { + // Upgrade tiller if necessary + tillerOptions.ImageSpec = "" + err = upgradeTiller(kubectlClient, tillerOptions) + if err != nil { + return err + } + } + + return waitUntilTillerIsStarted(kubectlClient, tillerNamespace) +} + +func createTiller(kubectlClient *kubernetes.Clientset, dsConfig *v1.Config, tillerOptions *helminstaller.Options) error { + log.StartWait("Installing Tiller server") + 
defer log.StopWait() + + // If the service account is already there we do not create it or any roles/rolebindings + _, err := kubectlClient.CoreV1().ServiceAccounts(*dsConfig.Services.Tiller.Release.Namespace).Get(TillerServiceAccountName, metav1.GetOptions{}) + if err != nil { + err = createTillerRBAC(kubectlClient, dsConfig) + if err != nil { + return err + } + } + + // Create the deployment + return helminstaller.Install(kubectlClient, tillerOptions) +} + +func waitUntilTillerIsStarted(kubectlClient *kubernetes.Clientset, tillerNamespace string) error { + tillerWaitingTime := 2 * 60 * time.Second + tillerCheckInterval := 5 * time.Second + + log.StartWait("Waiting for tiller to start") + defer log.StopWait() + + for tillerWaitingTime > 0 { + tillerDeployment, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) + if err != nil { + continue + } + if tillerDeployment.Status.ReadyReplicas == tillerDeployment.Status.Replicas { + return nil + } + + time.Sleep(tillerCheckInterval) + tillerWaitingTime = tillerWaitingTime - tillerCheckInterval + } + + return errors.New("Tiller didn't start in time") +} + +func upgradeTiller(kubectlClient *kubernetes.Clientset, tillerOptions *helminstaller.Options) error { + log.StartWait("Upgrading tiller") + err := helminstaller.Upgrade(kubectlClient, tillerOptions) + log.StopWait() + if err != nil { + return err + } + + return nil +} + +func addAppNamespaces(appNamespaces *[]*string, namespaces []*string) { + newAppNamespaces := *appNamespaces + + for _, ns := range namespaces { + isExisting := false + + for _, existingNS := range newAppNamespaces { + if ns == existingNS { + isExisting = true + break + } + } + + if !isExisting { + newAppNamespaces = append(newAppNamespaces, ns) + } + } + + appNamespaces = &newAppNamespaces +} + +// IsTillerDeployed determines if we could connect to a tiller server +func IsTillerDeployed(kubectlClient *kubernetes.Clientset, tillerConfig *v1.TillerConfig) bool { + tillerNamespace := *tillerConfig.Release.Namespace + deployment, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{}) + + if err != nil { + return false + } + + if deployment == nil { + return false + } + + return true +} + +// DeleteTiller clears the tiller server, the service account and role binding +func DeleteTiller(kubectlClient *kubernetes.Clientset) error { + config := configutil.GetConfig(false) + + tillerConfig := config.Services.Tiller + tillerNamespace := *tillerConfig.Release.Namespace + errs := make([]error, 0, 1) + propagationPolicy := metav1.DeletePropagationForeground + + err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Delete(TillerDeploymentName, &metav1.DeleteOptions{ + PropagationPolicy: &propagationPolicy, + }) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + err = kubectlClient.CoreV1().Services(tillerNamespace).Delete(TillerDeploymentName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + // Only delete service accounts and roles in non cloud-provider environments + if config.Cluster.CloudProvider == nil || *config.Cluster.CloudProvider == "" { + err = kubectlClient.CoreV1().ServiceAccounts(tillerNamespace).Delete(TillerServiceAccountName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && 
strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + roleNamespace := append(*tillerConfig.AppNamespaces, &tillerNamespace) + for _, appNamespace := range roleNamespace { + err = kubectlClient.RbacV1beta1().Roles(*appNamespace).Delete(TillerRoleName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + err = kubectlClient.RbacV1beta1().RoleBindings(*appNamespace).Delete(TillerRoleName+"-binding", &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + err = kubectlClient.RbacV1beta1().Roles(*appNamespace).Delete(TillerRoleManagerName, &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + + err = kubectlClient.RbacV1beta1().RoleBindings(*appNamespace).Delete(TillerRoleManagerName+"-binding", &metav1.DeleteOptions{PropagationPolicy: &propagationPolicy}) + if err != nil && strings.HasSuffix(err.Error(), "not found") == false { + errs = append(errs, err) + } + } + } + + // Merge errors + errorText := "" + + for _, value := range errs { + errorText += value.Error() + "\n" + } + + if errorText == "" { + return nil + } + return errors.New(errorText) +} diff --git a/pkg/devspace/clients/kubectl/client.go b/pkg/devspace/clients/kubectl/client.go index 8ed89187f5..4a1a7256db 100644 --- a/pkg/devspace/clients/kubectl/client.go +++ b/pkg/devspace/clients/kubectl/client.go @@ -117,15 +117,23 @@ func GetClientConfig() (*rest.Config, error) { // IsMinikube returns true if the Kubernetes cluster is a minikube func IsMinikube() bool { if isMinikubeVar == nil { - loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() - kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) - cfg, err := kubeConfig.RawConfig() + isMinikube := false + config := configutil.GetConfig(false) + if config.Cluster.UseKubeConfig != nil && *config.Cluster.UseKubeConfig == true { + if config.Cluster.KubeContext == nil { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, &clientcmd.ConfigOverrides{}) + cfg, err := kubeConfig.RawConfig() + if err != nil { + return false + } - if err != nil { - return false + isMinikube = cfg.CurrentContext == "minikube" + } else { + isMinikube = *config.Cluster.KubeContext == "minikube" + } } - isMinikube := cfg.CurrentContext == "minikube" isMinikubeVar = &isMinikube } diff --git a/pkg/devspace/cloud/config.go b/pkg/devspace/cloud/config.go index f39777377b..f48fe3fedb 100644 --- a/pkg/devspace/cloud/config.go +++ b/pkg/devspace/cloud/config.go @@ -20,10 +20,9 @@ type ProviderConfig map[string]*Provider // Provider describes the struct to hold the cloud configuration type Provider struct { - Name string `yaml:"name,omitempty"` - KubeContext string `yaml:"kubecontext,omitempty"` - Host string `yaml:"host,omitempty"` - Token string `yaml:"token,omitempty"` + Name string `yaml:"name,omitempty"` + Host string `yaml:"host,omitempty"` + Token string `yaml:"token,omitempty"` } // DevSpaceCloudProviderName is the name of the default devspace-cloud provider @@ -40,8 +39,7 @@ const GetClusterConfigEndpoint = "/clusterConfig" // DevSpaceCloudProviderConfig holds the 
information for the devspace-cloud var DevSpaceCloudProviderConfig = &Provider{ - Host: "https://cloud.devspace.covexo.com", - KubeContext: DevSpaceKubeContextName, + Host: "https://cloud.devspace.covexo.com", } // ParseCloudConfig parses the cloud configuration and returns a map containing the configurations diff --git a/pkg/devspace/cloud/login.go b/pkg/devspace/cloud/login.go index 953582e4c9..514622a874 100644 --- a/pkg/devspace/cloud/login.go +++ b/pkg/devspace/cloud/login.go @@ -85,9 +85,7 @@ func Login(provider *Provider) (string, *api.Cluster, *api.AuthInfo, error) { ctx := context.Background() tokenChannel := make(chan string) - log.StartWait("Logging into cloud " + provider.Host + LoginEndpoint + " ...") server := startServer(provider.Host+LoginSuccessEndpoint, tokenChannel) - open.Start(provider.Host + LoginEndpoint) token := <-tokenChannel @@ -136,10 +134,7 @@ func Update(providerConfig ProviderConfig, dsConfig *v1.Config, switchKubeContex dsConfig.Services.Tiller.Release.Namespace = &namespace if *dsConfig.Cluster.UseKubeConfig { - kubeContext := DevSpaceKubeContextName - if provider.KubeContext != "" { - kubeContext = provider.KubeContext - } + kubeContext := DevSpaceKubeContextName + "-" + namespace err = UpdateKubeConfig(kubeContext, namespace, cluster, authInfo, switchKubeContext) if err != nil { @@ -173,17 +168,14 @@ func UpdateKubeConfig(contextName, namespace string, cluster *api.Cluster, authI config.CurrentContext = contextName } - // We generate a unique auth info name for each devspace - authInfoName := contextName + "-" + namespace - config.Clusters[contextName] = cluster - config.AuthInfos[authInfoName] = authInfo + config.AuthInfos[contextName] = authInfo // Check if we need to add the context if _, ok := config.Contexts[contextName]; !ok { context := api.NewContext() context.Cluster = contextName - context.AuthInfo = authInfoName + context.AuthInfo = contextName context.Namespace = namespace config.Contexts[contextName] = context @@ -201,9 +193,7 @@ func startServer(redirectURI string, tokenChannel chan string) *http.Server { log.Fatal("Bad request") } - log.StopWait() tokenChannel <- keys[0] - http.Redirect(w, r, redirectURI, http.StatusSeeOther) }) From d65f0689f91c9b83dad05c2d0332d70e95e8c348 Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Tue, 9 Oct 2018 17:08:44 +0200 Subject: [PATCH 2/4] Refactor registry package --- cmd/up.go | 3 +- pkg/devspace/registry/create.go | 153 ++++++++++++++++++++++++++++++ pkg/devspace/registry/registry.go | 117 +---------------------- 3 files changed, 158 insertions(+), 115 deletions(-) create mode 100644 pkg/devspace/registry/create.go diff --git a/cmd/up.go b/cmd/up.go index a247d7a78a..faae506894 100644 --- a/cmd/up.go +++ b/cmd/up.go @@ -156,7 +156,7 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) { // Build image if necessary mustRedeploy := cmd.buildImages() - // Check if we find a running release pod + // Check if the chart directory has changed hash, err := hash.Directory("chart") if err != nil { log.Fatalf("Error hashing chart directory: %v", err) @@ -165,6 +165,7 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) { // Load config config := configutil.GetConfig(false) + // Check if we find a running release pod pod, err := getRunningDevSpacePod(cmd.helm, cmd.kubectl) if err != nil || mustRedeploy || cmd.flags.deploy || config.DevSpace.ChartHash == nil || *config.DevSpace.ChartHash != hash { cmd.deployChart() diff --git a/pkg/devspace/registry/create.go b/pkg/devspace/registry/create.go 
new file mode 100644 index 0000000000..7ba4b5a5c8 --- /dev/null +++ b/pkg/devspace/registry/create.go @@ -0,0 +1,153 @@ +package registry + +import ( + "errors" + "fmt" + "strconv" + "time" + + "k8s.io/client-go/kubernetes" + + "github.com/covexo/devspace/pkg/devspace/clients/helm" + "github.com/covexo/devspace/pkg/devspace/config/configutil" + "github.com/covexo/devspace/pkg/devspace/config/v1" + "github.com/covexo/yamlq" + "github.com/foomo/htpasswd" + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func createRegistry(kubectl *kubernetes.Clientset, helm *helm.HelmClientWrapper, internalRegistry *v1.InternalRegistry, registryConfig *v1.RegistryConfig) error { + registryReleaseName := *internalRegistry.Release.Name + registryReleaseNamespace := *internalRegistry.Release.Namespace + registryReleaseValues := internalRegistry.Release.Values + + // Create registry namespace & ignore errors + kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: registryReleaseNamespace, + }, + }) + + // Deploy the registry + _, err := helm.InstallChartByName(registryReleaseName, registryReleaseNamespace, "stable/docker-registry", "", registryReleaseValues) + if err != nil { + return fmt.Errorf("Unable to initialize docker registry: %s", err.Error()) + } + + // Create/Update secret if necessary + if registryConfig != nil && registryConfig.Auth != nil { + // Update registry secret + err = createOrUpdateRegistrySecret(kubectl, internalRegistry, registryConfig) + if err != nil { + return err + } + } + + // Get the registry url + serviceHostname, err := getRegistryURL(kubectl, registryReleaseNamespace, registryReleaseName+"-docker-registry") + if err != nil { + return err + } + + // Check if an ingress is configured + ingressHostname := "" + if registryReleaseValues != nil { + registryValues := yamlq.NewQuery(*registryReleaseValues) + isIngressEnabled, _ := registryValues.Bool("ingress", "enabled") + + if isIngressEnabled { + firstIngressHostname, _ := registryValues.String("ingress", "hosts", "0") + + if len(firstIngressHostname) > 0 { + ingressHostname = firstIngressHostname + } + } + } + + // Update config values + if len(ingressHostname) == 0 { + registryConfig.URL = configutil.String(serviceHostname) + registryConfig.Insecure = configutil.Bool(true) + } else { + registryConfig.URL = configutil.String(ingressHostname) + registryConfig.Insecure = configutil.Bool(false) + } + + return nil +} + +func createOrUpdateRegistrySecret(kubectl *kubernetes.Clientset, internalRegistry *v1.InternalRegistry, registryConfig *v1.RegistryConfig) error { + registryReleaseName := *internalRegistry.Release.Name + registryReleaseNamespace := *internalRegistry.Release.Namespace + + registryAuth := registryConfig.Auth + htpasswdSecretName := registryReleaseName + "-docker-registry-secret" + htpasswdSecret, err := kubectl.Core().Secrets(registryReleaseNamespace).Get(htpasswdSecretName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("Unable to retrieve secret for docker registry: %s", err.Error()) + } + + if htpasswdSecret == nil || htpasswdSecret.Data == nil { + htpasswdSecret = &k8sv1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: htpasswdSecretName, + }, + Data: map[string][]byte{}, + } + } + + oldHtpasswdData := htpasswdSecret.Data["htpasswd"] + newHtpasswdData := htpasswd.HashedPasswords{} + + if len(oldHtpasswdData) != 0 { + oldHtpasswdDataBytes := []byte(oldHtpasswdData) + newHtpasswdData, _ = htpasswd.ParseHtpasswd(oldHtpasswdDataBytes) + 
}
+
+	err = newHtpasswdData.SetPassword(*registryAuth.Username, *registryAuth.Password, htpasswd.HashBCrypt)
+	if err != nil {
+		return fmt.Errorf("Unable to set password in htpasswd: %s", err.Error())
+	}
+
+	newHtpasswdDataBytes := newHtpasswdData.Bytes()
+	htpasswdSecret.Data["htpasswd"] = newHtpasswdDataBytes
+
+	_, err = kubectl.Core().Secrets(registryReleaseNamespace).Get(htpasswdSecretName, metav1.GetOptions{})
+	if err != nil {
+		_, err = kubectl.Core().Secrets(registryReleaseNamespace).Create(htpasswdSecret)
+	} else {
+		_, err = kubectl.Core().Secrets(registryReleaseNamespace).Update(htpasswdSecret)
+	}
+
+	if err != nil {
+		return fmt.Errorf("Unable to update htpasswd secret: %s", err.Error())
+	}
+
+	return nil
+}
+
+func getRegistryURL(kubectl *kubernetes.Clientset, registryReleaseNamespace, registryServiceName string) (string, error) {
+	maxServiceWaiting := 60 * time.Second
+	serviceWaitingInterval := 3 * time.Second
+
+	for true {
+		registryService, err := kubectl.Core().Services(registryReleaseNamespace).Get(registryServiceName, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+
+		if len(registryService.Spec.ClusterIP) > 0 {
+			return registryService.Spec.ClusterIP + ":" + strconv.Itoa(registryPort), nil
+		}
+
+		time.Sleep(serviceWaitingInterval)
+		maxServiceWaiting = maxServiceWaiting - serviceWaitingInterval
+
+		if maxServiceWaiting <= 0 {
+			return "", errors.New("Timeout waiting for registry service to start")
+		}
+	}
+
+	return "", nil
+}
diff --git a/pkg/devspace/registry/registry.go b/pkg/devspace/registry/registry.go
index 1d22625888..b5f24f7c7f 100644
--- a/pkg/devspace/registry/registry.go
+++ b/pkg/devspace/registry/registry.go
@@ -6,18 +6,15 @@ import (
 	"encoding/hex"
 	"errors"
 	"fmt"
-	"strconv"
 	"time"
 
 	"github.com/covexo/devspace/pkg/devspace/config/v1"
 	"github.com/covexo/devspace/pkg/util/log"
-	"github.com/foomo/htpasswd"
 	"k8s.io/client-go/kubernetes"
 
 	"github.com/covexo/devspace/pkg/devspace/clients/helm"
 	"github.com/covexo/devspace/pkg/devspace/config/configutil"
-	"github.com/covexo/yamlq"
 
 	k8sv1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -85,121 +82,13 @@ func InitInternalRegistry(kubectl *kubernetes.Clientset, helm *helm.HelmClientWr
 	registryReleaseName := *internalRegistry.Release.Name
 	registryReleaseDeploymentName := registryReleaseName + "-docker-registry"
 	registryReleaseNamespace := *internalRegistry.Release.Namespace
-	registryReleaseValues := internalRegistry.Release.Values
 
 	// Check if registry already exists
 	registryDeployment, err := kubectl.ExtensionsV1beta1().Deployments(registryReleaseNamespace).Get(registryReleaseDeploymentName, metav1.GetOptions{})
 	if err != nil {
-		// Check if registry namespace exists
-		_, err := kubectl.CoreV1().Namespaces().Get(registryReleaseNamespace, metav1.GetOptions{})
+		err = createRegistry(kubectl, helm, internalRegistry, registryConfig)
 		if err != nil {
-			// Create registry namespace
-			_, err = kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: registryReleaseNamespace,
-				},
-			})
-
-			if err != nil {
-				return err
-			}
-		}
-
-		_, err = helm.InstallChartByName(registryReleaseName, registryReleaseNamespace, "stable/docker-registry", "", registryReleaseValues)
-		if err != nil {
-			return fmt.Errorf("Unable to initialize docker registry: %s", err.Error())
-		}
-
-		if registryConfig != nil && registryConfig.Auth != nil {
-			registryAuth := registryConfig.Auth
-			htpasswdSecretName := registryReleaseName + "-docker-registry-secret"
-			htpasswdSecret, err := kubectl.Core().Secrets(registryReleaseNamespace).Get(htpasswdSecretName, metav1.GetOptions{})
-			if err != nil {
-				return fmt.Errorf("Unable to retrieve secret for docker registry: %s", err.Error())
-			}
-
-			if htpasswdSecret == nil || htpasswdSecret.Data == nil {
-				htpasswdSecret = &k8sv1.Secret{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: htpasswdSecretName,
-					},
-					Data: map[string][]byte{},
-				}
-			}
-
-			oldHtpasswdData := htpasswdSecret.Data["htpasswd"]
-			newHtpasswdData := htpasswd.HashedPasswords{}
-
-			if len(oldHtpasswdData) != 0 {
-				oldHtpasswdDataBytes := []byte(oldHtpasswdData)
-				newHtpasswdData, _ = htpasswd.ParseHtpasswd(oldHtpasswdDataBytes)
-			}
-
-			err = newHtpasswdData.SetPassword(*registryAuth.Username, *registryAuth.Password, htpasswd.HashBCrypt)
-			if err != nil {
-				return fmt.Errorf("Unable to set password in htpasswd: %s", err.Error())
-			}
-
-			newHtpasswdDataBytes := newHtpasswdData.Bytes()
-
-			htpasswdSecret.Data["htpasswd"] = newHtpasswdDataBytes
-
-			_, err = kubectl.Core().Secrets(registryReleaseNamespace).Get(htpasswdSecretName, metav1.GetOptions{})
-			if err != nil {
-				_, err = kubectl.Core().Secrets(registryReleaseNamespace).Create(htpasswdSecret)
-			} else {
-				_, err = kubectl.Core().Secrets(registryReleaseNamespace).Update(htpasswdSecret)
-			}
-		}
-
-		if err != nil {
-			return fmt.Errorf("Unable to update htpasswd secret: %s", err.Error())
-		}
-
-		registryServiceName := registryReleaseName + "-docker-registry"
-		serviceHostname := ""
-		maxServiceWaiting := 60 * time.Second
-		serviceWaitingInterval := 3 * time.Second
-
-		for true {
-			registryService, err := kubectl.Core().Services(registryReleaseNamespace).Get(registryServiceName, metav1.GetOptions{})
-			if err != nil {
-				return err
-			}
-
-			if len(registryService.Spec.ClusterIP) > 0 {
-				serviceHostname = registryService.Spec.ClusterIP + ":" + strconv.Itoa(registryPort)
-				break
-			}
-
-			time.Sleep(serviceWaitingInterval)
-			maxServiceWaiting = maxServiceWaiting - serviceWaitingInterval
-
-			if maxServiceWaiting <= 0 {
-				return errors.New("Timeout waiting for registry service to start")
-			}
-		}
-
-		ingressHostname := ""
-		if registryReleaseValues != nil {
-			registryValues := yamlq.NewQuery(*registryReleaseValues)
-			isIngressEnabled, _ := registryValues.Bool("ingress", "enabled")
-
-			if isIngressEnabled {
-				firstIngressHostname, _ := registryValues.String("ingress", "hosts", "0")
-
-				if len(firstIngressHostname) > 0 {
-					ingressHostname = firstIngressHostname
-				}
-			}
-		}
-
-		if len(ingressHostname) == 0 {
-			registryConfig.URL = configutil.String(serviceHostname)
-			registryConfig.Insecure = configutil.Bool(true)
-		} else {
-			registryConfig.URL = configutil.String(ingressHostname)
-			registryConfig.Insecure = configutil.Bool(false)
+			return err
 		}
 	}
 
@@ -238,7 +127,7 @@ func waitForRegistry(registryNamespace, registryReleaseDeploymentName string, cl
 	return errors.New("Internal registry start waiting time timed out")
 }
 
-//GetImageURL returns the image (optional with tag)
+// GetImageURL returns the image URL (optionally with the tag)
 func GetImageURL(imageConfig *v1.ImageConfig, includingLatestTag bool) string {
 	registryConfig, registryConfErr := GetRegistryConfig(imageConfig)

From 8cd24474f8b624296d364cfb8289f26e56bbd649 Mon Sep 17 00:00:00 2001
From: fabiankramm
Date: Tue, 9 Oct 2018 17:35:23 +0200
Subject: [PATCH 3/4] Check if we can access namespaces

---
 cmd/up.go                           | 24 ++++++++++++++++--------
 pkg/devspace/clients/helm/tiller.go | 20 +++++++++++++-------
 pkg/devspace/registry/create.go     | 20 +++++++++++++-------
 3 files changed, 42 insertions(+), 22 deletions(-)
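
Note: all three hunks below replace a fire-and-forget namespace Create,
whose error was silently dropped, with the same get-or-create sequence.
A minimal sketch of that shared pattern, assuming the client-go clientset
API already used in this repository; the standalone helper and its name
are illustrative only and not part of the patch:

// ensureNamespace looks the namespace up first and only issues a Create
// when the Get fails, so a real Create error is returned to the caller
// instead of being ignored.
func ensureNamespace(client *kubernetes.Clientset, name string) error {
	_, err := client.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
	if err == nil {
		// Namespace already exists
		return nil
	}

	_, err = client.CoreV1().Namespaces().Create(&k8sv1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	})
	return err
}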
diff --git a/cmd/up.go b/cmd/up.go
index faae506894..215497eafc 100644
--- a/cmd/up.go
+++ b/cmd/up.go
@@ -140,7 +140,10 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) {
 		log.Fatalf("Unable to create new kubectl client: %v", err)
 	}
 
-	cmd.ensureNamespace()
+	err = cmd.ensureNamespace()
+	if err != nil {
+		log.Fatalf("Unable to create namespace: %v", err)
+	}
 
 	err = cmd.ensureClusterRoleBinding()
 	if err != nil {
@@ -196,16 +199,21 @@ func (cmd *UpCmd) Run(cobraCmd *cobra.Command, args []string) {
 	enterTerminal(cmd.kubectl, cmd.pod, cmd.flags.container, args)
 }
 
-func (cmd *UpCmd) ensureNamespace() {
+func (cmd *UpCmd) ensureNamespace() error {
 	config := configutil.GetConfig(false)
 	releaseNamespace := *config.DevSpace.Release.Namespace
 
-	// Create release namespace and ignore errors
-	_, _ = cmd.kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: releaseNamespace,
-		},
-	})
+	_, err := cmd.kubectl.CoreV1().Namespaces().Get(releaseNamespace, metav1.GetOptions{})
+	if err != nil {
+		// Create release namespace
+		_, err = cmd.kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: releaseNamespace,
+			},
+		})
+	}
+
+	return err
 }
 
 func (cmd *UpCmd) ensureClusterRoleBinding() error {
diff --git a/pkg/devspace/clients/helm/tiller.go b/pkg/devspace/clients/helm/tiller.go
index e698f49a10..00791dd908 100644
--- a/pkg/devspace/clients/helm/tiller.go
+++ b/pkg/devspace/clients/helm/tiller.go
@@ -36,14 +36,20 @@ func ensureTiller(kubectlClient *kubernetes.Clientset, config *v1.Config, upgrad
 		ServiceAccount: TillerServiceAccountName,
 	}
 
-	// Create tiller namespace & ignore any errors
-	kubectlClient.CoreV1().Namespaces().Create(&k8sv1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: tillerNamespace,
-		},
-	})
+	_, err := kubectlClient.CoreV1().Namespaces().Get(tillerNamespace, metav1.GetOptions{})
+	if err != nil {
+		// Create tiller namespace
+		_, err = kubectlClient.CoreV1().Namespaces().Create(&k8sv1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: tillerNamespace,
+			},
+		})
+		if err != nil {
+			return err
+		}
+	}
 
-	_, err := kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{})
+	_, err = kubectlClient.ExtensionsV1beta1().Deployments(tillerNamespace).Get(TillerDeploymentName, metav1.GetOptions{})
 	if err != nil {
 		// Create tiller server
 		err = createTiller(kubectlClient, config, tillerOptions)
diff --git a/pkg/devspace/registry/create.go b/pkg/devspace/registry/create.go
index 7ba4b5a5c8..6817021070 100644
--- a/pkg/devspace/registry/create.go
+++ b/pkg/devspace/registry/create.go
@@ -22,15 +22,21 @@ func createRegistry(kubectl *kubernetes.Clientset, helm *helm.HelmClientWrapper,
 	registryReleaseNamespace := *internalRegistry.Release.Namespace
 	registryReleaseValues := internalRegistry.Release.Values
 
-	// Create registry namespace & ignore errors
-	kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: registryReleaseNamespace,
-		},
-	})
+	_, err := kubectl.CoreV1().Namespaces().Get(registryReleaseNamespace, metav1.GetOptions{})
+	if err != nil {
+		// Create registry namespace
+		_, err = kubectl.CoreV1().Namespaces().Create(&k8sv1.Namespace{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: registryReleaseNamespace,
+			},
+		})
+		if err != nil {
+			return err
+		}
+	}
 
 	// Deploy the registry
-	_, err := helm.InstallChartByName(registryReleaseName, registryReleaseNamespace, "stable/docker-registry", "", registryReleaseValues)
+	_, err = helm.InstallChartByName(registryReleaseName, registryReleaseNamespace, "stable/docker-registry", "", registryReleaseValues)
 	if err != nil {
 		return fmt.Errorf("Unable to initialize docker registry: %s", err.Error())
 	}

From 8581cb0ff54465725b460c896d68b8e32654f722 Mon Sep 17 00:00:00 2001
From: fabiankramm
Date: Tue, 9 Oct 2018 18:07:04 +0200
Subject: [PATCH 4/4] Save config after refreshing cloud config

---
 pkg/devspace/clients/kubectl/client.go | 5 +++++
 1 file changed, 5 insertions(+)
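
Note: the hunk below persists the config right after a cloud-provider
refresh, so later commands can reuse the refreshed credentials without
re-authenticating. A minimal sketch of the refresh-then-save flow; the
wrapper function and the cloud.ProviderConfig parameter type are assumed
for illustration and are not part of the patch (the patch itself only
warns on a failed refresh, while this sketch treats it as an error):

// refreshAndSaveConfig refreshes the cloud provider information and then
// immediately writes the updated config to disk; a failed save is
// returned as a hard error.
func refreshAndSaveConfig(providerConfig cloud.ProviderConfig, config *v1.Config) error {
	if err := cloud.Update(providerConfig, config, false); err != nil {
		return fmt.Errorf("Couldn't update cloud provider information: %v", err)
	}

	if err := configutil.SaveConfig(); err != nil {
		return fmt.Errorf("Error saving config: %v", err)
	}

	return nil
}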
"stable/docker-registry", "", registryReleaseValues) + _, err = helm.InstallChartByName(registryReleaseName, registryReleaseNamespace, "stable/docker-registry", "", registryReleaseValues) if err != nil { return fmt.Errorf("Unable to initialize docker registry: %s", err.Error()) } From 8581cb0ff54465725b460c896d68b8e32654f722 Mon Sep 17 00:00:00 2001 From: fabiankramm Date: Tue, 9 Oct 2018 18:07:04 +0200 Subject: [PATCH 4/4] Save config after refreshing cloud config --- pkg/devspace/clients/kubectl/client.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/pkg/devspace/clients/kubectl/client.go b/pkg/devspace/clients/kubectl/client.go index 4a1a7256db..05a4ef55f6 100644 --- a/pkg/devspace/clients/kubectl/client.go +++ b/pkg/devspace/clients/kubectl/client.go @@ -63,6 +63,11 @@ func GetClientConfig() (*rest.Config, error) { if err != nil { log.Warnf("Couldn't update cloud provider %s information: %v", *config.Cluster.CloudProvider, err) } + + err = configutil.SaveConfig() + if err != nil { + return nil, fmt.Errorf("Error saving config: %v", err) + } } if (config.Cluster.UseKubeConfig != nil && *config.Cluster.UseKubeConfig) || config.Cluster.APIServer == nil {