Skip to content

Commit

Permalink
pkg/helm: migrate to secret storage backend in namespace of CR (#1102)
Browse files Browse the repository at this point in the history
* pkg/helm: migrate to secret storage backend in namespace of CR

* CHANGELOG.md: added change for PR #1102
  • Loading branch information
joelanford committed Mar 18, 2019
1 parent a38633d commit c412807
Show file tree
Hide file tree
Showing 6 changed files with 62 additions and 43 deletions.
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@

### Changed

- Updated the helm-operator to store release state in kubernetes secrets in the same namespace as the custom resource that defines the release. ([#1102](https://github.com/operator-framework/operator-sdk/pull/1102))
- **WARNING**: Users with active CRs and releases who are upgrading their helm-based operator should not skip this version. Future versions will not seamlessly transition release state to the persistent backend, and will instead uninstall and reinstall all managed releases.

### Deprecated

### Removed
Expand Down
1 change: 1 addition & 0 deletions Gopkg.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

7 changes: 6 additions & 1 deletion pkg/helm/controller/reconcile.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,12 @@ func (r HelmOperatorReconciler) Reconcile(request reconcile.Request) (reconcile.
return reconcile.Result{}, err
}

manager := r.ManagerFactory.NewManager(o)
manager, err := r.ManagerFactory.NewManager(o)
if err != nil {
log.Error(err, "Failed to get release manager")
return reconcile.Result{}, err
}

status := types.StatusFor(o)
log = log.WithValues("release", manager.ReleaseName())

Expand Down
4 changes: 4 additions & 0 deletions pkg/helm/release/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,10 @@ func (m manager) IsUpdateRequired() bool {
// Sync ensures the Helm storage backend is in sync with the status of the
// custom resource.
func (m *manager) Sync(ctx context.Context) error {
// TODO: We're now persisting releases as secrets. To support seamless upgrades, we
// need to sync the release status from the CR to the persistent storage backend.
// Once we release the storage backend migration, this function (and comment)
// can be removed.
if err := m.syncReleaseStatus(*m.status); err != nil {
return fmt.Errorf("failed to sync release status to storage backend: %s", err)
}
Expand Down
77 changes: 47 additions & 30 deletions pkg/helm/release/manager_factory.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,17 +20,20 @@ import (

"github.com/martinlindhe/base36"
"github.com/pborman/uuid"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
apitypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/typed/core/v1"
helmengine "k8s.io/helm/pkg/engine"
"k8s.io/helm/pkg/kube"
"k8s.io/helm/pkg/storage"
"k8s.io/helm/pkg/storage/driver"
"k8s.io/helm/pkg/tiller"
"k8s.io/helm/pkg/tiller/environment"
crmanager "sigs.k8s.io/controller-runtime/pkg/manager"

"github.com/operator-framework/operator-sdk/pkg/helm/client"
"github.com/operator-framework/operator-sdk/pkg/helm/engine"
"github.com/operator-framework/operator-sdk/pkg/helm/internal/types"
)
Expand All @@ -40,43 +43,51 @@ import (
// improves decoupling between reconciliation logic and the Helm backend
// components used to manage releases.
type ManagerFactory interface {
NewManager(r *unstructured.Unstructured) Manager
NewManager(r *unstructured.Unstructured) (Manager, error)
}

type managerFactory struct {
storageBackend *storage.Storage
tillerKubeClient *kube.Client
chartDir string
mgr crmanager.Manager
chartDir string
}

// NewManagerFactory returns a new Helm manager factory capable of installing and uninstalling releases.
func NewManagerFactory(storageBackend *storage.Storage, tillerKubeClient *kube.Client, chartDir string) ManagerFactory {
return &managerFactory{storageBackend, tillerKubeClient, chartDir}
}

func (f managerFactory) NewManager(r *unstructured.Unstructured) Manager {
return f.newManagerForCR(r)
func NewManagerFactory(mgr crmanager.Manager, chartDir string) ManagerFactory {
return &managerFactory{mgr, chartDir}
}

func (f managerFactory) newManagerForCR(r *unstructured.Unstructured) Manager {
func (f managerFactory) NewManager(cr *unstructured.Unstructured) (Manager, error) {
clientv1, err := v1.NewForConfig(f.mgr.GetConfig())
if err != nil {
return nil, fmt.Errorf("failed to get core/v1 client: %s", err)
}
storageBackend := storage.Init(driver.NewSecrets(clientv1.Secrets(cr.GetNamespace())))
tillerKubeClient, err := client.NewFromManager(f.mgr)
if err != nil {
return nil, fmt.Errorf("failed to get client from manager: %s", err)
}
releaseServer, err := getReleaseServer(cr, storageBackend, tillerKubeClient)
if err != nil {
return nil, fmt.Errorf("failed to get helm release server: %s", err)
}
return &manager{
storageBackend: f.storageBackend,
tillerKubeClient: f.tillerKubeClient,
storageBackend: storageBackend,
tillerKubeClient: tillerKubeClient,
chartDir: f.chartDir,

tiller: f.tillerRendererForCR(r),
releaseName: getReleaseName(r),
namespace: r.GetNamespace(),
tiller: releaseServer,
releaseName: getReleaseName(cr),
namespace: cr.GetNamespace(),

spec: r.Object["spec"],
status: types.StatusFor(r),
}
spec: cr.Object["spec"],
status: types.StatusFor(cr),
}, nil
}

// tillerRendererForCR creates a ReleaseServer configured with a rendering engine that adds ownerrefs to rendered assets
// getReleaseServer creates a ReleaseServer configured with a rendering engine that adds ownerrefs to rendered assets
// based on the CR.
func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tiller.ReleaseServer {
controllerRef := metav1.NewControllerRef(r, r.GroupVersionKind())
func getReleaseServer(cr *unstructured.Unstructured, storageBackend *storage.Storage, tillerKubeClient *kube.Client) (*tiller.ReleaseServer, error) {
controllerRef := metav1.NewControllerRef(cr, cr.GroupVersionKind())
ownerRefs := []metav1.OwnerReference{
*controllerRef,
}
Expand All @@ -87,17 +98,23 @@ func (f managerFactory) tillerRendererForCR(r *unstructured.Unstructured) *tille
}
env := &environment.Environment{
EngineYard: ey,
Releases: f.storageBackend,
KubeClient: f.tillerKubeClient,
Releases: storageBackend,
KubeClient: tillerKubeClient,
}
kubeconfig, err := tillerKubeClient.ToRESTConfig()
if err != nil {
return nil, err
}
cs, err := clientset.NewForConfig(kubeconfig)
if err != nil {
return nil, err
}
kubeconfig, _ := f.tillerKubeClient.ToRESTConfig()
cs := clientset.NewForConfigOrDie(kubeconfig)

return tiller.NewReleaseServer(env, cs, false)
return tiller.NewReleaseServer(env, cs, false), nil
}

func getReleaseName(r *unstructured.Unstructured) string {
return fmt.Sprintf("%s-%s", r.GetName(), shortenUID(r.GetUID()))
func getReleaseName(cr *unstructured.Unstructured) string {
return fmt.Sprintf("%s-%s", cr.GetName(), shortenUID(cr.GetUID()))
}

func shortenUID(uid apitypes.UID) string {
Expand Down
13 changes: 1 addition & 12 deletions pkg/helm/run.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@ import (
"os"
"runtime"

"github.com/operator-framework/operator-sdk/pkg/helm/client"
"github.com/operator-framework/operator-sdk/pkg/helm/controller"
hoflags "github.com/operator-framework/operator-sdk/pkg/helm/flags"
"github.com/operator-framework/operator-sdk/pkg/helm/release"
Expand All @@ -30,8 +29,6 @@ import (
sdkVersion "github.com/operator-framework/operator-sdk/version"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/helm/pkg/storage"
"k8s.io/helm/pkg/storage/driver"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
logf "sigs.k8s.io/controller-runtime/pkg/runtime/log"
Expand Down Expand Up @@ -77,14 +74,6 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
return err
}

// Create Tiller's storage backend and kubernetes client
storageBackend := storage.Init(driver.NewMemory())
tillerKubeClient, err := client.NewFromManager(mgr)
if err != nil {
log.Error(err, "Failed to create new Tiller client.")
return err
}

watches, err := watches.Load(flags.WatchesFile)
if err != nil {
log.Error(err, "Failed to create new manager factories.")
Expand All @@ -96,7 +85,7 @@ func Run(flags *hoflags.HelmOperatorFlags) error {
err := controller.Add(mgr, controller.WatchOptions{
Namespace: namespace,
GVK: w.GroupVersionKind,
ManagerFactory: release.NewManagerFactory(storageBackend, tillerKubeClient, w.ChartDir),
ManagerFactory: release.NewManagerFactory(mgr, w.ChartDir),
ReconcilePeriod: flags.ReconcilePeriod,
WatchDependentResources: w.WatchDependentResources,
})
Expand Down

0 comments on commit c412807

Please sign in to comment.