Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

✨ Remove unwanted objects during upgrade with hook server #168

Merged
merged 1 commit into from
Jun 3, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions api/v1alpha1/clusteraddon_types.go
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,15 @@
ClusterAddonFinalizer = "clusteraddon.clusterstack.x-k8s.io"
)

const StageAnnotation = "ClusterAddonStage"

Check warning on line 31 in api/v1alpha1/clusteraddon_types.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

exported: exported const StageAnnotation should have comment or be unexported (revive)

type StageAnnotationValue string

Check warning on line 33 in api/v1alpha1/clusteraddon_types.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

exported: exported type StageAnnotationValue should have comment or be unexported (revive)

const (
StageCreated = StageAnnotationValue("created")
StageUpgraded = StageAnnotationValue("upgraded")
)

// StagePhase defines the status of helm chart in the cluster addon.
type StagePhase string

Expand Down Expand Up @@ -133,6 +142,26 @@
}
}

// SetStageAnnotations sets the stage annotation on the ClusterAddon to the
// given value, unless the annotation is already present. An existing value is
// never overwritten, so the first recorded stage wins.
func (r *ClusterAddon) SetStageAnnotations(value StageAnnotationValue) {
	if r.Annotations == nil {
		r.Annotations = make(map[string]string)
	}
	// Only write the annotation if it does not exist yet; a previously
	// recorded stage (e.g. "created") must not be clobbered by a later call.
	if _, found := r.Annotations[StageAnnotation]; !found {
		r.Annotations[StageAnnotation] = string(value)
	}
}

// HasStageAnnotation reports whether the stage annotation is present and set
// to the given value.
func (r *ClusterAddon) HasStageAnnotation(value StageAnnotationValue) bool {
	val, found := r.Annotations[StageAnnotation]
	return found && val == string(value)
}

// GetConditions returns the observations of the operational state of the ClusterAddon resource.
func (r *ClusterAddon) GetConditions() clusterv1.Conditions {
return r.Status.Conditions
Expand Down
228 changes: 220 additions & 8 deletions internal/controller/clusteraddon_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,8 @@
return reconcile.Result{}, fmt.Errorf("failed to get cluster addon config path: %w", err)
}

logger := log.FromContext(ctx)

// Check whether current Helm chart has been applied in the workload cluster. If not, then we need to apply the helm chart (again).
// the spec.clusterStack is only set after a Helm chart from a ClusterStack has been applied successfully.
// If it is not set, the Helm chart has never been applied.
Expand Down Expand Up @@ -250,6 +252,8 @@
conditions.MarkTrue(clusterAddon, csov1alpha1.HelmChartAppliedCondition)
}

clusterAddon.SetStageAnnotations(csov1alpha1.StageCreated)
clusterAddon.Spec.Hook = ""
clusterAddon.Spec.ClusterStack = cluster.Spec.Topology.Class
clusterAddon.Status.Ready = true
return ctrl.Result{}, nil
Expand All @@ -271,11 +275,14 @@
// set condition that helm chart has been applied successfully
conditions.MarkTrue(clusterAddon, csov1alpha1.HelmChartAppliedCondition)
}

clusterAddon.Spec.Hook = ""
clusterAddon.SetStageAnnotations(csov1alpha1.StageCreated)
clusterAddon.Status.Ready = true
return ctrl.Result{}, nil

} else {
in.addonStagesInput, err = r.getAddonStagesInput(clusterAddon, in.restConfig, in.clusterAddonChartPath)
in.addonStagesInput, err = r.getAddonStagesInput(in.restConfig, in.clusterAddonChartPath)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get addon stages input: %w", err)
}
Expand Down Expand Up @@ -305,7 +312,7 @@
in.oldDestinationClusterAddonChartDir = strings.TrimSuffix(oldRelease.ClusterAddonChartPath(), ".tgz")

if err := unTarContent(oldRelease.ClusterAddonChartPath(), in.oldDestinationClusterAddonChartDir); err != nil {
return reconcile.Result{}, fmt.Errorf("failed to untar cluster addon chart: %q: %w", oldRelease.ClusterAddonChartPath(), err)
return reconcile.Result{}, fmt.Errorf("failed to untar old cluster stack cluster addon chart: %q: %w", oldRelease.ClusterAddonChartPath(), err)
}
}

Expand Down Expand Up @@ -354,7 +361,7 @@
// In case the Kubernetes version stayed the same during an upgrade, the hook server does not trigger and
// we just take the Helm charts that are supposed to be installed in the BeforeClusterUpgrade hook and apply them.
if oldRelease.Meta.Versions.Kubernetes == releaseAsset.Meta.Versions.Kubernetes {
clusterAddon.Spec.Hook = "BeforeClusterUpgrade"

Check failure on line 364 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

string `BeforeClusterUpgrade` has 4 occurrences, make it a constant (goconst)
for _, stage := range clusterAddonConfig.AddonStages["BeforeClusterUpgrade"] {
shouldRequeue, err := r.executeStage(ctx, stage, in)
if err != nil {
Expand Down Expand Up @@ -397,7 +404,39 @@
}
}

logger.Info("the hook is here", "hook", in.clusterAddon.Spec.Hook)

if clusterAddon.Spec.Hook == "AfterControlPlaneInitialized" || clusterAddon.Spec.Hook == "BeforeClusterUpgrade" {
if clusterAddon.Spec.Hook == "BeforeClusterUpgrade" {
// create the list of old release objects
oldClusterStackObjectList, err := r.getOldReleaseObjects(ctx, in, clusterAddonConfig, oldRelease)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get old cluster stack object list from helm charts: %w", err)
}
logger.Info("here is the old cluster stack object list", "list", oldClusterStackObjectList)

newClusterStackObjectList, err := r.getNewReleaseObjects(ctx, in, clusterAddonConfig)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get new cluster stack object list from helm charts: %w", err)
}

logger.Info("here in the clean up begins", "currentList", newClusterStackObjectList)
shouldReque, err := r.cleanUpResources(ctx, in, oldClusterStackObjectList, newClusterStackObjectList)
logger.Info("here in the clean up done", "currentList", newClusterStackObjectList, "reque", shouldReque)
if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to clean up resources: %w", err)
}
if shouldReque {
return reconcile.Result{RequeueAfter: 20 * time.Second}, nil
}

// set upgrade annotation once done
clusterAddon.SetStageAnnotations(csov1alpha1.StageUpgraded)
}

// if upgrade annotation is not present add the create annotation
clusterAddon.SetStageAnnotations(csov1alpha1.StageCreated)

clusterAddon.Spec.ClusterStack = cluster.Spec.Topology.Class
}

Expand All @@ -419,6 +458,153 @@
return ctrl.Result{}, nil
}

func (r *ClusterAddonReconciler) getNewReleaseObjects(ctx context.Context, in templateAndApplyClusterAddonInput, clusterAddonConfig clusteraddon.ClusterAddonConfig) ([]*csov1alpha1.Resource, error) {

Check failure on line 461 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
var (
newBuildTemplate []byte
resources []*csov1alpha1.Resource
)

for _, stage := range clusterAddonConfig.AddonStages[in.clusterAddon.Spec.Hook] {
if _, err := os.Stat(filepath.Join(in.newDestinationClusterAddonChartDir, stage.HelmChartName, release.OverwriteYaml)); err == nil {
newBuildTemplate, err = buildTemplateFromClusterAddonValues(ctx, filepath.Join(in.newDestinationClusterAddonChartDir, stage.HelmChartName, release.OverwriteYaml), in.cluster, r.Client, true)
if err != nil {
return nil, fmt.Errorf("failed to build template from new cluster addon values of the latest cluster stack: %w", err)
}
}

helmTemplate, err := helmTemplateClusterAddon(filepath.Join(in.newDestinationClusterAddonChartDir, stage.HelmChartName), newBuildTemplate)
if err != nil {
return nil, fmt.Errorf("failed to template new helm chart of the latest cluster stack: %w", err)
}

resource, err := kube.GetResourcesFromHelmTemplate(helmTemplate)
if err != nil {
return nil, fmt.Errorf("failed to get resources form old cluster stack helm template of the latest cluster stack: %w", err)
}

if stage.Action == clusteraddon.Apply {
resources = append(resources, resource...)
} else {
resources = removeResourcesFromCurrentListOfObjects(resources, resource)
}
}

return resources, nil
}

// getOldReleaseObjects returns the old cluster stack objects in the workload cluster.
func (r *ClusterAddonReconciler) getOldReleaseObjects(ctx context.Context, in templateAndApplyClusterAddonInput, clusterAddonConfig clusteraddon.ClusterAddonConfig, oldRelease release.Release) ([]*csov1alpha1.Resource, error) {

Check failure on line 496 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
// clusteraddon.yaml
clusterAddonConfigPath, err := r.getClusterAddonConfigPath(in.clusterAddon.Spec.ClusterStack)
if err != nil {
return nil, fmt.Errorf("failed to get old cluster stack cluster addon config path: %w", err)
}

if _, err := os.Stat(clusterAddonConfigPath); err != nil {
if !os.IsNotExist(err) {
return nil, fmt.Errorf("failed to verify the clusteraddon.yaml on old cluster stack release path %q with error: %w", clusterAddonConfigPath, err)
}

// this is the old way
buildTemplate, err := buildTemplateFromClusterAddonValues(ctx, oldRelease.ClusterAddonValuesPath(), in.cluster, r.Client, false)
if err != nil {
return nil, fmt.Errorf("failed to build template from the old cluster stack cluster addon values: %w", err)
}

helmTemplate, err := helmTemplateClusterAddon(oldRelease.ClusterAddonChartPath(), buildTemplate)
if err != nil {
return nil, fmt.Errorf("failed to template helm chart: %w", err)
}

resources, err := kube.GetResourcesFromHelmTemplate(helmTemplate)
if err != nil {
return nil, fmt.Errorf("failed to get resources form old cluster stack helm template: %w", err)
}

return resources, nil
}

// this is the new way
// Read all the helm charts in new the un-tared cluster addon.

var (
newBuildTemplate []byte
resources []*csov1alpha1.Resource

hook string
)

if in.clusterAddon.HasStageAnnotation(csov1alpha1.StageCreated) {
hook = "AfterControlPlaneInitialized"
} else {
hook = "BeforeClusterUpgrade"
}

for _, stage := range clusterAddonConfig.AddonStages[hook] {
if _, err := os.Stat(filepath.Join(in.oldDestinationClusterAddonChartDir, stage.HelmChartName, release.OverwriteYaml)); err == nil {
newBuildTemplate, err = buildTemplateFromClusterAddonValues(ctx, filepath.Join(in.oldDestinationClusterAddonChartDir, stage.HelmChartName, release.OverwriteYaml), in.cluster, r.Client, true)
if err != nil {
return nil, fmt.Errorf("failed to build template from new cluster addon values: %w", err)
}
}

helmTemplate, err := helmTemplateClusterAddon(filepath.Join(in.oldDestinationClusterAddonChartDir, stage.HelmChartName), newBuildTemplate)
if err != nil {
return nil, fmt.Errorf("failed to template new helm chart: %w", err)
}

resource, err := kube.GetResourcesFromHelmTemplate(helmTemplate)
if err != nil {
return nil, fmt.Errorf("failed to get resources form old cluster stack helm template: %w", err)
}

if stage.Action == clusteraddon.Apply {
resources = append(resources, resource...)
} else {
resources = removeResourcesFromCurrentListOfObjects(resources, resource)
}
}

return resources, nil
}

func (r *ClusterAddonReconciler) cleanUpResources(ctx context.Context, in templateAndApplyClusterAddonInput, oldList, newList []*csov1alpha1.Resource) (shouldRequeue bool, err error) {

Check failure on line 571 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
// Create a map of items in the new slice for faster lookup
newMap := make(map[*csov1alpha1.Resource]bool)
for _, item := range newList {
newMap[item] = true
}

// Find extra objects in the old slice
var extraResources []*csov1alpha1.Resource
for _, item := range oldList {
if !newMap[item] {
extraResources = append(extraResources, item)
}
}
logger := log.FromContext(ctx)

logger.Info("diff in resources", "diff", extraResources)

for _, resource := range extraResources {
if resource.Namespace == "" {
resource.Namespace = clusterAddonNamespace
}
dr, err := kube.GetDynamicResourceInterface(resource.Namespace, in.restConfig, resource.GroupVersionKind())
if err != nil {
return false, fmt.Errorf("failed to get dynamic resource interface: %w", err)
}

if err := dr.Delete(ctx, resource.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
reterr := fmt.Errorf("failed to delete object %q: %w", resource.GroupVersionKind(), err)
resource.Error = reterr.Error()
shouldRequeue = true
}
}

return shouldRequeue, nil
}

func (r *ClusterAddonReconciler) getClusterAddonConfigPath(clusterClassName string) (string, error) {
// path to the clusteraddon config /tmp/cluster-stacks/docker-ferrol-1-27-v1/clusteraddon.yaml
// if present then new way of cluster stack otherwise old way.
Expand Down Expand Up @@ -455,7 +641,7 @@
oldDestinationClusterAddonChartDir string
}

func (r *ClusterAddonReconciler) getAddonStagesInput(clusterAddon *csov1alpha1.ClusterAddon, restConfig *rest.Config, clusterAddonChart string) (addonStagesInput, error) {
func (r *ClusterAddonReconciler) getAddonStagesInput(restConfig *rest.Config, clusterAddonChart string) (addonStagesInput, error) {
var (
addonStages addonStagesInput
err error
Expand All @@ -476,7 +662,7 @@
// dst - /tmp/cluster-stacks/docker-ferrol-1-27-v1/docker-ferrol-1-27-cluster-addon-v1/
addonStages.newDestinationClusterAddonChartDir = strings.TrimSuffix(clusterAddonChart, ".tgz")
if err := unTarContent(clusterAddonChart, addonStages.newDestinationClusterAddonChartDir); err != nil {
return addonStagesInput{}, fmt.Errorf("failed to untar cluster addon chart: %q: %w", clusterAddonChart, err)
return addonStagesInput{}, fmt.Errorf("failed to untar new cluster stack cluster addon chart: %q: %w", clusterAddonChart, err)
}

// Read all the helm charts in the un-tared cluster addon.
Expand All @@ -495,7 +681,7 @@
return addonStages, nil
}

func (r *ClusterAddonReconciler) templateAndApplyClusterAddonHelmChart(ctx context.Context, in templateAndApplyClusterAddonInput, shouldDelete bool) (bool, error) {

Check failure on line 684 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
clusterAddonChart := in.clusterAddonChartPath
var shouldRequeue bool

Expand All @@ -517,10 +703,11 @@
}

in.clusterAddon.Status.Resources = newResources

return shouldRequeue, nil
}

func (r *ClusterAddonReconciler) executeStage(ctx context.Context, stage clusteraddon.Stage, in templateAndApplyClusterAddonInput) (bool, error) {

Check failure on line 710 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: stage is heavy (112 bytes); consider passing it by pointer (gocritic)
logger := log.FromContext(ctx)

var (
Expand Down Expand Up @@ -600,7 +787,7 @@
} else {
// Delete part
logger.V(1).Info("starting to delete helm chart", "clusterStack", in.clusterAddon.Spec.ClusterStack, "helm chart", stage.HelmChartName, "hook", in.clusterAddon.Spec.Hook)
shouldRequeue, err = helmTemplateAndDeleteNewClusterStack(ctx, in, stage.HelmChartName)
shouldRequeue, err = r.helmTemplateAndDeleteNewClusterStack(ctx, in, stage.HelmChartName)
if err != nil {
return false, fmt.Errorf("failed to delete helm chart: %w", err)
}
Expand Down Expand Up @@ -718,7 +905,7 @@
return releaseAsset, false, nil
}

func (r *ClusterAddonReconciler) templateAndApplyNewClusterStackAddonHelmChart(ctx context.Context, in templateAndApplyClusterAddonInput, helmChartName string) (bool, error) {

Check failure on line 908 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
var (
oldHelmTemplate []byte
oldBuildTemplate []byte
Expand All @@ -729,6 +916,7 @@
if in.oldDestinationClusterAddonChartDir != "" {
oldClusterStackSubDirPath := filepath.Join(in.oldDestinationClusterAddonChartDir, helmChartName)

// we skip helm templating if last cluster stack don't follow the new convention.
if _, err := os.Stat(filepath.Join(oldClusterStackSubDirPath, release.OverwriteYaml)); err == nil {
oldBuildTemplate, err = buildTemplateFromClusterAddonValues(ctx, filepath.Join(oldClusterStackSubDirPath, release.OverwriteYaml), in.cluster, r.Client, true)
if err != nil {
Expand Down Expand Up @@ -761,12 +949,13 @@
return false, fmt.Errorf("failed to apply objects from cluster addon Helm chart: %w", err)
}

// This is for the current stage objects and will be removed once done.
in.clusterAddon.Status.Resources = newResources

return shouldRequeue, nil
}

func helmTemplateAndDeleteNewClusterStack(ctx context.Context, in templateAndApplyClusterAddonInput, helmChartName string) (bool, error) {
func (r *ClusterAddonReconciler) helmTemplateAndDeleteNewClusterStack(ctx context.Context, in templateAndApplyClusterAddonInput, helmChartName string) (bool, error) {

Check failure on line 958 in internal/controller/clusteraddon_controller.go

View workflow job for this annotation

GitHub Actions / Lint Pull Request

hugeParam: in is heavy (144 bytes); consider passing it by pointer (gocritic)
var (
buildTemplate []byte
err error
Expand All @@ -778,12 +967,13 @@
return false, fmt.Errorf("failed to template new helm chart: %w", err)
}

newResources, shouldRequeue, err := in.kubeClient.DeleteNewClusterStack(ctx, newHelmTemplate)
deletedResources, shouldRequeue, err := in.kubeClient.DeleteNewClusterStack(ctx, newHelmTemplate)
if err != nil {
return false, fmt.Errorf("failed to delete objects from cluster addon Helm chart: %w", err)
}

in.clusterAddon.Status.Resources = newResources
// This is for the current stage objects and will be removed once done.
in.clusterAddon.Status.Resources = deletedResources

return shouldRequeue, nil
}
Expand Down Expand Up @@ -1096,6 +1286,10 @@
}
case tar.TypeReg:
// Create regular files
if err := os.MkdirAll(filepath.Dir(targetPath), os.ModePerm); err != nil {
return fmt.Errorf("%q: creating directory: %w", filepath.Dir(targetPath), err)
}

outputFile, err := os.Create(filepath.Clean(targetPath))
if err != nil {
return fmt.Errorf("%q: creating file: %w", targetPath, err)
Expand All @@ -1118,3 +1312,21 @@

return nil
}

// removeResourcesFromCurrentListOfObjects returns baseList without the
// resources contained in itemsToRemove. Resources are matched by
// group/version/kind, namespace and name rather than by pointer, since the
// two lists originate from separate helm template runs and would otherwise
// never compare equal.
func removeResourcesFromCurrentListOfObjects(baseList []*csov1alpha1.Resource, itemsToRemove []*csov1alpha1.Resource) []*csov1alpha1.Resource {
	// Identity key for value-based comparison.
	key := func(resource *csov1alpha1.Resource) string {
		return resource.GroupVersionKind().String() + "/" + resource.Namespace + "/" + resource.Name
	}

	// Index the items to remove for fast lookup.
	removeSet := make(map[string]struct{}, len(itemsToRemove))
	for _, item := range itemsToRemove {
		removeSet[key(item)] = struct{}{}
	}

	// Build a new list without the removed items.
	var newList []*csov1alpha1.Resource
	for _, item := range baseList {
		if _, found := removeSet[key(item)]; !found {
			newList = append(newList, item)
		}
	}

	return newList
}
Loading
Loading