Skip to content

Commit

Permalink
clusterctl: add cluster class support to clusterctl generate cluster …
Browse files Browse the repository at this point in the history
…command
  • Loading branch information
ykakarap committed Oct 20, 2021
1 parent e1597ec commit 61f13e1
Show file tree
Hide file tree
Showing 16 changed files with 999 additions and 5 deletions.
37 changes: 35 additions & 2 deletions cmd/clusterctl/client/client_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -436,7 +436,7 @@ func (f fakeRepositoryClient) Components() repository.ComponentsClient {
}

func (f fakeRepositoryClient) Templates(version string) repository.TemplateClient {
// use a fakeTemplateClient (instead of the internal client used in other fake objects) we can de deterministic on what is returned (e.g. avoid interferences from overrides)
// Use a fakeTemplateClient (instead of the internal client used in other fake objects) we can be deterministic on what is returned (e.g. avoid interferences from overrides)
return &fakeTemplateClient{
version: version,
fakeRepository: f.fakeRepository,
Expand All @@ -445,8 +445,18 @@ func (f fakeRepositoryClient) Templates(version string) repository.TemplateClien
}
}

// ClusterClasses returns a fake ClusterClassClient serving files from the in-memory repository.
func (f fakeRepositoryClient) ClusterClasses(version string) repository.ClusterClassClient {
	// Use a fakeClusterClassClient (instead of the internal client used in other fake objects) so we can be deterministic on what is returned (e.g. avoid interferences from overrides).
	return &fakeClusterClassClient{
		version:               version,
		fakeRepository:        f.fakeRepository,
		configVariablesClient: f.configClient.Variables(),
		processor:             f.processor,
	}
}

func (f fakeRepositoryClient) Metadata(version string) repository.MetadataClient {
// use a fakeMetadataClient (instead of the internal client used in other fake objects) we can de deterministic on what is returned (e.g. avoid interferences from overrides)
// Use a fakeMetadataClient (instead of the internal client used in other fake objects) we can be deterministic on what is returned (e.g. avoid interferences from overrides)
return &fakeMetadataClient{
version: version,
fakeRepository: f.fakeRepository,
Expand Down Expand Up @@ -506,6 +516,29 @@ func (f *fakeTemplateClient) Get(flavor, targetNamespace string, skipTemplatePro
})
}

// fakeClusterClassClient provides a super simple ClusterClassClient (e.g. without support for local overrides).
type fakeClusterClassClient struct {
	version               string                       // repository version to read ClusterClass files from
	fakeRepository        *repository.MemoryRepository // in-memory repository acting as the artifact source
	configVariablesClient config.VariablesClient       // variables used when processing the template
	processor             yaml.Processor               // YAML processor applied to the raw artifact
}

// Get fetches the "clusterclass-<class>.yaml" file from the fake repository at the
// client's version and wraps it into a Template targeting the given namespace.
func (f *fakeClusterClassClient) Get(class, targetNamespace string, skipTemplateProcess bool) (repository.Template, error) {
	fileName := fmt.Sprintf("clusterclass-%s.yaml", class)
	rawArtifact, err := f.fakeRepository.GetFile(f.version, fileName)
	if err != nil {
		return nil, err
	}
	templateInput := repository.TemplateInput{
		RawArtifact:           rawArtifact,
		ConfigVariablesClient: f.configVariablesClient,
		Processor:             f.processor,
		TargetNamespace:       targetNamespace,
		SkipTemplateProcess:   skipTemplateProcess,
	}
	return repository.NewTemplate(templateInput)
}

// fakeMetadataClient provides a super simple MetadataClient (e.g. without support for local overrides/embedded metadata).
type fakeMetadataClient struct {
version string
Expand Down
32 changes: 31 additions & 1 deletion cmd/clusterctl/client/cluster/inventory.go
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ type CheckCAPIContractOptions struct {

// AllowCAPIContracts instructs CheckCAPIContract to tolerate management clusters with Cluster API with the given contract.
AllowCAPIContracts []string

// AllowCAPIAnyContract instructs CheckCAPIContract to tolerate management clusters with Cluster API installed with any contract.
AllowCAPIAnyContract bool
}

// AllowCAPINotInstalled instructs CheckCAPIContract to tolerate management clusters without Cluster API installed yet.
Expand All @@ -64,6 +67,15 @@ func (t AllowCAPINotInstalled) Apply(in *CheckCAPIContractOptions) {
in.AllowCAPINotInstalled = true
}

// AllowCAPIAnyContract instructs CheckCAPIContract to tolerate management clusters with any version of
// Cluster API installed, regardless of the contract it implements.
// NOTE: This allows `clusterctl generate cluster` with managed topologies to work properly: it only needs
// to know whether CAPI is installed, not which contract the installed version supports.
type AllowCAPIAnyContract struct{}

// Apply applies this configuration to the given CheckCAPIContractOptions.
func (t AllowCAPIAnyContract) Apply(in *CheckCAPIContractOptions) {
	// With this flag set, CheckCAPIContract succeeds as soon as the CAPI CRD is found,
	// without inspecting its stored versions.
	in.AllowCAPIAnyContract = true
}

// AllowCAPIContract instructs CheckCAPIContract to tolerate management clusters with Cluster API with the given contract.
// NOTE: This allows clusterctl upgrade to work on management clusters with old contract.
type AllowCAPIContract struct {
Expand Down Expand Up @@ -103,6 +115,9 @@ type InventoryClient interface {
// does not match the current one supported by clusterctl.
CheckCAPIContract(...CheckCAPIContractOption) error

// CheckCAPIInstalled checks if Cluster API is installed on the management cluster.
CheckCAPIInstalled() (bool, error)

// CheckSingleProviderInstance ensures that only one instance of a provider is running, returns error otherwise.
CheckSingleProviderInstance() error
}
Expand Down Expand Up @@ -395,6 +410,10 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption)
return errors.Wrap(err, "failed to check Cluster API version")
}

if opt.AllowCAPIAnyContract {
return nil
}

for _, version := range crd.Spec.Versions {
if version.Storage {
if version.Name == clusterv1.GroupVersion.Version {
Expand All @@ -411,6 +430,17 @@ func (p *inventoryClient) CheckCAPIContract(options ...CheckCAPIContractOption)
return errors.Errorf("failed to check Cluster API version")
}

// CheckCAPIInstalled checks if Cluster API is installed on the management cluster.
// It reports true when the core Cluster API CRDs exist, regardless of which contract
// version they implement; a missing CRD means CAPI is not installed (not an error).
func (p *inventoryClient) CheckCAPIInstalled() (bool, error) {
	if err := p.CheckCAPIContract(AllowCAPIAnyContract{}); err != nil {
		if apierrors.IsNotFound(err) {
			// The expected CRDs are not installed on the management cluster. This would mean that Cluster API is not installed on the cluster.
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func (p *inventoryClient) CheckSingleProviderInstance() error {
providers, err := p.List()
if err != nil {
Expand All @@ -436,7 +466,7 @@ func (p *inventoryClient) CheckSingleProviderInstance() error {

if len(errs) > 0 {
return errors.Wrap(kerrors.NewAggregate(errs), "detected multiple instances of the same provider, "+
"but clusterctl v1alpha4 does not support this use case. See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/support-multiple-instances.html for more details")
"but clusterctl does not support this use case. See https://cluster-api.sigs.k8s.io/developer/architecture/controllers/support-multiple-instances.html for more details")
}

return nil
Expand Down
177 changes: 177 additions & 0 deletions cmd/clusterctl/client/clusterclass.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,177 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
"context"
"fmt"

"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
"sigs.k8s.io/cluster-api/cmd/clusterctl/client/repository"
"sigs.k8s.io/cluster-api/cmd/clusterctl/internal/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
)

// addClusterClassIfMissing returns a Template that includes the base template and adds any cluster class definitions that
// are referenced in the template. If the cluster class referenced already exists in the cluster it is not added to the
// template.
func addClusterClassIfMissing(template Template, clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, targetNamespace string, listVariablesOnly bool) (Template, error) {
	classes, err := clusterClassNamesFromTemplate(template)
	if err != nil {
		return nil, err
	}
	// If the template does not reference any ClusterClass, return early.
	if len(classes) == 0 {
		return template, nil
	}

	// Fetch only the ClusterClasses (and associated objects) that are not already in the cluster.
	clusterClassesTemplate, err := fetchMissingClusterClassTemplates(clusterClassClient, clusterClient, classes, targetNamespace, listVariablesOnly)
	if err != nil {
		return nil, err
	}

	mergedTemplate, err := repository.MergeTemplates(template, clusterClassesTemplate)
	if err != nil {
		return nil, err
	}

	return mergedTemplate, nil
}

// clusterClassNamesFromTemplate returns the list of cluster classes referenced
// by clusters defined in the template. If no clusters are defined in the template
// or if no cluster uses a cluster class it returns an empty list.
func clusterClassNamesFromTemplate(template Template) ([]string, error) {
	classes := []string{}

	// Loop through all the objects and if the object is a Cluster
	// check and see if cluster.spec.topology.class is defined.
	// If defined, add the value to the result.
	objs := template.Objs() // hoisted: avoid re-evaluating Objs() on every iteration
	for i := range objs {
		obj := objs[i]
		if obj.GroupVersionKind().GroupKind() != clusterv1.GroupVersion.WithKind("Cluster").GroupKind() {
			continue
		}
		cluster := &clusterv1.Cluster{}
		if err := scheme.Scheme.Convert(&obj, cluster, nil); err != nil {
			return nil, errors.Wrap(err, "failed to convert object to Cluster")
		}
		if cluster.Spec.Topology == nil {
			continue
		}
		classes = append(classes, cluster.Spec.Topology.Class)
	}
	return classes, nil
}

// fetchMissingClusterClassTemplates returns a list of templates for cluster classes that do not yet exist
// in the cluster. If the cluster is not initialized, all the ClusterClasses are added.
func fetchMissingClusterClassTemplates(clusterClassClient repository.ClusterClassClient, clusterClient cluster.Client, classes []string, targetNamespace string, listVariablesOnly bool) (Template, error) {
	// First check if the cluster is initialized.
	// If it is initialized:
	//    For every ClusterClass check if it already exists in the cluster.
	//    If the ClusterClass already exists there is nothing further to do.
	//    If not, get the ClusterClass from the repository.
	// If it is not initialized:
	//    For every ClusterClass fetch the class definition from the repository.

	// Check if the cluster is initialized.
	// An unreachable cluster is treated as "not initialized" (best effort); any other
	// failure while querying the inventory is surfaced to the caller.
	clusterInitialized := false
	if err := clusterClient.Proxy().CheckClusterAvailable(); err == nil {
		capiInstalled, err := clusterClient.ProviderInventory().CheckCAPIInstalled()
		if err != nil {
			return nil, errors.Wrap(err, "failed to check if the cluster is initialized")
		}
		clusterInitialized = capiInstalled
	}

	// A live client is only needed (and only obtainable) when the cluster is initialized.
	var c client.Client
	if clusterInitialized {
		var err error
		c, err = clusterClient.Proxy().NewClient()
		if err != nil {
			return nil, err
		}
	}

	// Get the templates for all ClusterClasses and associated objects if the target
	// ClusterClass does not exist in the cluster.
	templates := []repository.Template{}
	for _, class := range classes {
		if clusterInitialized {
			exists, err := clusterClassExists(c, class, targetNamespace)
			if err != nil {
				return nil, err
			}
			if exists {
				continue
			}
		}
		// The cluster is either not initialized or the ClusterClass does not yet exist in the cluster.
		// Fetch the cluster class to install.
		clusterClassTemplate, err := clusterClassClient.Get(class, targetNamespace, listVariablesOnly)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get the cluster class template for %q", class)
		}

		// If any of the objects in the ClusterClass template already exist in the cluster then
		// we should error out.
		// We do this to avoid adding partial items from the template in the output YAML. This ensures
		// that we do not add a ClusterClass (and associated objects) whose definition is unknown.
		if clusterInitialized {
			for _, obj := range clusterClassTemplate.Objs() {
				if exists, err := objExists(c, obj); err != nil {
					return nil, err
				} else if exists {
					return nil, fmt.Errorf("%s(%s) already exists in the cluster", obj.GetName(), obj.GetObjectKind().GroupVersionKind())
				}
			}
		}
		templates = append(templates, clusterClassTemplate)
	}

	merged, err := repository.MergeTemplates(templates...)
	if err != nil {
		return nil, err
	}

	return merged, nil
}

// clusterClassExists reports whether a ClusterClass with the given name exists in targetNamespace.
func clusterClassExists(c client.Client, class, targetNamespace string) (bool, error) {
	key := client.ObjectKey{Name: class, Namespace: targetNamespace}
	err := c.Get(context.TODO(), key, &clusterv1.ClusterClass{})
	switch {
	case err == nil:
		return true, nil
	case apierrors.IsNotFound(err):
		// Not found is an expected outcome, not an error.
		return false, nil
	default:
		return false, errors.Wrapf(err, "failed to check if ClusterClass %q exists in the cluster", class)
	}
}

// objExists reports whether the given unstructured object is already present in the cluster.
func objExists(c client.Client, obj unstructured.Unstructured) (bool, error) {
	// Work on a copy so the caller's object is never mutated by the Get.
	target := obj.DeepCopy()
	err := c.Get(context.TODO(), client.ObjectKeyFromObject(target), target)
	if err == nil {
		return true, nil
	}
	if apierrors.IsNotFound(err) {
		// Not found is an expected outcome, not an error.
		return false, nil
	}
	return false, err
}

0 comments on commit 61f13e1

Please sign in to comment.