vsphere creds checking #1186
@@ -57,6 +57,9 @@ const (
 	defaultRequeueTime = 10 * time.Second
 	maxProvisions      = 3

+	platformAuthFailureReason = "PlatformAuthError"
+	platformAuthSuccessReason = "PlatformAuthSuccess"
+
 	clusterImageSetNotFoundReason = "ClusterImageSetNotFound"
 	clusterImageSetFoundReason    = "ClusterImageSetFound"

@@ -71,14 +74,7 @@ const (
 	deleteAfterAnnotation    = "hive.openshift.io/delete-after" // contains a duration after which the cluster should be cleaned up.
 	tryInstallOnceAnnotation = "hive.openshift.io/try-install-once"

-	platformAWS       = "aws"
-	platformAzure     = "azure"
-	platformGCP       = "gcp"
-	platformOpenStack = "openstack"
-	platformVSphere   = "vsphere"
-	platformBaremetal = "baremetal"
-	platformUnknown   = "unknown"
-	regionUnknown     = "unknown"
+	regionUnknown = "unknown"
 )

 var (
@@ -163,10 +159,11 @@ func Add(mgr manager.Manager) error {
 // NewReconciler returns a new reconcile.Reconciler
 func NewReconciler(mgr manager.Manager, logger log.FieldLogger, rateLimiter flowcontrol.RateLimiter) reconcile.Reconciler {
 	r := &ReconcileClusterDeployment{
-		Client:       controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter),
-		scheme:       mgr.GetScheme(),
-		logger:       logger,
-		expectations: controllerutils.NewExpectations(logger),
+		Client:       controllerutils.NewClientWithMetricsOrDie(mgr, ControllerName, &rateLimiter),
+		scheme:       mgr.GetScheme(),
+		logger:       logger,
+		expectations: controllerutils.NewExpectations(logger),
+		validateCredentialsForClusterDeployment: controllerutils.ValidateCredentialsForClusterDeployment,
 	}
 	r.remoteClusterAPIClientBuilder = func(cd *hivev1.ClusterDeployment) remoteclient.Builder {
 		return remoteclient.NewBuilder(r.Client, cd, ControllerName)
@@ -275,6 +272,10 @@ type ReconcileClusterDeployment struct {
 	// for the remote cluster's API server
 	remoteClusterAPIClientBuilder func(cd *hivev1.ClusterDeployment) remoteclient.Builder

+	// validateCredentialsForClusterDeployment is what this controller will call to validate
+	// that the platform creds are good (used for testing)
+	validateCredentialsForClusterDeployment func(client.Client, *hivev1.ClusterDeployment, log.FieldLogger) (bool, error)

Review comment: I kinda wish this could be a separate controller, but obviously we don't want to proceed with launching an install here until it's been run. I know we typically wouldn't see a condition false unless it had previously been true, but would it make sense in this case? Don't launch unless we see an AuthenticationFailure condition set to False? It would be nice to encapsulate the responsibility for setting this across cloud providers into its own controller.

+
 	protectedDelete bool
 }
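
Since the validator is exposed as an injectable struct field precisely so tests can stub it out (per the comment above), a hedged sketch of how a unit test might exercise the failure path. The names and setup below are illustrative, not from this PR, and assume the controller package's existing imports and test scaffolding:

```go
// Illustrative only: swap in a stub so the reconcile path can be exercised
// without contacting a real vCenter.
fakeValidator := func(client.Client, *hivev1.ClusterDeployment, log.FieldLogger) (bool, error) {
	// Pretend the platform credentials failed the authentication check.
	return false, nil
}
r := &ReconcileClusterDeployment{
	// ...Client, scheme, logger, expectations populated as the existing tests already do...
	validateCredentialsForClusterDeployment: fakeValidator,
}
_ = r // the test would then call Reconcile and assert the AuthenticationFailure condition is True
```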
@@ -582,6 +583,27 @@ func (r *ReconcileClusterDeployment) reconcile(request reconcile.Request, cd *hi
 		return *result, err
 	}

+	// Sanity check the platform/cloud credentials.
+	validCreds, err := r.validatePlatformCreds(cd, cdLog)
+	if err != nil {
+		cdLog.WithError(err).Error("unable to validate platform credentials")
+		return reconcile.Result{}, err
+	}
+	// Make sure the condition is set properly.
+	_, err = r.setAuthenticationFailure(cd, validCreds, cdLog)
+	if err != nil {
+		cdLog.WithError(err).Error("unable to update clusterdeployment")
+		return reconcile.Result{}, err
+	}
+
+	// If the platform credentials are no good, return error and go into backoff
+	authCondition := controllerutils.FindClusterDeploymentCondition(cd.Status.Conditions, hivev1.AuthenticationFailureClusterDeploymentCondition)
+	if authCondition != nil && authCondition.Status == corev1.ConditionTrue {
+		authError := errors.New(authCondition.Message)
+		cdLog.WithError(authError).Error("cannot proceed with provision while platform credentials authentication is failing.")
+		return reconcile.Result{}, authError
+	}
+
 	imageSet, err := r.getClusterImageSet(cd, cdLog)
 	if err != nil {
 		return reconcile.Result{}, err
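
The inline review comment earlier asks whether provisioning should wait for an explicitly-False AuthenticationFailure condition instead of merely bailing out when the condition is True, as this hunk does. That alternative is not implemented in this PR; a rough sketch of what such a gate could look like, reusing the helpers this controller already imports, might be:

```go
// Hypothetical helper (not part of this PR): only treat the credential gate as
// satisfied once the AuthenticationFailure condition has been explicitly set to False,
// e.g. by a separate credentials-checking controller.
func authGateSatisfied(cd *hivev1.ClusterDeployment) bool {
	cond := controllerutils.FindClusterDeploymentCondition(
		cd.Status.Conditions,
		hivev1.AuthenticationFailureClusterDeploymentCondition)
	return cond != nil && cond.Status == corev1.ConditionFalse
}
```

Reconcile would then requeue (for example after defaultRequeueTime) until this returns true, rather than only erroring once a failure condition is present.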
@@ -1272,6 +1294,36 @@ func (r *ReconcileClusterDeployment) setDNSNotReadyCondition(cd *hivev1.ClusterD
 	return r.Status().Update(context.TODO(), cd)
 }

+func (r *ReconcileClusterDeployment) setAuthenticationFailure(cd *hivev1.ClusterDeployment, authSuccessful bool, cdLog log.FieldLogger) (bool, error) {
+
+	var status corev1.ConditionStatus
+	var reason, message string
+
+	if authSuccessful {
+		status = corev1.ConditionFalse
+		reason = platformAuthSuccessReason
+		message = "Platform credentials passed authentication check"
+	} else {
+		status = corev1.ConditionTrue
+		reason = platformAuthFailureReason
+		message = "Platform credentials failed authentication check"
+	}
+
+	conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
+		cd.Status.Conditions,
+		hivev1.AuthenticationFailureClusterDeploymentCondition,
+		status,
+		reason,
+		message,
+		controllerutils.UpdateConditionIfReasonOrMessageChange)
+	if !changed {
+		return false, nil
+	}
+	cd.Status.Conditions = conditions
+
+	return changed, r.Status().Update(context.TODO(), cd)
+}
+
 func (r *ReconcileClusterDeployment) setInstallLaunchErrorCondition(cd *hivev1.ClusterDeployment, status corev1.ConditionStatus, reason string, message string, cdLog log.FieldLogger) error {
 	conditions, changed := controllerutils.SetClusterDeploymentConditionWithChangeCheck(
 		cd.Status.Conditions,
@@ -2113,6 +2165,11 @@ func (r *ReconcileClusterDeployment) deleteOldFailedProvisions(provs []*hivev1.C
 		}
 	}
 }

+// validatePlatformCreds ensure the platform/cloud credentials are at least good enough to authenticate with
+func (r *ReconcileClusterDeployment) validatePlatformCreds(cd *hivev1.ClusterDeployment, logger log.FieldLogger) (bool, error) {
+	return r.validateCredentialsForClusterDeployment(r.Client, cd, logger)
+}
+
 // checkForFailedSync returns true if it finds that the ClusterSync has the Failed condition set
 func checkForFailedSync(clusterSync *hiveintv1alpha1.ClusterSync) bool {
 	for _, cond := range clusterSync.Status.Conditions {
@@ -2213,19 +2270,19 @@ func (r *ReconcileClusterDeployment) addOwnershipToSecret(cd *hivev1.ClusterDepl
 func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
 	switch {
 	case cd.Spec.Platform.AWS != nil:
-		return platformAWS
+		return constants.PlatformAWS
 	case cd.Spec.Platform.Azure != nil:
-		return platformAzure
+		return constants.PlatformAzure
 	case cd.Spec.Platform.GCP != nil:
-		return platformGCP
+		return constants.PlatformGCP
 	case cd.Spec.Platform.OpenStack != nil:
-		return platformOpenStack
+		return constants.PlatformOpenStack
 	case cd.Spec.Platform.VSphere != nil:
-		return platformVSphere
+		return constants.PlatformVSphere
 	case cd.Spec.Platform.BareMetal != nil:
-		return platformBaremetal
+		return constants.PlatformBaremetal
 	}
-	return platformUnknown
+	return constants.PlatformUnknown
 }

 // getClusterRegion returns the region of a given ClusterDeployment
@@ -0,0 +1,68 @@
+package utils
+
+import (
+	"context"
+
+	log "github.com/sirupsen/logrus"
+
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	"github.com/openshift/installer/pkg/types/vsphere"
+
+	hivev1 "github.com/openshift/hive/pkg/apis/hive/v1"
+	"github.com/openshift/hive/pkg/constants"
+)
+
+// ValidateCredentialsForClusterDeployment will attempt to verify that the platform/cloud credentials
+// for the given ClusterDeployment are valid.
+// Note: It simply checks that the username/password (or equivalent) can authenticate,
+// not that the credentials have any specific permissions.
+func ValidateCredentialsForClusterDeployment(kubeClient client.Client, cd *hivev1.ClusterDeployment, logger log.FieldLogger) (bool, error) {
+	secret := &corev1.Secret{}
+
+	switch getClusterPlatform(cd) {
+	case constants.PlatformVSphere:
+		secretKey := types.NamespacedName{Name: cd.Spec.Platform.VSphere.CredentialsSecretRef.Name, Namespace: cd.Namespace}
+		if err := kubeClient.Get(context.TODO(), secretKey, secret); err != nil {
+			logger.WithError(err).Error("failed to read in ClusterDeployment's platform creds")
+			return false, err
+		}
+		return validateVSphereCredentials(cd.Spec.Platform.VSphere.VCenter,
+			string(secret.Data[constants.UsernameSecretKey]),
+			string(secret.Data[constants.PasswordSecretKey]),
+			logger)
+	default:
+		// If we have no platform-specific credentials verification
+		// assume the creds are valid.
+		return true, nil
+	}
+}
+
+func validateVSphereCredentials(vcenter, username, password string, logger log.FieldLogger) (bool, error) {
+	_, _, err := vsphere.CreateVSphereClients(context.TODO(), vcenter, username, password)
+	logger.WithError(err).Warn("failed to validate VSphere credentials")
+	return err == nil, nil
+}
+
+// getClusterPlatform returns the platform of a given ClusterDeployment
+func getClusterPlatform(cd *hivev1.ClusterDeployment) string {
+	switch {
+	case cd.Spec.Platform.AWS != nil:
+		return constants.PlatformAWS
+	case cd.Spec.Platform.Azure != nil:
+		return constants.PlatformAzure
+	case cd.Spec.Platform.GCP != nil:
+		return constants.PlatformGCP
+	case cd.Spec.Platform.OpenStack != nil:
+		return constants.PlatformOpenStack
+	case cd.Spec.Platform.VSphere != nil:
+		return constants.PlatformVSphere
+	case cd.Spec.Platform.BareMetal != nil:
+		return constants.PlatformBaremetal
+	}
+	return constants.PlatformUnknown
+}
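
The vSphere check in this new helper boils down to: if the installer's CreateVSphereClients helper can log in, the credentials pass. For completeness, a minimal standalone sketch of the same probe; the environment variable names are invented for this illustration, whereas in the controller the values come from the referenced credentials secret:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"github.com/openshift/installer/pkg/types/vsphere"
)

// Standalone sketch: build the installer's vSphere clients and treat any error
// as a failed credential check, mirroring validateVSphereCredentials above.
func main() {
	vcenter := os.Getenv("VSPHERE_VCENTER")   // vCenter hostname (placeholder)
	username := os.Getenv("VSPHERE_USERNAME") // placeholder
	password := os.Getenv("VSPHERE_PASSWORD") // placeholder

	_, _, err := vsphere.CreateVSphereClients(context.TODO(), vcenter, username, password)
	if err != nil {
		fmt.Fprintf(os.Stderr, "credentials failed authentication check: %v\n", err)
		os.Exit(1)
	}
	fmt.Println("credentials passed authentication check")
}
```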
Review comment: Do you think there's any opportunity for better synergy with #1109? I can't see any immediately.