-
Notifications
You must be signed in to change notification settings - Fork 2.7k
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
csi: only create CSI config configmap in CSI reconciler #14089
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -254,12 +254,50 @@ func CreateCsiConfigMap(ctx context.Context, namespace string, clientset kuberne | |
if !k8serrors.IsAlreadyExists(err) { | ||
return errors.Wrapf(err, "failed to create initial csi config map %q (in %q)", configMap.Name, namespace) | ||
} | ||
// CM already exists; update owner refs to it if needed | ||
// this corrects issues where the csi config map was sometimes created with CephCluster | ||
// owner ref, which would result in the cm being deleted if that cluster was deleted | ||
if err := updateCsiConfigMapOwnerRefs(ctx, namespace, clientset, ownerInfo); err != nil { | ||
return errors.Wrapf(err, "failed to ensure csi config map %q (in %q) owner references", configMap.Name, namespace) | ||
} | ||
} | ||
|
||
logger.Infof("successfully created csi config map %q", configMap.Name) | ||
return nil | ||
} | ||
|
||
// check the owner references on the csi config map, and fix incorrect references if needed | ||
func updateCsiConfigMapOwnerRefs(ctx context.Context, namespace string, clientset kubernetes.Interface, expectedOwnerInfo *k8sutil.OwnerInfo) error { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. That's a fair question. After looking into it, it seems to me like it's slightly preferable to keep this PR as-is, just to avoid the developer time needed to rework it. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Agreed, let's keep it as is. |
||
cm, err := clientset.CoreV1().ConfigMaps(namespace).Get(ctx, ConfigName, metav1.GetOptions{}) | ||
if err != nil { | ||
return errors.Wrapf(err, "failed to fetch csi config map %q (in %q) which already exists", ConfigName, namespace) | ||
} | ||
|
||
existingOwners := cm.GetOwnerReferences() | ||
var currentOwner *metav1.OwnerReference = nil | ||
if len(existingOwners) == 1 { | ||
currentOwner = &existingOwners[0] // currentOwner is nil unless there is exactly one owner on the cm | ||
} | ||
// if there is exactly one owner, and it is correct --> no fix needed | ||
if currentOwner != nil && (currentOwner.UID == expectedOwnerInfo.GetUID()) { | ||
logger.Debugf("csi config map %q (in %q) has the expected owner; owner id: %q", ConfigName, namespace, currentOwner.UID) | ||
return nil | ||
} | ||
|
||
// must fix owner refs | ||
logger.Infof("updating csi configmap %q (in %q) owner info", ConfigName, namespace) | ||
cm.OwnerReferences = []metav1.OwnerReference{} | ||
if err := expectedOwnerInfo.SetControllerReference(cm); err != nil { | ||
return errors.Wrapf(err, "failed to set updated owner reference on csi config map %q (in %q)", ConfigName, namespace) | ||
} | ||
_, err = clientset.CoreV1().ConfigMaps(namespace).Update(ctx, cm, metav1.UpdateOptions{}) | ||
if err != nil { | ||
return errors.Wrapf(err, "failed to update csi config map %q (in %q) to update its owner reference", ConfigName, namespace) | ||
} | ||
|
||
return nil | ||
} | ||
|
||
// SaveClusterConfig updates the config map used to provide ceph-csi with | ||
// basic cluster configuration. The clusterNamespace and clusterInfo are | ||
// used to determine what "cluster" in the config map will be updated and | ||
|
@@ -292,10 +330,7 @@ func SaveClusterConfig(clientset kubernetes.Interface, clusterNamespace string, | |
configMap, err := clientset.CoreV1().ConfigMaps(csiNamespace).Get(clusterInfo.Context, ConfigName, metav1.GetOptions{}) | ||
if err != nil { | ||
if k8serrors.IsNotFound(err) { | ||
err = CreateCsiConfigMap(clusterInfo.Context, csiNamespace, clientset, clusterInfo.OwnerInfo) | ||
if err != nil { | ||
return errors.Wrap(err, "failed creating csi config map") | ||
} | ||
return errors.Wrap(err, "waiting for CSI config map to be created") | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Any use of |
||
} | ||
return errors.Wrap(err, "failed to fetch current csi config map") | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -200,6 +200,34 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e | |
return reconcile.Result{}, nil | ||
} | ||
|
||
ownerRef, err := k8sutil.GetDeploymentOwnerReference(r.opManagerContext, r.context.Clientset, os.Getenv(k8sutil.PodNameEnvVar), r.opConfig.OperatorNamespace) | ||
if err != nil { | ||
logger.Warningf("could not find deployment owner reference to assign to csi drivers. %v", err) | ||
} | ||
if ownerRef != nil { | ||
blockOwnerDeletion := false | ||
ownerRef.BlockOwnerDeletion = &blockOwnerDeletion | ||
} | ||
|
||
ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, r.opConfig.OperatorNamespace) | ||
// create an empty config map. config map will be filled with data | ||
// later when clusters have mons | ||
err = CreateCsiConfigMap(r.opManagerContext, r.opConfig.OperatorNamespace, r.context.Clientset, ownerInfo) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed creating csi config map") | ||
} | ||
Comment on lines
+215
to
+218
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. This is now the only usage of Because other instances were called with owner info being some CephCluster, they were all removed. This code was moved up from its location below so that it would run before |
||
|
||
err = peermap.CreateOrUpdateConfig(r.opManagerContext, r.context, &peermap.PeerIDMappings{}) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to create pool ID mapping config map") | ||
} | ||
|
||
exists, err := checkCsiCephConfigMapExists(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to get csi ceph.conf configmap") | ||
} | ||
CustomCSICephConfigExists = exists | ||
|
||
csiHostNetworkEnabled, err := strconv.ParseBool(k8sutil.GetValue(r.opConfig.Parameters, "CSI_ENABLE_HOST_NETWORK", "true")) | ||
if err != nil { | ||
return reconcile.Result{}, errors.Wrap(err, "failed to parse value for 'CSI_ENABLE_HOST_NETWORK'") | ||
|
@@ -264,34 +292,6 @@ func (r *ReconcileCSI) reconcile(request reconcile.Request) (reconcile.Result, e | |
} | ||
} | ||
|
||
ownerRef, err := k8sutil.GetDeploymentOwnerReference(r.opManagerContext, r.context.Clientset, os.Getenv(k8sutil.PodNameEnvVar), r.opConfig.OperatorNamespace) | ||
if err != nil { | ||
logger.Warningf("could not find deployment owner reference to assign to csi drivers. %v", err) | ||
} | ||
if ownerRef != nil { | ||
blockOwnerDeletion := false | ||
ownerRef.BlockOwnerDeletion = &blockOwnerDeletion | ||
} | ||
|
||
ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(ownerRef, r.opConfig.OperatorNamespace) | ||
// create an empty config map. config map will be filled with data | ||
// later when clusters have mons | ||
err = CreateCsiConfigMap(r.opManagerContext, r.opConfig.OperatorNamespace, r.context.Clientset, ownerInfo) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed creating csi config map") | ||
} | ||
|
||
err = peermap.CreateOrUpdateConfig(r.opManagerContext, r.context, &peermap.PeerIDMappings{}) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to create pool ID mapping config map") | ||
} | ||
|
||
exists, err := checkCsiCephConfigMapExists(r.opManagerContext, r.context.Clientset, r.opConfig.OperatorNamespace) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to get csi ceph.conf configmap") | ||
} | ||
CustomCSICephConfigExists = exists | ||
|
||
err = r.validateAndConfigureDrivers(serverVersion, ownerInfo) | ||
if err != nil { | ||
return opcontroller.ImmediateRetryResult, errors.Wrap(err, "failed to configure ceph csi") | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Will the external cluster reconcile be failed at some point if the configmap doesn't exist?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I looked into this, and I don't believe so.
pkg/operator/ceph/csi/controller.go
line 192 "gates" creation of the csi config map. The only condition that affects creation is whether or not any CephClusters exist. It doesn't matter if the clusters are internal or external. By my logic, if an external cluster exists, the CSI controller will create the configmap (unless there is a create error), and so it is safe for the external reconcile to wait until that happens. It might be good for you to double check my logic just to be sure.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I see now that
SaveClusterConfig()
will be called below on line 133, but given your changes in that method, it will fail the reconcile and retry. Makes sense to me.