From 8e91eb903d3e96ba88ded66ecdaad0db076adc54 Mon Sep 17 00:00:00 2001
From: Ramana Raja
Date: Fri, 4 May 2018 03:39:09 +0530
Subject: [PATCH] ceph_volume_client: allow volumes without namespace isolation

Fixes: https://tracker.ceph.com/issues/23695

Signed-off-by: Ramana Raja
---
 src/pybind/ceph_volume_client.py | 55 ++++++++++++++++++++++----------
 1 file changed, 38 insertions(+), 17 deletions(-)

diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 8e21e13fffa6d1..2bc27847731b9e 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -204,7 +204,7 @@ class CephFSVolumeClientError(Exception):
 
     * 1 - Initial version
     * 2 - Added get_object, put_object, delete_object methods to
     CephFSVolumeClient
-
+    * 3 - Allow volumes to be created without RADOS namespace isolation
 
 """
@@ -228,7 +228,7 @@ class CephFSVolumeClient(object):
     """
 
     # Current version
-    version = 2
+    version = 3
 
     # Where shall we create our volumes?
     POOL_PREFIX = "fsvolume_"
@@ -600,7 +600,7 @@ def _mkdir_p(self, path):
             except cephfs.ObjectNotFound:
                 self.fs.mkdir(subpath, 0o755)
 
-    def create_volume(self, volume_path, size=None, data_isolated=False):
+    def create_volume(self, volume_path, size=None, data_isolated=False, namespace_isolated=True):
         """
         Set up metadata, pools and auth for a volume.
 
@@ -610,6 +610,7 @@ def create_volume(self, volume_path, size=None, data_isolated=False):
         :param volume_path: VolumePath instance
         :param size: In bytes, or None for no size limit
         :param data_isolated: If true, create a separate OSD pool for this volume
+        :param namespace_isolated: If true, use separate RADOS namespace for this volume
         :return:
         """
         path = self._get_path(volume_path)
@@ -633,10 +634,11 @@ def create_volume(self, volume_path, size=None, data_isolated=False):
                 })
             self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)
 
-        # enforce security isolation, use seperate namespace for this volume
-        namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
-        log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
-        self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+        # enforce security isolation, use separate namespace for this volume
+        if namespace_isolated:
+            namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
+            log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
+            self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
 
         # Create a volume meta file, if it does not already exist, to store
         # data about auth ids having access to the volume
@@ -1025,15 +1027,23 @@ def _authorize_ceph(self, volume_path, auth_id, readonly):
         # First I need to work out what the data pool is for this share:
         # read the layout
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None
 
         # Now construct auth capabilities that give the guest just enough
         # permissions to access the share
         client_entity = "client.{0}".format(auth_id)
         want_access_level = 'r' if readonly else 'rw'
         want_mds_cap = 'allow {0} path={1}'.format(want_access_level, path)
-        want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-            want_access_level, pool_name, namespace)
+        if namespace:
+            want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                want_access_level, pool_name, namespace)
+        else:
+            want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
+                                                       pool_name)
 
         try:
             existing = self._rados_command(
@@ -1061,8 +1071,12 @@ def _authorize_ceph(self, volume_path, auth_id, readonly):
         # auth caps.
         unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
         unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
-        unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-            unwanted_access_level, pool_name, namespace)
+        if namespace:
+            unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                unwanted_access_level, pool_name, namespace)
+        else:
+            unwanted_osd_cap = 'allow {0} pool={1}'.format(
+                unwanted_access_level, pool_name)
 
         def cap_update(orig, want, unwanted):
             # Updates the existing auth caps such that there is a single
@@ -1187,15 +1201,17 @@ def _deauthorize(self, volume_path, auth_id):
         client_entity = "client.{0}".format(auth_id)
         path = self._get_path(volume_path)
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None
 
         # The auth_id might have read-only or read-write mount access for the
         # volume path.
         access_levels = ('r', 'rw')
         want_mds_caps = {'allow {0} path={1}'.format(access_level, path)
                          for access_level in access_levels}
-        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(
-            access_level, pool_name, namespace)
+        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(access_level, pool_name, namespace)
                          for access_level in access_levels}
 
         try:
@@ -1211,9 +1227,14 @@ def cap_remove(orig, want):
                 return ",".join(cap_tokens.difference(want))
 
             cap = existing[0]
-            osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
             mds_cap_str = cap_remove(cap['caps'].get('mds', ""), want_mds_caps)
-            if (not osd_cap_str) and (not mds_cap_str):
+            if namespace:
+                osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
+            else:
+                # Leave OSD caps as is
+                osd_cap_str = cap['caps'].get('osd', "")
+
+            if not mds_cap_str:
                 self._rados_command('auth del', {'entity': client_entity}, decode=False)
             else:
                 self._rados_command(