From 5f18588fe6c4de41a6d50821b43b2e215fc82f9f Mon Sep 17 00:00:00 2001
From: Ramana Raja
Date: Fri, 4 May 2018 03:39:09 +0530
Subject: [PATCH] ceph_volume_client: allow volumes without namespace isolation

Fixes: https://tracker.ceph.com/issues/23695
Signed-off-by: Ramana Raja
---
 qa/tasks/cephfs/test_volume_client.py | 53 +++++++++++++++++
 src/pybind/ceph_volume_client.py      | 84 +++++++++++++++++++++------
 2 files changed, 119 insertions(+), 18 deletions(-)

diff --git a/qa/tasks/cephfs/test_volume_client.py b/qa/tasks/cephfs/test_volume_client.py
index 76de57fefc13e..f69a04f0cfd23 100644
--- a/qa/tasks/cephfs/test_volume_client.py
+++ b/qa/tasks/cephfs/test_volume_client.py
@@ -343,6 +343,19 @@ def test_idempotency(self):
             vc.delete_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
             vc.purge_volume(vp, data_isolated=True)
+
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.create_volume(vp, 10, namespace_isolated=False)
+            vc.authorize(vp, "{guest_entity}")
+            vc.authorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.deauthorize(vp, "{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.evict("{guest_entity}")
+            vc.delete_volume(vp)
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+            vc.purge_volume(vp)
         """.format(
             group_id=group_id,
             volume_id=volume_id,
@@ -1014,3 +1027,43 @@ def test_21501(self):
         # Mount the volume in the guest using the auth ID to assert that the
         # auth caps are valid
         guest_mount.mount(mount_path=mount_path)
+
+    def test_volume_without_namespace_isolation(self):
+        """
+        That volume client can create volumes that do not have separate RADOS
+        namespace layouts.
+        """
+        vc_mount = self.mounts[1]
+        vc_mount.umount_wait()
+
+        # Configure vc_mount as the handle for driving volumeclient
+        self._configure_vc_auth(vc_mount, "manila")
+
+        # Create a volume
+        volume_prefix = "/myprefix"
+        group_id = "grpid"
+        volume_id = "volid"
+        mount_path = self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            create_result = vc.create_volume(vp, 1024*1024*10, namespace_isolated=False)
+            print create_result['mount_path']
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id
+        )), volume_prefix)
+
+        # The CephFS volume should be created
+        self.mounts[0].stat(os.path.join("myprefix", group_id, volume_id))
+        vol_namespace = self.mounts[0].getfattr(
+            os.path.join("myprefix", group_id, volume_id),
+            "ceph.dir.layout.pool_namespace")
+        assert not vol_namespace
+
+        self._volume_client_python(vc_mount, dedent("""
+            vp = VolumePath("{group_id}", "{volume_id}")
+            vc.delete_volume(vp)
+            vc.purge_volume(vp)
+        """.format(
+            group_id=group_id,
+            volume_id=volume_id,
+        )), volume_prefix)
diff --git a/src/pybind/ceph_volume_client.py b/src/pybind/ceph_volume_client.py
index 8e21e13fffa6d..47f5f7d3cfb51 100644
--- a/src/pybind/ceph_volume_client.py
+++ b/src/pybind/ceph_volume_client.py
@@ -204,7 +204,7 @@ class CephFSVolumeClientError(Exception):
 
     * 1 - Initial version
     * 2 - Added get_object, put_object, delete_object methods to CephFSVolumeClient
-
+    * 3 - Allow volumes to be created without RADOS namespace isolation
 """


@@ -228,7 +228,7 @@ class CephFSVolumeClient(object):
     """

     # Current version
-    version = 2
+    version = 3

     # Where shall we create our volumes?
     POOL_PREFIX = "fsvolume_"
@@ -600,7 +600,7 @@ def _mkdir_p(self, path):
             except cephfs.ObjectNotFound:
                 self.fs.mkdir(subpath, 0o755)

-    def create_volume(self, volume_path, size=None, data_isolated=False):
+    def create_volume(self, volume_path, size=None, data_isolated=False, namespace_isolated=True):
         """
         Set up metadata, pools and auth for a volume.

@@ -610,6 +610,7 @@ def create_volume(self, volume_path, size=None, data_isolated=False):
         :param volume_path: VolumePath instance
         :param size: In bytes, or None for no size limit
         :param data_isolated: If true, create a separate OSD pool for this volume
+        :param namespace_isolated: If true, use separate RADOS namespace for this volume
         :return:
         """
         path = self._get_path(volume_path)
@@ -633,10 +634,17 @@ def create_volume(self, volume_path, size=None, data_isolated=False):
                 })
             self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)

-        # enforce security isolation, use seperate namespace for this volume
-        namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
-        log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
-        self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+        # enforce security isolation, use separate namespace for this volume
+        if namespace_isolated:
+            namespace = "{0}{1}".format(self.pool_ns_prefix, volume_path.volume_id)
+            log.info("create_volume: {0}, using rados namespace {1} to isolate data.".format(volume_path, namespace))
+            self.fs.setxattr(path, 'ceph.dir.layout.pool_namespace', namespace, 0)
+        else:
+            # If volume's namespace layout is not set, then the volume's pool
+            # layout remains unset and will undesirably change with ancestor's
+            # pool layout changes.
+            pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
+            self.fs.setxattr(path, 'ceph.dir.layout.pool', pool_name, 0)

         # Create a volume meta file, if it does not already exist, to store
         # data about auth ids having access to the volume
@@ -1025,15 +1033,23 @@ def _authorize_ceph(self, volume_path, auth_id, readonly):
         # First I need to work out what the data pool is for this share:
         # read the layout
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None

         # Now construct auth capabilities that give the guest just enough
         # permissions to access the share
         client_entity = "client.{0}".format(auth_id)
         want_access_level = 'r' if readonly else 'rw'
         want_mds_cap = 'allow {0} path={1}'.format(want_access_level, path)
-        want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-            want_access_level, pool_name, namespace)
+        if namespace:
+            want_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                want_access_level, pool_name, namespace)
+        else:
+            want_osd_cap = 'allow {0} pool={1}'.format(want_access_level,
+                                                       pool_name)

         try:
             existing = self._rados_command(
@@ -1061,8 +1077,9 @@ def _authorize_ceph(self, volume_path, auth_id, readonly):
             # auth caps.
             unwanted_access_level = 'r' if want_access_level is 'rw' else 'rw'
             unwanted_mds_cap = 'allow {0} path={1}'.format(unwanted_access_level, path)
-            unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
-                unwanted_access_level, pool_name, namespace)
+            if namespace:
+                unwanted_osd_cap = 'allow {0} pool={1} namespace={2}'.format(
+                    unwanted_access_level, pool_name, namespace)

             def cap_update(orig, want, unwanted):
                 # Updates the existing auth caps such that there is a single
@@ -1079,8 +1096,32 @@ def cap_update(orig, want, unwanted):

                 return ",".join(cap_tokens)

-            osd_cap_str = cap_update(cap['caps'].get('osd', ""), want_osd_cap, unwanted_osd_cap)
+            def osd_cap_update_without_namespace(
+                    orig_osd_caps, want_osd_cap, pool):
+                if not orig_osd_caps:
+                    return want_osd_cap
+
+                rw_osd_cap = 'allow rw pool={}'.format(pool)
+                ro_osd_cap = 'allow r pool={}'.format(pool)
+
+                osd_cap_tokens = set(orig_osd_caps.split(","))
+
+                if rw_osd_cap in osd_cap_tokens:
+                    return orig_osd_caps
+                elif ro_osd_cap in osd_cap_tokens:
+                    osd_cap_tokens.discard(ro_osd_cap)
+
+                osd_cap_tokens.add(want_osd_cap)
+
+                return ",".join(osd_cap_tokens)
+
             mds_cap_str = cap_update(cap['caps'].get('mds', ""), want_mds_cap, unwanted_mds_cap)
+            if namespace:
+                osd_cap_str = cap_update(cap['caps'].get('osd', ""),
+                                         want_osd_cap, unwanted_osd_cap)
+            else:
+                osd_cap_str = osd_cap_update_without_namespace(
+                    cap['caps'].get('osd', ""), want_osd_cap, pool_name)

             caps = self._rados_command(
                 'auth caps',
@@ -1187,15 +1228,17 @@ def _deauthorize(self, volume_path, auth_id):
         client_entity = "client.{0}".format(auth_id)
         path = self._get_path(volume_path)
         pool_name = self._get_ancestor_xattr(path, "ceph.dir.layout.pool")
-        namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        try:
+            namespace = self.fs.getxattr(path, "ceph.dir.layout.pool_namespace")
+        except cephfs.NoData:
+            namespace = None

         # The auth_id might have read-only or read-write mount access for the
         # volume path.
         access_levels = ('r', 'rw')
         want_mds_caps = {'allow {0} path={1}'.format(access_level, path)
                          for access_level in access_levels}
-        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(
-            access_level, pool_name, namespace)
+        want_osd_caps = {'allow {0} pool={1} namespace={2}'.format(access_level, pool_name, namespace)
                          for access_level in access_levels}

         try:
@@ -1211,9 +1254,14 @@ def cap_remove(orig, want):
                 return ",".join(cap_tokens.difference(want))

             cap = existing[0]
-            osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
             mds_cap_str = cap_remove(cap['caps'].get('mds', ""), want_mds_caps)
-            if (not osd_cap_str) and (not mds_cap_str):
+            if namespace:
+                osd_cap_str = cap_remove(cap['caps'].get('osd', ""), want_osd_caps)
+            else:
+                # Leave OSD caps as is
+                osd_cap_str = cap['caps'].get('osd', "")
+
+            if not mds_cap_str:
                 self._rados_command('auth del', {'entity': client_entity}, decode=False)
             else:
                 self._rados_command(
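
For reviewers who want to exercise the new flag outside the qa suite, here is a minimal sketch of how a consumer of CephFSVolumeClient might create and clean up a volume without namespace isolation. It is illustrative only and not part of the patch: the constructor arguments, the connect()/disconnect() calls, and the "manila"/"guest" IDs, ceph.conf path, and cluster name are assumptions about a typical deployment; the create_volume, authorize, deauthorize, evict, delete_volume, and purge_volume calls mirror the ones used in the tests above.

    # Sketch only (not part of this patch). Connection details below are
    # placeholders; adjust the auth ID, conf path, and cluster name to your setup.
    from ceph_volume_client import CephFSVolumeClient, VolumePath

    vc = CephFSVolumeClient("manila", "/etc/ceph/ceph.conf", "ceph")
    vc.connect()
    try:
        vp = VolumePath("grpid", "volid")

        # With namespace_isolated=False the volume gets no per-volume RADOS
        # namespace; its data stays in the ancestor's pool, and the pool layout
        # is pinned explicitly so later ancestor layout changes don't move it.
        result = vc.create_volume(vp, size=10 * 1024 * 1024,
                                  namespace_isolated=False)
        print(result['mount_path'])

        # The guest's OSD cap is then scoped to the pool only,
        # i.e. 'allow rw pool=<pool>' with no 'namespace=' clause.
        vc.authorize(vp, "guest")

        # Cleanup mirrors the idempotency test above.
        vc.deauthorize(vp, "guest")
        vc.evict("guest")
        vc.delete_volume(vp)
        vc.purge_volume(vp)
    finally:
        vc.disconnect()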