diff --git a/qa/workunits/rados/test_pool_access.sh b/qa/workunits/rados/test_pool_access.sh
index 95c9768c10f8b..2a7077a416930 100755
--- a/qa/workunits/rados/test_pool_access.sh
+++ b/qa/workunits/rados/test_pool_access.sh
@@ -2,22 +2,107 @@
 
 set -ex
 
-expect_1()
+KEYRING=$(mktemp)
+trap cleanup EXIT ERR HUP INT QUIT
+
+cleanup() {
+    (ceph auth del client.mon_read || true) >/dev/null 2>&1
+    (ceph auth del client.mon_write || true) >/dev/null 2>&1
+
+    rm -f $KEYRING
+}
+
+expect_false()
 {
-    set -x
-    set +e
-    "$@"
-    if [ $? == 1 ]; then return 0; else return 1; fi
+    set -x
+    if "$@"; then return 1; else return 0; fi
+}
+
+create_pool_op() {
+    ID=$1
+    POOL=$2
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.create_pool("${POOL}")
+EOF
 }
+
+delete_pool_op() {
+    ID=$1
+    POOL=$2
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+cluster.delete_pool("${POOL}")
+EOF
+}
+
+create_pool_snap_op() {
+    ID=$1
+    POOL=$2
+    SNAP=$3
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.create_snap("${SNAP}")
+EOF
+}
+
+remove_pool_snap_op() {
+    ID=$1
+    POOL=$2
+    SNAP=$3
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+ioctx.remove_snap("${SNAP}")
+EOF
+}
+
+test_pool_op()
+{
+    ceph auth get-or-create client.mon_read mon 'allow r' >> $KEYRING
+    ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
+
+    expect_false create_pool_op mon_read pool1
+    create_pool_op mon_write pool1
+
+    expect_false create_pool_snap_op mon_read pool1 snap1
+    create_pool_snap_op mon_write pool1 snap1
+
+    expect_false remove_pool_snap_op mon_read pool1 snap1
+    remove_pool_snap_op mon_write pool1 snap1
+
+    expect_false delete_pool_op mon_read pool1
+    delete_pool_op mon_write pool1
+}
 
 key=`ceph auth get-or-create-key client.poolaccess1 mon 'allow r' osd 'allow *'`
 rados --id poolaccess1 --key $key -p rbd ls
 
 key=`ceph auth get-or-create-key client.poolaccess2 mon 'allow r' osd 'allow * pool=nopool'`
-expect_1 rados --id poolaccess2 --key $key -p rbd ls
+expect_false rados --id poolaccess2 --key $key -p rbd ls
 
 key=`ceph auth get-or-create-key client.poolaccess3 mon 'allow r' osd 'allow rw pool=nopool'`
-expect_1 rados --id poolaccess3 --key $key -p rbd ls
+expect_false rados --id poolaccess3 --key $key -p rbd ls
+
+test_pool_op
 
 echo OK
diff --git a/qa/workunits/rbd/permissions.sh b/qa/workunits/rbd/permissions.sh
index d86de1bb97acd..9bcdd7914f9a9 100755
--- a/qa/workunits/rbd/permissions.sh
+++ b/qa/workunits/rbd/permissions.sh
@@ -29,11 +29,27 @@ recreate_pools() {
 delete_users() {
     (ceph auth del client.volumes || true) >/dev/null 2>&1
     (ceph auth del client.images || true) >/dev/null 2>&1
+
+    (ceph auth del client.snap_none || true) >/dev/null 2>&1
+    (ceph auth del client.snap_all || true) >/dev/null 2>&1
+    (ceph auth del client.snap_pool || true) >/dev/null 2>&1
+    (ceph auth del client.snap_profile_all || true) >/dev/null 2>&1
+    (ceph auth del client.snap_profile_pool || true) >/dev/null 2>&1
+
+    (ceph auth del client.mon_write || true) >/dev/null 2>&1
 }
 
 create_users() {
     ceph auth get-or-create client.volumes mon 'profile rbd' osd 'profile rbd pool=volumes, profile rbd-read-only pool=images' >> $KEYRING
     ceph auth get-or-create client.images mon 'profile rbd' osd 'profile rbd pool=images' >> $KEYRING
+
+    ceph auth get-or-create client.snap_none mon 'allow r' >> $KEYRING
+    ceph auth get-or-create client.snap_all mon 'allow r' osd 'allow w' >> $KEYRING
+    ceph auth get-or-create client.snap_pool mon 'allow r' osd 'allow w pool=images' >> $KEYRING
+    ceph auth get-or-create client.snap_profile_all mon 'allow r' osd 'profile rbd' >> $KEYRING
+    ceph auth get-or-create client.snap_profile_pool mon 'allow r' osd 'profile rbd pool=images' >> $KEYRING
+
+    ceph auth get-or-create client.mon_write mon 'allow *' >> $KEYRING
 }
 
 expect() {
@@ -142,9 +158,83 @@ test_volumes_access() {
     rbd -k $KEYRING --id volumes rm volumes/child
 }
 
+create_self_managed_snapshot() {
+    ID=$1
+    POOL=$2
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster = rados.Rados(conffile="", rados_id="${ID}")
+cluster.connect()
+ioctx = cluster.open_ioctx("${POOL}")
+
+snap_id = ioctx.create_self_managed_snap()
+print ("Created snap id {}".format(snap_id))
+EOF
+}
+
+remove_self_managed_snapshot() {
+    ID=$1
+    POOL=$2
+
+    cat << EOF | CEPH_ARGS="-k $KEYRING" python
+import rados
+
+cluster1 = rados.Rados(conffile="", rados_id="mon_write")
+cluster1.connect()
+ioctx1 = cluster1.open_ioctx("${POOL}")
+
+snap_id = ioctx1.create_self_managed_snap()
+print ("Created snap id {}".format(snap_id))
+
+cluster2 = rados.Rados(conffile="", rados_id="${ID}")
+cluster2.connect()
+ioctx2 = cluster2.open_ioctx("${POOL}")
+
+ioctx2.remove_self_managed_snap(snap_id)
+print ("Removed snap id {}".format(snap_id))
+EOF
+}
+
+test_remove_self_managed_snapshots() {
+    # Ensure users cannot create self-managed snapshots w/o permissions
+    expect 1 create_self_managed_snapshot snap_none images
+    expect 1 create_self_managed_snapshot snap_none volumes
+
+    create_self_managed_snapshot snap_all images
+    create_self_managed_snapshot snap_all volumes
+
+    create_self_managed_snapshot snap_pool images
+    expect 1 create_self_managed_snapshot snap_pool volumes
+
+    create_self_managed_snapshot snap_profile_all images
+    create_self_managed_snapshot snap_profile_all volumes
+
+    create_self_managed_snapshot snap_profile_pool images
+    expect 1 create_self_managed_snapshot snap_profile_pool volumes
+
+    # Ensure users cannot delete self-managed snapshots w/o permissions
+    expect 1 remove_self_managed_snapshot snap_none images
+    expect 1 remove_self_managed_snapshot snap_none volumes
+
+    remove_self_managed_snapshot snap_all images
+    remove_self_managed_snapshot snap_all volumes
+
+    remove_self_managed_snapshot snap_pool images
+    expect 1 remove_self_managed_snapshot snap_pool volumes
+
+    remove_self_managed_snapshot snap_profile_all images
+    remove_self_managed_snapshot snap_profile_all volumes
+
+    remove_self_managed_snapshot snap_profile_pool images
+    expect 1 remove_self_managed_snapshot snap_profile_pool volumes
+}
+
 cleanup() {
     rm -f $KEYRING
 }
+
 KEYRING=$(mktemp)
 trap cleanup EXIT ERR HUP INT QUIT
 
@@ -157,6 +247,8 @@ test_images_access
 
 recreate_pools
 test_volumes_access
 
+test_remove_self_managed_snapshots
+
 delete_pools
 delete_users
diff --git a/src/mon/OSDMonitor.cc b/src/mon/OSDMonitor.cc
index 5ef6d7e0bb6dc..65682a5d7469e 100644
--- a/src/mon/OSDMonitor.cc
+++ b/src/mon/OSDMonitor.cc
@@ -76,6 +76,9 @@
 #include "include/str_map.h"
 #include "include/scope_guard.h"
 
+#include "auth/cephx/CephxKeyServer.h"
+#include "osd/OSDCap.h"
+
 #include "json_spirit/json_spirit_reader.h"
 
 #include
@@ -91,6 +94,87 @@ const uint32_t MAX_POOL_APPLICATIONS = 4;
 const uint32_t MAX_POOL_APPLICATION_KEYS = 64;
 const uint32_t MAX_POOL_APPLICATION_LENGTH = 128;
 
+bool is_osd_writable(const OSDCapGrant& grant, const std::string* pool_name) {
+  // Note: this doesn't include support for the application tag match
+  if ((grant.spec.allow & OSD_CAP_W) != 0) {
+    auto& match = grant.match;
+    if (match.is_match_all()) {
+      return true;
+    } else if (pool_name != nullptr && match.auid < 0 &&
+               !match.pool_namespace.pool_name.empty() &&
+               match.pool_namespace.pool_name == *pool_name) {
+      return true;
+    }
+  }
+  return false;
+}
+
+bool is_unmanaged_snap_op_permitted(CephContext* cct,
+                                    const KeyServer& key_server,
+                                    const EntityName& entity_name,
+                                    const MonCap& mon_caps,
+                                    const std::string* pool_name)
+{
+  typedef std::map<std::string, std::string> CommandArgs;
+
+  if (mon_caps.is_capable(cct, CEPH_ENTITY_TYPE_MON,
+                          entity_name, "osd",
+                          "osd pool op unmanaged-snap",
+                          (pool_name == nullptr ?
+                             CommandArgs{} /* pool DNE, require unrestricted cap */ :
+                             CommandArgs{{"poolname", *pool_name}}),
+                          false, true, false)) {
+    return true;
+  }
+
+  AuthCapsInfo caps_info;
+  if (!key_server.get_service_caps(entity_name, CEPH_ENTITY_TYPE_OSD,
+                                   caps_info)) {
+    dout(10) << "unable to locate OSD cap data for " << entity_name
+             << " in auth db" << dendl;
+    return false;
+  }
+
+  string caps_str;
+  if (caps_info.caps.length() > 0) {
+    auto p = caps_info.caps.cbegin();
+    try {
+      decode(caps_str, p);
+    } catch (const buffer::error &err) {
+      derr << "corrupt OSD cap data for " << entity_name << " in auth db"
+           << dendl;
+      return false;
+    }
+  }
+
+  OSDCap osd_cap;
+  if (!osd_cap.parse(caps_str, nullptr)) {
+    dout(10) << "unable to parse OSD cap data for " << entity_name
+             << " in auth db" << dendl;
+    return false;
+  }
+
+  // if the entity has write permissions in one or all pools, permit
+  // usage of unmanaged-snapshots
+  if (osd_cap.allow_all()) {
+    return true;
+  }
+
+  for (auto& grant : osd_cap.grants) {
+    if (grant.profile.is_valid()) {
+      for (auto& profile_grant : grant.profile_grants) {
+        if (is_osd_writable(profile_grant, pool_name)) {
+          return true;
+        }
+      }
+    } else if (is_osd_writable(grant, pool_name)) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
 } // anonymous namespace
 
 void LastEpochClean::Lec::report(ps_t ps, epoch_t last_epoch_clean)
@@ -11707,11 +11791,61 @@ bool OSDMonitor::prepare_command_impl(MonOpRequestRef op,
   return true;
 }
 
-bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
+bool OSDMonitor::enforce_pool_op_caps(MonOpRequestRef op)
 {
   op->mark_osdmon_event(__func__);
+
   MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
-
+  MonSession *session = m->get_session();
+  if (!session) {
+    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+    return true;
+  }
+
+  switch (m->op) {
+  case POOL_OP_CREATE_UNMANAGED_SNAP:
+  case POOL_OP_DELETE_UNMANAGED_SNAP:
+    {
+      const std::string* pool_name = nullptr;
+      const pg_pool_t *pg_pool = osdmap.get_pg_pool(m->pool);
+      if (pg_pool != nullptr) {
+        pool_name = &osdmap.get_pool_name(m->pool);
+      }
+
+      if (!is_unmanaged_snap_op_permitted(cct, mon->key_server,
+                                          session->entity_name, session->caps,
+                                          pool_name)) {
+        dout(0) << "got unmanaged-snap pool op from entity with insufficient "
+                << "privileges. message: " << *m << std::endl
+                << "caps: " << session->caps << dendl;
+        _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+        return true;
+      }
+    }
+    break;
+  default:
+    if (!session->is_capable("osd", MON_CAP_W)) {
+      dout(0) << "got pool op from entity with insufficient privileges. "
+              << "message: " << *m << std::endl
+              << "caps: " << session->caps << dendl;
+      _pool_op_reply(op, -EPERM, osdmap.get_epoch());
+      return true;
+    }
+    break;
+  }
+
+  return false;
+}
+
+bool OSDMonitor::preprocess_pool_op(MonOpRequestRef op)
+{
+  op->mark_osdmon_event(__func__);
+  MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
+
+  if (enforce_pool_op_caps(op)) {
+    return true;
+  }
+
   if (m->fsid != mon->monmap->fsid) {
     dout(0) << __func__ << " drop message on fsid " << m->fsid << " != "
             << mon->monmap->fsid << " for " << *m << dendl;
@@ -11795,19 +11929,6 @@ bool OSDMonitor::preprocess_pool_op_create(MonOpRequestRef op)
 {
   op->mark_osdmon_event(__func__);
   MPoolOp *m = static_cast<MPoolOp*>(op->get_req());
-  MonSession *session = m->get_session();
-  if (!session) {
-    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
-    return true;
-  }
-  if (!session->is_capable("osd", MON_CAP_W)) {
-    dout(5) << "attempt to create new pool without sufficient auid privileges!"
-            << "message: " << *m << std::endl
-            << "caps: " << session->caps << dendl;
-    _pool_op_reply(op, -EPERM, osdmap.get_epoch());
-    return true;
-  }
-
   int64_t pool = osdmap.lookup_pg_pool_name(m->name.c_str());
   if (pool >= 0) {
     _pool_op_reply(op, 0, osdmap.get_epoch());
diff --git a/src/mon/OSDMonitor.h b/src/mon/OSDMonitor.h
index 6bb1218a807a4..5e37af1b36855 100644
--- a/src/mon/OSDMonitor.h
+++ b/src/mon/OSDMonitor.h
@@ -400,6 +400,7 @@ class OSDMonitor : public PaxosService {
   int _prepare_remove_pool(int64_t pool, ostream *ss, bool no_fake);
   int _prepare_rename_pool(int64_t pool, string newname);
 
+  bool enforce_pool_op_caps(MonOpRequestRef op);
   bool preprocess_pool_op (MonOpRequestRef op);
   bool preprocess_pool_op_create (MonOpRequestRef op);
   bool prepare_pool_op (MonOpRequestRef op);
diff --git a/src/pybind/rados/rados.pyx b/src/pybind/rados/rados.pyx
index a795fd7344e6c..67725295dcd36 100644
--- a/src/pybind/rados/rados.pyx
+++ b/src/pybind/rados/rados.pyx
@@ -235,6 +235,17 @@ cdef extern from "rados/librados.h" nogil:
     int rados_ioctx_snap_list(rados_ioctx_t io, rados_snap_t * snaps, int maxlen)
     int rados_ioctx_snap_get_stamp(rados_ioctx_t io, rados_snap_t id, time_t * t)
 
+    int rados_ioctx_selfmanaged_snap_create(rados_ioctx_t io,
+                                            rados_snap_t *snapid)
+    int rados_ioctx_selfmanaged_snap_remove(rados_ioctx_t io,
+                                            rados_snap_t snapid)
+    int rados_ioctx_selfmanaged_snap_set_write_ctx(rados_ioctx_t io,
+                                                   rados_snap_t snap_seq,
+                                                   rados_snap_t *snap,
+                                                   int num_snaps)
+    int rados_ioctx_selfmanaged_snap_rollback(rados_ioctx_t io, const char *oid,
+                                              rados_snap_t snapid)
+
     int rados_lock_exclusive(rados_ioctx_t io, const char * oid, const char * name,
                              const char * cookie, const char * desc,
                              timeval * duration, uint8_t flags)
@@ -3182,6 +3193,101 @@ returned %d, but should return zero on success." % (self.name, ret))
         if ret != 0:
             raise make_ex(ret, "Failed to rollback %s" % oid)
 
+    def create_self_managed_snap(self):
+        """
+        Creates a self-managed snapshot
+
+        :returns: snap id on success
+
+        :raises: :class:`Error`
+        """
+        self.require_ioctx_open()
+        cdef:
+            rados_snap_t _snap_id
+        with nogil:
+            ret = rados_ioctx_selfmanaged_snap_create(self.io, &_snap_id)
+        if ret != 0:
+            raise make_ex(ret, "Failed to create self-managed snapshot")
+        return int(_snap_id)
+
+    @requires(('snap_id', int))
+    def remove_self_managed_snap(self, snap_id):
+        """
+        Removes a self-managed snapshot
+
+        :param snap_id: the id of the snapshot
+        :type snap_id: int
+
+        :raises: :class:`TypeError`
+        :raises: :class:`Error`
+        """
+        self.require_ioctx_open()
+        cdef:
+            rados_snap_t _snap_id = snap_id
+        with nogil:
+            ret = rados_ioctx_selfmanaged_snap_remove(self.io, _snap_id)
+        if ret != 0:
+            raise make_ex(ret, "Failed to remove self-managed snapshot")
+
+    def set_self_managed_snap_write(self, snaps):
+        """
+        Updates the write context to the specified self-managed
+        snapshot ids.
+
+        :param snaps: all associated self-managed snapshot ids
+        :type snaps: list
+
+        :raises: :class:`TypeError`
+        :raises: :class:`Error`
+        """
+        self.require_ioctx_open()
+        sorted_snaps = []
+        snap_seq = 0
+        if snaps:
+            sorted_snaps = sorted([int(x) for x in snaps], reverse=True)
+            snap_seq = sorted_snaps[0]
+
+        cdef:
+            rados_snap_t _snap_seq = snap_seq
+            rados_snap_t *_snaps = NULL
+            int _num_snaps = len(sorted_snaps)
+        try:
+            _snaps = <rados_snap_t *>malloc(_num_snaps * sizeof(rados_snap_t))
+            for i in range(len(sorted_snaps)):
+                _snaps[i] = sorted_snaps[i]
+            with nogil:
+                ret = rados_ioctx_selfmanaged_snap_set_write_ctx(self.io,
+                                                                 _snap_seq,
+                                                                 _snaps,
+                                                                 _num_snaps)
+            if ret != 0:
+                raise make_ex(ret, "Failed to update snapshot write context")
+        finally:
+            free(_snaps)
+
+    @requires(('oid', str_type), ('snap_id', int))
+    def rollback_self_managed_snap(self, oid, snap_id):
+        """
+        Rolls a specific object back to a self-managed snapshot revision
+
+        :param oid: the name of the object
+        :type oid: str
+        :param snap_id: the id of the snapshot
+        :type snap_id: int
+
+        :raises: :class:`TypeError`
+        :raises: :class:`Error`
+        """
+        self.require_ioctx_open()
+        oid = cstr(oid, 'oid')
+        cdef:
+            char *_oid = oid
+            rados_snap_t _snap_id = snap_id
+        with nogil:
+            ret = rados_ioctx_selfmanaged_snap_rollback(self.io, _oid, _snap_id)
+        if ret != 0:
+            raise make_ex(ret, "Failed to rollback %s" % oid)
+
     def get_last_version(self):
         """
         Return the version of the last object read or written to.
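For orientation, the four Ioctx methods bound above are meant to be used together: the write context is first switched into self-managed mode, snapshots are created and folded back into the write context, and objects can later be rolled back before the snapshots are removed. A minimal sketch of that flow follows; the pool name "rbd", the object name "obj", and the default admin connection via a local ceph.conf are illustrative assumptions, not part of the patch.

    import rados

    # Sketch only: assumes a reachable test cluster, default admin credentials,
    # and an existing pool named "rbd".
    cluster = rados.Rados(conffile="")
    cluster.connect()
    ioctx = cluster.open_ioctx("rbd")

    ioctx.set_self_managed_snap_write([])         # switch the ioctx to self-managed snap mode
    ioctx.write("obj", b"v1")
    snap_id = ioctx.create_self_managed_snap()    # returns the new snap id as an int
    ioctx.set_self_managed_snap_write([snap_id])  # include the snap in the write context

    ioctx.write("obj", b"v2")
    ioctx.rollback_self_managed_snap("obj", snap_id)  # restore "obj" to the snapshotted "v1"
    assert ioctx.read("obj") == b"v1"

    ioctx.remove_self_managed_snap(snap_id)
    ioctx.close()
    cluster.shutdown()

The same sequence, with two snapshots, is exercised by the new test class in the following file.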
diff --git a/src/test/pybind/test_rados.py b/src/test/pybind/test_rados.py
index c8f958bb57d47..76ee286f81241 100644
--- a/src/test/pybind/test_rados.py
+++ b/src/test/pybind/test_rados.py
@@ -1011,6 +1011,43 @@ def test_write(self):
         eq(self.object.read(3), b'bar')
         eq(self.object.read(3), b'baz')
 
+class TestIoCtxSelfManagedSnaps(object):
+    def setUp(self):
+        self.rados = Rados(conffile='')
+        self.rados.connect()
+        self.rados.create_pool('test_pool')
+        assert self.rados.pool_exists('test_pool')
+        self.ioctx = self.rados.open_ioctx('test_pool')
+
+    def tearDown(self):
+        cmd = {"prefix":"osd unset", "key":"noup"}
+        self.rados.mon_command(json.dumps(cmd), b'')
+        self.ioctx.close()
+        self.rados.delete_pool('test_pool')
+        self.rados.shutdown()
+
+    def test(self):
+        # cannot mix-and-match pool and self-managed snapshot mode
+        self.ioctx.set_self_managed_snap_write([])
+        self.ioctx.write('abc', b'abc')
+        snap_id_1 = self.ioctx.create_self_managed_snap()
+        self.ioctx.set_self_managed_snap_write([snap_id_1])
+
+        self.ioctx.write('abc', b'def')
+        snap_id_2 = self.ioctx.create_self_managed_snap()
+        self.ioctx.set_self_managed_snap_write([snap_id_1, snap_id_2])
+
+        self.ioctx.write('abc', b'ghi')
+
+        self.ioctx.rollback_self_managed_snap('abc', snap_id_1)
+        eq(self.ioctx.read('abc'), b'abc')
+
+        self.ioctx.rollback_self_managed_snap('abc', snap_id_2)
+        eq(self.ioctx.read('abc'), b'def')
+
+        self.ioctx.remove_self_managed_snap(snap_id_1)
+        self.ioctx.remove_self_managed_snap(snap_id_2)
+
 class TestCommand(object):
 
     def setUp(self):
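Taken together with the monitor-side check added in OSDMonitor.cc, a client now needs either blanket mon write caps or OSD write caps that cover the target pool (for example osd 'allow w pool=images' or osd 'profile rbd pool=images') before it may create or delete unmanaged/self-managed snapshots; read-only callers are rejected with EPERM. A rough sketch of the failure path from the client side, mirroring the snap_none user defined in permissions.sh above; the client id, pool name, and keyring discovery are assumptions for illustration only.

    import rados

    # Sketch only: assumes a user created as in permissions.sh above, e.g.
    #   ceph auth get-or-create client.snap_none mon 'allow r'
    # and a keyring/ceph.conf that librados can locate for that id.
    cluster = rados.Rados(conffile="", rados_id="snap_none")
    cluster.connect()
    ioctx = cluster.open_ioctx("images")
    try:
        ioctx.create_self_managed_snap()   # now rejected by the monitor with EPERM
    except rados.Error as e:
        print("self-managed snap create denied as expected: {}".format(e))
    finally:
        ioctx.close()
        cluster.shutdown()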