Merge pull request #6567 from jcsp/wip-layout-restrict
mds: add 'p' flag in auth caps to control setting pool in layout

Reviewed-by: Sage Weil <sage@redhat.com>
gregsfortytwo committed Dec 7, 2015
2 parents e62954e + 2829e9d commit 66de0e3
Showing 6 changed files with 155 additions and 45 deletions.
81 changes: 81 additions & 0 deletions doc/cephfs/client-auth.rst
@@ -0,0 +1,81 @@
================================
CephFS Client Capabilities
================================

Use Ceph authentication capabilities to restrict your filesystem clients
to the lowest possible level of authority needed.

Path restriction
================

By default, clients are not restricted in the paths that they may mount.
Further, when a client mounts a subdirectory (e.g. ``/home/user``), the MDS
does not by default verify that subsequent operations are 'locked' within
that directory.

To restrict clients to only mount and work within a certain directory, use
path-based MDS authentication capabilities.

Syntax
------

To grant rw access to the specified directory only, include the directory
in the MDS capability while creating the key for a client, using the
following syntax. ::

    ceph auth get-or-create client.*client_name* mon 'allow r' mds 'allow r, allow rw path=/*specified_directory*' osd 'allow rw pool=data'

For example, to restrict client ``foo`` to the ``bar`` directory, use::

    ceph auth get-or-create client.foo mon 'allow r' mds 'allow r, allow rw path=/bar' osd 'allow rw pool=data'


To restrict a client to a specified sub-directory only, name that directory
at mount time, using the following syntax. ::

    ceph-fuse -n client.*client_name* *mount_path* -r *directory_to_be_mounted*

For example, to mount only the ``/bar`` directory of the filesystem for
client ``foo``, at the local path ``mnt``, use::

    ceph-fuse -n client.foo mnt -r /bar

OSD restriction
===============

To prevent clients from writing or reading data to pools other than
those in use for CephFS, set an OSD authentication capability that
restricts access to the CephFS data pool(s):

::

    client.0
        key: AQAz7EVWygILFRAAdIcuJ12opU/JKyfFmxhuaw==
        caps: [mds] allow rw
        caps: [mon] allow r
        caps: [osd] allow rw pool=data1, allow rw pool=data2
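
A sketch of the ``ceph auth get-or-create`` invocation that would produce
capabilities like the above; the client name and the pool names ``data1``
and ``data2`` are illustrative. ::

    ceph auth get-or-create client.0 mds 'allow rw' mon 'allow r' osd 'allow rw pool=data1, allow rw pool=data2'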

You may also restrict clients from writing data by using 'r' instead of
'rw' in OSD capabilities. This does not affect the ability of the client
to update filesystem metadata for these files, but it will prevent the
client from persistently writing data in a way that would be visible to
other clients.
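
For example, a read-only variant of the above capabilities; the client name
is illustrative, and a data pool named ``data`` is assumed. ::

    ceph auth get-or-create client.reader mds 'allow rw' mon 'allow r' osd 'allow r pool=data'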

Layout modification restriction
===============================

To prevent clients from modifying the data pool used for files or
directories, use the 'p' modifier in MDS authentication capabilities.

For example, in the following snippet client.0 can modify the pool used
for files, but client.1 cannot.

::

    client.0
        key: AQAz7EVWygILFRAAdIcuJ12opU/JKyfFmxhuaw==
        caps: [mds] allow rwp
        caps: [mon] allow r
        caps: [osd] allow rw pool=data

    client.1
        key: AQAz7EVWygILFRAAdIcuJ12opU/JKyfFmxhuaw==
        caps: [mds] allow rw
        caps: [mon] allow r
        caps: [osd] allow rw pool=data
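
A sketch of how a client like ``client.0`` above could be created with the
'p' flag; the client name and pool name are illustrative. ::

    ceph auth get-or-create client.0 mds 'allow rwp' mon 'allow r' osd 'allow rw pool=data'

With the capabilities above, an attempt by ``client.1`` to switch a file to
a different data pool via the layout virtual xattr should be rejected by the
MDS; the pool name ``data2`` and the exact error output are illustrative. ::

    setfattr -n ceph.file.layout.pool -v data2 somefile
    # setfattr: somefile: Operation not permitted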

1 change: 1 addition & 0 deletions doc/cephfs/index.rst
@@ -90,6 +90,7 @@ authentication keyring.
     Handling full filesystems <full>
     Troubleshooting <troubleshooting>
     Disaster recovery <disaster-recovery>
+    Client authentication <client-auth>
 
 .. raw:: html
 
31 changes: 0 additions & 31 deletions doc/cephfs/path-based restriction.rst

This file was deleted.

21 changes: 16 additions & 5 deletions src/mds/MDSAuthCaps.cc
@@ -70,11 +70,13 @@ struct MDSCapParser : qi::grammar<Iterator, MDSAuthCaps()>
 
     // capspec = * | r[w]
     capspec = spaces >> (
-        lit("*")[_val = MDSCapSpec(true, true, true)]
+        lit("*")[_val = MDSCapSpec(true, true, true, true)]
         |
-        (lit("rw"))[_val = MDSCapSpec(true, true, false)]
+        (lit("rwp"))[_val = MDSCapSpec(true, true, false, true)]
         |
-        (lit("r"))[_val = MDSCapSpec(true, false, false)]
+        (lit("rw"))[_val = MDSCapSpec(true, true, false, false)]
+        |
+        (lit("r"))[_val = MDSCapSpec(true, false, false, false)]
         );
 
     grant = lit("allow") >> (capspec >> match)[_val = phoenix::construct<MDSCapGrant>(_1, _2)];
@@ -159,6 +161,13 @@ bool MDSAuthCaps::is_capable(const std::string &inode_path,
     if (i->match.match(inode_path, caller_uid, caller_gid) &&
         i->spec.allows(mask & (MAY_READ|MAY_EXECUTE), mask & MAY_WRITE)) {
+
+      // Spec is non-allowing if caller asked for set pool but spec forbids it
+      if (mask & MAY_SET_POOL) {
+        if (!i->spec.allows_set_pool()) {
+          continue;
+        }
+      }
 
       // check unix permissions?
       if (i->match.uid == MDSCapMatch::MDS_AUTH_UID_ANY) {
         return true;
@@ -209,15 +218,17 @@ bool MDSAuthCaps::is_capable(const std::string &inode_path,
 void MDSAuthCaps::set_allow_all()
 {
   grants.clear();
-  grants.push_back(MDSCapGrant(MDSCapSpec(true, true, true), MDSCapMatch()));
+  grants.push_back(MDSCapGrant(
+        MDSCapSpec(true, true, true, true),
+        MDSCapMatch()));
 }
 
 bool MDSAuthCaps::parse(CephContext *c, const std::string& str, ostream *err)
 {
   // Special case for legacy caps
   if (str == "allow") {
     grants.clear();
-    grants.push_back(MDSCapGrant(MDSCapSpec(true, true, false), MDSCapMatch()));
+    grants.push_back(MDSCapGrant(MDSCapSpec(true, true, false, true), MDSCapMatch()));
     return true;
   }
 
16 changes: 13 additions & 3 deletions src/mds/MDSAuthCaps.h
@@ -28,7 +28,8 @@ enum {
   MAY_WRITE = 2,
   MAY_EXECUTE = 4,
   MAY_CHOWN = 16,
-  MAY_CHGRP = 32
+  MAY_CHGRP = 32,
+  MAY_SET_POOL = 64,
 };
 
 class CephContext;
@@ -37,12 +38,17 @@ class CephContext;
 struct MDSCapSpec {
   bool read, write, any;
 
-  MDSCapSpec() : read(false), write(false), any(false) {}
-  MDSCapSpec(bool r, bool w, bool a) : read(r), write(w), any(a) {}
+  // True if the capability permits modifying the pool on file layouts
+  bool layout_pool;
+
+  MDSCapSpec() : read(false), write(false), any(false), layout_pool(false) {}
+  MDSCapSpec(bool r, bool w, bool a, bool lop)
+    : read(r), write(w), any(a), layout_pool(lop) {}
 
   bool allow_all() const {
     return any;
   }
+
   bool allows(bool r, bool w) const {
     if (any)
       return true;
@@ -52,6 +58,10 @@ struct MDSCapSpec {
       return false;
     return true;
   }
+
+  bool allows_set_pool() const {
+    return layout_pool;
+  }
 };
 
 // conditions before we are allowed to do it
50 changes: 44 additions & 6 deletions src/mds/Server.cc
@@ -3063,6 +3063,9 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   else
     layout = mdcache->default_file_layout;
 
+  // What kind of client caps are required to complete this operation
+  uint64_t access = MAY_WRITE;
+
   // fill in any special params from client
   if (req->head.args.open.stripe_unit)
     layout.fl_stripe_unit = req->head.args.open.stripe_unit;
@@ -3074,6 +3077,17 @@ void Server::handle_client_openc(MDRequestRef& mdr)
       (__s32)req->head.args.open.pool >= 0) {
     layout.fl_pg_pool = req->head.args.open.pool;
 
+    // If client doesn't have capability to modify layout pools, then
+    // only permit this request if the requested pool matches what the
+    // file would have inherited anyway from its parent.
+    CDir *parent = dn->get_dir();
+    CInode *parent_in = parent->get_inode();
+    int64_t parent_pool = parent_in->inode.layout.fl_pg_pool;
+
+    if (layout.fl_pg_pool != parent_pool) {
+      access |= MAY_SET_POOL;
+    }
+
     // make sure we have as new a map as the client
     if (req->get_mdsmap_epoch() > mds->mdsmap->get_epoch()) {
       mds->wait_for_mdsmap(req->get_mdsmap_epoch(), new C_MDS_RetryRequest(mdcache, mdr));
@@ -3097,7 +3111,7 @@ void Server::handle_client_openc(MDRequestRef& mdr)
   if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
     return;
 
-  if (!check_access(mdr, diri, MAY_WRITE))
+  if (!check_access(mdr, diri, access))
     return;
 
   CDentry::linkage_t *dnl = dn->get_projected_linkage();
@@ -3778,6 +3792,8 @@ void Server::handle_client_setlayout(MDRequestRef& mdr)
   // save existing layout for later
   int64_t old_pool = layout.fl_pg_pool;
 
+  int access = MAY_WRITE;
+
   if (req->head.args.setlayout.layout.fl_object_size > 0)
     layout.fl_object_size = req->head.args.setlayout.layout.fl_object_size;
   if (req->head.args.setlayout.layout.fl_stripe_unit > 0)
@@ -3791,6 +3807,10 @@ void Server::handle_client_setlayout(MDRequestRef& mdr)
   if (req->head.args.setlayout.layout.fl_pg_pool > 0) {
     layout.fl_pg_pool = req->head.args.setlayout.layout.fl_pg_pool;
 
+    if (layout.fl_pg_pool != old_pool) {
+      access |= MAY_SET_POOL;
+    }
+
     // make sure we have as new a map as the client
     if (req->get_mdsmap_epoch() > mds->mdsmap->get_epoch()) {
       mds->wait_for_mdsmap(req->get_mdsmap_epoch(), new C_MDS_RetryRequest(mdcache, mdr));
@@ -3812,7 +3832,7 @@ void Server::handle_client_setlayout(MDRequestRef& mdr)
   if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
     return;
 
-  if (!check_access(mdr, cur, MAY_WRITE))
+  if (!check_access(mdr, cur, access))
     return;
 
   // project update
@@ -3856,9 +3876,6 @@ void Server::handle_client_setdirlayout(MDRequestRef& mdr)
   if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
     return;
 
-  if (!check_access(mdr, cur, MAY_WRITE))
-    return;
-
   // validate layout
   const inode_t *old_pi = cur->get_projected_inode();
   ceph_file_layout layout;
@@ -3869,6 +3886,9 @@ void Server::handle_client_setdirlayout(MDRequestRef& mdr)
   else
     layout = mdcache->default_file_layout;
 
+  // Level of access required to complete
+  int access = MAY_WRITE;
+
   if (req->head.args.setlayout.layout.fl_object_size > 0)
     layout.fl_object_size = req->head.args.setlayout.layout.fl_object_size;
   if (req->head.args.setlayout.layout.fl_stripe_unit > 0)
@@ -3880,6 +3900,9 @@ void Server::handle_client_setdirlayout(MDRequestRef& mdr)
   if (req->head.args.setlayout.layout.fl_object_stripe_unit > 0)
     layout.fl_object_stripe_unit = req->head.args.setlayout.layout.fl_object_stripe_unit;
   if (req->head.args.setlayout.layout.fl_pg_pool > 0) {
+    if (req->head.args.setlayout.layout.fl_pg_pool != layout.fl_pg_pool) {
+      access |= MAY_SET_POOL;
+    }
     layout.fl_pg_pool = req->head.args.setlayout.layout.fl_pg_pool;
     // make sure we have as new a map as the client
     if (req->get_mdsmap_epoch() > mds->mdsmap->get_epoch()) {
@@ -3898,6 +3921,9 @@ void Server::handle_client_setdirlayout(MDRequestRef& mdr)
     return;
   }
 
+  if (!check_access(mdr, cur, access))
+    return;
+
   inode_t *pi = cur->project_inode();
   pi->layout = layout;
   pi->version = cur->pre_dirty();
@@ -4086,6 +4112,12 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
     if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
       return;
 
+    if (cur->inode.layout.fl_pg_pool != layout.fl_pg_pool) {
+      if (!check_access(mdr, cur, MAY_SET_POOL)) {
+        return;
+      }
+    }
+
     pi = cur->project_inode();
     pi->layout = layout;
   } else if (name.find("ceph.file.layout") == 0) {
@@ -4125,10 +4157,16 @@ void Server::handle_set_vxattr(MDRequestRef& mdr, CInode *cur,
       return;
     }
 
+    xlocks.insert(&cur->filelock);
     if (!mds->locker->acquire_locks(mdr, rdlocks, wrlocks, xlocks))
       return;
 
-    xlocks.insert(&cur->filelock);
+    if (cur->inode.layout.fl_pg_pool != layout.fl_pg_pool) {
+      if (!check_access(mdr, cur, MAY_SET_POOL)) {
+        return;
+      }
+    }
+
     pi = cur->project_inode();
     int64_t old_pool = pi->layout.fl_pg_pool;
     pi->add_old_pool(old_pool);