Merge PR 16068 into master
* refs/remotes/upstream/pull/16068/head:
	mds: wait auth pinned objects when deactivating mds
	mds: fix "wait for stray manager to start"
	ceph: kick purge queue when mds active
	mds: properly do null snapflush
	mds: force client to flush data when waiting on LOCK_SNAP_SYNC lock
	mds: don't issue caps for frozen inode

Reviewed-by: Patrick Donnelly <pdonnell@redhat.com>
batrick committed Jul 18, 2017
2 parents ba548ff + 2b98f47 commit b39a402
Showing 6 changed files with 60 additions and 22 deletions.
15 changes: 10 additions & 5 deletions src/mds/Locker.cc
@@ -1332,10 +1332,13 @@ bool Locker::rdlock_start(SimpleLock *lock, MDRequestRef& mut, bool as_anon)
// okay, we actually need to kick the head's lock to get ourselves synced up.
CInode *head = mdcache->get_inode(in->ino());
assert(head);
-SimpleLock *hlock = head->get_lock(lock->get_type());
+SimpleLock *hlock = head->get_lock(CEPH_LOCK_IFILE);
+if (hlock->get_state() == LOCK_SYNC)
+hlock = head->get_lock(lock->get_type());
+
if (hlock->get_state() != LOCK_SYNC) {
dout(10) << "rdlock_start trying head inode " << *head << dendl;
-if (!rdlock_start(head->get_lock(lock->get_type()), mut, true)) // ** as_anon, no rdlock on EXCL **
+if (!rdlock_start(hlock, mut, true)) // ** as_anon, no rdlock on EXCL **
return false;
// oh, check our lock again then
}
@@ -2532,11 +2535,11 @@ void Locker::adjust_cap_wanted(Capability *cap, int wanted, int issue_seq)



-void Locker::_do_null_snapflush(CInode *head_in, client_t client)
+void Locker::_do_null_snapflush(CInode *head_in, client_t client, snapid_t last)
{
dout(10) << "_do_null_snapflush client." << client << " on " << *head_in << dendl;
-compact_map<snapid_t, set<client_t> >::iterator p = head_in->client_need_snapflush.begin();
-while (p != head_in->client_need_snapflush.end()) {
+for (auto p = head_in->client_need_snapflush.begin();
+p != head_in->client_need_snapflush.end() && p->first < last; ) {
snapid_t snapid = p->first;
set<client_t>& clients = p->second;
++p; // be careful, q loop below depends on this
@@ -2762,6 +2765,8 @@ void Locker::handle_client_caps(MClientCaps *m)
// this cap now follows a later snap (i.e. the one initiating this flush, or later)
if (in == head_in)
cap->client_follows = snap < CEPH_NOSNAP ? snap : realm->get_newest_seq();
+else if (head_in->client_need_snapflush.begin()->first < snap)
+_do_null_snapflush(head_in, client, snap);

_do_snap_update(in, snap, m->get_dirty(), follows, client, m, ack);

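Note: the Locker.cc hunks above bound _do_null_snapflush() with a new `last` argument so that only snapflushes for snaps older than the one currently being flushed are nulled out. Below is a minimal standalone sketch of that bounded iterate-and-erase pattern; it uses std::map/std::set stand-ins rather than Ceph's compact_map, and the names need_snapflush/do_null_snapflush as well as the numeric snap and client IDs are hypothetical.

#include <cstdint>
#include <iostream>
#include <map>
#include <set>

using snapid_t = uint64_t;
using client_t = int64_t;

// Stand-in for CInode::client_need_snapflush: snapid -> clients that still owe a snapflush.
std::map<snapid_t, std::set<client_t>> need_snapflush = {
    {10, {4100}}, {20, {4100, 4200}}, {30, {4100}}};

// Null out pending snapflushes for `client` on snaps strictly older than `last`,
// erasing entries that become empty -- the same bounded loop shape as the patch.
void do_null_snapflush(client_t client, snapid_t last) {
  for (auto p = need_snapflush.begin();
       p != need_snapflush.end() && p->first < last; ) {
    auto q = p++;          // advance first, so erasing q does not invalidate p
    q->second.erase(client);
    if (q->second.empty())
      need_snapflush.erase(q);
  }
}

int main() {
  do_null_snapflush(4100, 30);   // snaps 10 and 20 are cleaned up, 30 is untouched
  for (const auto& [snap, clients] : need_snapflush)
    std::cout << "snap " << snap << ": " << clients.size() << " client(s) pending\n";
}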
2 changes: 1 addition & 1 deletion src/mds/Locker.h
@@ -193,7 +193,7 @@ class Locker {
void handle_client_caps(class MClientCaps *m);
void _update_cap_fields(CInode *in, int dirty, MClientCaps *m, inode_t *pi);
void _do_snap_update(CInode *in, snapid_t snap, int dirty, snapid_t follows, client_t client, MClientCaps *m, MClientCaps *ack);
-void _do_null_snapflush(CInode *head_in, client_t client);
+void _do_null_snapflush(CInode *head_in, client_t client, snapid_t last=CEPH_NOSNAP);
bool _do_cap_update(CInode *in, Capability *cap, int dirty, snapid_t follows, MClientCaps *m,
MClientCaps *ack=0, bool *need_flush=NULL);
void handle_client_cap_release(class MClientCapRelease *m);
24 changes: 24 additions & 0 deletions src/mds/MDCache.cc
@@ -6083,6 +6083,20 @@ void MDCache::rejoin_send_acks()
rejoin_imported_caps.clear();
}

+class C_MDC_ReIssueCaps : public MDCacheContext {
+CInode *in;
+public:
+C_MDC_ReIssueCaps(MDCache *mdc, CInode *i) :
+MDCacheContext(mdc), in(i)
+{
+in->get(CInode::PIN_PTRWAITER);
+}
+void finish(int r) override {
+if (!mdcache->mds->locker->eval(in, CEPH_CAP_LOCKS))
+mdcache->mds->locker->issue_caps(in);
+in->put(CInode::PIN_PTRWAITER);
+}
+};

void MDCache::reissue_all_caps()
{
@@ -6093,6 +6107,11 @@ void MDCache::reissue_all_caps()
++p) {
CInode *in = p->second;
if (in->is_head() && in->is_any_caps()) {
+// called by MDSRank::active_start(). There shouldn't be any frozen subtree.
+if (in->is_frozen_inode()) {
+in->add_waiter(CInode::WAIT_UNFREEZE, new C_MDC_ReIssueCaps(this, in));
+continue;
+}
if (!mds->locker->eval(in, CEPH_CAP_LOCKS))
mds->locker->issue_caps(in);
}
@@ -7611,6 +7630,11 @@ bool MDCache::shutdown_pass()
assert(!migrator->is_exporting());
assert(!migrator->is_importing());

+if ((myin && myin->is_auth_pinned()) ||
+(mydir && mydir->is_auth_pinned())) {
+dout(7) << "still have auth pinned objects" << dendl;
+return false;
+}

// flush what we can from the log
mds->mdlog->trim(0);
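Note: the new C_MDC_ReIssueCaps context in the MDCache.cc hunks above pins the inode, waits for it to be unfrozen, then retries eval/issue_caps before dropping the pin. A simplified sketch of that pin-then-wait-for-unfreeze retry pattern follows; Node, add_unfreeze_waiter, reissue and issue_caps are hypothetical stand-ins, not Ceph API.

#include <functional>
#include <iostream>
#include <vector>

// Minimal stand-in for a cache object that can be frozen and can queue waiters.
struct Node {
  bool frozen = false;
  int pins = 0;                                  // stand-in for CInode reference pins
  std::vector<std::function<void()>> waiters;    // run when the node is unfrozen

  void add_unfreeze_waiter(std::function<void()> fin) { waiters.push_back(std::move(fin)); }
  void unfreeze() {
    frozen = false;
    auto to_run = std::move(waiters);
    waiters.clear();
    for (auto& fin : to_run) fin();
  }
};

void issue_caps(Node& n) { std::cout << "issuing caps (pins=" << n.pins << ")\n"; }

// Mirrors the shape of reissue_all_caps() + C_MDC_ReIssueCaps: if the node is frozen,
// pin it and defer the work to an unfreeze waiter instead of acting now.
void reissue(Node& n) {
  if (n.frozen) {
    ++n.pins;                        // keep the object alive while the waiter is queued
    n.add_unfreeze_waiter([&n] {
      issue_caps(n);
      --n.pins;                      // drop the pin once the deferred work has run
    });
    return;
  }
  issue_caps(n);
}

int main() {
  Node n;
  n.frozen = true;
  reissue(n);      // deferred: nothing issued yet
  n.unfreeze();    // waiter fires, caps issued, pin released
}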
15 changes: 15 additions & 0 deletions src/mds/PurgeQueue.cc
@@ -117,6 +117,21 @@ void PurgeQueue::init()
timer.init();
}

+void PurgeQueue::activate()
+{
+Mutex::Locker l(lock);
+if (journaler.get_read_pos() == journaler.get_write_pos())
+return;
+
+if (in_flight.empty()) {
+dout(4) << "start work (by drain)" << dendl;
+finisher.queue(new FunctionContext([this](int r) {
+Mutex::Locker l(lock);
+_consume();
+}));
+}
+}
+
void PurgeQueue::shutdown()
{
Mutex::Locker l(lock);
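Note: PurgeQueue::activate() above only schedules a consume pass when the journal still has unread entries and nothing is already in flight. A rough standalone model of that "kick only if there is backlog and we are idle" guard, using a plain deque and mutex in place of the Journaler and Finisher; the Queue class and its members are hypothetical.

#include <deque>
#include <iostream>
#include <mutex>

// Simplified stand-in for PurgeQueue: a backlog of items plus an in-flight counter.
class Queue {
  std::mutex lock;
  std::deque<int> backlog;       // stand-in for journaler read_pos != write_pos
  int in_flight = 0;             // in the real queue, in-flight work re-kicks consumption

  void consume() {               // caller holds `lock`
    while (!backlog.empty()) {
      std::cout << "purging " << backlog.front() << "\n";
      backlog.pop_front();
    }
  }

public:
  void push(int item) { std::lock_guard<std::mutex> l(lock); backlog.push_back(item); }

  // Mirrors activate(): do nothing if there is no backlog, and only kick
  // consumption when no work is already in flight.
  void activate() {
    std::lock_guard<std::mutex> l(lock);
    if (backlog.empty())
      return;
    if (in_flight == 0) {
      std::cout << "start work (by drain)\n";
      consume();                 // in Ceph this is queued on a Finisher thread instead
    }
  }
};

int main() {
  Queue q;
  q.push(1); q.push(2);
  q.activate();
}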
1 change: 1 addition & 0 deletions src/mds/PurgeQueue.h
@@ -133,6 +133,7 @@ class PurgeQueue

public:
void init();
+void activate();
void shutdown();

void create_logger();
25 changes: 9 additions & 16 deletions src/mds/StrayManager.cc
@@ -314,6 +314,8 @@ class C_OpenSnapParents : public StrayManagerContext {

void StrayManager::_enqueue(CDentry *dn, bool trunc)
{
+assert(started);
+
CInode *in = dn->get_linkage()->get_inode();
if (in->snaprealm &&
!in->snaprealm->have_past_parents_open() &&
@@ -322,22 +324,6 @@ void StrayManager::_enqueue(CDentry *dn, bool trunc)
return;
}

-if (!started) {
-// If the MDS is not yet active, defer executing this purge
-// in order to avoid the mdlog writes we do on purge completion.
-mds->wait_for_active(
-new MDSInternalContextWrapper(mds,
-new FunctionContext([this, dn, trunc](int r){
-// It is safe to hold on to this CDentry* pointer
-// because the dentry is pinned with PIN_PURGING
-_enqueue(dn, trunc);
-})
-)
-);
-
-return;
-}
-
if (trunc) {
truncate(dn);
} else {
@@ -348,6 +334,9 @@ void StrayManager::_enqueue(CDentry *dn, bool trunc)

void StrayManager::advance_delayed()
{
+if (!started)
+return;
+
for (elist<CDentry*>::iterator p = delayed_eval_stray.begin(); !p.end(); ) {
CDentry *dn = *p;
++p;
@@ -435,6 +424,9 @@ bool StrayManager::_eval_stray(CDentry *dn, bool delay)
return false;
}

+if (!started)
+delay = true;
+
if (dn->item_stray.is_on_list()) {
if (delay)
return false;
@@ -545,6 +537,7 @@ void StrayManager::activate()
{
dout(10) << __func__ << dendl;
started = true;
+purge_queue.activate();
}

bool StrayManager::eval_stray(CDentry *dn, bool delay)
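Note: the StrayManager.cc changes above drop the per-_enqueue() deferral: before the MDS is active, strays are simply kept on the delayed list (_eval_stray() forces delay, advance_delayed() is a no-op), and activate() flips `started` and kicks the purge queue. A compact sketch of that gate-work-behind-a-started-flag pattern; the Strays class and its members are hypothetical stand-ins.

#include <cassert>
#include <iostream>
#include <vector>

// Simplified stand-in for StrayManager's started/delayed handling after the patch.
class Strays {
  bool started = false;
  std::vector<int> delayed;      // stand-in for delayed_eval_stray

  void enqueue(int stray) {
    assert(started);             // _enqueue() now requires the manager to be active
    std::cout << "purging stray " << stray << "\n";
  }

public:
  // Mirrors _eval_stray(): before activation, work is only ever delayed.
  void eval(int stray) {
    if (!started) {
      delayed.push_back(stray);
      return;
    }
    enqueue(stray);
  }

  // Mirrors advance_delayed(): a no-op until activation.
  void advance_delayed() {
    if (!started)
      return;
    for (int s : delayed) enqueue(s);
    delayed.clear();
  }

  // Mirrors activate(): flip the flag, then drain what was deferred.
  void activate() {
    started = true;
    advance_delayed();           // in Ceph, activate() also kicks the PurgeQueue
  }
};

int main() {
  Strays sm;
  sm.eval(42);     // deferred: MDS not active yet
  sm.activate();   // now the deferred stray is enqueued
}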
