dlm: fix lvb invalidation conditions
When a node is removed that held a PW/EX lock, the
existing master node should invalidate the lvb on the
resource due to the purged lock.

Previously, the existing master node invalidated the lvb if it
found only NL/CR locks on the resource during recovery for the
removed node.  This could lead to cases where it invalidated the
lvb when it shouldn't have, or failed to invalidate it when it
should have.

When recovery selects a *new* master node for a
resource, and that new master finds only NL/CR locks
on the resource after lock recovery, it should
invalidate the lvb.  This case was handled correctly
(but it was incorrectly applied to the existing master
case as well).

When a process exits while holding a PW/EX lock,
the lvb on the resource should be invalidated.
This was not happening.

The lvb contents and VALNOTVALID flag should be
recovered before granting locks in recovery so that
the recovered lvb state is provided in the callback.
The lvb was being recovered after the lock was granted.

Signed-off-by: David Teigland <teigland@redhat.com>
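
For context, not part of this patch: a minimal sketch of how an in-kernel
dlm user might check the recovered lvb state in its completion callback,
which is where the recovered lvb/VALNOTVALID state described above is
delivered.  The struct my_lock wrapper, MY_LVB_LEN size and my_lock_ast
name are hypothetical; dlm_lock(), struct dlm_lksb and DLM_SBF_VALNOTVALID
are the existing kernel API.

#include <linux/dlm.h>
#include <linux/string.h>

#define MY_LVB_LEN 32	/* must match the lvblen given to dlm_new_lockspace() */

struct my_lock {
	struct dlm_lksb lksb;	/* caller sets lksb.sb_lvbptr = lvb and
				   passes DLM_LKF_VALBLK to dlm_lock() */
	char lvb[MY_LVB_LEN];
};

static void my_lock_ast(void *astarg)
{
	struct my_lock *ml = astarg;

	if (ml->lksb.sb_status)
		return;		/* request failed */

	if (ml->lksb.sb_flags & DLM_SBF_VALNOTVALID) {
		/* the lvb was invalidated, e.g. a PW/EX holder failed
		   during recovery; treat the cached value as stale */
		memset(ml->lvb, 0, MY_LVB_LEN);
	}
}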
teigland committed Nov 16, 2012
1 parent a3de56b commit da8c666
Showing 3 changed files with 44 additions and 10 deletions.
1 change: 1 addition & 0 deletions fs/dlm/dlm_internal.h
@@ -337,6 +337,7 @@ enum rsb_flags {
RSB_NEW_MASTER2,
RSB_RECOVER_CONVERT,
RSB_RECOVER_GRANT,
RSB_RECOVER_LVB_INVAL,
};

static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
16 changes: 13 additions & 3 deletions fs/dlm/lock.c
@@ -5393,6 +5393,13 @@ static void purge_dead_list(struct dlm_ls *ls, struct dlm_rsb *r,
if ((lkb->lkb_nodeid == nodeid_gone) ||
dlm_is_removed(ls, lkb->lkb_nodeid)) {

/* tell recover_lvb to invalidate the lvb
because a node holding EX/PW failed */
if ((lkb->lkb_exflags & DLM_LKF_VALBLK) &&
(lkb->lkb_grmode >= DLM_LOCK_PW)) {
rsb_set_flag(r, RSB_RECOVER_LVB_INVAL);
}

del_lkb(r, lkb);

/* this put should free the lkb */
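
Side note, not part of the diff: the lkb_grmode >= DLM_LOCK_PW test above
works because dlm lock modes are ordered numerically by strength, so only
PW and EX holders match.  The constants below are the ones defined in
linux/dlmconstants.h.

#define DLM_LOCK_IV	(-1)	/* invalid */
#define DLM_LOCK_NL	0	/* null */
#define DLM_LOCK_CR	1	/* concurrent read */
#define DLM_LOCK_CW	2	/* concurrent write */
#define DLM_LOCK_PR	3	/* protected read */
#define DLM_LOCK_PW	4	/* protected write */
#define DLM_LOCK_EX	5	/* exclusive */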
@@ -6025,15 +6032,18 @@ static int orphan_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
return error;
}

/* The force flag allows the unlock to go ahead even if the lkb isn't granted.
Regardless of what rsb queue the lock is on, it's removed and freed. */
/* The FORCEUNLOCK flag allows the unlock to go ahead even if the lkb isn't
granted. Regardless of what rsb queue the lock is on, it's removed and
freed. The IVVALBLK flag causes the lvb on the resource to be invalidated
if our lock is PW/EX (it's ignored if our granted mode is smaller.) */

static int unlock_proc_lock(struct dlm_ls *ls, struct dlm_lkb *lkb)
{
struct dlm_args args;
int error;

set_unlock_args(DLM_LKF_FORCEUNLOCK, lkb->lkb_ua, &args);
set_unlock_args(DLM_LKF_FORCEUNLOCK | DLM_LKF_IVVALBLK,
lkb->lkb_ua, &args);

error = unlock_lock(ls, lkb, &args);
if (error == -DLM_EUNLOCK)
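For context, not part of this patch: a minimal sketch of how an in-kernel
caller could request the same invalidation explicitly on a voluntary
unlock, reusing the hypothetical struct my_lock from the sketch above.
dlm_unlock() and DLM_LKF_IVVALBLK are the existing kernel API;
drop_and_invalidate() is made up for illustration.

static int drop_and_invalidate(dlm_lockspace_t *ls, struct my_lock *ml)
{
	/* ask dlm to invalidate the resource lvb while unlocking;
	   the flag only has an effect if our granted mode is PW/EX */
	return dlm_unlock(ls, ml->lksb.sb_lkid, DLM_LKF_IVVALBLK,
			  &ml->lksb, ml);
}

Completion should arrive asynchronously through the ast registered with
dlm_lock(), with sb_status set to -DLM_EUNLOCK when the unlock succeeds.
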
37 changes: 30 additions & 7 deletions fs/dlm/recover.c
@@ -717,8 +717,14 @@ void dlm_recovered_lock(struct dlm_rsb *r)
* the VALNOTVALID flag if necessary, and determining the correct lvb contents
* based on the lvb's of the locks held on the rsb.
*
* RSB_VALNOTVALID is set if there are only NL/CR locks on the rsb. If it
* was already set prior to recovery, it's not cleared, regardless of locks.
* RSB_VALNOTVALID is set in two cases:
*
* 1. we are master, but not new, and we purged an EX/PW lock held by a
* failed node (in dlm_recover_purge which set RSB_RECOVER_LVB_INVAL)
*
* 2. we are a new master, and there are only NL/CR locks left.
* (We could probably improve this by only invalidating in this way when
* the previous master left uncleanly. VMS docs mention that.)
*
* The LVB contents are only considered for changing when this is a new master
* of the rsb (NEW_MASTER2). Then, the rsb's lvb is taken from any lkb with
@@ -734,6 +740,19 @@ static void recover_lvb(struct dlm_rsb *r)
int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;

if (!rsb_flag(r, RSB_NEW_MASTER2) &&
rsb_flag(r, RSB_RECOVER_LVB_INVAL)) {
/* case 1 above */
rsb_set_flag(r, RSB_VALNOTVALID);
return;
}

if (!rsb_flag(r, RSB_NEW_MASTER2))
return;

/* we are the new master, so figure out if VALNOTVALID should
be set, and set the rsb lvb from the best lkb available. */

list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
continue;
@@ -772,13 +791,10 @@ static void recover_lvb(struct dlm_rsb *r)
if (!lock_lvb_exists)
goto out;

/* lvb is invalidated if only NL/CR locks remain */
if (!big_lock_exists)
rsb_set_flag(r, RSB_VALNOTVALID);

/* don't mess with the lvb unless we're the new master */
if (!rsb_flag(r, RSB_NEW_MASTER2))
goto out;

if (!r->res_lvbptr) {
r->res_lvbptr = dlm_allocate_lvb(r->res_ls);
if (!r->res_lvbptr)
@@ -852,12 +868,19 @@ void dlm_recover_rsbs(struct dlm_ls *ls)
if (is_master(r)) {
if (rsb_flag(r, RSB_RECOVER_CONVERT))
recover_conversion(r);

/* recover lvb before granting locks so the updated
lvb/VALNOTVALID is presented in the completion */
recover_lvb(r);

if (rsb_flag(r, RSB_NEW_MASTER2))
recover_grant(r);
recover_lvb(r);
count++;
} else {
rsb_clear_flag(r, RSB_VALNOTVALID);
}
rsb_clear_flag(r, RSB_RECOVER_CONVERT);
rsb_clear_flag(r, RSB_RECOVER_LVB_INVAL);
rsb_clear_flag(r, RSB_NEW_MASTER2);
unlock_rsb(r);
}