Use CT for leader tests, let agent monitor nodes, incr leader test #13

Merged · 4 commits · Aug 8, 2015

4 changes: 2 additions & 2 deletions Makefile
@@ -22,10 +22,10 @@ examples: compile
rebar compile

test:
-	rebar clean compile eunit
+	rebar clean compile eunit ct

test_debug:
-	rebar clean compile eunit eunit_compile_opts="\[\{d,'DEBUG'\}\]"
+	rebar clean compile eunit ct eunit_compile_opts="\[\{d,'DEBUG'\}\]"

doc:
rebar get-deps compile doc
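Note: the added `ct' argument makes rebar run the project's common_test suites in addition to eunit, which is how the enlarged leader tests from the PR title get executed. The suite itself is not shown in this excerpt, so the following is only a hypothetical sketch of the shape such a suite takes (the module name, test case, and the locks_leader:start_link/2 call are assumptions, not taken from this PR):

%% Hypothetical skeleton of a suite picked up by `rebar ct';
%% illustrative only, not the actual suite added by this PR.
-module(locks_leader_SUITE).
-include_lib("common_test/include/ct.hrl").
-export([all/0, leader_elected/1]).

all() -> [leader_elected].

leader_elected(_Config) ->
    {ok, _Apps} = application:ensure_all_started(locks),
    %% assumed API: start a leader using the example callback module
    {ok, L} = locks_leader:start_link(test_cb, dict:new()),
    true = is_pid(L),
    ok.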
74 changes: 44 additions & 30 deletions examples/src/test_cb.erl
@@ -51,10 +51,18 @@
terminate/2,
code_change/4]).

--record(st, {am_leader = false,
+-export([record_fields/1]).
+
+-record(cb, {am_leader = false,
dict}).

--define(event(E), event(?LINE, E)).
+-define(event(E), event(?LINE, E, none)).
+-define(event(E, S), event(?LINE, E, S)).

+record_fields(cb) -> record_info(fields, cb);
+record_fields(st) -> locks_leader:record_fields(st);
+record_fields(_) ->
+    no.

%% @spec init(Arg::term()) -> {ok, State}
%%
@@ -64,7 +72,7 @@
%%
init(Dict) ->
?event({init, Dict}),
-    {ok, #st{dict = Dict}}.
+    {ok, #cb{dict = Dict}}.

%% @spec elected(State::state(), I::info(), Cand::pid() | undefined) ->
%% {ok, Broadcast, NState}
@@ -114,27 +122,26 @@ init(Dict) ->
%% Example:
%%
%% <pre lang="erlang">
-%% elected(#st{dict = Dict} = St, _I, undefined) -&gt;
+%% elected(#cb{dict = Dict} = St, _I, undefined) -&gt;
%% {ok, Dict, St};
-%% elected(#st{dict = Dict} = St, _I, Pid) when is_pid(Pid) -&gt;
+%% elected(#cb{dict = Dict} = St, _I, Pid) when is_pid(Pid) -&gt;
%% %% reply only to Pid
%% {reply, Dict, St}.
%% </pre>
%% @end
%%
-elected(#st{dict = Dict} = S, I, undefined) ->
-    ?event(elected_leader),
+elected(#cb{dict = Dict} = S, I, _) ->
+    ?event({elected_leader, I}),
case locks_leader:new_candidates(I) of
[] ->
-            ?event({elected, Dict}),
-            {ok, {sync, Dict}, S#st{am_leader = true}};
+            ?event({elected, Dict}, S),
+            {ok, {sync, Dict}, S#cb{am_leader = true}};
Cands ->
?event({new_candidates, Cands}),
NewDict = merge_dicts(Dict, I),
-            {ok, {sync, NewDict}, S#st{am_leader = true, dict = NewDict}}
-    end;
-elected(#st{dict = Dict} = S, _E, Pid) when is_pid(Pid) ->
-    {reply, {sync, Dict}, S#st{am_leader = true}}.
+            ?event({merge_result, NewDict}),
+            {ok, {sync, NewDict}, S#cb{am_leader = true, dict = NewDict}}
+    end.

%% This is sub-optimal, but it's only an example!
merge_dicts(D, I) ->
@@ -167,9 +174,9 @@ merge_dicts(D, I) ->
%% {ok, LeaderDict}.
%% </pre>
%% @end
-surrendered(#st{dict = _OurDict} = S, {sync, LeaderDict}, _I) ->
+surrendered(#cb{dict = _OurDict} = S, {sync, LeaderDict}, _I) ->
?event({surrendered, LeaderDict}),
-    {ok, S#st{dict = LeaderDict, am_leader = false}}.
+    {ok, S#cb{dict = LeaderDict, am_leader = false}}.

%% @spec handle_DOWN(Candidate::pid(), State::state(), I::info()) ->
%% {ok, NState} | {ok, Broadcast, NState}
@@ -206,10 +213,10 @@ handle_DOWN(_Pid, S, _I) ->
%% Example:
%%
%% <pre lang="erlang">
-%% handle_leader_call({store,F}, From, #st{dict = Dict} = S, E) -&gt;
+%% handle_leader_call({store,F}, From, #cb{dict = Dict} = S, E) -&gt;
%% NewDict = F(Dict),
-%% {reply, ok, {store, F}, S#st{dict = NewDict}};
-%% handle_leader_call({leader_lookup,F}, From, #st{dict = Dict} = S, E) -&gt;
+%% {reply, ok, {store, F}, S#cb{dict = NewDict}};
+%% handle_leader_call({leader_lookup,F}, From, #cb{dict = Dict} = S, E) -&gt;
%% Reply = F(Dict),
%% {reply, Reply, S}.
%% </pre>
@@ -221,14 +228,16 @@ handle_DOWN(_Pid, S, _I) ->
%% leader; normally, lookups are served locally and updates by the leader,
%% which can lead to race conditions.
%% @end
-handle_leader_call({store,F} = Op, _From, #st{dict = Dict} = S, _I) ->
+handle_leader_call({store,F} = Op, _From, #cb{dict = Dict} = S, _I) ->
?event({handle_leader_call, Op}),
NewDict = F(Dict),
-    {reply, ok, {store, F}, S#st{dict = NewDict}};
-handle_leader_call({leader_lookup,F} = Op, _From, #st{dict = Dict} = S, _I) ->
+    ?event({new_dict, NewDict}),
+    {reply, ok, {store, F}, S#cb{dict = NewDict}};
+handle_leader_call({leader_lookup,F} = Op, _From, #cb{dict = Dict} = S, _I) ->
?event({handle_leader_call, Op}),
Reply = F(Dict),
-    {reply, Reply, S#st{dict = Dict}}.
+    ?event({reply, Reply}),
+    {reply, Reply, S#cb{dict = Dict}}.


%% @spec handle_leader_cast(Msg::term(), State::term(), I::info()) ->
@@ -251,12 +260,14 @@ handle_leader_cast(_Msg, S, _I) ->
%% In this particular module, the leader passes an update function to be
%% applied to the candidate's state.
%% @end
-from_leader({sync, D}, #st{} = S, _I) ->
-    {ok, S#st{dict = D}};
-from_leader({store,F} = Op, #st{dict = Dict} = S, _I) ->
-    ?event({from_leader, Op}),
+from_leader({sync, D} = Msg, #cb{} = S, _I) ->
+    ?event({from_leader, Msg}, S),
+    {ok, S#cb{dict = D}};
+from_leader({store,F} = Op, #cb{dict = Dict} = S, _I) ->
+    ?event({from_leader, Op}, S),
NewDict = F(Dict),
-    {ok, S#st{dict = NewDict}}.
+    ?event({new_dict, NewDict}),
+    {ok, S#cb{dict = NewDict}}.

%% @spec handle_call(Request::term(), From::callerRef(), State::state(),
%% I::info()) ->
Expand All @@ -275,15 +286,18 @@ from_leader({store,F} = Op, #st{dict = Dict} = S, _I) ->
%% used to it from gen_server.
%% @end
%%
-handle_call(merge, _From, #st{am_leader = AmLeader,
+handle_call(merge, _From, #cb{am_leader = AmLeader,
dict = Dict} = S, _I) ->
+    ?event({handle_call, merge}, S),
if AmLeader ->
{reply, {true, Dict}, S};
true ->
{reply, false, S}
end;
-handle_call({lookup, F}, _From, #st{dict = Dict} = S, _I) ->
+handle_call({lookup, F}, _From, #cb{dict = Dict} = S, _I) ->
+    ?event({handle_call, lookup}, S),
Reply = F(Dict),
+    ?event({reply, Reply}),
{reply, Reply, S}.

%% @spec handle_cast(Msg::term(), State::state(), I::info()) ->
@@ -331,5 +345,5 @@ terminate(_Reason, _S) ->
ok.


-event(_Line, _Event) ->
+event(_Line, _Event, _State) ->
ok.
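The `record_fields/1' export added above follows a record-introspection convention: given a record tag, the callback returns that record's field names (or `no' for unknown tags), so record-aware trace or debug tooling can print records with named fields. Note that `record_fields(st)' delegates to locks_leader:record_fields(st), letting the leader's internal #st record be rendered the same way. A minimal sketch of such a consumer (the function below is illustrative, not part of the library):

%% Sketch: expand a record tuple into {Tag, [{Field, Value}]} pairs
%% using the record_fields/1 callback shown above; fall back to the
%% raw tuple when the tag is unknown.
expand_record(Mod, Rec) when is_tuple(Rec), tuple_size(Rec) > 1 ->
    [Tag | Vals] = tuple_to_list(Rec),
    case Mod:record_fields(Tag) of
        no ->
            Rec;
        Fields when length(Fields) =:= length(Vals) ->
            {Tag, lists:zip(Fields, Vals)}
    end.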
77 changes: 66 additions & 11 deletions src/locks_agent.erl
@@ -48,6 +48,7 @@
lock_objects/2,
surrender_nowait/4,
await_all_locks/1,
+         monitor_nodes/2,
change_flag/3,
lock_info/1,
transaction_status/1,
@@ -104,6 +105,7 @@
down = [] :: [node()],
monitored = [] :: [{node(), reference()}],
await_nodes = false :: boolean(),
+          monitor_nodes = false :: boolean(),
pending :: ets:tab(),
sync = [] :: [#lock{}],
client :: pid(),
@@ -183,6 +185,12 @@ start() ->
%% the agent will wait for the nodes to reappear, and reclaim the lock(s) when
%% they do.
%%
+%% * `{monitor_nodes, boolean()}' - default: `false'. Works like
+%% {@link net_kernel:monitor_nodes/1}, but `nodedown' and `nodeup' messages are
+%% sent when the `locks' server on a given node either appears or disappears.
+%% The messages (`{nodeup,Node}' and `{nodedown,Node}') are sent only to
+%% the client. Can also be toggled using {@link monitor_nodes/2}.
+%%
%% * `{notify, boolean()}' - default: `false'. If `{notify, true}', the client
%% will receive `{locks_agent, Agent, Info}' messages, where `Info' is either
%% a `#locks_info{}' record or `{have_all_locks, Deadlocks}'.
@@ -318,6 +326,17 @@ begin_transaction(Objects, Opts) ->
await_all_locks(Agent) ->
call(Agent, await_all_locks,infinity).

+-spec monitor_nodes(agent(), boolean()) -> boolean().
+%% @doc Toggles monitoring of nodes, like net_kernel:monitor_nodes/1.
+%%
+%% Works like {@link net_kernel:monitor_nodes/1}, but `nodedown' and `nodeup'
+%% messages are sent when the `locks' server on a given node either appears
+%% or disappears. In this sense, the monitoring will signal when a viable
+%% `locks' node becomes operational (or inoperational).
+%% The messages (`{nodeup,Node}' and `{nodedown,Node}') are sent only to
+%% the client.
+monitor_nodes(Agent, Bool) when is_boolean(Bool) ->
+    call(Agent, {monitor_nodes, Bool}).

-spec end_transaction(agent()) -> ok.
%% @doc Ends the transaction, terminating the agent.
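A usage sketch for the new option and toggle, run inside a client process or test case (the receive loop is illustrative; the message shapes and the locks_agent:start/1 call are taken from this diff):

%% Sketch: a client starts an agent that reports locks-server
%% availability; nodeup/nodedown tuples go only to the client.
{ok, Agent} = locks_agent:start([{monitor_nodes, true}]),
receive
    {nodeup, N1}   -> io:fwrite("locks server up on ~p~n", [N1]);
    {nodedown, N2} -> io:fwrite("locks server down on ~p~n", [N2])
after 5000 ->
    no_change
end,
%% monitor_nodes/2 flips the flag; the reply is the previous setting,
%% per the {monitor_nodes, Bool} handle_call clause below.
true = locks_agent:monitor_nodes(Agent, false).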
@@ -462,6 +481,7 @@ init(Opts) ->
false -> []
end,
AwaitNodes = proplists:get_value(await_nodes, Opts, false),
+    MonNodes = proplists:get_value(monitor_nodes, Opts, false),
net_kernel:monitor_nodes(true),
{ok,#state{
locks = ets_new(locks, [ordered_set, {keypos, #lock.object}]),
@@ -470,6 +490,7 @@
down = [],
monitored = orddict:new(),
await_nodes = AwaitNodes,
+       monitor_nodes = MonNodes,
pending = ets_new(pending, [bag, {keypos, #req.object}]),
sync = [],
client = Client,
@@ -486,6 +507,8 @@ handle_call(transaction_status, _From, #state{status = Status} = State) ->
{reply, Status, State};
handle_call(await_all_locks, {Client, Tag}, State) ->
{noreply, check_if_done(add_waiter(wait, Client, Tag, State))};
+handle_call({monitor_nodes, Bool}, _From, St) when is_boolean(Bool) ->
+    {reply, St#state.monitor_nodes, St#state{monitor_nodes = Bool}};
handle_call(stop, {Client, _}, State) when Client==?myclient ->
{stop, normal, ok, State};
handle_call(R, _, State) ->
@@ -677,20 +700,31 @@ handle_info({'DOWN',_,_,_,_}, S) ->
{noreply, S};
handle_info({nodeup, N} = _Msg, #state{down = Down} = S) ->
?event(_Msg),
-    case lists:member(N, Down) of
+    case S#state.monitor_nodes orelse lists:member(N, Down) of
true -> watch_node(N);
false -> ignore
end,
{noreply, S};
handle_info({nodedown,_}, S) ->
%% We react on 'DOWN' messages above instead
{noreply, S};
-handle_info({locks_running,N} = Msg, #state{down=Down, pending=Pending} = S) ->
+handle_info({locks_running,N} = Msg, #state{down=Down, pending=Pending,
+                                            requests = Reqs,
+                                            monitor_nodes = MonNodes,
+                                            client = C} = S) ->
?event(Msg),
+    if MonNodes ->
+            C ! {nodeup, N};
+       true ->
+            ignore
+    end,
case lists:member(N, Down) of
true ->
S1 = S#state{down = Down -- [N]},
-            case requests_with_node(N, Pending) of
+            {R, P} = Res = {requests_with_node(N, Reqs),
+                            requests_with_node(N, Pending)},
+            ?event({{reqs, pending}, Res}),
+            case R ++ P of
[] ->
{noreply, S1};
Reissue ->
@@ -777,16 +811,21 @@ handle_nodedown(Node, #state{down = Down, requests = Reqs,
pending = Pending,
monitored = Mon, locks = Locks,
interesting = I,
-                            agents = Agents} = S) ->
+                            agents = Agents,
+                            monitor_nodes = MonNodes} = S) ->
+    ?event({handle_nodedown, Node}, S),
+    handle_monitor_on_down(Node, S),
ets_match_delete(Locks, #lock{object = {'_',Node}, _ = '_'}),
ets_match_delete(Agents, {{'_',{'_',Node}}}),
Down1 = [Node|Down -- [Node]],
-    S1 = S#state{down = Down1, interesting = prune_interesting(I, node, Node)},
-    case {requests_with_node(Node, Reqs),
-          requests_with_node(Node, Pending)} of
+    S1 = S#state{down = Down1, interesting = prune_interesting(I, node, Node),
+                 monitored = lists:keydelete(Node, 1, Mon)},
+    Res = {requests_with_node(Node, Reqs),
+           requests_with_node(Node, Pending)},
+    ?event({{reqs,pending}, Res}),
+    case Res of
{[], []} ->
-            {noreply, S#state{down = lists:keydelete(Node, 1, Mon)}};
+            {noreply, S1};
{Rs, PRs} ->
move_requests(Rs, Reqs, Pending),
case S1#state.await_nodes of
@@ -799,7 +838,8 @@
{stop, {cannot_lock_objects, Lost}, S1}
end;
true ->
-            case lists:member(Node, nodes()) of
+            case MonNodes == false
+                     andalso lists:member(Node, nodes()) of
true -> watch_node(Node);
false -> ignore
end,
Expand All @@ -811,6 +851,20 @@ handle_nodedown(Node, #state{down = Down, requests = Reqs,
end
end.

+handle_monitor_on_down(_, #state{monitor_nodes = false}) ->
+    ok;
+handle_monitor_on_down(Node, #state{monitor_nodes = true,
+                                    client = C}) ->
+    C ! {nodedown, Node},
+    case lists:member(Node, nodes()) of
+        true ->
+            watch_node(Node);
+        false ->
+            ignore
+    end,
+    ok.


prune_interesting(I, node, Node) ->
[OID || {_, N} = OID <- I, N =/= Node];
prune_interesting(I, object, Object) ->
@@ -859,7 +913,8 @@ ensure_monitor_(Locker, Node, Mon) ->
orddict:store(Node, Ref, Mon)
end.

-request_lock({OID, Node}, Mode, #state{client = Client} = State) ->
+request_lock({OID, Node} = _LockID, Mode, #state{client = Client} = State) ->
+    ?event({request_lock, _LockID}),
P = {?LOCKER, Node},
erlang:monitor(process, P),
locks_server:lock(OID, [Node], Client, Mode),
@@ -946,7 +1001,7 @@ do_surrender(ShouldSurrender, ToObject, InvolvedAgents,
NewDeadlocks = [{ShouldSurrender, ToObject} | Deadlocks],
?event({do_surrender, [{should_surrender, ShouldSurrender},
{to_object, ToObject},
-                           {involved_agents, InvolvedAgents}]}),
+                           {involved_agents, lists:sort(InvolvedAgents)}]}),
if ShouldSurrender == self() ->
request_surrender(ToObject, State1),
send_surrender_info(InvolvedAgents, OldLock),
8 changes: 5 additions & 3 deletions src/locks_leader.erl
@@ -178,7 +178,8 @@ candidates(#st{candidates = C}) ->
%% new candidates to see whether one of them was a leader, which could
%% be the case if the candidates appeared after a healed netsplit.
%% @end
-new_candidates(#st{candidates = C, synced = S}) ->
+new_candidates(#st{candidates = C, synced = S} = St) ->
+    ?event({new_candidates, St}),
C -- S.

-spec workers(election()) -> [pid()].
@@ -456,9 +457,9 @@ init_(Module, ModSt0, Options, Parent, Reg) ->
Agent =
case Role of
candidate ->
-            net_kernel:monitor_nodes(true),
{ok, A} = locks_agent:start([{notify,true},
-                                         {await_nodes, true}]),
+                                         {await_nodes, true},
+                                         {monitor_nodes, true}]),
locks_agent:lock_nowait(
A, Lock, write, AllNodes, all_alive),
A;
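(Note: with the agent now started with `{monitor_nodes, true}', the candidate no longer calls net_kernel:monitor_nodes/1 itself, per the removed line above; it relies instead on the agent's locks-server-level nodeup/nodedown messages.)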
@@ -756,6 +757,7 @@ monitor_cand(Client) ->

maybe_announce_leader(Pid, Type, #st{leader = L, mod = M,
mod_state = MSt} = S) ->
+    ?event({maybe_announce_leader, Pid, Type}, S),
IsSynced = is_synced(Pid, Type, S),
if L == self(), IsSynced == false ->
case M:elected(MSt, opaque(S), Pid) of