Merge of slf-core-ets-match-replacement + hand-edits onto master.
Avoid use of ets:match() in hot code paths

Profiling with DTrace on Solaris suggests that ets:match() causes a
much higher than necessary lock contention rate within the Erlang
R14B03 and R14B04 VMs.  The improvement from eliminating ets:match()
in the riak_core and riak_kv applications is substantial when Pthread
locking is used instead of Ericsson's hand-tuned locking (which is
the default).  The results are more ambiguous when the default
locking strategy is used.

Hand-edits were required due to changes in master after the 1.0 branch
was created.
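
The pattern applied throughout the patch: instead of scanning the table
with ets:match/2 on every read, the server maintains explicit per-key
index entries ({by_node, Node} and {by_service, Service}) that readers
fetch with a single-key ets:lookup/2.  Below is a minimal, hypothetical
sketch of that trade; the module name pubtab_demo, the table name
demo_tab, and the {pair, Node, Service} / {by_service, Service} key
shapes are illustrative assumptions and are not taken from the
riak_core source.

%% pubtab_demo: hypothetical illustration, not part of the commit.
-module(pubtab_demo).
-export([run/0]).

run() ->
    T = ets:new(demo_tab, [set, private]),

    %% One row per node/service pair, roughly as in the original code.
    ets:insert(T, [{{pair, node_a, kv}, os:timestamp()},
                   {{pair, node_b, kv}, os:timestamp()}]),

    %% Scan-style read: ets:match/2 walks the whole table, which is
    %% what the DTrace profiling flagged as a lock-contention source.
    Scanned = [N || [N] <- ets:match(T, {{pair, '$1', kv}, '_'})],

    %% Lookup-style read: keep an explicit index entry per key so a
    %% reader only needs a single-key ets:lookup/2, no table scan.
    ets:insert(T, {{by_service, kv}, [node_a, node_b]}),
    Indexed = case ets:lookup(T, {by_service, kv}) of
                  [{{by_service, kv}, Nodes}] -> Nodes;
                  [] -> []
              end,

    ets:delete(T),
    {lists:sort(Scanned), lists:sort(Indexed)}.

In the diff itself the same idea appears as the pubtab_insert,
pubtab_delete, and pubtab_get_* helpers, which keep the by_node and
by_service entries consistent on every membership change.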
slfritchie committed Jan 3, 2012
1 parent dbaebcc commit 50b8ae6
Showing 2 changed files with 51 additions and 13 deletions.
src/riak_core_node_watcher.erl (49 changes: 40 additions & 9 deletions)
@@ -65,13 +65,13 @@ node_down() ->
     gen_server:call(?MODULE, {node_status, down}, infinity).
 
 services() ->
-    ordsets:from_list([Service || [Service] <- ets:match(?MODULE, {{'_', '$1'}, '_'})]).
+    gen_server:call(?MODULE, services, infinity).
 
 services(Node) ->
-    [Service || [Service] <- ets:match(?MODULE, {{Node, '$1'}, '_'})].
+    pubtab_get_services(Node).
 
 nodes(Service) ->
-    [Node || [Node] <- ets:match(?MODULE, {{'$1', Service}, '_'})].
+    pubtab_get_nodes(Service).
 
 
 %% ===================================================================
@@ -151,7 +151,11 @@ handle_call({node_status, Status}, _From, State) ->
              {Status, Status} -> %% noop
                  State
          end,
-    {reply, ok, update_avsn(S2)}.
+    {reply, ok, update_avsn(S2)};
+handle_call(services, _From, State) ->
+    Res = [Service || {{by_service, Service}, Nds} <- ets:tab2list(?MODULE),
+                      Nds /= []],
+    {reply, lists:sort(Res), State}.
 
 
 handle_cast({ring_update, R}, State) ->
@@ -313,8 +317,8 @@ node_down(Node, State) ->
 
 
 node_delete(Node) ->
-    Services = services(Node),
-    ets:match_delete(?MODULE, {{Node, '_'}, '_'}),
+    Services = pubtab_get_services(Node),
+    [pubtab_delete(Node, Service) || Service <- Services],
     ets:delete(?MODULE, Node),
     Services.
 
@@ -327,12 +331,11 @@ node_update(Node, Services) ->
 
     Added = ordsets:subtract(NewStatus, OldStatus),
     Deleted = ordsets:subtract(OldStatus, NewStatus),
-    Unchanged = ordsets:intersection(NewStatus, OldStatus),
 
     %% Update ets table with changes; make sure to touch unchanged
     %% service with latest timestamp
-    [ets:delete(?MODULE, {Node, Ss}) || Ss <- Deleted],
-    ets:insert(?MODULE, [{{Node, Ss}, Now} || Ss <- Added ++ Unchanged]),
+    [pubtab_delete(Node, Ss) || Ss <- Deleted],
+    [pubtab_insert(Node, Ss) || Ss <- Added],
 
     %% Keep track of the last time we recv'd data from a node
     ets:insert(?MODULE, {Node, Now}),
@@ -392,3 +395,31 @@ peers_update(NewPeers, State) ->
     %% Broadcast our current status to new peers
     broadcast(Added, State#state { peers = NewPeers }).
 
+pubtab_delete(Node, Service) ->
+    Svcs = pubtab_get_services(Node),
+    ets:insert(?MODULE, {{by_node, Node}, Svcs -- [Service]}),
+    Nds = pubtab_get_nodes(Service),
+    ets:insert(?MODULE, {{by_service, Service}, Nds -- [Node]}).
+
+pubtab_insert(Node, Service) ->
+    %% Remove Service & node before adding: avoid accidental duplicates
+    Svcs = pubtab_get_services(Node) -- [Service],
+    ets:insert(?MODULE, {{by_node, Node}, [Service|Svcs]}),
+    Nds = pubtab_get_nodes(Service) -- [Node],
+    ets:insert(?MODULE, {{by_service, Service}, [Node|Nds]}).
+
+pubtab_get_services(Node) ->
+    case ets:lookup(?MODULE, {by_node, Node}) of
+        [{{by_node, Node}, Ss}] ->
+            Ss;
+        [] ->
+            []
+    end.
+
+pubtab_get_nodes(Service) ->
+    case ets:lookup(?MODULE, {by_service, Service}) of
+        [{{by_service, Service}, Ns}] ->
+            Ns;
+        [] ->
+            []
+    end.
src/riak_core_vnode_manager.erl (15 changes: 11 additions & 4 deletions)
@@ -31,6 +31,8 @@
          force_handoffs/0]).
 -export([all_index_pid/1, get_vnode_pid/2, start_vnode/2,
          unregister_vnode/2, unregister_vnode/3, vnode_event/4]).
+%% Field debugging
+-export([get_tab/0]).
 
 -ifdef(TEST).
 -include_lib("eunit/include/eunit.hrl").
@@ -94,6 +96,9 @@ start_vnode(Index, VNodeMod) ->
 vnode_event(Mod, Idx, Pid, Event) ->
     gen_server:cast(?MODULE, {vnode_event, Mod, Idx, Pid, Event}).
 
+get_tab() ->
+    gen_server:call(?MODULE, get_tab, infinity).
+
 %% ===================================================================
 %% gen_server behaviour
 %% ===================================================================
@@ -118,7 +123,7 @@ find_vnodes(State) ->
     VnodePids = [Pid || {_, Pid, worker, _}
                             <- supervisor:which_children(riak_core_vnode_sup),
                         is_pid(Pid) andalso is_process_alive(Pid)],
-    IdxTable = ets:new(ets_vnodes, [{keypos, 2}]),
+    IdxTable = ets:new(ets_vnodes, [{keypos, 2}, private]),
 
     %% If the vnode manager is being restarted, scan the existing
     %% vnode children and work out which module and index they are
@@ -160,6 +165,8 @@ handle_call({all_index_pid, Mod}, _From, State) ->
 handle_call({Partition, Mod, get_vnode}, _From, State) ->
     Pid = get_vnode(Partition, Mod, State),
    {reply, {ok, Pid}, State};
+handle_call(get_tab, _From, State) ->
+    {reply, ets:tab2list(State#state.idxtab), State};
 handle_call(_, _From, State) ->
     {reply, ok, State}.
 
@@ -281,9 +288,9 @@ get_all_vnodes(Mod, State) ->
 
 %% @private
 idx2vnode(Idx, Mod, _State=#state{idxtab=T}) ->
-    case ets:match(T, {idxrec, {Idx,Mod}, Idx, Mod, '$1', '_'}) of
-        [[VNodePid]] -> VNodePid;
-        [] -> no_match
+    case ets:lookup(T, {Idx, Mod}) of
+        [I] -> I#idxrec.pid;
+        [] -> no_match
     end.
 
 %% @private
