Clarify types a bit

Remove some confusion around the handoff types: add `repair' to the `ho_type()' union, make the `#handoff_status{}' record's `type' field use `ho_type()' instead of restating the alternatives inline (where `ownership' had drifted from `ownership_handoff'), and rename the `handoff()'/`handoffs()' aliases to `handoff_status()'/`[handoff_status()]' to match the record they wrap.

commit d732e2b2be2e5a1a6a4e78de28fc05152e1202da (1 parent: 96e4eae)
@rzezeski authored

include/riak_core_handoff.hrl (7 changed lines)

@@ -13,7 +13,7 @@
}).
-type ho_stats() :: #ho_stats{}.
--type ho_type() :: ownership_handoff | hinted_handoff.
+-type ho_type() :: ownership_handoff | hinted_handoff | repair.
-type predicate() :: fun((any()) -> boolean()).
-type index() :: integer().
@@ -32,9 +32,8 @@
stats :: dict(),
vnode_pid :: pid() | undefined,
vnode_mon :: reference(),
- type :: ownership | hinted_handoff | repair,
+ type :: ho_type(),
req_origin :: node(),
filter_mod_fun :: {module(), atom()}
}).
--type handoff() :: #handoff_status{}.
--type handoffs() :: [handoff()].
+-type handoff_status() :: #handoff_status{}.
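
Taken together, the header now reads roughly as follows. This is a sketch reconstructed from the two hunks above, not the full file; record fields the diff does not show are elided behind a comment:

%% Sketch of include/riak_core_handoff.hrl after this commit,
%% reconstructed from the hunks above; fields not visible in the
%% diff are elided.
-type ho_type() :: ownership_handoff | hinted_handoff | repair.

-record(handoff_status,
        { %% ...fields not shown in the diff elided...
          stats          :: dict(),
          vnode_pid      :: pid() | undefined,
          vnode_mon      :: reference(),
          type           :: ho_type(),   %% was: ownership | hinted_handoff | repair
          req_origin     :: node(),
          filter_mod_fun :: {module(), atom()}
        }).

%% The alias now matches the record it wraps; the old handoff() and
%% handoffs() aliases are gone, and call sites spell out
%% handoff_status() and [handoff_status()].
-type handoff_status() :: #handoff_status{}.

Note that the old inline field type said `ownership' where the union says `ownership_handoff'; that kind of drift between the inline alternatives and the named union is exactly the confusion the commit message refers to.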

src/riak_core_handoff_manager.erl (14 changed lines)

@@ -55,7 +55,7 @@
-record(state,
{ excl,
- handoffs=[] :: handoffs()
+ handoffs=[] :: [handoff_status()]
}).
%% this can be overridden with riak_core handoff_concurrency
@@ -83,7 +83,7 @@ add_inbound(SSLOpts) ->
%% @doc Initiate a transfer from `SrcPartition' to `TargetPartition'
%% for the given `Module' using the `FilterModFun' filter.
-spec xfer({index(), node()}, mod_partition(), pid(), {module(), atom()}) ->
- handoff().
+ handoff_status().
xfer({SrcPartition, SrcOwner}, {Module, TargetPartition},
VNode, FilterModFun) ->
%% NOTE: This will not work with old nodes
@@ -119,7 +119,7 @@ xfer({SrcPartition, SrcOwner}, {Module, TargetPartition},
end.
%% @doc Retry the given `Xfer'.
--spec retry_xfer(handoff()) -> NewXfer::handoff().
+-spec retry_xfer(handoff_status()) -> NewXfer::handoff_status().
retry_xfer(Xfer) ->
#handoff_status{mod_src_tgt={Module, SrcPartition, TargetPartition},
src_node=SrcOwner,
@@ -128,7 +128,7 @@ retry_xfer(Xfer) ->
xfer({SrcPartition, SrcOwner}, {Module, TargetPartition},
VNode, FilterModFun).
--spec xfer_status(handoff() | max_concurrency) ->
+-spec xfer_status(handoff_status() | max_concurrency) ->
complete | in_progress | max_concurrency | not_found.
xfer_status(HS) ->
case HS#handoff_status.status of
@@ -158,7 +158,7 @@ get_concurrency() ->
gen_server:call(?MODULE, get_concurrency).
%% @doc Kill the transfer `Xfer' with `Reason'.
--spec kill_xfer(handoff(), any()) -> ok.
+-spec kill_xfer(handoff_status(), any()) -> ok.
kill_xfer(Xfer, Reason) ->
SrcNode = Xfer#handoff_status.src_node,
ok = gen_server:call({?MODULE, SrcNode}, {kill_xfer, Xfer, Reason}).
@@ -442,9 +442,9 @@ send_handoff(Mod, Partition, Node, Pid, HS) ->
-spec send_handoff({module(), index(), index()}, node(),
pid(), list(),
{predicate() | none, {module(), atom()} | none}, node()) ->
- {ok, handoff()}
+ {ok, handoff_status()}
| {error, max_concurrency}
- | {false, handoff()}.
+ | {false, handoff_status()}.
send_handoff({Mod, Src, Target}, Node, Vnode, HS, {Filter, FilterModFun}, Origin) ->
case handoff_concurrency_limit_reached() of
true ->
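
To see how the renamed alias threads through the manager's API, here is a hypothetical caller; `handoff_demo', `my_vnode_mod', and `my_filter_mod:keep_all/1' are made-up names, not anything from riak_core:

%% Hypothetical caller sketch (not part of this commit).
-module(handoff_demo).
-export([demo/3, abort/1]).

%% xfer/4 returns a handoff_status() record per the spec above; the
%% same record then flows into xfer_status/1 and retry_xfer/1.
demo({SrcPartition, SrcOwner}, TargetPartition, VNodePid) ->
    HS = riak_core_handoff_manager:xfer({SrcPartition, SrcOwner},
                                        {my_vnode_mod, TargetPartition},
                                        VNodePid,
                                        {my_filter_mod, keep_all}),
    case riak_core_handoff_manager:xfer_status(HS) of
        complete        -> {done, HS};
        in_progress     -> {running, HS};
        max_concurrency -> {retried, riak_core_handoff_manager:retry_xfer(HS)};
        not_found       -> {gone, HS}
    end.

%% kill_xfer/2 takes the same record plus an arbitrary reason term.
abort(HS) ->
    ok = riak_core_handoff_manager:kill_xfer(HS, user_request).

Nothing behavioral changes in this file; the rename just makes every spec name the record it actually returns.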

src/riak_core_vnode_manager.erl (8 changed lines)

@@ -45,8 +45,8 @@
{
mod_partition :: mod_partition(),
filter_mod_fun :: {module(), atom()},
- minus_one_xfer :: handoff(),
- plus_one_xfer :: handoff()
+ minus_one_xfer :: handoff_status(),
+ plus_one_xfer :: handoff_status()
}).
-type repair() :: #repair{}.
-type repairs() :: [repair()].
@@ -123,7 +123,7 @@ repair_status({_Module, Partition}=ModPartition) ->
Owner = riak_core_ring:index_owner(Ring, Partition),
gen_server:call({?MODULE, Owner}, {repair_status, ModPartition}).
--spec xfer_complete(node(), handoff()) -> ok.
+-spec xfer_complete(node(), handoff_status()) -> ok.
xfer_complete(Origin, Xfer) ->
gen_server:call({?MODULE, Origin}, {xfer_complete, Xfer}).
@@ -712,7 +712,7 @@ check_repairs(Repairs) ->
end,
lists:reverse(lists:foldl(Check, [], Repairs)).
--spec maybe_retry(handoff()) -> Xfer2::handoff().
+-spec maybe_retry(handoff_status()) -> Xfer2::handoff_status().
maybe_retry(Xfer) ->
case riak_core_handoff_manager:xfer_status(Xfer) of
complete -> Xfer;
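
The last hunk cuts off inside the case; only the `complete' clause is visible. A plausible shape for the rest, assuming the remaining atoms from `xfer_status/1''s return spec leave a running transfer alone and retry anything else:

%% Assumed completion of maybe_retry/1, based only on xfer_status/1's
%% return spec (complete | in_progress | max_concurrency | not_found);
%% the actual clauses are truncated in the hunk above.
maybe_retry(Xfer) ->
    case riak_core_handoff_manager:xfer_status(Xfer) of
        complete    -> Xfer;   %% shown in the diff: already done
        in_progress -> Xfer;   %% assumed: still running, keep as-is
        _NotRunning ->         %% assumed: max_concurrency | not_found
            riak_core_handoff_manager:retry_xfer(Xfer)
    end.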