
Merge #554 from jdb-large-master into staging-1.4

Fix eunit test failures in riak_kv_vnode that were not caught pre-merge.

Conflicts:
	src/riak_kv_coverage_filter.erl
commit 56358f85e564f93038ca91f70a5fec2ec7a8dd51 (2 parents: 4649a8c + fdd8d02), committed by @jtuple on May 28, 2013
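
Most hunks in this merge apply the same migration: code that previously materialized the full ring via riak_core_ring_manager:get_my_ring/0 now uses either the cached consistent-hash binary (riak_core_ring_manager:get_chash_bin/0 plus chashbin:responsible_index/2) or the ring-free riak_core_bucket:get_bucket/1 and riak_core_apl:get_apl_ann/3 variants. A condensed before/after sketch assembled from the changed lines below (the wrapper names old_index_and_props/new_index_and_props are illustrative and not part of the commit):

    %% Old style: fetch the whole ring, then derive everything from it.
    old_index_and_props(Bucket, Key) ->
        {ok, Ring} = riak_core_ring_manager:get_my_ring(),
        ChashKey = riak_core_util:chash_key({Bucket, Key}),
        Index = riak_core_ring:responsible_index(ChashKey, Ring),
        BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
        {Index, BucketProps}.

    %% New style: use the cached chashbin and the ring-free bucket lookup.
    new_index_and_props(Bucket, Key) ->
        {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
        ChashKey = riak_core_util:chash_key({Bucket, Key}),
        Index = chashbin:responsible_index(ChashKey, CHBin),
        BucketProps = riak_core_bucket:get_bucket(Bucket),
        {Index, BucketProps}.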
@@ -66,13 +66,13 @@ build_filter(Bucket, ItemFilterInput, FilterVNode) ->
%% Compose a key filtering function for the VNode
ItemFilter;
(ItemFilter == none) -> % only vnode filtering required
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- PrefListFun = build_preflist_fun(Bucket, Ring),
+ {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
+ PrefListFun = build_preflist_fun(Bucket, CHBin),
%% Create a VNode filter
compose_filter(FilterVNode, PrefListFun);
true -> % key and vnode filtering
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- PrefListFun = build_preflist_fun(Bucket, Ring),
+ {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
+ PrefListFun = build_preflist_fun(Bucket, CHBin),
%% Create a filter for the VNode
compose_filter(FilterVNode, PrefListFun, ItemFilter)
end.
@@ -112,16 +112,16 @@ build_item_filter(FilterInput) ->
%% @private
-build_preflist_fun(Bucket, Ring) ->
+build_preflist_fun(Bucket, CHBin) ->
fun({o, Key, _Value}) -> %% $ index return_body
ChashKey = riak_core_util:chash_key({Bucket, Key}),
- riak_core_ring:responsible_index(ChashKey, Ring);
+ chashbin:responsible_index(ChashKey, CHBin);
({_Value, Key}) ->
ChashKey = riak_core_util:chash_key({Bucket, Key}),
- riak_core_ring:responsible_index(ChashKey, Ring);
+ chashbin:responsible_index(ChashKey, CHBin);
(Key) ->
ChashKey = riak_core_util:chash_key({Bucket, Key}),
- riak_core_ring:responsible_index(ChashKey, Ring)
+ chashbin:responsible_index(ChashKey, CHBin)
end.
@@ -104,8 +104,7 @@ delete(ReqId,Bucket,Key,Options,Timeout,Client,ClientId,VClock) ->
end.
get_r_options(Bucket, Options) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
+ BucketProps = riak_core_bucket:get_bucket(Bucket),
N = proplists:get_value(n_val,BucketProps),
%% specifying R/W AND RW together doesn't make sense, so check if R or W
%is defined first. If not, use RW or default.
@@ -139,8 +138,7 @@ get_r_options(Bucket, Options) ->
end.
get_w_options(Bucket, Options) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
+ BucketProps = riak_core_bucket:get_bucket(Bucket),
N = proplists:get_value(n_val,BucketProps),
%% specifying R/W AND RW together doesn't make sense, so check if R or W
%is defined first. If not, use RW or default.
@@ -172,14 +172,12 @@ init({test, Args, StateProps}) ->
%% @private
prepare(timeout, StateData=#state{bkey=BKey={Bucket,_Key}}) ->
?DTRACE(?C_GET_FSM_PREPARE, [], ["prepare"]),
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
+ BucketProps = riak_core_bucket:get_bucket(Bucket),
DocIdx = riak_core_util:chash_key(BKey),
N = proplists:get_value(n_val,BucketProps),
StatTracked = proplists:get_value(stat_tracked, BucketProps, false),
UpNodes = riak_core_node_watcher:nodes(riak_kv),
- Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes),
-
+ Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, UpNodes),
new_state_timeout(validate, StateData#state{starttime=riak_core_util:moment(),
n = N,
bucket_props=BucketProps,
@@ -239,13 +239,11 @@ handle_call({insert, Id, Key, Hash, Options}, _From, State) ->
State2 = do_insert(Id, Key, Hash, Options, State),
{reply, ok, State2};
handle_call({insert_object, BKey, RObj}, _From, State) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- IndexN = riak_kv_util:get_index_n(BKey, Ring),
+ IndexN = riak_kv_util:get_index_n(BKey),
State2 = do_insert(IndexN, term_to_binary(BKey), hash_object(BKey, RObj), [], State),
{reply, ok, State2};
handle_call({delete, BKey}, _From, State) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- IndexN = riak_kv_util:get_index_n(BKey, Ring),
+ IndexN = riak_kv_util:get_index_n(BKey),
State2 = do_delete(IndexN, term_to_binary(BKey), State),
{reply, ok, State2};
@@ -299,8 +297,7 @@ handle_cast(stop, State) ->
{stop, normal, State};
handle_cast({insert_object, BKey, RObj}, State) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- IndexN = riak_kv_util:get_index_n(BKey, Ring),
+ IndexN = riak_kv_util:get_index_n(BKey),
State2 = do_insert(IndexN, term_to_binary(BKey), hash_object(BKey, RObj), [], State),
{noreply, State2};
@@ -398,9 +395,8 @@ hash_object({Bucket, Key}, RObjBin) ->
%% key/hash pair will be ignored.
-spec fold_keys(index(), pid()) -> ok.
fold_keys(Partition, Tree) ->
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
Req = ?FOLD_REQ{foldfun=fun(BKey={Bucket,Key}, RObj, _) ->
- IndexN = riak_kv_util:get_index_n({Bucket, Key}, Ring),
+ IndexN = riak_kv_util:get_index_n({Bucket, Key}),
insert(IndexN, term_to_binary(BKey), hash_object(BKey, RObj),
Tree, [if_missing]),
ok
@@ -1057,17 +1057,5 @@ random_constant_hash()->
%% work, plus all work assigned to their predecessor); perhaps
%% something that also skips a random number of up vnodes in the
%% next version?
- {ok, Ring} = riak_core_ring_manager:get_my_ring(),
- Preflist = riak_core_ring:preflist(Random, Ring),
- {Partition, _Node} = first_up(Preflist),
- riak_pipe_vnode:hash_for_partition(Partition).
-
-%% this will fail if: this node() is a new member, owning no
-%% partitions or not having started its riak_pipe service yet, and all
-%% other nodes are down
-first_up(Preflist) ->
- UpSet = ordsets:from_list(riak_core_node_watcher:nodes(riak_pipe)),
- hd(lists:dropwhile(fun({_P, Node}) ->
- not ordsets:is_element(Node, UpSet)
- end,
- Preflist)).
+ {Partition, _Node} = riak_core_apl:first_up(Random, riak_pipe),
+ riak_pipe_vnode:hash_for_partition(Partition).
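
The node-local first_up/1 helper removed above (a dropwhile over the preflist against riak_core_node_watcher:nodes(riak_pipe)) is assumed to be subsumed by riak_core_apl:first_up/2. A minimal usage sketch under that assumption (the wrapper name hash_for_first_up is illustrative; the two calls are taken verbatim from the hunk):

    %% Returns the constant-hash value for the first partition in DocIdx's
    %% preflist whose owner node is running the riak_pipe service
    %% (assumed semantics, based on the local helper this call replaces).
    hash_for_first_up(DocIdx) ->
        {Partition, _Node} = riak_core_apl:first_up(DocIdx, riak_pipe),
        riak_pipe_vnode:hash_for_partition(Partition).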
@@ -206,13 +206,12 @@ init({test, Args, StateProps}) ->
prepare(timeout, StateData0 = #state{from = From, robj = RObj,
bkey = BKey,
options = Options}) ->
- {ok,Ring} = riak_core_ring_manager:get_my_ring(),
- BucketProps = riak_core_bucket:get_bucket(riak_object:bucket(RObj), Ring),
+ BucketProps = riak_core_bucket:get_bucket(riak_object:bucket(RObj)),
DocIdx = riak_core_util:chash_key(BKey),
N = proplists:get_value(n_val,BucketProps),
StatTracked = proplists:get_value(stat_tracked, BucketProps, false),
UpNodes = riak_core_node_watcher:nodes(riak_kv),
- Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, Ring, UpNodes),
+ Preflist2 = riak_core_apl:get_apl_ann(DocIdx, N, UpNodes),
%% Check if this node is in the preference list so it can coordinate
LocalPL = [IndexNode || {{_Index, Node} = IndexNode, _Type} <- Preflist2,
Node == node()],
@@ -33,7 +33,7 @@
expand_rw_value/4,
normalize_rw_value/2,
make_request/2,
- get_index_n/2,
+ get_index_n/1,
preflist_siblings/1,
fix_incorrect_index_entries/1,
fix_incorrect_index_entries/0,
@@ -164,12 +164,13 @@ normalize_rw_value(_, _) -> error.
%% ===================================================================
%% @doc Given a bucket/key, determine the associated preflist index_n.
--spec get_index_n({binary(), binary()}, riak_core_ring()) -> index_n().
-get_index_n({Bucket, Key}, Ring) ->
- BucketProps = riak_core_bucket:get_bucket(Bucket, Ring),
+-spec get_index_n({binary(), binary()}) -> index_n().
+get_index_n({Bucket, Key}) ->
+ BucketProps = riak_core_bucket:get_bucket(Bucket),
N = proplists:get_value(n_val, BucketProps),
ChashKey = riak_core_util:chash_key({Bucket, Key}),
- Index = riak_core_ring:responsible_index(ChashKey, Ring),
+ {ok, CHBin} = riak_core_ring_manager:get_chash_bin(),
+ Index = chashbin:responsible_index(ChashKey, CHBin),
{Index, N}.
%% @doc Given an index, determine all sibling indices that participate in one
@@ -875,8 +875,7 @@ handle_exit(_Pid, Reason, State) ->
do_put(Sender, {Bucket,_Key}=BKey, RObj, ReqID, StartTime, Options, State) ->
case proplists:get_value(bucket_props, Options) of
undefined ->
- {ok,Ring} = riak_core_ring_manager:get_my_ring(),
- BProps = riak_core_bucket:get_bucket(Bucket, Ring);
+ BProps = riak_core_bucket:get_bucket(Bucket);
BProps ->
BProps
end,
@@ -1801,6 +1800,7 @@ backend_with_known_key(BackendMod) ->
list_buckets_test_() ->
{foreach,
fun() ->
+ riak_core_ring_manager:setup_ets(test),
clean_test_dirs(),
application:start(sasl),
Env = application:get_all_env(riak_kv),
@@ -1810,6 +1810,7 @@ list_buckets_test_() ->
Env
end,
fun(Env) ->
+ riak_core_ring_manager:cleanup_ets(test),
riak_core_stat_cache:stop(),
application:stop(folsom),
application:stop(sasl),
@@ -1860,6 +1861,7 @@ list_buckets_test_i(BackendMod) ->
flush_msgs().
filter_keys_test() ->
+ riak_core_ring_manager:setup_ets(test),
clean_test_dirs(),
{S, B, K} = backend_with_known_key(riak_kv_memory_backend),
Caller1 = new_result_listener(keys),
@@ -1880,6 +1882,7 @@ filter_keys_test() ->
{fsm, {126, {0, node()}}, Caller3}, S),
?assertEqual({ok, []}, results_from_listener(Caller3)),
+ riak_core_ring_manager:cleanup_ets(test),
flush_msgs().
%% include bitcask.hrl for HEADER_SIZE macro
@@ -1888,6 +1891,7 @@ filter_keys_test() ->
%% Verify that a bad CRC on read will not crash the vnode, which when done in
%% preparation for a write prevents the write from going through.
bitcask_badcrc_test() ->
+ riak_core_ring_manager:setup_ets(test),
clean_test_dirs(),
{S, B, K} = backend_with_known_key(riak_kv_bitcask_backend),
DataDir = filename:join(bitcask_test_dir(), "0"),
@@ -1903,6 +1907,7 @@ bitcask_badcrc_test() ->
options=[]},
{raw, 456, self()},
S),
+ riak_core_ring_manager:cleanup_ets(test),
flush_msgs().
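
Because the ring-free lookups introduced above read from the ring manager's cached ETS state, the riak_kv_vnode eunit tests are now bracketed with riak_core_ring_manager:setup_ets(test) / cleanup_ets(test); this is presumably the test failure the commit message refers to. A standalone sketch of the same pattern as an eunit setup fixture (module and test names are illustrative; the actual changes wrap the existing foreach fixtures and test functions shown above):

    -module(ring_ets_fixture_example).
    -include_lib("eunit/include/eunit.hrl").

    ring_ets_test_() ->
        {setup,
         fun() -> riak_core_ring_manager:setup_ets(test) end,    %% create the ring manager's ETS tables
         fun(_) -> riak_core_ring_manager:cleanup_ets(test) end, %% drop them again after the test
         fun(_) ->
                 %% a real test would exercise riak_kv_vnode here
                 [?_assert(true)]
         end}.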
