Merge pull request #1062 from basho/bch-merge-develop-2.2-to-develop
Merge develop-2.2 to develop
Brett Hazen committed May 3, 2016
2 parents f56f38e + 0212204 commit 35520eb
Showing 32 changed files with 1,702 additions and 1,176 deletions.
3 changes: 2 additions & 1 deletion src/riak_test_escript.erl
@@ -110,8 +110,9 @@ main(Args) ->
notice
end,

Formatter = {lager_default_formatter, [time," [",severity,"] ", pid, " ", message, "\n"]},
application:set_env(lager, error_logger_hwm, 250), %% helpful for debugging
application:set_env(lager, handlers, [{lager_console_backend, ConsoleLagerLevel},
application:set_env(lager, handlers, [{lager_console_backend, [ConsoleLagerLevel, Formatter]},
{lager_file_backend, [{file, "log/test.log"},
{level, ConsoleLagerLevel}]}]),
lager:start(),
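For context, the change switches lager_console_backend from a bare level to the [Level, Formatter] list form so console output picks up the custom formatter. A minimal sketch of the equivalent handler configuration as it might appear in an app env / sys.config (the level and log path here are illustrative, not taken from this diff):

    %% Illustrative sys.config fragment, assuming lager 2.x/3.x config conventions:
    [{lager,
      [{error_logger_hwm, 250},
       {handlers,
        [{lager_console_backend,
          [info, {lager_default_formatter,
                  [time, " [", severity, "] ", pid, " ", message, "\n"]}]},
         {lager_file_backend, [{file, "log/test.log"}, {level, info}]}]}]}].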
56 changes: 33 additions & 23 deletions src/rt.erl
@@ -144,6 +144,7 @@
upgrade/2,
upgrade/3,
versions/0,
wait_for_any_webmachine_route/2,
wait_for_cluster_service/2,
wait_for_cmd/1,
wait_for_service/2,
@@ -455,7 +456,7 @@ staged_join(Node, PNode) ->

plan_and_commit(Node) ->
timer:sleep(500),
lager:info("planning and commiting cluster join"),
lager:info("planning cluster join"),
case rpc:call(Node, riak_core_claimant, plan, []) of
{error, ring_not_ready} ->
lager:info("plan: ring not ready"),
@@ -467,6 +468,7 @@ plan_and_commit(Node) ->
end.

do_commit(Node) ->
lager:info("planning cluster commit"),
case rpc:call(Node, riak_core_claimant, commit, []) of
{error, plan_changed} ->
lager:info("commit: plan changed"),
@@ -478,8 +480,9 @@ do_commit(Node) ->
timer:sleep(100),
maybe_wait_for_changes(Node),
do_commit(Node);
{error,nothing_planned} ->
{error, nothing_planned} ->
%% Assume plan actually committed somehow
lager:info("commit: nothing planned"),
ok;
ok ->
ok
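Taken together, plan_and_commit/1 and do_commit/1 form a retry loop around riak_core_claimant: planning retries while the ring is not ready, and committing re-plans when the plan changed. A condensed, approximate sketch of the commit side (clauses hidden behind the collapsed diff context are omitted; this is not the exact source):

    do_commit(Node) ->
        lager:info("planning cluster commit"),
        case rpc:call(Node, riak_core_claimant, commit, []) of
            {error, plan_changed} ->
                %% The plan was invalidated; re-plan and commit again.
                lager:info("commit: plan changed"),
                timer:sleep(100),
                maybe_wait_for_changes(Node),
                plan_and_commit(Node);
            {error, nothing_planned} ->
                %% Assume the plan was already committed somehow.
                lager:info("commit: nothing planned"),
                ok;
            ok ->
                ok
        end.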
@@ -668,7 +671,7 @@ wait_until(Fun) when is_function(Fun) ->

%% @doc Convenience wrapper for wait_until for the myriad functions that
%% take a node as single argument.
-spec wait_until([node()], fun((node()) -> boolean())) -> ok.
-spec wait_until(node(), fun(() -> boolean())) -> ok | {fail, Result :: term()}.
wait_until(Node, Fun) when is_atom(Node), is_function(Fun) ->
wait_until(fun() -> Fun(Node) end);
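The corrected spec reflects what this clause actually does: the two-argument form takes a single node plus a predicate that receives that node, and on timeout it can return {fail, Result} rather than ok. A hedged usage sketch (the service check is illustrative, not part of this diff):

    %% Block until riak_kv is reported as running on Node, using the
    %% default wait_until retry/timeout behaviour.
    ok = rt:wait_until(Node, fun(N) ->
             Services = rpc:call(N, riak_core_node_watcher, services, [N]),
             is_list(Services) andalso lists:member(riak_kv, Services)
         end).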

@@ -725,7 +728,13 @@ wait_until_no_pending_changes(Nodes) ->
rpc:multicall(Nodes, riak_core_vnode_manager, force_handoffs, []),
{Rings, BadNodes} = rpc:multicall(Nodes, riak_core_ring_manager, get_raw_ring, []),
Changes = [ riak_core_ring:pending_changes(Ring) =:= [] || {ok, Ring} <- Rings ],
BadNodes =:= [] andalso length(Changes) =:= length(Nodes) andalso lists:all(fun(T) -> T end, Changes)
case BadNodes =:= [] andalso length(Changes) =:= length(Nodes) andalso lists:all(fun(T) -> T end, Changes) of
true -> true;
false ->
NodesWithChanges = [Node || {Node, false} <- lists:zip(Nodes -- BadNodes, Changes)],
lager:info("Changes not yet complete, or bad nodes. BadNodes=~p, Nodes with Pending Changes=~p~n", [BadNodes, NodesWithChanges]),
false
end
end,
?assertEqual(ok, wait_until(F)),
ok.
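The new false branch pairs each reachable node with its ring-check result so the log line can name the nodes that still have pending changes; subtracting BadNodes keeps the two lists the same length for lists:zip/2. A tiny illustration with hypothetical values:

    %% Hypothetical values, purely to illustrate the comprehension:
    Nodes    = ['dev1@127.0.0.1', 'dev2@127.0.0.1', 'dev3@127.0.0.1'],
    BadNodes = ['dev3@127.0.0.1'],
    Changes  = [true, false],   %% one result per reachable node, in order
    NodesWithChanges = [N || {N, false} <- lists:zip(Nodes -- BadNodes, Changes)].
    %% => ['dev2@127.0.0.1']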
@@ -1930,30 +1939,31 @@ wait_for_control(_Vsn, Node) when is_atom(Node) ->
end
end),

lager:info("Waiting for routes to be added to supervisor..."),

%% Wait for routes to be added by supervisor.
wait_for_any_webmachine_route(Node, [admin_gui, riak_control_wm_gui]).

wait_for_any_webmachine_route(Node, Routes) ->
lager:info("Waiting for routes ~p to be added to webmachine.", [Routes]),
rt:wait_until(Node, fun(N) ->
case rpc:call(N,
webmachine_router,
get_routes,
[]) of
{badrpc, Error} ->
lager:info("Error was ~p.", [Error]),
case rpc:call(N, webmachine_router, get_routes, []) of
{badrpc, Error} ->
lager:info("Error was ~p.", [Error]),
false;
RegisteredRoutes ->
case is_any_route_loaded(Routes, RegisteredRoutes) of
false ->
false;
Routes ->
case is_control_gui_route_loaded(Routes) of
false ->
false;
_ ->
true
end
_ ->
true
end
end).
end
end).

is_any_route_loaded(SearchRoutes, RegisteredRoutes) ->
lists:any(fun(Route) -> is_route_loaded(Route, RegisteredRoutes) end, SearchRoutes).

%% @doc Is the riak_control GUI route loaded?
is_control_gui_route_loaded(Routes) ->
lists:keymember(admin_gui, 2, Routes) orelse lists:keymember(riak_control_wm_gui, 2, Routes).
is_route_loaded(Route, Routes) ->
lists:keymember(Route, 2, Routes).

%% @doc Wait for Riak Control to start on a series of nodes.
wait_for_control(VersionedNodes) when is_list(VersionedNodes) ->
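The riak_control-specific route check above is generalised into the newly exported rt:wait_for_any_webmachine_route/2, which wait_for_control/2 now calls. A usage sketch mirroring the call in the hunk above:

    %% Wait until webmachine on Node has registered either the old or the
    %% new Riak Control GUI route:
    ok = rt:wait_for_any_webmachine_route(Node, [admin_gui, riak_control_wm_gui]).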
10 changes: 4 additions & 6 deletions tests/basic_command_line.erl
@@ -24,9 +24,7 @@
-compile(export_all).
-export([confirm/0]).

% node_package 3.x changes this - new first, old second
-define(PING_FAILURE_OUTPUT,
["Node did not respond to ping!", "Node is not running!"]).
-define(PING_FAILURE_OUTPUT, "Node did not respond to ping!").

confirm() ->

@@ -122,7 +120,7 @@ ping_down_test(Node) ->
attach_down_test(Node) ->
lager:info("Testing riak attach while down"),
{ok, AttachOut} = rt:riak(Node, ["attach"]),
?assert(rt:str_mult(AttachOut, ?PING_FAILURE_OUTPUT)),
?assert(rt:str(AttachOut, ?PING_FAILURE_OUTPUT)),
ok.

attach_direct_up_test(Node) ->
@@ -137,7 +135,7 @@ attach_direct_up_test(Node) ->
attach_direct_down_test(Node) ->
lager:info("Testing riak attach-direct while down"),
{ok, AttachOut} = rt:riak(Node, ["attach-direct"]),
?assert(rt:str_mult(AttachOut, ?PING_FAILURE_OUTPUT)),
?assert(rt:str(AttachOut, ?PING_FAILURE_OUTPUT)),
ok.

status_up_test(Node) ->
@@ -155,7 +153,7 @@ status_down_test(Node) ->
lager:info("Test riak-admin status while down"),
{ok, {ExitCode, StatusOut}} = rt:admin(Node, ["status"], [return_exit_code]),
?assertEqual(1, ExitCode),
?assert(rt:str_mult(StatusOut, ?PING_FAILURE_OUTPUT)),
?assert(rt:str(StatusOut, ?PING_FAILURE_OUTPUT)),
ok.

getpid_up_test(Node) ->
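The ?PING_FAILURE_OUTPUT macro reverts from a list of acceptable messages (checked with rt:str_mult/2) back to a single string, so these assertions go back to rt:str/2. A sketch of the kind of substring check the assertion relies on (presumed shape only, not copied from rt.erl):

    %% Presumed substring check: true if Substr occurs anywhere in String.
    str(String, Substr) ->
        string:str(String, Substr) > 0.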
6 changes: 5 additions & 1 deletion tests/bucket_types.erl
@@ -10,7 +10,11 @@ confirm() ->
lager:info("Deploy some nodes"),
Nodes = rt:build_cluster(4, [], [
{riak_core, [{default_bucket_props,
[{n_val, 2}]}]}]),
[
{n_val, 2},
{allow_mult, true},
{dvv_enabled, true}
]}]}]),
Node = hd(Nodes),

RMD = riak_test_runner:metadata(),
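The same default_bucket_props change recurs throughout the test configs in this commit: siblings are enabled explicitly (allow_mult) together with dotted version vectors (dvv_enabled), alongside whatever n_val the test already used. As a standalone riak_core app-env fragment it looks roughly like this (the n_val shown is the one from bucket_types above):

    %% Illustrative riak_core app-env fragment:
    {riak_core, [{default_bucket_props,
                  [{n_val, 2},
                   {allow_mult, true},
                   {dvv_enabled, true}]}]}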
13 changes: 9 additions & 4 deletions tests/ensemble_byzantine.erl
@@ -56,15 +56,20 @@ confirm() ->
test_lose_minority_synctrees(PBC, Bucket, Key, Val, PL),
test_lose_majority_synctrees(PBC, Bucket, Key, Val, PL),
test_lose_minority_synctrees_one_node_partitioned(PBC, Bucket, Key, Val,
PL, Nodes),
PL, Nodes),
test_lose_all_data_and_trees_except_one_node(PBC, Bucket, Key, Val, PL),
{ok, _NewVal} = test_backup_restore_data_not_trees(Bucket, Key, Val, PL),
test_lose_all_data(PBC, Bucket, Key, PL),

pass.

config() ->
[{riak_core, [{default_bucket_props, [{n_val, 5}]},
[{riak_core, [{default_bucket_props,
[
{n_val, 5},
{allow_mult, true},
{dvv_enabled, true}
]},
{vnode_management_timer, 1000},
{ring_creation_size, 16},
{enable_consensus, true},
@@ -79,7 +84,7 @@ test_lose_majority_synctrees(PBC, Bucket, Key, Val, PL) ->
assert_lose_synctrees_and_recover(PBC, Bucket, Key, Val, PL, Majority).

test_lose_minority_synctrees_one_node_partitioned(PBC, Bucket, Key, Val, PL,
Nodes) ->
Nodes) ->
Minority = minority_vnodes(PL),
{{Idx0, Node0}, primary} = hd(PL),
Ensemble = {kv, Idx0, 5},
@@ -251,7 +256,7 @@ kill_peers(Ensemble, Nodes) ->
Peers = [P || P={_Id, N} <- View, lists:member(N, Nodes)],
lager:info("Killing Peers: ~p", [Peers]),
Pids = [rpc:call(Node, riak_ensemble_manager, get_peer_pid,
[Ensemble, Peer]) || Peer <- Peers],
[Ensemble, Peer]) || Peer <- Peers],
[exit(Pid, kill) || Pid <- Pids, Pid =/= undefined].

wipe_partitions(PL) ->
7 changes: 6 additions & 1 deletion tests/ensemble_ring_changes.erl
@@ -27,7 +27,12 @@
-define(RING_SIZE, 16).

config() ->
[{riak_core, [{default_bucket_props, [{n_val, 5}]},
[{riak_core, [{default_bucket_props,
[
{n_val, 5},
{allow_mult, true},
{dvv_enabled, true}
]},
{vnode_management_timer, 1000},
{ring_creation_size, ?RING_SIZE},
{enable_consensus, true},
13 changes: 9 additions & 4 deletions tests/ensemble_util.erl
@@ -58,10 +58,15 @@ fast_config(Nval, EnableAAE) when is_boolean(EnableAAE) ->

fast_config(NVal, RingSize, EnableAAE) ->
[config_aae(EnableAAE),
{riak_core, [{default_bucket_props, [{n_val, NVal}]},
{vnode_management_timer, 1000},
{ring_creation_size, RingSize},
{enable_consensus, true}]}].
{riak_core, [{default_bucket_props,
[
{n_val, NVal},
{allow_mult, true},
{dvv_enabled, true}
]},
{vnode_management_timer, 1000},
{ring_creation_size, RingSize},
{enable_consensus, true}]}].

config_aae(true) ->
{riak_kv, [{anti_entropy_build_limit, {100, 1000}},
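A hedged usage sketch of the updated helper, mirroring the three-argument rt:build_cluster call used elsewhere in this commit (node count and argument values are illustrative):

    %% n_val 5, ring size 16, AAE enabled; the default bucket props now
    %% include allow_mult and dvv_enabled.
    Config = ensemble_util:fast_config(5, 16, true),
    Nodes = rt:build_cluster(5, [], Config).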
6 changes: 5 additions & 1 deletion tests/http_bucket_types.erl
@@ -13,7 +13,11 @@ confirm() ->
lager:info("Deploy some nodes"),
Nodes = rt:build_cluster(4, [], [
{riak_core, [{default_bucket_props,
[{n_val, 2}]}]}]),
[
{n_val, 2},
{allow_mult, true},
{dvv_enabled, true}
]}]}]),
Node = hd(Nodes),

RMD = riak_test_runner:metadata(),
2 changes: 1 addition & 1 deletion tests/http_security.erl
@@ -30,7 +30,7 @@ confirm() ->
PrivDir = rt:priv_dir(),
Conf = [
{riak_core, [
{default_bucket_props, [{allow_mult, true}]},
{default_bucket_props, [{allow_mult, true}, {dvv_enabled, true}]},
{ssl, [
{certfile, filename:join([CertDir,
"site3.basho.com/cert.pem"])},
19 changes: 14 additions & 5 deletions tests/overload.erl
@@ -59,7 +59,12 @@ default_config(#config{
fsm_limit = FsmLimit
}) ->
[{riak_core, [{ring_creation_size, 8},
{default_bucket_props, [{n_val, 5}]},
{default_bucket_props,
[
{n_val, 5},
{allow_mult, true},
{dvv_enabled, true}
]},
{vnode_management_timer, 1000},
{enable_health_checks, false},
{enable_consensus, true},
@@ -100,6 +105,7 @@ confirm() ->
BKV <- [?NORMAL_BKV,
?CONSISTENT_BKV,
?WRITE_ONCE_BKV]],

%% Test cover queries doesn't depend on bucket/keyvalue, just run it once
test_cover_queries_overload(Nodes),
pass.
@@ -163,12 +169,15 @@ test_vnode_protection(Nodes, BKV) ->
Pid ! resume,
ok.

%% Don't check on fast path
test_fsm_protection(_, ?WRITE_ONCE_BKV) ->
ok;
%% Or consistent gets, as they don't use the FSM either

%% Don't check consistent gets, as they don't use the FSM
test_fsm_protection(_, ?CONSISTENT_BKV) ->
ok;

%% Don't check on fast path either.
test_fsm_protection(_, ?WRITE_ONCE_BKV) ->
ok;

test_fsm_protection(Nodes, BKV) ->
lager:info("Testing with coordinator protection enabled"),
lager:info("Setting FSM limit to ~b", [?THRESHOLD]),
2 changes: 1 addition & 1 deletion tests/pb_security.erl
@@ -53,7 +53,7 @@ confirm() ->
PrivDir = rt:priv_dir(),
Conf = [
{riak_core, [
{default_bucket_props, [{allow_mult, true}]},
{default_bucket_props, [{allow_mult, true}, {dvv_enabled, true}]},
{ssl, [
{certfile, filename:join([CertDir,"site3.basho.com/cert.pem"])},
{keyfile, filename:join([CertDir, "site3.basho.com/key.pem"])},
