From 4559bb3d56096e1f7ad696eeb3eff793bc6f7bd8 Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Mon, 29 Jul 2013 09:55:23 -0700 Subject: [PATCH 1/7] Handle incompatible record in Riak 1.4.0. Return badrpc calls with an incompatible message when unable to determine which versions of nodes are communicating in a mixed- version cluster. Switch to macros for record definitions. Also, suppress information in the UI when detecting this incompatibility to prevent from the display of NaN values. --- include/riak_control.hrl | 29 ++++- priv/admin/js/core.js | 19 ++++ priv/admin/js/generated/templates.js | 4 +- .../js/templates/current_cluster_item.hbs | 2 +- .../admin/js/templates/current_nodes_item.hbs | 2 +- src/riak_control_app.erl | 10 +- src/riak_control_session.erl | 106 ++++++++++++++---- src/riak_control_wm_cluster.erl | 46 ++++---- src/riak_control_wm_nodes.erl | 49 +++++--- 9 files changed, 196 insertions(+), 71 deletions(-) diff --git a/include/riak_control.hrl b/include/riak_control.hrl index 52f24c8..62589e5 100644 --- a/include/riak_control.hrl +++ b/include/riak_control.hrl @@ -58,10 +58,27 @@ partition :: integer(), owner :: owner(), vnodes :: services(), - handoffs :: handoffs() - }). + handoffs :: handoffs() }). +-define(PARTITION_INFO, #partition_info). +-type partition() :: ?PARTITION_INFO{}. +-type partitions() :: [partition()]. + +%% Riak 1.3 -record(member_info, + { node :: atom(), + status :: status(), + reachable :: boolean(), + vnodes :: vnodes(), + handoffs :: handoffs(), + ring_pct :: float(), + pending_pct :: float(), + mem_total :: integer(), + mem_used :: integer(), + mem_erlang :: integer() }). + +%% Riak 1.4.1+ +-record(member_info_v2, { node :: atom(), status :: status(), reachable :: boolean(), @@ -73,11 +90,11 @@ mem_used :: integer(), mem_erlang :: integer(), action :: action(), - replacement :: node() - }). + replacement :: node() }). --type partitions() :: [#partition_info{}]. --type members() :: [#member_info{}]. 
+-define(MEMBER_INFO, #member_info_v2). +-type member() :: ?MEMBER_INFO{}. +-type members() :: [member()]. %% These two should always match, in terms of webmachine dispatcher %% logic, and ADMIN_BASE_PATH should always end with a / diff --git a/priv/admin/js/core.js b/priv/admin/js/core.js index 27fd237..83e4f6d 100644 --- a/priv/admin/js/core.js +++ b/priv/admin/js/core.js @@ -59,6 +59,25 @@ minispade.register('core', function() { riak_pipe_vnode_status: DS.attr("string"), riak_search_vnode_status: DS.attr("string"), + /** + * Return status of whether the node is incompatible or not. + * + * @returns {Boolean} + */ + incompatible: function() { + return this.get('status') === 'incompatible'; + }.property('status'), + + /** + * Consider an available node one which is compatible, and + * reachable. + * + * @returns {Boolean} + */ + available: function() { + return !this.get('incompatible') && this.get('reachable'); + }.property('status', 'reachable'), + /** * Coerce vnode status into representations that are useful * for the user interface. diff --git a/priv/admin/js/generated/templates.js b/priv/admin/js/generated/templates.js index 37a383d..8e946ea 100644 --- a/priv/admin/js/generated/templates.js +++ b/priv/admin/js/generated/templates.js @@ -5,6 +5,6 @@ Ember.TEMPLATES['nodes'] = Ember.Handlebars.compile('

Current Ring

{{outlet partitionFilter}}
  • Prev
  • {{#each pages}} {{view RiakControl.PaginationItemView contentBinding="this"}} {{/each}}
  • Next
{{#collection RiakControl.PartitionView contentBinding="controller.paginatedContent"}} {{#with view.content}} {{/with}} {{#with view}} {{/with}} {{/collection}}

#

Owner Node

KV

Pipe

Search

{{i}}
{{node}}
{{index}}
{{kvStatus}} {{pipeStatus}}
  • Prev
  • {{#each pages}} {{view RiakControl.PaginationItemView contentBinding="this"}} {{/each}}
  • Next
'); Ember.TEMPLATES['partition_filter'] = Ember.Handlebars.compile('
Filter by...
{{view RiakControl.PartitionFilterSelectView id="filter" classNames="gui-dropdown" contentBinding="filters" optionLabelPath="content.name" optionValuePath="content.value" prompt="All" selectionBinding="controller.selectedPartitionFilter"}}
'); Ember.TEMPLATES['pagination_item'] = Ember.Handlebars.compile('{{#with view}} {{content.page_id}}{{/with}}'); -Ember.TEMPLATES['current_cluster_item'] = Ember.Handlebars.compile('{{#with view}}
{{#view RiakControl.CurrentClusterToggleView}}
{{/view}}
{{name}}
{{ringPctReadable}}%
{{#if reachable}}
{{memUsedReadable}}% {{else}}
{{/if}}

Use these actions to prepare this node to leave the cluster.

{{#if me}} Warning: This node is hosting Riak Control. If it leaves the cluster, Riak Control will be shut down. {{/if}}
{{#if controller.joiningNodesExist}}
Select Replacement Node
{{view RiakControl.ClusterItemSelectView prompt="Select Replacement Node" classNames="gui-dropdown" contentBinding="controller.joiningNodes" optionLabelPath="content.name"}}
{{else}}
No new nodes are currently staged to join.
{{/if}}
Click "STAGE" when you are ready to stage this action. STAGE
{{/with}}'); -Ember.TEMPLATES['current_nodes_item'] = Ember.Handlebars.compile('{{#with view}}
{{name}}
{{ringPctReadable}}%
{{#if reachable}}
{{memUsedReadable}}% {{else}}
{{/if}}
{{/with}}'); +Ember.TEMPLATES['current_cluster_item'] = Ember.Handlebars.compile('{{#with view}}
{{#view RiakControl.CurrentClusterToggleView}}
{{/view}}
{{name}}
{{ringPctReadable}}%
{{#if available}}
{{memUsedReadable}}% {{else}}
{{/if}}

Use these actions to prepare this node to leave the cluster.

{{#if me}} Warning: This node is hosting Riak Control. If it leaves the cluster, Riak Control will be shut down. {{/if}}
{{#if controller.joiningNodesExist}}
Select Replacement Node
{{view RiakControl.ClusterItemSelectView prompt="Select Replacement Node" classNames="gui-dropdown" contentBinding="controller.joiningNodes" optionLabelPath="content.name"}}
{{else}}
No new nodes are currently staged to join.
{{/if}}
Click "STAGE" when you are ready to stage this action. STAGE
{{/with}}'); +Ember.TEMPLATES['current_nodes_item'] = Ember.Handlebars.compile('{{#with view}}
{{name}}
{{ringPctReadable}}%
{{#if available}}
{{memUsedReadable}}% {{else}}
{{/if}}
{{/with}}'); Ember.TEMPLATES['staged_cluster_item'] = Ember.Handlebars.compile('{{#with view}}
{{name}}
{{ringPctReadable}}%
{{#if isAction}}
{{node_action}}
{{/if}} {{#if isReplaced}}
{{replacement}}
{{/if}}
{{/with}}'); diff --git a/priv/admin/js/templates/current_cluster_item.hbs b/priv/admin/js/templates/current_cluster_item.hbs index 07a2361..fb5795e 100644 --- a/priv/admin/js/templates/current_cluster_item.hbs +++ b/priv/admin/js/templates/current_cluster_item.hbs @@ -21,7 +21,7 @@
- {{#if reachable}} + {{#if available}}
diff --git a/priv/admin/js/templates/current_nodes_item.hbs b/priv/admin/js/templates/current_nodes_item.hbs index f5bec89..5080ebe 100644 --- a/priv/admin/js/templates/current_nodes_item.hbs +++ b/priv/admin/js/templates/current_nodes_item.hbs @@ -26,7 +26,7 @@
- {{#if reachable}} + {{#if available}}
diff --git a/src/riak_control_app.erl b/src/riak_control_app.erl index b313d7b..be0a7c1 100644 --- a/src/riak_control_app.erl +++ b/src/riak_control_app.erl @@ -30,7 +30,15 @@ %% =================================================================== start(_StartType, _StartArgs) -> - riak_control_sup:start_link(). + case riak_control_sup:start_link() of + {error, Reason} -> + {error, Reason}; + {ok, Pid} -> + riak_core_capability:register({riak_control, member_info_version}, + [v0, v1], + v0), + {ok, Pid} + end. stop(_State) -> ok. diff --git a/src/riak_control_session.erl b/src/riak_control_session.erl index 9ac2ffd..40e685c 100644 --- a/src/riak_control_session.erl +++ b/src/riak_control_session.erl @@ -272,7 +272,7 @@ update_partitions(State=#state{ring=Ring}) -> State#state{partitions=Partitions}. %% @doc Ping and retrieve vnode workers. --spec get_member_info({node(), status()}, ring()) -> #member_info{}. +-spec get_member_info({node(), status()}, ring()) -> member(). get_member_info(_Member={Node, Status}, Ring) -> RingSize = riak_core_ring:num_partitions(Ring), @@ -285,7 +285,7 @@ get_member_info(_Member={Node, Status}, Ring) -> %% try and get a list of all the vnodes running on the node case rpc:call(Node, riak_control_session, get_my_info, []) of {badrpc,nodedown} -> - #member_info{node = Node, + ?MEMBER_INFO{node = Node, status = Status, reachable = false, vnodes = [], @@ -293,35 +293,62 @@ get_member_info(_Member={Node, Status}, Ring) -> ring_pct = PctRing, pending_pct = PctPending}; {badrpc,_Reason} -> - #member_info{node = Node, + ?MEMBER_INFO{node = Node, status = incompatible, reachable = true, vnodes = [], handoffs = [], ring_pct = PctRing, pending_pct = PctPending}; - MemberInfo = #member_info{} -> - %% there is a race condition here, when a node is stopped - %% gracefully (e.g. 
`riak stop`) the event will reach us - %% before the node is actually down and the rpc call will - %% succeed, but since it's shutting down it won't have any - %% vnode workers running... - MemberInfo#member_info{status = Status, + MemberInfo = ?MEMBER_INFO{} -> + MemberInfo?MEMBER_INFO{status = Status, ring_pct = PctRing, - pending_pct = PctPending} + pending_pct = PctPending}; + MemberInfo0 = #member_info{} -> + %% Upgrade older member information record. + MemberInfo = upgrade_member_info(MemberInfo0), + MemberInfo?MEMBER_INFO{status = Status, + ring_pct = PctRing, + pending_pct = PctPending}; + _ -> + %% default case where a record incompatibility causes a + %% failure matching the record format. + ?MEMBER_INFO{node = Node, + status = incompatible, + reachable = true, + vnodes = [], + handoffs = [], + ring_pct = PctRing, + pending_pct = PctPending} end. %% @doc Return current nodes information. --spec get_my_info() -> #member_info{}. +-spec get_my_info() -> member(). get_my_info() -> {Total, Used} = get_my_memory(), - #member_info{node = node(), - reachable = true, - mem_total = Total, - mem_used = Used, - mem_erlang = proplists:get_value(total,erlang:memory()), - vnodes = riak_core_vnode_manager:all_vnodes(), - handoffs = get_handoff_status()}. + Handoffs = get_handoff_status(), + VNodes = riak_core_vnode_manager:all_vnodes(), + ErlangMemory = proplists:get_value(total,erlang:memory()), + try + case riak_core_capability:get({riak_control, member_info_version}) of + v1 -> + %% >= 1.4.1, where we have the upgraded cluster record. + ?MEMBER_INFO{node = node(), + reachable = true, + mem_total = Total, + mem_used = Used, + mem_erlang = ErlangMemory, + vnodes = VNodes, + handoffs = Handoffs}; + v0 -> + %% pre-1.4.1. + handle_bad_record(Total, Used, ErlangMemory, VNodes, Handoffs) + end + catch + _:{unknown_capability, _} -> + %% capabilities are not registered yet. + erlang:throw({badrpc, unknown_capability}) + end. %% @doc Return current nodes memory. 
-spec get_my_memory() -> {term(), term()}. @@ -364,7 +391,7 @@ get_handoff_status() -> %% @doc Get handoffs for every node. -spec get_all_handoffs(#state{}) -> handoffs(). get_all_handoffs(#state{nodes=Members}) -> - lists:flatten([HS || #member_info{handoffs=HS} <- Members]). + lists:flatten([HS || ?MEMBER_INFO{handoffs=HS} <- Members]). %% @doc Get information for a particular index. -spec get_partition_details(#state{}, {integer(), term()}, handoffs()) @@ -469,3 +496,42 @@ maybe_stage_change(Node, Action, Replacement) -> stop -> rpc:call(Node, riak_core, stop, []) end. + +%% @doc Conditionally upgrade member info records once they cross node +%% boundaries. +-spec upgrade_member_info(member() | #member_info{}) -> member(). +upgrade_member_info(MemberInfo = ?MEMBER_INFO{}) -> + MemberInfo; +upgrade_member_info(MemberInfo = #member_info{}) -> + ?MEMBER_INFO{ + node = MemberInfo#member_info.node, + status = MemberInfo#member_info.status, + reachable = MemberInfo#member_info.reachable, + vnodes = MemberInfo#member_info.vnodes, + handoffs = MemberInfo#member_info.handoffs, + ring_pct = MemberInfo#member_info.ring_pct, + pending_pct = MemberInfo#member_info.pending_pct, + mem_total = MemberInfo#member_info.mem_total, + mem_used = MemberInfo#member_info.mem_used, + mem_erlang = MemberInfo#member_info.mem_erlang}. + +%% @doc Handle incompatible record for the 1.4.0 release. +handle_bad_record(Total, Used, ErlangMemory, VNodes, Handoffs) -> + Counters = riak_core_capability:get({riak_kv, crdt}), + case lists:member(pncounter, Counters) of + true -> + %% 1.4.0, where we have a bad record. + {member_info, + node(), incompatible, true, VNodes, Handoffs, undefined, + undefined, Total, Used, ErlangMemory, undefined, + undefined}; + false -> + %% < 1.4.0, where we have the old style record. + #member_info{node = node(), + reachable = true, + mem_total = Total, + mem_used = Used, + mem_erlang = ErlangMemory, + vnodes = VNodes, + handoffs = Handoffs} + end. 
diff --git a/src/riak_control_wm_cluster.erl b/src/riak_control_wm_cluster.erl index 9555f3d..64d5c35 100644 --- a/src/riak_control_wm_cluster.erl +++ b/src/riak_control_wm_cluster.erl @@ -196,7 +196,7 @@ to_json(ReqData, Context) -> %% Get the current node list. {ok, _V, Nodes} = riak_control_session:get_nodes(), - Current = [jsonify_node(Node) || Node=#member_info{} <- Nodes], + Current = [jsonify_node(Node) || Node=?MEMBER_INFO{} <- Nodes], %% Get the current list of planned changes and updated claim. Planned = case riak_control_session:get_plan() of @@ -215,7 +215,7 @@ to_json(ReqData, Context) -> {mochijson2:encode({struct,[{cluster,Clusters}]}), ReqData, Context}. %% @doc Generate a new "planned" cluster which outlines transitions. --spec merge_transitions(list(#member_info{}), list(), list()) -> +-spec merge_transitions(list(member()), list(), list()) -> [{struct, list()}]. merge_transitions(Nodes, Changes, Claim) -> lists:foldl(fun(Node, TransitionedNodes) -> @@ -224,32 +224,32 @@ merge_transitions(Nodes, Changes, Claim) -> end, [], Nodes). %% @doc Merge change into member info record. --spec apply_changes(#member_info{}, list(), list()) -> #member_info{}. +-spec apply_changes(member(), list(), list()) -> member(). apply_changes(Node, Changes, Claim) -> apply_status_change(apply_claim_change(Node, Claim), Changes). %% @doc Merge change into member info record. --spec apply_status_change(#member_info{}, list()) -> #member_info{}. +-spec apply_status_change(member(), list()) -> member(). apply_status_change(Node, Changes) -> - Name = Node#member_info.node, + Name = Node?MEMBER_INFO.node, case lists:keyfind(Name, 1, Changes) of false -> Node; {_, {Action, Replacement}} -> - Node#member_info{action=Action, replacement=Replacement}; + Node?MEMBER_INFO{action=Action, replacement=Replacement}; {_, Action} -> - Node#member_info{action=Action} + Node?MEMBER_INFO{action=Action} end. %% @doc Merge change into member info record. 
--spec apply_claim_change(#member_info{}, list()) -> #member_info{}. +-spec apply_claim_change(member(), list()) -> member(). apply_claim_change(Node, Claim) -> - Name = Node#member_info.node, + Name = Node?MEMBER_INFO.node, case lists:keyfind(Name, 1, Claim) of false -> - Node#member_info{ring_pct=0.0, pending_pct=0.0}; + Node?MEMBER_INFO{ring_pct=0.0, pending_pct=0.0}; {_, {_, Future}} -> %% @doc Hack until core returns normalized values. Normalized = if @@ -258,29 +258,29 @@ apply_claim_change(Node, Claim) -> true -> Future end, - Node#member_info{ring_pct=Normalized, pending_pct=Normalized} + Node?MEMBER_INFO{ring_pct=Normalized, pending_pct=Normalized} end. %% @doc Turn a node into a proper struct for serialization. --spec jsonify_node(#member_info{}) -> {struct, list()}. +-spec jsonify_node(member()) -> {struct, list()}. jsonify_node(Node) -> LWM=app_helper:get_env(riak_control,low_mem_watermark,0.1), - MemUsed = Node#member_info.mem_used, - MemTotal = Node#member_info.mem_total, - Reachable = Node#member_info.reachable, + MemUsed = Node?MEMBER_INFO.mem_used, + MemTotal = Node?MEMBER_INFO.mem_total, + Reachable = Node?MEMBER_INFO.reachable, LowMem = low_mem(Reachable, MemUsed, MemTotal, LWM), - {struct,[{"name",Node#member_info.node}, - {"status",Node#member_info.status}, + {struct,[{"name",Node?MEMBER_INFO.node}, + {"status",Node?MEMBER_INFO.status}, {"reachable",Reachable}, - {"ring_pct",Node#member_info.ring_pct}, - {"pending_pct",Node#member_info.pending_pct}, + {"ring_pct",Node?MEMBER_INFO.ring_pct}, + {"pending_pct",Node?MEMBER_INFO.pending_pct}, {"mem_total",MemTotal}, {"mem_used",MemUsed}, - {"mem_erlang",Node#member_info.mem_erlang}, + {"mem_erlang",Node?MEMBER_INFO.mem_erlang}, {"low_mem",LowMem}, - {"me",Node#member_info.node == node()}, - {"action",Node#member_info.action}, - {"replacement",Node#member_info.replacement}]}. 
+ {"me",Node?MEMBER_INFO.node == node()}, + {"action",Node?MEMBER_INFO.action}, + {"replacement",Node?MEMBER_INFO.replacement}]}. %% @doc Given a struct/proplist that we've received via JSON, %% recursively turn the keys into atoms from binaries. diff --git a/src/riak_control_wm_nodes.erl b/src/riak_control_wm_nodes.erl index 4c7e266..33c2b6d 100644 --- a/src/riak_control_wm_nodes.erl +++ b/src/riak_control_wm_nodes.erl @@ -79,31 +79,46 @@ to_json(ReqData, Context) -> %% Get the current node list. {ok, _V, RawNodes} = riak_control_session:get_nodes(), - Nodes = [jsonify_node(Node) || Node=#member_info{} <- RawNodes], + Nodes = [jsonify_node(Node) || Node=?MEMBER_INFO{} <- RawNodes], Encoded = mochijson2:encode({struct, [{nodes, Nodes}]}), {Encoded, ReqData, Context}. %% @doc Turn a node into a proper struct for serialization. --spec jsonify_node(#member_info{}) -> {struct, list()}. +-spec jsonify_node(member()) -> {struct, list()}. jsonify_node(Node) -> LWM=app_helper:get_env(riak_control,low_mem_watermark,0.1), - MemUsed = Node#member_info.mem_used, - MemTotal = Node#member_info.mem_total, - Reachable = Node#member_info.reachable, - LowMem = case Reachable of - false -> - false; - true -> - 1.0 - (MemUsed/MemTotal) < LWM - end, - {struct,[{"name",Node#member_info.node}, - {"status",Node#member_info.status}, + MemUsed = Node?MEMBER_INFO.mem_used, + MemTotal = Node?MEMBER_INFO.mem_total, + Reachable = Node?MEMBER_INFO.reachable, + LowMem = low_mem(Reachable, MemUsed, MemTotal, LWM), + {struct,[{"name",Node?MEMBER_INFO.node}, + {"status",Node?MEMBER_INFO.status}, {"reachable",Reachable}, - {"ring_pct",Node#member_info.ring_pct}, - {"pending_pct",Node#member_info.pending_pct}, + {"ring_pct",Node?MEMBER_INFO.ring_pct}, + {"pending_pct",Node?MEMBER_INFO.pending_pct}, {"mem_total",MemTotal}, {"mem_used",MemUsed}, - {"mem_erlang",Node#member_info.mem_erlang}, + {"mem_erlang",Node?MEMBER_INFO.mem_erlang}, {"low_mem",LowMem}, - {"me",Node#member_info.node == node()}]}. 
+ {"me",Node?MEMBER_INFO.node == node()}, + {"action",Node?MEMBER_INFO.action}, + {"replacement",Node?MEMBER_INFO.replacement}]}. + +%% @doc Determine if a node has low memory. +-spec low_mem(boolean(), number() | atom(), number() | atom(), number()) + -> boolean(). +low_mem(Reachable, MemUsed, MemTotal, LWM) -> + case Reachable of + false -> + false; + true -> + %% There is a race where the node is online, but memsup is + %% still starting so memory is unavailable. + case MemTotal of + undefined -> + false; + _ -> + 1.0 - (MemUsed/MemTotal) < LWM + end + end. From aa704da6f1451017a235b4a320bb92dd3ad2a64b Mon Sep 17 00:00:00 2001 From: Ryan Zezeski Date: Thu, 1 Aug 2013 13:06:04 -0400 Subject: [PATCH 2/7] Roll version 1.4.1 --- src/riak_control.app.src | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riak_control.app.src b/src/riak_control.app.src index 0e4948e..ba50c7c 100644 --- a/src/riak_control.app.src +++ b/src/riak_control.app.src @@ -1,7 +1,7 @@ {application, riak_control, [ {description, "Riak Admin Interface"}, - {vsn, "1.4.0"}, + {vsn, "1.4.1"}, {registered, []}, {applications, [ kernel, From 935536b1bef7d5020187111416480553bef53a5f Mon Sep 17 00:00:00 2001 From: Ryan Zezeski Date: Thu, 1 Aug 2013 14:26:12 -0400 Subject: [PATCH 3/7] Use new deps --- rebar.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/rebar.config b/rebar.config index 0120a0f..74485cd 100644 --- a/rebar.config +++ b/rebar.config @@ -13,7 +13,7 @@ {webmachine, ".*", {git, "git://github.com/basho/webmachine", {tag, "1.10.2"}}}, {riak_core, ".*", - {git, "git://github.com/basho/riak_core", {tag, "1.4.0"}}}, + {git, "git://github.com/basho/riak_core", {tag, "1.4.1"}}}, {erlydtl, ".*", {git, "git://github.com/evanmiller/erlydtl.git", {tag, "d20b53f0"}}} ]}. 
From 4177e0efbf90a45794786f8830e70068d2b55e82 Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Tue, 6 Aug 2013 11:03:22 -0700 Subject: [PATCH 4/7] Resolve incorrect capability negotiation order. --- src/riak_control_app.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riak_control_app.erl b/src/riak_control_app.erl index be0a7c1..96ea017 100644 --- a/src/riak_control_app.erl +++ b/src/riak_control_app.erl @@ -35,7 +35,7 @@ start(_StartType, _StartArgs) -> {error, Reason}; {ok, Pid} -> riak_core_capability:register({riak_control, member_info_version}, - [v0, v1], + [v1, v0], v0), {ok, Pid} end. From 3c6c75e699a9f03fa05b1c07bddea087ad9ab328 Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Tue, 6 Aug 2013 12:05:41 -0700 Subject: [PATCH 5/7] Use expanded record macro. --- src/riak_control_formatting.erl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/riak_control_formatting.erl b/src/riak_control_formatting.erl index c4ee5eb..4bb730f 100644 --- a/src/riak_control_formatting.erl +++ b/src/riak_control_formatting.erl @@ -39,7 +39,7 @@ action_result(_,Req,C) -> %% return a proplist of details for a given index node_ring_details (P=#partition_info{index=Index,vnodes=Vnodes},Nodes) -> case lists:keyfind(P#partition_info.owner,2,Nodes) of - #member_info{node=Node,status=Status,reachable=Reachable} -> + ?MEMBER_INFO{node=Node, status=Status, reachable=Reachable} -> Handoffs = P#partition_info.handoffs, VnodeStatuses = [{atom_to_list(VnodeName) ++ "_vnode_status", vnode_status(VnodeName, VnodeStatus, Handoffs)} From de8e0fa4ff89e2261928c0d3648b0c048911d18e Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Tue, 6 Aug 2013 15:51:38 -0700 Subject: [PATCH 6/7] Incompatible is less serious. Ensure that the message regarding incompatible nodes is explicitly clear and doesn't trigger errors.
This is necessary because even in a pure 1.4.1 cluster, depending on how nodes are started capability negotiation might take a few seconds, where some nodes will be shown as incompatible. --- priv/admin/css/compiled/style.css | 9 +++++++++ priv/admin/css/snapshot.styl | 8 ++++++++ priv/admin/js/generated/templates.js | 2 +- priv/admin/js/templates/snapshot.hbs | 6 +++++- 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/priv/admin/css/compiled/style.css b/priv/admin/css/compiled/style.css index d9cad2d..ed34935 100644 --- a/priv/admin/css/compiled/style.css +++ b/priv/admin/css/compiled/style.css @@ -918,6 +918,15 @@ input.gui-point-button-right:active { #snapshot-page #healthy-cluster h3, #snapshot-page #unhealthy-cluster h3 { margin-top: 30px; + line-height: 1.5; +} +#snapshot-page #healthy-cluster h4, +#snapshot-page #unhealthy-cluster h4 { + font-family: 'noticia', georgia, serif; + margin-top: 20px; + font-size: 14px; + font-style: italic; + line-height: 1.5; } #snapshot-page #healthy-cluster ul, #snapshot-page #unhealthy-cluster ul { diff --git a/priv/admin/css/snapshot.styl b/priv/admin/css/snapshot.styl index 48d9e54..a22e6b8 100644 --- a/priv/admin/css/snapshot.styl +++ b/priv/admin/css/snapshot.styl @@ -18,7 +18,15 @@ CSS to be applied ONLY on the snapshot page h3 margin-top : 30px + line-height : 1.5 + h4 + copy-font() + margin-top : 20px + font-size: 14px + font-style: italic + line-height : 1.5 + ul margin : 10px 0 0 20px diff --git a/priv/admin/js/generated/templates.js b/priv/admin/js/generated/templates.js index 8e946ea..65d77e2 100644 --- a/priv/admin/js/generated/templates.js +++ b/priv/admin/js/generated/templates.js @@ -1,5 +1,5 @@ Ember.TEMPLATES['application'] = Ember.Handlebars.compile('
{{outlet}}
'); -Ember.TEMPLATES['snapshot'] = Ember.Handlebars.compile('

Current Snapshot

{{#if healthyCluster}}

Your cluster is healthy.

You currently have...

  • 0 Unreachable nodes
  • 0 Incompatible nodes
  • 0 Nodes marked as down
  • 0 Nodes experiencing low memory
  • Nothing to worry about because Riak is your friend
{{else}}

Your cluster has problems.

{{#if areUnreachableNodes}}

The following nodes are currently unreachable:

    {{#each unreachableNodes}}
  • {{name}}
  • {{/each}}
{{/if}} {{#if areIncompatibleNodes}}

The following nodes are currently incompatible with Riak Control:

    {{#each incompatibleNodes}}
  • {{name}}
  • {{/each}}
{{/if}} {{#if areDownNodes}}

The following nodes are currently marked down:

    {{#each downNodes}}
  • {{name}}
  • {{/each}}
{{/if}} {{#if areLowMemNodes}}

The following nodes are currently experiencing low memory:

    {{#each lowMemNodes}}
  • {{name}}
  • {{/each}}
{{/if}}
{{/if}}
'); +Ember.TEMPLATES['snapshot'] = Ember.Handlebars.compile('

Current Snapshot

{{#if healthyCluster}}

Your cluster is healthy.

You currently have...

  • 0 Unreachable nodes
  • 0 Incompatible nodes
  • 0 Nodes marked as down
  • 0 Nodes experiencing low memory
  • Nothing to worry about because Riak is your friend
{{else}}

Your cluster has problems.

{{#if areUnreachableNodes}}

The following nodes are currently unreachable:

    {{#each unreachableNodes}}
  • {{name}}
  • {{/each}}
{{/if}} {{#if areIncompatibleNodes}}

Some information about the following nodes may be temporarily unavailable:

    {{#each incompatibleNodes}}
  • {{name}}
  • {{/each}}

This may be triggered by using control in the middle of a rolling upgrade or during startup of the node.

{{/if}} {{#if areDownNodes}}

The following nodes are currently marked down:

    {{#each downNodes}}
  • {{name}}
  • {{/each}}
{{/if}} {{#if areLowMemNodes}}

The following nodes are currently experiencing low memory:

    {{#each lowMemNodes}}
  • {{name}}
  • {{/each}}
{{/if}}
{{/if}}
'); Ember.TEMPLATES['cluster'] = Ember.Handlebars.compile('

Cluster Management

{{#if standalone}}

Join Node

Type the name of a node in an existing cluster to join this node to. {{else}}

Add Node

Type the name of a node to add to this cluster. {{/if}}
{{view RiakControl.JoinNodeView}} {{#if standalone}} JOIN NODE {{else}} ADD NODE {{/if}}
{{#if errorMessage}} {{/if}}

Current Cluster

{{#if controller.isLoading}}

Loading...

{{else}}
  • Actions

  • Name & Status

  • Partitions

  • RAM Usage

{{collection RiakControl.CurrentClusterView contentBinding="activeCurrentCluster"}} {{/if}}

Staged Changes (Your new cluster after convergence.)

{{#if controller.displayPlan}}
  • Name & Status

  • Partitions

  • Action

  • Replacement

{{collection RiakControl.StagedClusterView contentBinding="activeStagedCluster"}}
COMMIT
Changed your mind? Click this button to remove all staged changes. CLEAR PLAN
{{else}}
{{#if controller.ringNotReady}}

Please wait, the ring is converging.

{{else}} {{#if controller.legacyRing}}

You are currently running a legacy version of Riak that does not support staged changes.

{{else}} {{#if controller.emptyPlan}}

Currently no staged changes to display.

{{else}} {{#if controller.isLoading}}

Loading...

{{/if}} {{/if}} {{/if}} {{/if}}
{{/if}}
'); Ember.TEMPLATES['nodes'] = Ember.Handlebars.compile('

Node Management

{{#if errorMessage}} {{/if}}

Current Cluster

Click the radio button for each node you would like to stop or mark as down, then click "APPLY" to apply your changes. If the radio button is grayed out, the action is not available due to the current status of the node.
{{#if controller.isLoading}}

Loading...

{{else}}
  • Stop

  • Down

  • Name & Status

  • Partitions

  • RAM Usage

{{collection RiakControl.CurrentNodesView contentBinding="content"}} {{/if}}
APPLY CLEAR
'); Ember.TEMPLATES['ring'] = Ember.Handlebars.compile('

Current Ring

{{outlet partitionFilter}}
  • Prev
  • {{#each pages}} {{view RiakControl.PaginationItemView contentBinding="this"}} {{/each}}
  • Next
{{#collection RiakControl.PartitionView contentBinding="controller.paginatedContent"}} {{#with view.content}} {{/with}} {{#with view}} {{/with}} {{/collection}}

#

Owner Node

KV

Pipe

Search

{{i}}
{{node}}
{{index}}
{{kvStatus}} {{pipeStatus}}
  • Prev
  • {{#each pages}} {{view RiakControl.PaginationItemView contentBinding="this"}} {{/each}}
  • Next
'); diff --git a/priv/admin/js/templates/snapshot.hbs b/priv/admin/js/templates/snapshot.hbs index bcf83ae..750adfb 100644 --- a/priv/admin/js/templates/snapshot.hbs +++ b/priv/admin/js/templates/snapshot.hbs @@ -45,12 +45,16 @@ {{#if areIncompatibleNodes}} -

The following nodes are currently incompatible with Riak Control:

+

Some + information about the following nodes may be temporarily + unavailable:

    {{#each incompatibleNodes}}
  • {{name}}
  • {{/each}}
+

This may be triggered by using control in the middle + of a rolling upgrade or during startup of the node.

{{/if}} {{#if areDownNodes}} From 44bc72b307f0a86cb38c12b68382074ccbf6b19a Mon Sep 17 00:00:00 2001 From: Christopher Meiklejohn Date: Thu, 8 Aug 2013 15:07:14 -0700 Subject: [PATCH 7/7] Add overall status. Return an overall status that can be used to monitor cluster transitions. --- include/riak_control.hrl | 2 +- src/riak_control_session.erl | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 1 deletion(-) diff --git a/include/riak_control.hrl b/include/riak_control.hrl index 62589e5..51bcb16 100644 --- a/include/riak_control.hrl +++ b/include/riak_control.hrl @@ -20,7 +20,7 @@ -type version() :: integer(). -type index() :: integer(). --type status() :: valid | invalid | down | leaving | incompatible. +-type status() :: valid | invalid | down | leaving | incompatible | transitioning. -type home() :: primary | fallback | undefined. -type service() :: {atom(), home()}. -type services() :: [service()]. diff --git a/src/riak_control_session.erl b/src/riak_control_session.erl index 40e685c..6bccbed 100644 --- a/src/riak_control_session.erl +++ b/src/riak_control_session.erl @@ -35,6 +35,7 @@ get_nodes/0, get_services/0, get_partitions/0, + get_status/0, get_plan/0, clear_plan/0, stage_change/3, @@ -86,6 +87,11 @@ start_link() -> get_version() -> gen_server:call(?MODULE, get_version, infinity). +%% @doc Get overall cluster status. +-spec get_status() -> {ok, version(), status()}. +get_status() -> + gen_server:call(?MODULE, get_status, infinity). + %% @doc Return ring. -spec get_ring() -> {ok, version(), ring()}. 
get_ring() -> @@ -178,6 +184,9 @@ handle_call(get_plan, _From, State) -> {reply, retrieve_plan(), State}; handle_call(get_version, _From, State=#state{vsn=V}) -> {reply, {ok, V}, State}; +handle_call(get_status, _From, State=#state{vsn=V,nodes=N}) -> + Status = determine_overall_status(N), + {reply, {ok, V, Status}, State}; handle_call(get_ring, _From, State=#state{vsn=V,ring=R}) -> {reply, {ok, V, R}, State}; handle_call(get_nodes, _From, State=#state{vsn=V,nodes=N}) -> @@ -535,3 +544,24 @@ handle_bad_record(Total, Used, ErlangMemory, VNodes, Handoffs) -> vnodes = VNodes, handoffs = Handoffs} end. + +%% @doc Determine overall cluster status. +%% If the cluster is of one status, return it; default to valid. +%% If one or more nodes is incompatible, return incompatible, else +%% introduce a new state called transitioning. +-spec determine_overall_status(members()) -> status(). +determine_overall_status(Nodes) -> + Statuses = lists:usort([Node?MEMBER_INFO.status || Node <- Nodes]), + case length(Statuses) of + 0 -> + valid; + 1 -> + lists:nth(1, Statuses); + _ -> + case lists:member(incompatible, Statuses) of + true -> + incompatible; + false -> + transitioning + end + end.