
Add functions for clearing bucket properties. #202

Closed
wants to merge 2 commits
29 changes: src/riak_core_bucket.erl
@@ -27,6 +27,7 @@
-export([append_bucket_defaults/1,
         set_bucket/2,
+        clear_bucket/1,
         get_bucket/1,
         get_bucket/2,
         get_buckets/1,
@@ -84,6 +85,17 @@ merge_props(Overriding, Other) ->
    lists:ukeymerge(1, lists:ukeysort(1, Overriding),
                    lists:ukeysort(1, Other)).
+
+%% @spec clear_bucket(riak_object:bucket()) -> ok
@rzezeski added a note Aug 22, 2012

riak_object is not visible in riak_core. I think binary() is good enough if there isn't already a bucket type in core. Also, do you mind using module attribute specs? We are trying to move away from edoc specs.
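For reference, a module attribute spec of the kind requested here, using binary() for the bucket name, might read as follows. This is only an illustration, not code from the PR.

%% Illustrative only (not part of the PR): the edoc @spec above,
%% rewritten as a module attribute spec with binary() buckets.
-spec clear_bucket(binary()) -> ok.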

@jerith added a note Aug 23, 2012

When I merge in all the 1.2 work, I'll update my changes to match whatever the surrounding code uses. Would that be sufficient?

@rzezeski added a note Sep 4, 2012

I still see riak_object in the spec. It may be used in other places in core but that is a mistake. Please change this to binary().

+%% @doc Clear all properties in Bucket, resetting to defaults.
+clear_bucket(Name) ->
+    F = fun(Ring, _Args) ->
+                {new_ring, riak_core_ring:clear_meta({bucket,Name}, Ring)}
+        end,
+    {ok, _NewRing} = riak_core_ring_manager:ring_trans(F, undefined),
+    ok.
+
+
%% @spec get_bucket(riak_object:bucket()) ->
%%        {ok, BucketProps :: riak_core_bucketprops()}
%% @doc Return the complete current list of properties for Bucket.
@@ -157,4 +169,21 @@ simple_set_test() ->
    riak_core_ring_manager:stop(),
    ?assertEqual(value, proplists:get_value(key, Bucket)).
+simple_clear_test() ->
+    application:load(riak_core),
+    %% appending an empty list of defaults makes up for the fact that
+    %% riak_core_app:start/2 is not called during eunit runs
+    %% (that's where the usual defaults are set at startup),
+    %% while also not adding any trash that might affect other tests
+    append_bucket_defaults([]),
+    riak_core_ring_events:start_link(),
+    riak_core_ring_manager:start_link(test),
+    ok = set_bucket(a_bucket,[{key,value}]),
+    BucketSet = get_bucket(a_bucket),
+    ok = clear_bucket(a_bucket),
+    BucketClear = get_bucket(a_bucket),
+    riak_core_ring_manager:stop(),
+    ?assertEqual(value, proplists:get_value(key, BucketSet)),
+    ?assertEqual(undefined, proplists:get_value(key, BucketClear)).
+
-endif.
21 changes: src/riak_core_ring.erl
@@ -51,7 +51,8 @@
         rename_node/3,
         responsible_index/2,
         transfer_node/3,
-        update_meta/3]).
+        update_meta/3,
+        clear_meta/2]).
-export([cluster_name/1,
         legacy_ring/1,
@@ -500,6 +501,24 @@ update_meta(Key, Val, State) ->
            State
    end.
+% @doc Clear a key in the cluster metadata dict
@rzezeski added a note Sep 4, 2012

Why not just do it all in one case expression to cut down on the number of lines:

case dict:find(...) of
    {ok, _} ->
        VClock = ...,
        State?CH....
    error ->
        State
end
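
To make the suggestion concrete, here is that single-case shape filled in with the names used in the clear_meta/2 diff below. This is an illustrative sketch, not code from the PR.

%% Sketch of the single-case form suggested above, filled in with the
%% names from the clear_meta/2 diff below; not part of the PR.
clear_meta(Key, State) ->
    case dict:find(Key, State?CHSTATE.meta) of
        {ok, _} ->
            VClock = vclock:increment(State?CHSTATE.nodename,
                                      State?CHSTATE.vclock),
            State?CHSTATE{vclock=VClock,
                          meta=dict:erase(Key, State?CHSTATE.meta)};
        error ->
            State
    end.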
@rzezeski added a note Sep 4, 2012

Actually, this isn't going to work because it doesn't play nicely with metadata reconciliation. The short story is that you can't just remove the meta entry; depending on the state of the cluster, ring reconciliation might make it reappear. I think you can achieve your goal with riak_core_ring:update_meta/3 and an empty list as the value, but I would have to run through the code to verify.
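
For illustration, the update_meta/3 alternative described here might look roughly like the following on the riak_core_bucket side. It is an untested sketch and, as noted above, would still need to be checked against the reconciliation code.

%% Hypothetical sketch only: reset the bucket's properties by writing
%% an empty list through update_meta/3 instead of erasing the entry,
%% so metadata reconciliation sees a newer value rather than a gap.
clear_bucket(Name) ->
    F = fun(Ring, _Args) ->
                {new_ring, riak_core_ring:update_meta({bucket, Name}, [], Ring)}
        end,
    {ok, _NewRing} = riak_core_ring_manager:ring_trans(F, undefined),
    ok.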

@jerith added a note Sep 5, 2012

Cool, thanks for the feedback.

+-spec clear_meta(Key :: term(), State :: chstate()) -> chstate().
+clear_meta(Key, State) ->
+    Change = case dict:find(Key, State?CHSTATE.meta) of
+                 {ok, _} ->
+                     true;
+                 error ->
+                     false
+             end,
+    if Change ->
+            VClock = vclock:increment(State?CHSTATE.nodename,
+                                      State?CHSTATE.vclock),
+            State?CHSTATE{vclock=VClock,
+                          meta=dict:erase(Key, State?CHSTATE.meta)};
+       true ->
+            State
+    end.
+
%% @doc Return the current claimant.
-spec claimant(State :: chstate()) -> node().
claimant(?CHSTATE{claimant=Claimant}) ->