diff --git a/elvis.config b/elvis.config index d00e395..0c7dde1 100644 --- a/elvis.config +++ b/elvis.config @@ -7,15 +7,9 @@ filter => "*.erl", ruleset => erl_files, rules => [ - {elvis_style, line_length, #{limit => 100}}, + {elvis_style, line_length, #{limit => 120}}, {elvis_style, nesting_level, #{level => 4}}, {elvis_style, god_modules, #{limit => 35}}, - %% the default rule included {right, ","} and not {right, "=>"} or {left, "=>"} - { - elvis_style, - operator_spaces, - #{rules => [{right, "++"}, {left, "++"}, {right, "=>"}, {left, "=>"}]} - }, {elvis_style, dont_repeat_yourself, #{min_complexity => 30}} ] }, @@ -23,16 +17,10 @@ filter => "*.erl", ruleset => erl_files, rules => [ - {elvis_style, line_length, #{limit => 100}}, + {elvis_style, line_length, #{limit => 120}}, {elvis_style, nesting_level, #{level => 4}}, %% Variables in eunit macros are called, for instance, __V {elvis_style, variable_naming_convention, #{regex => "^_?_?([A-Z][0-9a-zA-Z]*)_?$"}}, - %% the default rule included {right, ","} and not {right, "=>"} or {left, "=>"} - { - elvis_style, - operator_spaces, - #{rules => [{right, "++"}, {left, "++"}, {right, "=>"}, {left, "=>"}]} - }, %% we want to be able to ct:pal in tests {elvis_style, no_debug_call, disable}, %% we can go lighter on this one here diff --git a/include/mero.hrl b/include/mero.hrl index cba14ba..173fb6e 100644 --- a/include/mero.hrl +++ b/include/mero.hrl @@ -27,8 +27,8 @@ %% OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. %% -ifndef(MEMCACHERL_HRL). --define(MEMCACHERL_HRL, true). +-define(MEMCACHERL_HRL, true). -define(MEMCACHE_INCREMENT, 16#05). -define(MEMCACHE_INCREMENTQ, 16#15). -define(MEMCACHE_GET, 16#00). @@ -42,36 +42,28 @@ -define(MEMCACHE_DELETE, 16#04). -define(MEMCACHE_DELETEQ, 16#14). -define(MEMCACHE_FLUSH_ALL, 16#08). - --define(NO_ERROR, 16#0000). --define(NOT_FOUND, 16#0001). --define(KEY_EXISTS, 16#0002). --define(VALUE_TOO_LARGE, 16#0003). +-define(NO_ERROR, 16#0000). 
+-define(NOT_FOUND, 16#0001). +-define(KEY_EXISTS, 16#0002). +-define(VALUE_TOO_LARGE, 16#0003). -define(INVALID_ARGUMENTS, 16#0004). --define(NOT_STORED, 16#0005). --define(NON_NUMERIC_INCR, 16#0006). --define(UNKNOWN_COMMAND, 16#0081). --define(OOM, 16#0082). - +-define(NOT_STORED, 16#0005). +-define(NON_NUMERIC_INCR, 16#0006). +-define(UNKNOWN_COMMAND, 16#0081). +-define(OOM, 16#0082). %%% If a connection attempt fails, or a connection is broken -define(RECONNECT_WAIT_TIME, 200). - %%% Default timeout for instrospection functions -define(DEFAULT_TIMEOUT, 5000). - --define(LOG_EVENT(MFA, KeyAndTags), begin - {StatModule, StatFunction, GlobalTags} = MFA, - apply(StatModule, StatFunction, [KeyAndTags ++ GlobalTags]) - end). - +-define(LOG_EVENT(MFA, KeyAndTags), + begin + {StatModule, StatFunction, GlobalTags} = MFA, + apply(StatModule, StatFunction, [KeyAndTags ++ GlobalTags]) + end). -define(CALLBACK_CONTEXT(StatModule, StatFunction, ClusterName, Host, Port), - {StatModule, StatFunction, - [{cluster_name, ClusterName}, - {host, Host}, - {port, Port}]}). + {StatModule, StatFunction, [{cluster_name, ClusterName}, {host, Host}, {port, Port}]}). --record(mero_item, {key, - value, - cas}). +-record(mero_item, {key, value, cas}). -endif. + diff --git a/rebar.config b/rebar.config index 49f254f..1f03c1f 100644 --- a/rebar.config +++ b/rebar.config @@ -49,9 +49,12 @@ ]} ]}. -{alias, [{test, [xref, dialyzer, lint, eunit, ct, cover]}]}. +{alias, [{test, [format, lint, xref, dialyzer, eunit, ct, cover]}]}. {plugins, [ - {rebar3_lint, "0.1.10"}, + rebar3_format, + rebar3_lint, rebar3_hex ]}. + +{format, [{files, ["src/*.erl", "test/*.erl", "include/*.hrl"]}]}. diff --git a/src/mero.erl b/src/mero.erl index 2b6baf5..d0cbbce 100644 --- a/src/mero.erl +++ b/src/mero.erl @@ -32,10 +32,7 @@ -behaviour(application). --export([start/0, - start/2, - stop/1]). - +-export([start/0, start/2, stop/1]). 
-export([increment_counter/2, increment_counter/7, mincrement_counter/2, @@ -50,38 +47,35 @@ mget/3, mgets/2, mgets/3, - set/5, mset/3, - cas/6, mcas/3, - add/5, madd/3, + set/5, + mset/3, + cas/6, + mcas/3, + add/5, + madd/3, flush_all/1, shard_phash2/2, shard_crc32/2, clustering_key/1, - storage_key/1 - ]). - --export([state/0, - state/1, - deep_state/0, - deep_state/1]). + storage_key/1]). +-export([state/0, state/1, deep_state/0, deep_state/1]). -include_lib("mero/include/mero.hrl"). -type cas_token() :: undefined | integer(). -type result() :: {Key :: binary(), Value :: undefined | binary()}. --type extended_result() :: {Key :: binary(), Value :: undefined | binary(), CAS :: cas_token()}. - +-type extended_result() :: {Key :: binary(), + Value :: undefined | binary(), + CAS :: cas_token()}. %% {ClusteringKey, Key}. ClusteringKey is used to select the memcached node %% where to store the data, and Key is used to store the data in that %% node. If a single binary Key is used, then ClusteringKey = Key. -type mero_key() :: binary() | {ClusteringKey :: binary(), Key :: binary()}. --export_type([mero_key/0, - cas_token/0, - result/0, - extended_result/0]). +-export_type([mero_key/0, cas_token/0, result/0, extended_result/0]). -type cluster_config() :: [{ClusterName :: atom(), Config :: proplists:proplist()}]. + -export_type([cluster_config/0]). %%%============================================================================= @@ -103,141 +97,134 @@ stop(_State) -> %%% External functions %%%============================================================================= --spec get(ClusterName :: atom(), Key :: mero_key(), Timeout :: integer()) -> - result() | {error, Reason :: term()}. +-spec get(ClusterName :: atom(), Key :: mero_key(), Timeout :: integer()) -> result() | + {error, + Reason :: term()}. 
get(ClusterName, Key, Timeout) -> case gets(ClusterName, Key, Timeout) of - {error, Reason} -> - {error, Reason}; - {Key, Value, _CAS} -> - {Key, Value} + {error, Reason} -> + {error, Reason}; + {Key, Value, _CAS} -> + {Key, Value} end. + get(ClusterName, Key) -> get(ClusterName, Key, mero_conf:pool_timeout_read(ClusterName)). - --spec mget(ClusterName :: atom(), Keys :: [mero_key()], Timeout :: integer()) -> - [result()] | {error, [Reason :: term()], ProcessedKeyValues :: [result()]}. +-spec mget(ClusterName :: atom(), + Keys :: [mero_key()], + Timeout :: integer()) -> [result()] | + {error, [Reason :: term()], ProcessedKeyValues :: [result()]}. mget(ClusterName, Keys, Timeout) when is_list(Keys), is_atom(ClusterName) -> Extract = fun (Items) -> - [{Key, Value} - || {Key, Value, _} <- Items] + [{Key, Value} || {Key, Value, _} <- Items] end, case mgets(ClusterName, Keys, Timeout) of - {error, Reason, ProcessedKeyValues} -> - {error, Reason, Extract(ProcessedKeyValues)}; - KeyValues -> - Extract(KeyValues) + {error, Reason, ProcessedKeyValues} -> + {error, Reason, Extract(ProcessedKeyValues)}; + KeyValues -> + Extract(KeyValues) end. + mget(ClusterName, Keys) -> mget(ClusterName, Keys, mero_conf:pool_timeout_read(ClusterName)). - --spec gets(ClusterName :: atom(), Key :: mero_key(), Timeout :: integer()) -> - extended_result() | {error, Reason :: term()}. +-spec gets(ClusterName :: atom(), + Key :: mero_key(), + Timeout :: integer()) -> extended_result() | {error, Reason :: term()}. gets(ClusterName, Key, Timeout) -> case mgets(ClusterName, [Key], Timeout) of - {error, [Reason], []} -> - {error, Reason}; - {error, _Reason, [Processed]} -> - Processed; - [Result] -> - Result; - [] -> - {Key, undefined, undefined} + {error, [Reason], []} -> + {error, Reason}; + {error, _Reason, [Processed]} -> + Processed; + [Result] -> + Result; + [] -> + {Key, undefined, undefined} end. + gets(ClusterName, Key) -> gets(ClusterName, Key, mero_conf:pool_timeout_read(ClusterName)). 
- --spec mgets(ClusterName :: atom(), Keys :: [mero_key()], Timeout :: integer()) -> - [extended_result()] - | {error, [Reason :: term()], ProcessedKeyValues :: [extended_result()]}. +-spec mgets(ClusterName :: atom(), + Keys :: [mero_key()], + Timeout :: integer()) -> [extended_result()] | + {error, + [Reason :: term()], + ProcessedKeyValues :: [extended_result()]}. mgets(ClusterName, Keys, Timeout) when is_list(Keys), is_atom(ClusterName) -> Extract = fun (Items) -> [{Key, Value, CAS} || #mero_item{key = Key, value = Value, cas = CAS} <- Items] end, case mero_conn:get(ClusterName, Keys, Timeout) of - {error, Reason, ProcessedKeyValues} -> - {error, Reason, Extract(ProcessedKeyValues)}; - KeyValues -> - Extract(KeyValues) + {error, Reason, ProcessedKeyValues} -> + {error, Reason, Extract(ProcessedKeyValues)}; + KeyValues -> + Extract(KeyValues) end. + mgets(ClusterName, Keys) -> mgets(ClusterName, Keys, mero_conf:pool_timeout_read(ClusterName)). - --spec add(ClusterName :: atom(), Key :: mero_key(), Value :: binary(), ExpTime :: integer(), - Timeout :: integer()) -> - ok | {error, Reason :: term()}. +-spec add(ClusterName :: atom(), + Key :: mero_key(), + Value :: binary(), + ExpTime :: integer(), + Timeout :: integer()) -> ok | {error, Reason :: term()}. add(ClusterName, Key, Value, ExpTime, Timeout) when is_atom(ClusterName), is_binary(Value), is_integer(ExpTime) -> BExpTime = list_to_binary(integer_to_list(ExpTime)), mero_conn:add(ClusterName, Key, Value, BExpTime, Timeout). - -spec madd(ClusterName :: atom(), - [{Key :: mero_key(), - Value :: binary(), - ExpTime :: integer()}], - Timeout :: integer()) -> - [ok | {error, Reason :: term()}]. -madd(ClusterName, KVEs, Timeout) - when is_atom(ClusterName) -> + [{Key :: mero_key(), Value :: binary(), ExpTime :: integer()}], + Timeout :: integer()) -> [ok | {error, Reason :: term()}]. 
+madd(ClusterName, KVEs, Timeout) when is_atom(ClusterName) -> L = [{Key, Value, list_to_binary(integer_to_list(ExpTime))} - || {Key, Value, ExpTime} <- KVEs, - is_binary(Key), - is_binary(Value), - is_integer(ExpTime)], + || {Key, Value, ExpTime} <- KVEs, is_binary(Key), is_binary(Value), is_integer(ExpTime)], mero_conn:madd(ClusterName, L, Timeout). - -spec set(ClusterName :: atom(), Key :: mero_key(), Value :: binary(), - ExpTime :: integer(), % value is in seconds - Timeout :: integer()) -> - ok | {error, Reason :: term()}. + ExpTime :: integer(), + Timeout :: integer()) -> ok | {error, Reason :: term()}. + % value is in seconds + set(ClusterName, Key, Value, ExpTime, Timeout) -> cas(ClusterName, Key, Value, ExpTime, Timeout, undefined). - -spec mset(ClusterName :: atom(), - [{Key :: mero_key(), - Value :: binary(), - ExpTime :: integer()}], % value is in seconds - Timeout :: integer()) -> - [ok | {error, Reason :: term()}]. + [{Key :: mero_key(), Value :: binary(), ExpTime :: integer()}], + Timeout :: integer()) -> [ok | {error, Reason :: term()}]. + % value is in seconds + mset(ClusterName, KVEs, Timeout) -> - L = [{Key, Value, ExpTime, undefined} - || {Key, Value, ExpTime} <- KVEs], + L = [{Key, Value, ExpTime, undefined} || {Key, Value, ExpTime} <- KVEs], mcas(ClusterName, L, Timeout). - -spec cas(ClusterName :: atom(), Key :: mero_key(), Value :: binary(), - ExpTime :: integer(), % value is in seconds + ExpTime :: integer(), Timeout :: integer(), - CAS :: cas_token()) -> - ok | {error, Reason :: term()}. + CAS :: cas_token()) -> ok | {error, Reason :: term()}. + % value is in seconds + cas(ClusterName, Key, Value, ExpTime, Timeout, CAS) when is_atom(ClusterName), is_binary(Value), is_integer(ExpTime) -> BExpTime = list_to_binary(integer_to_list(ExpTime)), %% note: if CAS is undefined, this will be an unconditional set: mero_conn:set(ClusterName, Key, Value, BExpTime, Timeout, CAS). 
- -spec mcas(ClusterName :: atom(), - [{Key :: mero_key(), - Value :: binary(), - ExpTime :: integer(), % value is in seconds - CAS :: cas_token()}], - Timeout :: integer()) -> - [ok | {error, Reason :: term()}]. -mcas(ClusterName, KVECs, Timeout) - when is_atom(ClusterName) -> + [{Key :: mero_key(), Value :: binary(), ExpTime :: integer(), CAS :: cas_token()}], + Timeout :: integer()) -> [ok | {error, Reason :: term()}]. + % value is in seconds + +mcas(ClusterName, KVECs, Timeout) when is_atom(ClusterName) -> %% note: if CAS is undefined, the corresponding set will be unconditional. L = [{Key, Value, list_to_binary(integer_to_list(ExpTime)), CAS} || {Key, Value, ExpTime, CAS} <- KVECs, @@ -246,53 +233,82 @@ mcas(ClusterName, KVECs, Timeout) is_integer(ExpTime)], mero_conn:mset(ClusterName, L, Timeout). - %% @doc: Increments a counter: initial value is 1, steps of 1, timeout defaults to 24 hours. %% 3 retries. --spec increment_counter(ClusterName :: atom(), Key :: mero_key()) -> - {ok, integer()} | {error, Reason :: term()}. +-spec increment_counter(ClusterName :: atom(), Key :: mero_key()) -> {ok, integer()} | + {error, Reason :: term()}. increment_counter(ClusterName, Key) when is_atom(ClusterName) -> - increment_counter(ClusterName, Key, 1, 1, + increment_counter(ClusterName, + Key, + 1, + 1, mero_conf:pool_key_expiration_time(ClusterName), mero_conf:pool_write_retries(ClusterName), mero_conf:pool_timeout_write(ClusterName)). - --spec increment_counter(ClusterName :: atom(), Key :: mero_key(), Value :: integer(), - Initial :: integer(), ExpTime :: integer(), - Retries :: integer(), Timeout :: integer()) -> - {ok, integer()} | {error, Reason :: term()}. +-spec increment_counter(ClusterName :: atom(), + Key :: mero_key(), + Value :: integer(), + Initial :: integer(), + ExpTime :: integer(), + Retries :: integer(), + Timeout :: integer()) -> {ok, integer()} | {error, Reason :: term()}. 
increment_counter(ClusterName, Key, Value, Initial, ExpTime, Retries, Timeout) - when is_integer(Value), is_integer(ExpTime), is_atom(ClusterName), - (Initial >= 0), (Value >=0) -> + when is_integer(Value), + is_integer(ExpTime), + is_atom(ClusterName), + Initial >= 0, + Value >= 0 -> BValue = list_to_binary(integer_to_list(Value)), BInitial = list_to_binary(integer_to_list(Initial)), BExpTime = list_to_binary(integer_to_list(ExpTime)), - mero_conn:increment_counter(ClusterName, Key, BValue, BInitial, BExpTime, Retries, Timeout). - --spec mincrement_counter(ClusterName :: atom(), Key :: [mero_key()]) -> - ok | {error, Reason :: term()}. + mero_conn:increment_counter(ClusterName, + Key, + BValue, + BInitial, + BExpTime, + Retries, + Timeout). + +-spec mincrement_counter(ClusterName :: atom(), Key :: [mero_key()]) -> ok | + {error, Reason :: term()}. mincrement_counter(ClusterName, Keys) when is_atom(ClusterName), is_list(Keys) -> - mincrement_counter(ClusterName, Keys, 1, 1, + mincrement_counter(ClusterName, + Keys, + 1, + 1, mero_conf:pool_key_expiration_time(ClusterName), mero_conf:pool_write_retries(ClusterName), mero_conf:pool_timeout_write(ClusterName)). --spec mincrement_counter(ClusterName :: atom(), Keys :: [mero_key()], Value :: integer(), - Initial :: integer(), ExpTime :: integer(), - Retries :: integer(), Timeout :: integer()) -> - ok | {error, Reason :: term()}. +-spec mincrement_counter(ClusterName :: atom(), + Keys :: [mero_key()], + Value :: integer(), + Initial :: integer(), + ExpTime :: integer(), + Retries :: integer(), + Timeout :: integer()) -> ok | {error, Reason :: term()}. 
mincrement_counter(ClusterName, Keys, Value, Initial, ExpTime, Retries, Timeout) - when is_list(Keys), is_integer(Value), is_integer(ExpTime), is_atom(ClusterName), - (Initial >= 0), (Value >=0) -> + when is_list(Keys), + is_integer(Value), + is_integer(ExpTime), + is_atom(ClusterName), + Initial >= 0, + Value >= 0 -> BValue = list_to_binary(integer_to_list(Value)), BInitial = list_to_binary(integer_to_list(Initial)), BExpTime = list_to_binary(integer_to_list(ExpTime)), - mero_conn:mincrement_counter(ClusterName, Keys, BValue, BInitial, BExpTime, Retries, Timeout). - - --spec delete(ClusterName :: atom(), Key :: mero_key(), Timeout :: integer()) -> - ok | {error, Reason :: term()}. + mero_conn:mincrement_counter(ClusterName, + Keys, + BValue, + BInitial, + BExpTime, + Retries, + Timeout). + +-spec delete(ClusterName :: atom(), Key :: mero_key(), Timeout :: integer()) -> ok | + {error, + Reason :: term()}. delete(ClusterName, Key, Timeout) when is_atom(ClusterName) -> mero_conn:delete(ClusterName, Key, Timeout). @@ -301,12 +317,10 @@ mdelete(ClusterName, Keys, Timeout) when is_list(Keys), is_atom(ClusterName) -> mero_conn:mdelete(ClusterName, Keys, Timeout). %% The response is a list of all the individual requests, one per shard --spec flush_all(ClusterName :: atom()) -> - [ ok | {error, Response :: term()}]. +-spec flush_all(ClusterName :: atom()) -> [ok | {error, Response :: term()}]. flush_all(ClusterName) -> mero_conn:flush_all(ClusterName, ?DEFAULT_TIMEOUT). - %%%============================================================================= %%% Sharding algorithms %%%============================================================================= @@ -327,10 +341,9 @@ storage_key(Key) when is_binary(Key) -> shard_phash2(Key, ShardSize) -> erlang:phash2(Key, ShardSize). - -spec shard_crc32(Key :: binary(), ShardSize :: pos_integer()) -> pos_integer(). shard_crc32(Key, ShardSize) -> - ((erlang:crc32(Key) bsr 16) band 16#7fff) rem ShardSize. 
+ (erlang:crc32(Key) bsr 16) band 16#7fff rem ShardSize. %%%============================================================================= %%% Introspection functions @@ -338,19 +351,18 @@ shard_crc32(Key, ShardSize) -> %% @doc: Returns the state of the sockets of a Cluster state(ClusterName) -> - ZeroState = [ - {links, 0}, - {monitors, 0}, - {free, 0}, - {connected, 0}, - {connecting, 0}, - {failed, 0}, - {message_queue_len, 0} - ], - lists:foldr( - fun({_, _, Pool, _}, Acc) -> - inc_state(mero_pool:state(Pool), Acc) - end, ZeroState, mero_cluster:child_definitions(ClusterName)). + ZeroState = [{links, 0}, + {monitors, 0}, + {free, 0}, + {connected, 0}, + {connecting, 0}, + {failed, 0}, + {message_queue_len, 0}], + lists:foldr(fun ({_, _, Pool, _}, Acc) -> + inc_state(mero_pool:state(Pool), Acc) + end, + ZeroState, + mero_cluster:child_definitions(ClusterName)). %% @doc: Returns the state of the sockets for all clusters state() -> @@ -359,26 +371,25 @@ state() -> inc_state({error, _}, Acc) -> Acc; inc_state(St, Acc) -> - lists:map( - fun ({connected, AccV}) -> - {connected, AccV + proplists:get_value(num_connected, St)}; - ({connecting, AccV}) -> - {connecting, AccV + proplists:get_value(num_connecting, St)}; - ({failed, AccV}) -> - {failed, AccV + proplists:get_value(num_failed_connecting, St)}; - ({K, AccV}) -> - {K, AccV + proplists:get_value(K, St)} - end, Acc). - + lists:map(fun ({connected, AccV}) -> + {connected, AccV + proplists:get_value(num_connected, St)}; + ({connecting, AccV}) -> + {connecting, AccV + proplists:get_value(num_connecting, St)}; + ({failed, AccV}) -> + {failed, AccV + proplists:get_value(num_failed_connecting, St)}; + ({K, AccV}) -> + {K, AccV + proplists:get_value(K, St)} + end, + Acc). 
deep_state(ClusterName) -> - F = fun({_, _, Pool, _}, Acc) -> - St = mero_pool:state(Pool), - [[{pool, Pool} | St] | Acc] + F = fun ({_, _, Pool, _}, Acc) -> + St = mero_pool:state(Pool), + [[{pool, Pool} | St] | Acc] end, lists:foldr(F, [], mero_cluster:child_definitions(ClusterName)). - %% @doc: Returns the state of the sockets for all clusters deep_state() -> [{Cluster, deep_state(Cluster)} || Cluster <- mero_cluster:clusters()]. + diff --git a/src/mero_cluster.erl b/src/mero_cluster.erl index 51fe09e..7080b0d 100644 --- a/src/mero_cluster.erl +++ b/src/mero_cluster.erl @@ -106,42 +106,38 @@ -author('Miriam Pena '). --export([ - child_definitions/1, - sup_by_cluster_name/1, - cluster_shards/1, - workers_per_shard/1, - sharding_algorithm/1, - load_clusters/1, - total_workers/1, - server/2, - one_pool_of_each_shard_of_cluster/1, - group_by_shards/2, group_by_shards/3, - pool_worker_module/1, - random_pool_of_shard/2, - clusters/0, - version/0, - purge/0 -]). - --ignore_xref([ - {mero_cluster_util, cluster_shards, 1}, - {mero_cluster_util, workers_per_shard, 1}, - {mero_cluster_util, child_definitions, 1}, - {mero_cluster_util, sup_by_cluster_name, 1}, - {mero_cluster_util, clusters, 0}, - {mero_cluster_util, sharding_algorithm, 1}, - {mero_cluster_util, pool_worker_module, 1}, - {mero_cluster_util, worker_by_index, 3}, - {mero_cluster_util, module_info, 1} -]). - --type child_definitions() :: [{ - Host ::string(), - Port ::pos_integer(), - WorkerName ::atom(), - WorkerModule::module() -}]. +-export([child_definitions/1, + sup_by_cluster_name/1, + cluster_shards/1, + workers_per_shard/1, + sharding_algorithm/1, + load_clusters/1, + total_workers/1, + server/2, + one_pool_of_each_shard_of_cluster/1, + group_by_shards/2, + group_by_shards/3, + pool_worker_module/1, + random_pool_of_shard/2, + clusters/0, + version/0, + purge/0]). 
+ +-ignore_xref([{mero_cluster_util, cluster_shards, 1}, + {mero_cluster_util, workers_per_shard, 1}, + {mero_cluster_util, child_definitions, 1}, + {mero_cluster_util, sup_by_cluster_name, 1}, + {mero_cluster_util, clusters, 0}, + {mero_cluster_util, sharding_algorithm, 1}, + {mero_cluster_util, pool_worker_module, 1}, + {mero_cluster_util, worker_by_index, 3}, + {mero_cluster_util, module_info, 1}]). + +-type child_definitions() :: [{Host :: string(), + Port :: pos_integer(), + WorkerName :: atom(), + WorkerModule :: module()}]. + -export_type([child_definitions/0]). %%%=================================================================== @@ -152,27 +148,20 @@ -spec load_clusters(mero:cluster_config()) -> ok. load_clusters(ClusterConfig) -> WorkerDefs = worker_defs(ClusterConfig), - DynModuleBegin = - "-module(mero_cluster_util). \n" - "-export([child_definitions/1,\n" - " sup_by_cluster_name/1,\n" - " worker_by_index/3,\n" - " cluster_shards/1,\n" - " workers_per_shard/1,\n" - " pool_worker_module/1,\n" - " clusters/0,\n" - " sharding_algorithm/1]).\n\n", - ModuleStringTotal = lists:flatten( - [DynModuleBegin, - child_definitions_function(WorkerDefs), - sup_by_cluster_name_function(WorkerDefs), - worker_by_index_function(WorkerDefs), - cluster_shards_function(ClusterConfig), - workers_per_shard_function(ClusterConfig), - sharding_algorithm_function(ClusterConfig), - pool_worker_module_function(ClusterConfig), - clusters_function(ClusterConfig) - ]), + DynModuleBegin = "-module(mero_cluster_util). 
\n-export([child_definitions/1,\n " + " sup_by_cluster_name/1,\n worker_by_index/3,\n " + " cluster_shards/1,\n workers_per_shard/1,\n " + " pool_worker_module/1,\n clusters/0,\n " + " sharding_algorithm/1]).\n\n", + ModuleStringTotal = lists:flatten([DynModuleBegin, + child_definitions_function(WorkerDefs), + sup_by_cluster_name_function(WorkerDefs), + worker_by_index_function(WorkerDefs), + cluster_shards_function(ClusterConfig), + workers_per_shard_function(ClusterConfig), + sharding_algorithm_function(ClusterConfig), + pool_worker_module_function(ClusterConfig), + clusters_function(ClusterConfig)]), {M, B} = dynamic_compile:from_string(ModuleStringTotal), {module, mero_cluster_util} = code:load_binary(M, "", B), ok. @@ -187,14 +176,14 @@ version() -> -spec purge() -> ok. purge() -> case code:purge(mero_cluster_util) of - false -> - ok; - true -> - error_logger:warning_msg("Some processes were killed while purging mero_cluster_util"), - ok + false -> + ok; + true -> + error_logger:warning_msg("Some processes were killed while purging mero_cluster_util"), + ok end. --spec child_definitions(ClusterName ::atom()) -> child_definitions(). +-spec child_definitions(ClusterName :: atom()) -> child_definitions(). child_definitions(ClusterName) -> mero_cluster_util:child_definitions(ClusterName). @@ -216,42 +205,39 @@ clusters() -> sharding_algorithm(Name) -> mero_cluster_util:sharding_algorithm(Name). + %% Selects a worker based on the cluster identifier and the key. --spec server(Name :: atom(), Key :: mero:mero_key()) -> - Server :: atom(). +-spec server(Name :: atom(), Key :: mero:mero_key()) -> Server :: atom(). server(Name, Key) -> ShardIdentifier = shard_identifier(Name, Key), random_pool_of_shard(Name, ShardIdentifier). --spec group_by_shards(ClusterName :: atom(), Keys :: list(mero:mero_key())) -> - [{PoolName ::atom(), Keys :: list(binary())}]. 
+-spec group_by_shards(ClusterName :: atom(), Keys :: [mero:mero_key()]) -> [{PoolName :: + atom(), + Keys :: [binary()]}]. group_by_shards(ClusterName, Keys) -> group_by_shards_(ClusterName, Keys, undefined, []). --spec group_by_shards(ClusterName :: atom(), Items :: list(tuple()), KeyPos :: pos_integer()) -> - [{PoolName ::atom(), Items :: list(tuple())}]. +-spec group_by_shards(ClusterName :: atom(), + Items :: [tuple()], + KeyPos :: pos_integer()) -> [{PoolName :: atom(), Items :: [tuple()]}]. group_by_shards(ClusterName, Items, KeyPos) -> group_by_shards_(ClusterName, Items, KeyPos, []). - one_pool_of_each_shard_of_cluster(ClusterName) -> Shards = cluster_shards(ClusterName), - [mero_cluster_util:worker_by_index(ClusterName, Shard, 0) || Shard <- - lists:seq(0, Shards -1)]. - + [mero_cluster_util:worker_by_index(ClusterName, Shard, 0) + || Shard <- lists:seq(0, Shards - 1)]. random_pool_of_shard(Name, ShardIdentifier) -> RandomWorker = random_integer(mero_cluster_util:workers_per_shard(Name)), mero_cluster_util:worker_by_index(Name, ShardIdentifier, RandomWorker). - total_workers(Name) -> - mero_cluster_util:cluster_shards(Name) * - mero_cluster_util:workers_per_shard(Name). + mero_cluster_util:cluster_shards(Name) * mero_cluster_util:workers_per_shard(Name). %% @doc: Returns an integer between 0 and max -1 --spec random_integer(Max :: integer()) -> - integer(). +-spec random_integer(Max :: integer()) -> integer(). random_integer(Max) when Max > 0 -> rand:uniform(Max) - 1. @@ -261,8 +247,9 @@ random_integer(Max) when Max > 0 -> shard_identifier(Name, Key) -> {Module, Function} = mero_cluster_util:sharding_algorithm(Name), - apply(Module, Function, [mero:clustering_key(Key), mero_cluster_util:cluster_shards(Name)]). - + apply(Module, + Function, + [mero:clustering_key(Key), mero_cluster_util:cluster_shards(Name)]). 
key_to_storage_key(undefined, Key, Key) -> mero:storage_key(Key); @@ -273,27 +260,30 @@ group_by_shards_(_ClusterName, [], _, Acc) -> Acc; group_by_shards_(ClusterName, [Item | Items], KeyPos, Acc) -> Key = case KeyPos of - undefined -> - Item; - N when is_integer(N), N > 0 -> - element(N, Item) + undefined -> + Item; + N when is_integer(N), N > 0 -> + element(N, Item) end, Identifier = shard_identifier(ClusterName, Key), Item2 = key_to_storage_key(KeyPos, Item, Key), case lists:keyfind(Identifier, 1, Acc) of - false -> - group_by_shards_(ClusterName, Items, KeyPos, [{Identifier, [Item2]} | Acc]); - {Identifier, List} -> - group_by_shards_(ClusterName, Items, KeyPos, - lists:keyreplace(Identifier, 1, Acc, {Identifier, List ++ [Item2]})) + false -> + group_by_shards_(ClusterName, Items, KeyPos, [{Identifier, [Item2]} | Acc]); + {Identifier, List} -> + group_by_shards_(ClusterName, + Items, + KeyPos, + lists:keyreplace(Identifier, 1, Acc, {Identifier, List ++ [Item2]})) end. worker_defs(ClusterConfig) -> - lists:foldl( - fun(Cluster, Acc) -> - Def = get_server_defs(Cluster), - Acc ++ Def - end, [], ClusterConfig). + lists:foldl(fun (Cluster, Acc) -> + Def = get_server_defs(Cluster), + Acc ++ Def + end, + [], + ClusterConfig). 
get_server_defs({ClusterName, ClusterConfig}) -> Servers = get_config(servers, ClusterConfig), @@ -301,123 +291,132 @@ get_server_defs({ClusterName, ClusterConfig}) -> Workers = get_config(workers_per_shard, ClusterConfig), SortedServers = lists:sort(Servers), - {Elements, _} = lists:foldl( - fun - ({Host, Port}, {Acc, ShardSizeAcc}) -> - Elements = - [begin - WorkerName = - worker_name(ClusterName, Host, ReplicationNumber, ShardSizeAcc), - {ClusterName, ShardSizeAcc, ReplicationNumber, - {ClusterName, Host, Port, WorkerName, WorkerModule}} - end || ReplicationNumber <- lists:seq(0, (Workers - 1))], - {Acc ++ Elements, ShardSizeAcc + 1} - end, {[], 0}, SortedServers), + {Elements, _} = lists:foldl(fun ({Host, Port}, {Acc, ShardSizeAcc}) -> + Elements = [begin + WorkerName = worker_name(ClusterName, + Host, + ReplicationNumber, + ShardSizeAcc), + {ClusterName, + ShardSizeAcc, + ReplicationNumber, + {ClusterName, + Host, + Port, + WorkerName, + WorkerModule}} + end + || ReplicationNumber + <- lists:seq(0, Workers - 1)], + {Acc ++ Elements, ShardSizeAcc + 1} + end, + {[], 0}, + SortedServers), Elements. cluster_shards_function(ClusterConfig) -> - lists:foldl( - fun - ({ClusterName, Config}, []) -> - Servers = length(get_config(servers, Config)), - [io_lib:format("cluster_shards(~p) -> ~p.\n\n", - [ClusterName, Servers])]; - ({ClusterNameIn, Config}, Acc) -> - Servers = length(get_config(servers, Config)), - [io_lib:format("cluster_shards(~p) -> ~p;\n", - [ClusterNameIn, Servers]) | Acc] - end, [], lists:reverse(ClusterConfig)). - + lists:foldl(fun ({ClusterName, Config}, []) -> + Servers = length(get_config(servers, Config)), + [io_lib:format("cluster_shards(~p) -> ~p.\n\n", [ClusterName, Servers])]; + ({ClusterNameIn, Config}, Acc) -> + Servers = length(get_config(servers, Config)), + [io_lib:format("cluster_shards(~p) -> ~p;\n", [ClusterNameIn, Servers]) + | Acc] + end, + [], + lists:reverse(ClusterConfig)). 
workers_per_shard_function(ClusterConfig) -> - lists:foldl( - fun - ({ClusterName, Config}, []) -> - WorkersPerServer = get_config(workers_per_shard, Config), - [io_lib:format("workers_per_shard(~p) -> ~p.\n\n", - [ClusterName, WorkersPerServer])]; - ({ClusterNameIn, Config}, Acc) -> - WorkersPerServer = get_config(workers_per_shard, Config), - [io_lib:format("workers_per_shard(~p) -> ~p;\n", - [ClusterNameIn, WorkersPerServer]) | Acc] - end, [], lists:reverse(ClusterConfig)). - + lists:foldl(fun ({ClusterName, Config}, []) -> + WorkersPerServer = get_config(workers_per_shard, Config), + [io_lib:format("workers_per_shard(~p) -> ~p.\n\n", + [ClusterName, WorkersPerServer])]; + ({ClusterNameIn, Config}, Acc) -> + WorkersPerServer = get_config(workers_per_shard, Config), + [io_lib:format("workers_per_shard(~p) -> ~p;\n", + [ClusterNameIn, WorkersPerServer]) + | Acc] + end, + [], + lists:reverse(ClusterConfig)). sharding_algorithm_function(ClusterConfig) -> - lists:foldl( - fun - ({ClusterName, Config}, []) -> - {Module, Function} = get_config(sharding_algorithm, Config), - [io_lib:format("sharding_algorithm(~p) -> {~p, ~p}.\n\n", - [ClusterName, Module, Function])]; - ({ClusterNameIn, Config}, Acc) -> - {Module, Function} = get_config(sharding_algorithm, Config), - [io_lib:format("sharding_algorithm(~p) -> {~p, ~p};\n", - [ClusterNameIn, Module, Function]) | Acc] - end, [], lists:reverse(ClusterConfig)). - + lists:foldl(fun ({ClusterName, Config}, []) -> + {Module, Function} = get_config(sharding_algorithm, Config), + [io_lib:format("sharding_algorithm(~p) -> {~p, ~p}.\n\n", + [ClusterName, Module, Function])]; + ({ClusterNameIn, Config}, Acc) -> + {Module, Function} = get_config(sharding_algorithm, Config), + [io_lib:format("sharding_algorithm(~p) -> {~p, ~p};\n", + [ClusterNameIn, Module, Function]) + | Acc] + end, + [], + lists:reverse(ClusterConfig)). 
pool_worker_module_function(ClusterConfig) -> - lists:foldl( - fun - ({ClusterName, Config}, []) -> - Module = get_config(pool_worker_module, Config), - [io_lib:format("pool_worker_module(~p) -> ~p.\n\n", - [ClusterName, Module])]; - ({ClusterNameIn, Config}, Acc) -> - Module = get_config(pool_worker_module, Config), - [io_lib:format("pool_worker_module(~p) -> ~p;\n", - [ClusterNameIn, Module]) | Acc] - end, [], lists:reverse(ClusterConfig)). + lists:foldl(fun ({ClusterName, Config}, []) -> + Module = get_config(pool_worker_module, Config), + [io_lib:format("pool_worker_module(~p) -> ~p.\n\n", [ClusterName, Module])]; + ({ClusterNameIn, Config}, Acc) -> + Module = get_config(pool_worker_module, Config), + [io_lib:format("pool_worker_module(~p) -> ~p;\n", [ClusterNameIn, Module]) + | Acc] + end, + [], + lists:reverse(ClusterConfig)). clusters_function(ClusterConfig) -> Clusters = [ClusterName || {ClusterName, _} <- ClusterConfig], io_lib:format("clusters() -> \n ~p.\n\n", [Clusters]). - child_definitions_function(WorkerDefs) -> AllDefs = [Args || {_Name, _, _, Args} <- WorkerDefs], Clusters = lists:usort([Cluster || {Cluster, _Host, _Port, _Name, _Module} <- AllDefs]), - [io_lib:format( - "child_definitions(~p) ->\n ~p;\n\n", - [Cluster, [{H, P, N, M} || {C, H, P, N, M} <- AllDefs, C == Cluster]] - ) || Cluster <- Clusters] ++ "child_definitions(_) ->\n [].\n\n". - + [io_lib:format("child_definitions(~p) ->\n ~p;\n\n", + [Cluster, [{H, P, N, M} || {C, H, P, N, M} <- AllDefs, C == Cluster]]) + || Cluster <- Clusters] + ++ "child_definitions(_) ->\n [].\n\n". sup_by_cluster_name_function(WorkerDefs) -> AllDefs = [Args || {_Name, _, _, Args} <- WorkerDefs], Clusters = lists:usort([Cluster || {Cluster, _Host, _Port, _Name, _Module} <- AllDefs]), [io_lib:format("sup_by_cluster_name(~p) ->\n mero_~p_sup;\n\n", [Cluster, Cluster]) - || Cluster <- Clusters - ] ++ "sup_by_cluster_name(_) ->\n undefined.\n\n". 
- + || Cluster <- Clusters] + ++ "sup_by_cluster_name(_) ->\n undefined.\n\n". worker_by_index_function(WorkerDefs) -> - lists:foldr( - fun - (WorkerDef, []) -> - worker_by_index_clause(WorkerDef, "."); - (WorkerDef, Acc) -> - [worker_by_index_clause(WorkerDef, ";"), Acc] - end, [], WorkerDefs). - -worker_by_index_clause( - {Name, ShardSizeAcc, ReplicationNumber, {_Name, _Host, _Port, WorkerName, _WorkerModule}}, - Separator) -> - io_lib:format( - "worker_by_index(~p, ~p, ~p) -> ~p~s\n\n", - [Name, ShardSizeAcc, ReplicationNumber, WorkerName, Separator] - ). + lists:foldr(fun (WorkerDef, []) -> + worker_by_index_clause(WorkerDef, "."); + (WorkerDef, Acc) -> + [worker_by_index_clause(WorkerDef, ";"), Acc] + end, + [], + WorkerDefs). + +worker_by_index_clause({Name, + ShardSizeAcc, + ReplicationNumber, + {_Name, _Host, _Port, WorkerName, _WorkerModule}}, + Separator) -> + io_lib:format("worker_by_index(~p, ~p, ~p) -> ~p~s\n\n", + [Name, ShardSizeAcc, ReplicationNumber, WorkerName, Separator]). get_config(Type, ClusterConfig) -> case proplists:get_value(Type, ClusterConfig) of - undefined -> - error({undefined_config, Type, ClusterConfig}); - Value -> - Value + undefined -> + error({undefined_config, Type, ClusterConfig}); + Value -> + Value end. %% PoolName :: mero_pool_127.0.0.1_0 worker_name(ClusterName, Host, ReplicationNumber, ShardSizeAcc) -> - list_to_atom("mero_" ++ atom_to_list(ClusterName) ++ "_" ++ Host ++ "_" ++ - integer_to_list(ShardSizeAcc) ++ "_" ++ integer_to_list(ReplicationNumber)). + list_to_atom("mero_" ++ + atom_to_list(ClusterName) ++ + "_" ++ + Host ++ + "_" ++ + integer_to_list(ShardSizeAcc) ++ + "_" ++ integer_to_list(ReplicationNumber)). + diff --git a/src/mero_cluster_sup.erl b/src/mero_cluster_sup.erl index 25e992e..cce94c6 100644 --- a/src/mero_cluster_sup.erl +++ b/src/mero_cluster_sup.erl @@ -52,5 +52,10 @@ init({ClusterName, PoolDefs}) -> {ok, {{one_for_one, 10, 10}, Children}}. 
child(ClusterName, {Host, Port, Name, WrkModule}) -> - {Name, {mero_pool, start_link, [ClusterName, Host, Port, Name, WrkModule]}, permanent, - 5000, worker, [mero_pool]}. + {Name, + {mero_pool, start_link, [ClusterName, Host, Port, Name, WrkModule]}, + permanent, + 5000, + worker, + [mero_pool]}. + diff --git a/src/mero_conf.erl b/src/mero_conf.erl index 209bd61..cf290d9 100644 --- a/src/mero_conf.erl +++ b/src/mero_conf.erl @@ -37,46 +37,23 @@ %% It's dynamically invoked using rpc:pmap/3 -ignore_xref({?MODULE, get_elasticache_cluster_configs, 1}). --export([cluster_config/0, - cluster_config/1, - pool_timeout_read/1, - timeout_read/1, - pool_timeout_write/1, - timeout_write/1, - pool_key_expiration_time/1, - key_expiration_time/1, - pool_write_retries/1, - write_retries/1, - pool_max_connections/1, - max_connections_per_pool/1, - pool_initial_connections/1, - initial_connections_per_pool/1, - pool_expiration_interval/1, - expiration_interval/1, - pool_min_free_connections/1, - min_free_connections_per_pool/1, - pool_connection_unused_max_time/1, - connection_unused_max_time/1, - pool_max_connection_delay_time/1, - pool_min_connection_interval/1, - max_connection_delay_time/1, - stat_callback/0, - stat_callback/1, - add_now/1, - add_now/2, - millis_to/1, - millis_to/2, - process_server_specs/1, - elasticache_load_config_delay/0, - elasticache_load_config_delay/1, - monitor_heartbeat_delay/0, - monitor_heartbeat_delay/2, - get_elasticache_cluster_configs/1 - ]). 
+-export([cluster_config/0, cluster_config/1, pool_timeout_read/1, timeout_read/1, + pool_timeout_write/1, timeout_write/1, pool_key_expiration_time/1, key_expiration_time/1, + pool_write_retries/1, write_retries/1, pool_max_connections/1, max_connections_per_pool/1, + pool_initial_connections/1, initial_connections_per_pool/1, pool_expiration_interval/1, + expiration_interval/1, pool_min_free_connections/1, min_free_connections_per_pool/1, + pool_connection_unused_max_time/1, connection_unused_max_time/1, + pool_max_connection_delay_time/1, pool_min_connection_interval/1, + max_connection_delay_time/1, stat_callback/0, stat_callback/1, add_now/1, add_now/2, + millis_to/1, millis_to/2, process_server_specs/1, elasticache_load_config_delay/0, + elasticache_load_config_delay/1, monitor_heartbeat_delay/0, monitor_heartbeat_delay/2, + get_elasticache_cluster_configs/1]). -include_lib("mero/include/mero.hrl"). --type per_pool_config_value(Type) :: {by_pool, Default :: Type, [{Pool :: atom(), Value :: Type}]}. +-type per_pool_config_value(Type) :: {by_pool, + Default :: Type, + [{Pool :: atom(), Value :: Type}]}. -type mero_conf_value(Type) :: Type | per_pool_config_value(Type). %%%============================================================================= @@ -113,38 +90,34 @@ cluster_config() -> cluster_config(ClusterConfig) -> application:set_env(mero, cluster_config, ClusterConfig). - %% @doc: Number of sockets that each pool will open on startup -spec pool_initial_connections(Pool :: atom()) -> integer(). pool_initial_connections(Pool) -> - get_env_per_pool(initial_connections_per_pool, Pool). + get_env_per_pool(initial_connections_per_pool, Pool). -spec initial_connections_per_pool(Initial :: mero_conf_value(integer())) -> ok. initial_connections_per_pool(Initial) -> - application:set_env(mero, initial_connections_per_pool, Initial). - + application:set_env(mero, initial_connections_per_pool, Initial). 
%% @doc: If the number of free sockets is smaller than this %% the pool will asyncronously create new ones to ensure we %% dont run out of them. -spec pool_min_free_connections(Pool :: atom()) -> integer(). pool_min_free_connections(Pool) -> - get_env_per_pool(min_free_connections_per_pool, Pool). + get_env_per_pool(min_free_connections_per_pool, Pool). -spec min_free_connections_per_pool(MinFree :: mero_conf_value(integer())) -> ok. min_free_connections_per_pool(MinFree) -> - application:set_env(mero, min_free_connections_per_pool, MinFree). - + application:set_env(mero, min_free_connections_per_pool, MinFree). %% Maximun number of connections that each pool will open. -spec pool_max_connections(Pool :: atom()) -> integer(). pool_max_connections(Pool) -> - get_env_per_pool(max_connections_per_pool, Pool). + get_env_per_pool(max_connections_per_pool, Pool). -spec max_connections_per_pool(Max :: mero_conf_value(integer())) -> ok. max_connections_per_pool(Max) -> - application:set_env(mero, max_connections_per_pool, Max). - + application:set_env(mero, max_connections_per_pool, Max). %% @doc: Read timeout in milliseconds -spec pool_timeout_read(Pool :: atom()) -> integer(). @@ -155,7 +128,6 @@ pool_timeout_read(Pool) -> timeout_read(Timeout) -> application:set_env(mero, timeout_read, Timeout). - %% @doc: Write timeout in milliseconds -spec pool_timeout_write(Pool :: atom()) -> integer(). pool_timeout_write(Pool) -> @@ -165,16 +137,14 @@ pool_timeout_write(Pool) -> timeout_write(Timeout) -> application:set_env(mero, timeout_write, Timeout). - %% @doc: Number of retries for write operations -spec pool_write_retries(Pool :: atom()) -> integer(). pool_write_retries(Pool) -> - get_env_per_pool(write_retries, Pool). + get_env_per_pool(write_retries, Pool). -spec write_retries(Timeout :: mero_conf_value(integer())) -> ok. write_retries(Timeout) -> - application:set_env(mero, write_retries, Timeout). - + application:set_env(mero, write_retries, Timeout). 
%% @doc: Gets the default value for a key expiration time -spec pool_key_expiration_time(Pool :: atom()) -> integer(). @@ -185,7 +155,6 @@ pool_key_expiration_time(Pool) -> key_expiration_time(Time) -> application:set_env(mero, expiration_time, Time). - %% @doc: Checks for unused sockets every XX (millis) and closes them. -spec pool_expiration_interval(Pool :: atom()) -> integer(). pool_expiration_interval(Pool) -> @@ -204,58 +173,57 @@ pool_connection_unused_max_time(Pool) -> connection_unused_max_time(Val) -> application:set_env(mero, connection_unused_max_time, Val). - %% @doc: maximum delay establishing initial connections -spec pool_max_connection_delay_time(Pool :: atom()) -> integer(). pool_max_connection_delay_time(Pool) -> - get_env_per_pool(max_connection_delay_time, Pool). + get_env_per_pool(max_connection_delay_time, Pool). %% @doc: min delay between connection attempts -spec pool_min_connection_interval(Pool :: atom()) -> integer(). pool_min_connection_interval(Pool) -> - try - get_env_per_pool(min_connection_interval, Pool) - catch _:_ -> - %% Don't want to make this mandatory, but the rest are mandatory already. - undefined - end. + try + get_env_per_pool(min_connection_interval, Pool) + catch + _:_ -> + %% Don't want to make this mandatory, but the rest are mandatory already. + undefined + end. -spec max_connection_delay_time(mero_conf_value(integer())) -> ok. max_connection_delay_time(Val) -> - application:set_env(mero, max_connection_delay_time, Val). - + application:set_env(mero, max_connection_delay_time, Val). %% @doc: maximum delay establishing initial connections (ms) --spec stat_callback() -> {Module::module(), Function :: atom()}. +-spec stat_callback() -> {Module :: module(), Function :: atom()}. stat_callback() -> - get_env(stat_event_callback). + get_env(stat_event_callback). --spec stat_callback({Module::module(), Function :: atom()}) -> ok. +-spec stat_callback({Module :: module(), Function :: atom()}) -> ok. 
stat_callback(Val) -> - application:set_env(mero, stat_event_callback, Val). - + application:set_env(mero, stat_event_callback, Val). add_now(Timeout) -> - add_now(Timeout, os:timestamp()). - + add_now(Timeout, os:timestamp()). add_now(Timeout, Then) -> - {M, S, MS} = Then, - {M, S, MS + (Timeout * 1000)}. - + {M, S, MS} = Then, + {M, S, MS + Timeout * 1000}. millis_to(TimeLimit) -> - millis_to(TimeLimit, os:timestamp()). + millis_to(TimeLimit, os:timestamp()). millis_to(TimeLimit, Then) -> - case (timer:now_diff(TimeLimit, Then) div 1000) of - N when N > 0 -> N; - _ -> 0 - end. + case timer:now_diff(TimeLimit, Then) div 1000 of + N when N > 0 -> + N; + _ -> + 0 + end. -spec process_server_specs(mero:cluster_config()) -> mero:cluster_config(). process_server_specs(Clusters) -> - [{ClusterName, [process_value(Attr) || Attr <- Attrs]} || {ClusterName, Attrs} <- Clusters]. + [{ClusterName, [process_value(Attr) || Attr <- Attrs]} + || {ClusterName, Attrs} <- Clusters]. %%%============================================================================= %%% Internal functions @@ -263,39 +231,38 @@ process_server_specs(Clusters) -> get_env(Key) -> case application:get_env(mero, Key) of - {ok, Value} -> - Value; - undefined -> - exit({undefined_configuration, Key}) + {ok, Value} -> + Value; + undefined -> + exit({undefined_configuration, Key}) end. get_env_per_pool(Key, Pool) -> case get_env(Key) of - {by_pool, Default, ByPool} -> - maps:get(Pool, ByPool, Default); - Value -> - Value + {by_pool, Default, ByPool} -> + maps:get(Pool, ByPool, Default); + Value -> + Value end. 
- - process_value({servers, {elasticache, ConfigEndpoint, ConfigPort}}) -> process_value({servers, {elasticache, [{ConfigEndpoint, ConfigPort, 1}]}}); process_value({servers, {elasticache, ConfigList}}) when is_list(ConfigList) -> - HostsPorts = - try rpc:pmap({?MODULE, get_elasticache_cluster_configs}, [], ConfigList) - catch - _:badrpc -> % Fallback to sequential execution, mostly to get proper error descriptions - lists:map(fun get_elasticache_cluster_configs/1, ConfigList) - end, + HostsPorts = try + rpc:pmap({?MODULE, get_elasticache_cluster_configs}, [], ConfigList) + catch + _:badrpc -> + % Fallback to sequential execution, mostly to get proper error descriptions + lists:map(fun get_elasticache_cluster_configs/1, ConfigList) + end, {servers, lists:flatten(HostsPorts)}; process_value({servers, {mfa, {Module, Function, Args}}}) -> try erlang:apply(Module, Function, Args) of - {ok, HostsPorts} when is_list(HostsPorts) -> - {servers, HostsPorts} + {ok, HostsPorts} when is_list(HostsPorts) -> + {servers, HostsPorts} catch - Type:Reason -> - error({invalid_call, {Module, Function, Args}, {Type, Reason}}) + Type:Reason -> + error({invalid_call, {Module, Function, Args}, {Type, Reason}}) end; process_value(V) -> V. @@ -305,9 +272,11 @@ get_elasticache_cluster_configs({Host, Port, ClusterSpeedFactor}) -> get_elasticache_cluster_configs({Host, Port}) -> [get_elasticache_cluster_config(Host, Port)]. - get_elasticache_cluster_config(Host, Port) -> - get_elasticache_cluster_config(Host, Port, 0, mero_elasticache:get_cluster_config(Host, Port)). + get_elasticache_cluster_config(Host, + Port, + 0, + mero_elasticache:get_cluster_config(Host, Port)). 
get_elasticache_cluster_config(_Host, _Port, _Retries, {ok, Entries}) -> Entries; @@ -315,5 +284,8 @@ get_elasticache_cluster_config(Host, Port, ?MAX_RETRIES, {error, Reason}) -> error({Reason, Host, Port}); get_elasticache_cluster_config(Host, Port, Retries, {error, _Reason}) -> timer:sleep(trunc(math:pow(2, Retries)) * 100), - get_elasticache_cluster_config( - Host, Port, Retries + 1, mero_elasticache:get_cluster_config(Host, Port)). + get_elasticache_cluster_config(Host, + Port, + Retries + 1, + mero_elasticache:get_cluster_config(Host, Port)). + diff --git a/src/mero_conf_monitor.erl b/src/mero_conf_monitor.erl index ae0071b..f3f7a83 100644 --- a/src/mero_conf_monitor.erl +++ b/src/mero_conf_monitor.erl @@ -30,37 +30,26 @@ -behaviour(gen_server). --export([ - start_link/2, - init/1, - handle_call/3, - handle_cast/2, - handle_info/2 -]). - --record(state, { - orig_config :: cluster_config(), - processed_config :: cluster_config(), - cluster_version :: pos_integer() -}). +-export([start_link/2, init/1, handle_call/3, handle_cast/2, handle_info/2]). --type state() :: #state{}. +-record(state, + {orig_config :: cluster_config(), + processed_config :: cluster_config(), + cluster_version :: pos_integer()}). --type cluster_config() :: mero:cluster_config(). --type init_args() :: #{orig_config | processed_config := cluster_config()}. +-type state() :: #state{}. +-type cluster_config() :: mero:cluster_config(). +-type init_args() :: #{orig_config | processed_config := cluster_config()}. %%%----------------------------------------------------------------------------- %%% API %%%----------------------------------------------------------------------------- -spec start_link(cluster_config(), cluster_config()) -> {ok, pid()} | {error, term()}. start_link(OrigConfig, ProcessedConfig) -> - gen_server:start_link( - {local, ?MODULE}, - ?MODULE, - #{orig_config => OrigConfig, processed_config => ProcessedConfig}, - [] - ). 
- + gen_server:start_link({local, ?MODULE}, + ?MODULE, + #{orig_config => OrigConfig, processed_config => ProcessedConfig}, + []). %%%----------------------------------------------------------------------------- %%% Interesting Callbacks @@ -68,42 +57,41 @@ start_link(OrigConfig, ProcessedConfig) -> -spec init(init_args()) -> {ok, state()}. init(#{orig_config := OrigConfig, processed_config := ProcessedConfig}) -> program_heartbeat(), - {ok, #state{ - orig_config = OrigConfig, - processed_config = ProcessedConfig, - cluster_version = mero_cluster:version() - }}. + {ok, + #state{orig_config = OrigConfig, + processed_config = ProcessedConfig, + cluster_version = mero_cluster:version()}}. -spec handle_info(heartbeat | _, State) -> {noreply, State} when State :: state(). handle_info(heartbeat, State) -> program_heartbeat(), - NewState = - try update_cluster_defs(State) - catch - Kind:Desc -> - error_logger:error_report([ - {error, mero_config_heartbeat_failed}, - {kind, Kind}, - {desc, Desc}, - {stack, erlang:get_stacktrace()}, - {orig_config, State#state.orig_config}, - {processed_config, State#state.processed_config} - ]), - State - end, + NewState = try + update_cluster_defs(State) + catch + Kind:Desc -> + error_logger:error_report([{error, mero_config_heartbeat_failed}, + {kind, Kind}, + {desc, Desc}, + {stack, erlang:get_stacktrace()}, + {orig_config, State#state.orig_config}, + {processed_config, State#state.processed_config}]), + State + end, {noreply, NewState}; -handle_info(_, State) -> {noreply, State}. - +handle_info(_, State) -> + {noreply, State}. %%%----------------------------------------------------------------------------- %%% Boilerplate Callbacks %%%----------------------------------------------------------------------------- --spec handle_call(Msg, _From, State) -> {reply, {unknown_call, Msg}, State} when State :: state(). -handle_call(Msg, _From, State) -> {reply, {unknown_call, Msg}, State}. 
+-spec handle_call(Msg, _From, State) -> {reply, {unknown_call, Msg}, State} when State :: + state(). +handle_call(Msg, _From, State) -> + {reply, {unknown_call, Msg}, State}. -spec handle_cast(_Msg, State) -> {noreply, State} when State :: state(). -handle_cast(_Msg, State) -> {noreply, State}. - +handle_cast(_Msg, State) -> + {noreply, State}. %%%----------------------------------------------------------------------------- %%% Private Functions @@ -111,18 +99,15 @@ handle_cast(_Msg, State) -> {noreply, State}. program_heartbeat() -> erlang:send_after(mero_conf:monitor_heartbeat_delay(), self(), heartbeat). - update_cluster_defs(#state{orig_config = OrigConfig} = State) -> update_cluster_defs(mero_conf:process_server_specs(OrigConfig), State). -update_cluster_defs(ProcessedConfig, #state{processed_config = ProcessedConfig} = State) -> +update_cluster_defs(ProcessedConfig, + #state{processed_config = ProcessedConfig} = State) -> State; %% Nothing has changed update_cluster_defs(NewProcessedConfig, State) -> - #state{ - processed_config = OldProcessedConfig, - cluster_version = OldClusterVersion - } = State, - + #state{processed_config = OldProcessedConfig, cluster_version = OldClusterVersion} = + State, ok = mero_cluster:load_clusters(NewProcessedConfig), NewClusterVersion = mero_cluster:version(), @@ -132,26 +117,27 @@ update_cluster_defs(NewProcessedConfig, State) -> State#state{processed_config = NewProcessedConfig, cluster_version = NewClusterVersion}. - purge_if_version_changed(ClusterVersion, ClusterVersion) -> ok; purge_if_version_changed(_OldVersion, _NewClusterVersion) -> mero_cluster:purge(). - %% NOTE: since both cluster definitions are generated through mero_conf:process_server_specs/1 %% with the same input, we can be sure that the resulting lists will contain the same number %% of elements, with the same keys in the same order. 
update_clusters([], []) -> ok; -update_clusters([ClusterDef | OldClusterDefs], [ClusterDef | NewClusterDefs]) -> %% nothing changed +update_clusters([ClusterDef | OldClusterDefs], + [ClusterDef | NewClusterDefs]) -> %% nothing changed update_clusters(OldClusterDefs, NewClusterDefs); -update_clusters( - [{ClusterName, OldAttrs} | OldClusterDefs], [{ClusterName, NewAttrs} | NewClusterDefs]) -> +update_clusters([{ClusterName, OldAttrs} | OldClusterDefs], + [{ClusterName, NewAttrs} | NewClusterDefs]) -> OldServers = lists:sort(proplists:get_value(servers, OldAttrs)), - ok = - case lists:sort(proplists:get_value(servers, NewAttrs)) of - OldServers -> ok; %% Nothing of relevance changed - _ -> mero_sup:restart_child(ClusterName) - end, + ok = case lists:sort(proplists:get_value(servers, NewAttrs)) of + OldServers -> + ok; %% Nothing of relevance changed + _ -> + mero_sup:restart_child(ClusterName) + end, update_clusters(OldClusterDefs, NewClusterDefs). + diff --git a/src/mero_conn.erl b/src/mero_conn.erl index 7a92875..74e2193 100644 --- a/src/mero_conn.erl +++ b/src/mero_conn.erl @@ -33,21 +33,21 @@ -export([increment_counter/7, mincrement_counter/7, get/3, - set/6, mset/3, + set/6, + mset/3, delete/3, mdelete/3, - add/5, madd/3, - flush_all/2 - ]). + add/5, + madd/3, + flush_all/2]). -include_lib("mero/include/mero.hrl"). --record(async_op, { - op :: atom(), % name of worker op which sends this request - op_error :: atom(), % name of error for errors occuring when sending request - response :: atom(), % name of worker op which reads response - response_error :: atom() % name of error for errors occuring when reading response - }). +-record(async_op, + {op :: atom(), % name of worker op which sends this request + op_error :: atom(), % name of error for errors occuring when sending request + response :: atom(), % name of worker op which reads response + response_error :: atom()}). 
% name of error for errors occuring when reading response %%%============================================================================= %%% External functions @@ -56,71 +56,81 @@ increment_counter(Name, Key, Value, Initial, ExpTime, Retries, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), PoolName = mero_cluster:server(Name, Key), - increment_counter_timelimit( - PoolName, mero:storage_key(Key), Value, Initial, ExpTime, Retries, TimeLimit). + increment_counter_timelimit(PoolName, + mero:storage_key(Key), + Value, + Initial, + ExpTime, + Retries, + TimeLimit). mincrement_counter(Name, Keys, Value, Initial, ExpTime, _Retries, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), KeysGroupedByShards = mero_cluster:group_by_shards(Name, Keys), - Payload = - [ - {Shard, [{mero:storage_key(K), Value, Initial, ExpTime} || K <- Ks]} - || {Shard, Ks} <- KeysGroupedByShards], - case async_by_shard(Name, Payload, TimeLimit, + Payload = [{Shard, [{mero:storage_key(K), Value, Initial, ExpTime} || K <- Ks]} + || {Shard, Ks} <- KeysGroupedByShards], + case async_by_shard(Name, + Payload, + TimeLimit, #async_op{op = async_increment, op_error = async_increment_error, response = async_blank_response, - response_error = async_increment_response_error}) of - {error, [not_supportable], _} -> {error, not_supportable}; - _Other -> ok + response_error = async_increment_response_error}) + of + {error, [not_supportable], _} -> + {error, not_supportable}; + _Other -> + ok end. - set(Name, Key, Value, ExpTime, Timeout, CAS) -> TimeLimit = mero_conf:add_now(Timeout), PoolName = mero_cluster:server(Name, Key), - pool_execute(PoolName, set, [mero:storage_key(Key), Value, ExpTime, TimeLimit, CAS], TimeLimit). - + pool_execute(PoolName, + set, + [mero:storage_key(Key), Value, ExpTime, TimeLimit, CAS], + TimeLimit). mset(Name, KVECs, Timeout) -> mset_(Name, KVECs, Timeout, async_mset). 
- madd(Name, KVEs, Timeout) -> - mset_(Name, [{Key, Value, ExpTime, undefined} - || {Key, Value, ExpTime} <- KVEs], Timeout, async_madd). - + mset_(Name, + [{Key, Value, ExpTime, undefined} || {Key, Value, ExpTime} <- KVEs], + Timeout, + async_madd). get(Name, [Key], Timeout) -> TimeLimit = mero_conf:add_now(Timeout), PoolName = mero_cluster:server(Name, Key), case pool_execute(PoolName, get, [mero:storage_key(Key), TimeLimit], TimeLimit) of - {error, Reason} -> - {error, [Reason], []}; - Value -> - [Value] + {error, Reason} -> + {error, [Reason], []}; + Value -> + [Value] end; - get(Name, Keys, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), KeysGroupedByShards = mero_cluster:group_by_shards(Name, Keys), - async_by_shard(Name, KeysGroupedByShards, TimeLimit, + async_by_shard(Name, + KeysGroupedByShards, + TimeLimit, #async_op{op = async_mget, op_error = async_mget_error, response = async_mget_response, response_error = async_mget_response_error}). - delete(Name, Key, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), PoolName = mero_cluster:server(Name, Key), pool_execute(PoolName, delete, [mero:storage_key(Key), TimeLimit], TimeLimit). - mdelete(Name, Keys, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), KeysGroupedByShards = mero_cluster:group_by_shards(Name, Keys), - async_by_shard(Name, KeysGroupedByShards, TimeLimit, + async_by_shard(Name, + KeysGroupedByShards, + TimeLimit, #async_op{op = async_delete, op_error = async_delete_error, response = async_blank_response, @@ -130,14 +140,15 @@ mdelete(Name, Keys, Timeout) -> add(Name, Key, Value, ExpTime, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), PoolName = mero_cluster:server(Name, Key), - pool_execute(PoolName, add, [mero:storage_key(Key), Value, ExpTime, TimeLimit], TimeLimit). - + pool_execute(PoolName, + add, + [mero:storage_key(Key), Value, ExpTime, TimeLimit], + TimeLimit). 
flush_all(Name, Timeout) -> TimeLimit = mero_conf:add_now(Timeout), - [{Name, pool_execute(PoolName, flush_all, [TimeLimit], TimeLimit)} || PoolName <- - mero_cluster:one_pool_of_each_shard_of_cluster(Name)]. - + [{Name, pool_execute(PoolName, flush_all, [TimeLimit], TimeLimit)} + || PoolName <- mero_cluster:one_pool_of_each_shard_of_cluster(Name)]. %%%============================================================================= %%% Internal functions @@ -148,18 +159,20 @@ mset_(Name, KVECs, Timeout, Op) -> Requests = lists:zip(lists:seq(1, length(KVECs)), KVECs), %% we number each request according to its original position so we can return results %% in the same order: - NKVECs = [{N, Key, Value, ExpTime, CAS} - || {N, {Key, Value, ExpTime, CAS}} <- Requests], + NKVECs = [{N, Key, Value, ExpTime, CAS} || {N, {Key, Value, ExpTime, CAS}} <- Requests], ItemsGroupedByShards = mero_cluster:group_by_shards(Name, NKVECs, 2), - Processed = case async_by_shard(Name, ItemsGroupedByShards, TimeLimit, + Processed = case async_by_shard(Name, + ItemsGroupedByShards, + TimeLimit, #async_op{op = Op, op_error = async_mset_error, response = async_mset_response, - response_error = async_mset_response_error}) of - {error, _ErrorsOut, ProcessedOut} -> - ProcessedOut; - ProcessedOut -> - ProcessedOut + response_error = async_mset_response_error}) + of + {error, _ErrorsOut, ProcessedOut} -> + ProcessedOut; + ProcessedOut -> + ProcessedOut end, tuple_to_list(lists:foldl(fun ({N, Result}, Acc) -> setelement(N, Acc, Result) @@ -167,89 +180,97 @@ mset_(Name, KVECs, Timeout, Op) -> list_to_tuple(lists:duplicate(length(KVECs), {error, failed})), Processed)). 
- increment_counter_timelimit(Name, Key, Value, Initial, ExpTime, Retries, TimeLimit) -> - case pool_execute( - Name, increment_counter, [Key, Value, Initial, ExpTime, TimeLimit], TimeLimit) of - {ok, ActualValue} -> - {ok, ActualValue}; - {error, _Reason} when Retries >= 1 -> - increment_counter_timelimit(Name, Key, Value, Initial, ExpTime, Retries - 1, TimeLimit); - {error, Reason} -> - {error, Reason} + case pool_execute(Name, + increment_counter, + [Key, Value, Initial, ExpTime, TimeLimit], + TimeLimit) + of + {ok, ActualValue} -> + {ok, ActualValue}; + {error, _Reason} when Retries >= 1 -> + increment_counter_timelimit(Name, Key, Value, Initial, ExpTime, Retries - 1, TimeLimit); + {error, Reason} -> + {error, Reason} end. -async_by_shard(Name, ItemsGroupedByShards, TimeLimit, +async_by_shard(Name, + ItemsGroupedByShards, + TimeLimit, #async_op{op = AsyncOp, op_error = AsyncOpError, response = AsyncOpResponse, response_error = AsyncOpResponseError}) -> - {Processed, Errors} = - lists:foldl( - fun({ShardIdentifier, Items}, {Processed, Errors}) -> - begin - PoolName = mero_cluster:random_pool_of_shard(Name, ShardIdentifier), - case mero_pool:checkout(PoolName, TimeLimit) of - {ok, Conn} -> - case mero_pool:transaction(Conn, AsyncOp, [Items]) of - {error, Reason} -> - mero_pool:close(Conn, AsyncOpError), - mero_pool:checkin_closed(Conn), - {Processed, [Reason | Errors]}; - {NConn, {error, Reason}} -> - mero_pool:checkin(NConn), - {Processed, [Reason | Errors]}; - {NConn, ok} -> - {[{NConn, Items} | Processed], Errors} - end; - {error, Reason} -> - {Processed, [Reason | Errors]} - end - end - end, - {[], []}, - ItemsGroupedByShards), - {ProcessedOut, ErrorsOut} = - lists:foldl( - fun({Conn, Items}, {ProcessedIn, ErrorsIn}) -> - case mero_pool:transaction(Conn, AsyncOpResponse, [Items, TimeLimit]) of - {error, Reason} -> - mero_pool:close(Conn, AsyncOpResponseError), - mero_pool:checkin_closed(Conn), - {ProcessedIn, [Reason | ErrorsIn]}; - {Client, {error, Reason}} 
-> - mero_pool:checkin(Client), - {ProcessedIn, [Reason | ErrorsIn]}; - {Client, Responses} when is_list(Responses) -> - mero_pool:checkin(Client), - {Responses ++ ProcessedIn, ErrorsIn} - end - end, - {[], Errors}, - Processed), + {Processed, Errors} = lists:foldl(fun ({ShardIdentifier, Items}, {Processed, Errors}) -> + begin + PoolName = mero_cluster:random_pool_of_shard(Name, + ShardIdentifier), + case mero_pool:checkout(PoolName, TimeLimit) of + {ok, Conn} -> + case mero_pool:transaction(Conn, + AsyncOp, + [Items]) + of + {error, Reason} -> + mero_pool:close(Conn, AsyncOpError), + mero_pool:checkin_closed(Conn), + {Processed, [Reason | Errors]}; + {NConn, {error, Reason}} -> + mero_pool:checkin(NConn), + {Processed, [Reason | Errors]}; + {NConn, ok} -> + {[{NConn, Items} | Processed], Errors} + end; + {error, Reason} -> + {Processed, [Reason | Errors]} + end + end + end, + {[], []}, + ItemsGroupedByShards), + {ProcessedOut, ErrorsOut} = lists:foldl(fun ({Conn, Items}, {ProcessedIn, ErrorsIn}) -> + case mero_pool:transaction(Conn, + AsyncOpResponse, + [Items, TimeLimit]) + of + {error, Reason} -> + mero_pool:close(Conn, + AsyncOpResponseError), + mero_pool:checkin_closed(Conn), + {ProcessedIn, [Reason | ErrorsIn]}; + {Client, {error, Reason}} -> + mero_pool:checkin(Client), + {ProcessedIn, [Reason | ErrorsIn]}; + {Client, Responses} when is_list(Responses) -> + mero_pool:checkin(Client), + {Responses ++ ProcessedIn, ErrorsIn} + end + end, + {[], Errors}, + Processed), case ErrorsOut of - [] -> - ProcessedOut; - ErrorsOut -> - {error, ErrorsOut, ProcessedOut} + [] -> + ProcessedOut; + ErrorsOut -> + {error, ErrorsOut, ProcessedOut} end. - %% @doc: Request a socket form the pool, uses it and returns it once finished. 
pool_execute(PoolName, Op, Args, TimeLimit) when is_tuple(TimeLimit) -> case mero_pool:checkout(PoolName, TimeLimit) of - {ok, Conn} -> - case mero_pool:transaction(Conn, Op, Args) of - {error, Reason} -> - mero_pool:close(Conn, sync_transaction_error), - mero_pool:checkin_closed(Conn), - {error, Reason}; - {NConn, Return} -> - mero_pool:checkin(NConn), - Return - end; - {error, reject} -> - {error, reject}; - {error, Reason} -> - {error, Reason} + {ok, Conn} -> + case mero_pool:transaction(Conn, Op, Args) of + {error, Reason} -> + mero_pool:close(Conn, sync_transaction_error), + mero_pool:checkin_closed(Conn), + {error, Reason}; + {NConn, Return} -> + mero_pool:checkin(NConn), + Return + end; + {error, reject} -> + {error, reject}; + {error, Reason} -> + {error, Reason} end. + diff --git a/src/mero_elasticache.erl b/src/mero_elasticache.erl index a5dcfc1..018cb55 100644 --- a/src/mero_elasticache.erl +++ b/src/mero_elasticache.erl @@ -35,36 +35,42 @@ %% API -export([get_cluster_config/2]). - %% Just for testing purposes -export([request_response/4]). -define(GET_CLUSTER, <<"config get cluster\n">>). --type cluster_entry() :: {Host :: string(), Addr :: inet:ip_address(), Port :: pos_integer()}. +-type cluster_entry() :: {Host :: string(), + Addr :: inet:ip_address(), + Port :: pos_integer()}. + -export_type([cluster_entry/0]). %%%============================================================================= %%% External functions %%%============================================================================= %% Given an elasticache config endpoint:port, returns parsed list of {host, port} nodes in cluster --spec get_cluster_config(string(), integer()) -> - {ok, list({string(), integer()})} | {error, Reason :: atom()}. +-spec get_cluster_config(string(), integer()) -> {ok, [{string(), integer()}]} | + {error, Reason :: atom()}. 
get_cluster_config(ConfigHost, ConfigPort) -> %% We wait for a bit before loading elasticache configuration to prevent runaway elasticache %% spam during error loops (which used to occur on occasion). timer:sleep(mero_conf:elasticache_load_config_delay()), LineDefinitions = [banner, version, hosts, crlf, eom], - case mero_elasticache:request_response(ConfigHost, ConfigPort, ?GET_CLUSTER, LineDefinitions) of - {ok, Result} -> - case parse_cluster_config(proplists:get_value(hosts, Result)) of - {error, Reason} -> - {error, Reason}; - {ok, Config} -> - {ok, [{Host, Port} || {Host, _IPAddr, Port} <- Config]} - end; - {error, Reason} -> - {error, Reason} + case mero_elasticache:request_response(ConfigHost, + ConfigPort, + ?GET_CLUSTER, + LineDefinitions) + of + {ok, Result} -> + case parse_cluster_config(proplists:get_value(hosts, Result)) of + {error, Reason} -> + {error, Reason}; + {ok, Config} -> + {ok, [{Host, Port} || {Host, _IPAddr, Port} <- Config]} + end; + {error, Reason} -> + {error, Reason} end. %%%============================================================================= @@ -75,34 +81,33 @@ request_response(Host, Port, Command, Names) -> %% @see https://github.com/erlang/otp/pull/2191 %% Even with Timeout == infinity, connect attempts may result in {error, etimedout} case gen_tcp:connect(Host, Port, Opts) of - {ok, Socket} -> - ok = gen_tcp:send(Socket, Command), - Lines = receive_lines(Names, Socket), - ok = gen_tcp:close(Socket), - Lines; - Error -> - Error + {ok, Socket} -> + ok = gen_tcp:send(Socket, Command), + Lines = receive_lines(Names, Socket), + ok = gen_tcp:close(Socket), + Lines; + Error -> + Error end. --spec receive_lines([atom()], gen_tcp:socket()) -> {ok, [{atom(), binary()}]} | {error, atom()}. +-spec receive_lines([atom()], gen_tcp:socket()) -> {ok, [{atom(), binary()}]} | + {error, atom()}. receive_lines(Names, Socket) -> receive_lines(Names, Socket, []). 
- receive_lines([], _Socket, Lines) -> {ok, lists:reverse(Lines)}; - -receive_lines([Name|Names], Socket, Acc) -> +receive_lines([Name | Names], Socket, Acc) -> case gen_tcp:recv(Socket, 0, 10000) of - {ok, Line} -> - receive_lines(Names, Socket, [{Name, Line}|Acc]); - {error, Error} -> - {error, Error} + {ok, Line} -> + receive_lines(Names, Socket, [{Name, Line} | Acc]); + {error, Error} -> + {error, Error} end. %% Parse host and version lines to return version and list of {host, port} cluster nodes --spec parse_cluster_config(binary()) -> - {ok, Config :: [cluster_entry()]} | {error, Reason :: atom()}. +-spec parse_cluster_config(binary()) -> {ok, Config :: [cluster_entry()]} | + {error, Reason :: atom()}. parse_cluster_config(HostLine) -> %% Strip any newlines Entries = binary:replace(HostLine, <<"\n">>, <<>>), @@ -115,24 +120,24 @@ parse_cluster_config(HostLine) -> parse_cluster_entries([], Accum) -> {ok, lists:reverse(Accum)}; -parse_cluster_entries([H|T], Accum) -> +parse_cluster_entries([H | T], Accum) -> case string:tokens(H, "|") of - [Host, IP, Port] -> - case inet:parse_ipv4_address(IP) of - {ok, IPAddr} -> - case catch erlang:list_to_integer(Port) of - {'EXIT', _} -> - {error, bad_port}; - P when P < 1 orelse P > 65535 -> - {error, bad_port}; - P -> - parse_cluster_entries(T, [{Host, IPAddr, P}|Accum]) - end; - {error, _} -> - {error, bad_ip} - end; - _BadClusterEntry -> - {error, bad_cluster_entry} + [Host, IP, Port] -> + case inet:parse_ipv4_address(IP) of + {ok, IPAddr} -> + case catch erlang:list_to_integer(Port) of + {'EXIT', _} -> + {error, bad_port}; + P when P < 1 orelse P > 65535 -> + {error, bad_port}; + P -> + parse_cluster_entries(T, [{Host, IPAddr, P} | Accum]) + end; + {error, _} -> + {error, bad_ip} + end; + _BadClusterEntry -> + {error, bad_cluster_entry} end. %%%=================================================================== @@ -144,42 +149,43 @@ parse_cluster_entries([H|T], Accum) -> -include_lib("eunit/include/eunit.hrl"). 
get_cluster_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 " - "server2.cache.amazonaws.com|10.101.101.0|11211 " - "server3.cache.amazonaws.com|10.102.00.102|11211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 server2.cache.amazo" + "naws.com|10.101.101.0|11211 server3.cache.amazonaws.com|10.102.00.10" + "2|11211\n">>, ExpectedParse = [{"server1.cache.amazonaws.com", {10, 100, 100, 100}, 11211}, {"server2.cache.amazonaws.com", {10, 101, 101, 0}, 11211}, {"server3.cache.amazonaws.com", {10, 102, 0, 102}, 11211}], ?assertEqual({ok, ExpectedParse}, parse_cluster_config(HostLine)). get_bad_ip_addr_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 " - "server2.cache.amazonaws.com|10.101.101.|11211 " - "server3.cache.amazonaws.com|10.102.00.102|11211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 server2.cache.amazo" + "naws.com|10.101.101.|11211 server3.cache.amazonaws.com|10.102.00.102" + "|11211\n">>, ?assertEqual({error, bad_ip}, parse_cluster_config(HostLine)). get_non_integer_port_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 " - "server2.cache.amazonaws.com|10.101.101.0|11211 " - "server3.cache.amazonaws.com|10.102.00.102|1l211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 server2.cache.amazo" + "naws.com|10.101.101.0|11211 server3.cache.amazonaws.com|10.102.00.10" + "2|1l211\n">>, ?assertEqual({error, bad_port}, parse_cluster_config(HostLine)). get_bad_low_port_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|0 " - "server2.cache.amazonaws.com|10.101.101.0|11211 " - "server3.cache.amazonaws.com|10.102.00.102|11211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|0 server2.cache.amazonaws" + ".com|10.101.101.0|11211 server3.cache.amazonaws.com|10.102.00.102|11" + "211\n">>, ?assertEqual({error, bad_port}, parse_cluster_config(HostLine)). 
get_bad_high_port_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|72000 " - "server2.cache.amazonaws.com|10.101.101.0|11211 " - "server3.cache.amazonaws.com|10.102.00.102|11211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|72000 server2.cache.amazo" + "naws.com|10.101.101.0|11211 server3.cache.amazonaws.com|10.102.00.10" + "2|11211\n">>, ?assertEqual({error, bad_port}, parse_cluster_config(HostLine)). get_bad_entry_config_test() -> - HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 " - "server2.cache.amazonaws.com|10.101.101.0| " - "server3.cache.amazonaws.com|10.102.00.102|11211\n">>, + HostLine = <<"server1.cache.amazonaws.com|10.100.100.100|11211 server2.cache.amazo" + "naws.com|10.101.101.0| server3.cache.amazonaws.com|10.102.00.102|112" + "11\n">>, ?assertEqual({error, bad_cluster_entry}, parse_cluster_config(HostLine)). -endif. + diff --git a/src/mero_pool.erl b/src/mero_pool.erl index 950428f..e022c4d 100644 --- a/src/mero_pool.erl +++ b/src/mero_pool.erl @@ -39,10 +39,8 @@ pool_loop/3, system_continue/3, system_terminate/4]). - %%% Internal & introspection functions --export([init/6, - state/1]). +-export([init/6, state/1]). -include_lib("mero/include/mero.hrl"). @@ -50,52 +48,53 @@ -type client() :: term(). -type host() :: inet:socket_address() | inet:hostname(). --record(conn, {updated :: erlang:timestamp(), - pool :: module(), - worker_module :: module(), - client :: client()}). --type conn() :: #conn{}. - --record(pool_st, {cluster, - host :: host(), - port :: inet:port_number(), - max_connections :: non_neg_integer(), - min_connections :: non_neg_integer(), - - %% List of free connections - free :: list(term()), - - %% Busy connections (pid -> #conn) - busy :: dict:dict(), - - %% Number of connections established (busy + free) - num_connected :: non_neg_integer(), +-record(conn, + {updated :: erlang:timestamp(), + pool :: module(), + worker_module :: module(), + client :: client()}). 
- %% Number of connection attempts in progress - num_connecting :: non_neg_integer(), - - %% Number of failed connection attempts - %% (reset to zero when connect attempt succeds) - num_failed_connecting :: non_neg_integer(), - - reconnect_wait_time :: non_neg_integer(), - min_connection_interval_ms :: non_neg_integer() | undefined, - worker_module :: atom(), - callback_info :: mfargs(), - pool :: term(), - last_connection_attempt :: non_neg_integer()}). +-type conn() :: #conn{}. --callback transaction(client(), atom(), [term()]) -> {error, term()} | {client(), {ok, any()}}. +-record(pool_st, + {cluster, + host :: host(), + port :: inet:port_number(), + max_connections :: non_neg_integer(), + min_connections :: non_neg_integer(), + %% List of free connections + free :: [term()], + %% Busy connections (pid -> #conn) + busy :: dict:dict(), + %% Number of connections established (busy + free) + num_connected :: non_neg_integer(), + %% Number of connection attempts in progress + num_connecting :: non_neg_integer(), + %% Number of failed connection attempts + %% (reset to zero when connect attempt succeeds) + num_failed_connecting :: non_neg_integer(), + reconnect_wait_time :: non_neg_integer(), + min_connection_interval_ms :: non_neg_integer() | undefined, + worker_module :: atom(), + callback_info :: mfargs(), + pool :: term(), + last_connection_attempt :: non_neg_integer()}). + +-callback transaction(client(), atom(), [term()]) -> {error, term()} | + {client(), {ok, any()}}. -callback close(client(), Reason :: term()) -> _. --callback connect(host(), inet:port_number(), mfargs()) -> {ok, client()} | {error, term()}. --callback controlling_process(client(), Parent::pid()) -> ok | {error, term()}. +-callback connect(host(), inet:port_number(), mfargs()) -> {ok, client()} | + {error, term()}. +-callback controlling_process(client(), Parent :: pid()) -> ok | {error, term()}.
%%%============================================================================= %%% External functions %%%============================================================================= start_link(ClusterName, Host, Port, PoolName, WorkerModule) -> - proc_lib:start_link(?MODULE, init, [self(), ClusterName, Host, Port, PoolName, WorkerModule]). + proc_lib:start_link(?MODULE, + init, + [self(), ClusterName, Host, Port, PoolName, WorkerModule]). %% @doc Checks out an element of the pool. -spec checkout(atom(), TimeLimit :: tuple()) -> {ok, conn()} | {error, Reason :: term()}. @@ -104,204 +103,190 @@ checkout(PoolName, TimeLimit) -> MRef = erlang:monitor(process, PoolName), safe_send(PoolName, {checkout, {self(), MRef}}), receive - {'DOWN', MRef, _, _, _} -> - {error, down}; - {MRef, {reject, _State}} -> - erlang:demonitor(MRef), - {error, reject}; - {MRef, Connection} -> - erlang:demonitor(MRef), - {ok, Connection} - after Timeout -> + {'DOWN', MRef, _, _, _} -> + {error, down}; + {MRef, {reject, _State}} -> + erlang:demonitor(MRef), + {error, reject}; + {MRef, Connection} -> + erlang:demonitor(MRef), + {ok, Connection} + after Timeout -> erlang:demonitor(MRef), safe_send(PoolName, {checkout_cancel, self()}), {error, pool_timeout} end. - %% @doc Return a connection to specfied pool updating its timestamp -spec checkin(Connection :: conn()) -> ok. checkin(#conn{pool = PoolName} = Connection) -> - safe_send(PoolName, {checkin, self(), - Connection#conn{updated = os:timestamp()}}), + safe_send(PoolName, {checkin, self(), Connection#conn{updated = os:timestamp()}}), ok. - %% @doc Return a connection that has been closed. -spec checkin_closed(Connection :: conn()) -> ok. checkin_closed(#conn{pool = PoolName}) -> safe_send(PoolName, {checkin_closed, self()}), ok. - %% @doc Executes an operation --spec transaction(Connection :: conn(), atom(), list()) -> - {NewConnection :: conn(), {ok, any()}} | {error, any()}. 
-transaction(#conn{worker_module = WorkerModule, - client = Client} = Conn, Function, Args) -> +-spec transaction(Connection :: conn(), atom(), list()) -> {NewConnection :: conn(), + {ok, any()}} | + {error, any()}. +transaction(#conn{worker_module = WorkerModule, client = Client} = Conn, + Function, + Args) -> case WorkerModule:transaction(Client, Function, Args) of - {error, Reason} -> {error, Reason}; - {NClient, Res} -> - {Conn#conn{client = NClient}, Res} + {error, Reason} -> + {error, Reason}; + {NClient, Res} -> + {Conn#conn{client = NClient}, Res} end. - -close(#conn{worker_module = WorkerModule, - client = Client}, Reason) -> +close(#conn{worker_module = WorkerModule, client = Client}, Reason) -> WorkerModule:close(Client, Reason). - system_continue(Parent, Deb, State) -> pool_loop(State, Parent, Deb). - -spec system_terminate(term(), _, _, _) -> no_return(). system_terminate(Reason, _Parent, _Deb, _State) -> exit(Reason). - %%%============================================================================= %%% Internal functions %%%============================================================================= init(Parent, ClusterName, Host, Port, PoolName, WrkModule) -> case is_config_valid(ClusterName) of - false -> - proc_lib:init_ack(Parent, {error, invalid_config}); - true -> - register(PoolName, self()), - process_flag(trap_exit, true), - Deb = sys:debug_options([]), - {Module, Function} = mero_conf:stat_callback(), - CallBackInfo = ?CALLBACK_CONTEXT(Module, Function, ClusterName, Host, Port), - Initial = mero_conf:pool_initial_connections(ClusterName), - spawn_connections(ClusterName, PoolName, WrkModule, Host, Port, CallBackInfo, Initial), - proc_lib:init_ack(Parent, {ok, self()}), - State = #pool_st{ - cluster = ClusterName, - free = [], - host = Host, - port = Port, - busy = dict:new(), - max_connections = 0, %%make dialyzer happy. 
These are populated from config - min_connections = 0, - num_connected = 0, - num_connecting = Initial, - num_failed_connecting = 0, - reconnect_wait_time = ?RECONNECT_WAIT_TIME, - pool = PoolName, - callback_info = CallBackInfo, - worker_module = WrkModule, - last_connection_attempt = 0}, - timer:send_interval(5000, reload_pool_min_max_settings), - pool_loop(schedule_expiration(reload_pool_min_max_settings(State)), Parent, Deb) + false -> + proc_lib:init_ack(Parent, {error, invalid_config}); + true -> + register(PoolName, self()), + process_flag(trap_exit, true), + Deb = sys:debug_options([]), + {Module, Function} = mero_conf:stat_callback(), + CallBackInfo = ?CALLBACK_CONTEXT(Module, Function, ClusterName, Host, Port), + Initial = mero_conf:pool_initial_connections(ClusterName), + spawn_connections(ClusterName, PoolName, WrkModule, Host, Port, CallBackInfo, Initial), + proc_lib:init_ack(Parent, {ok, self()}), + State = #pool_st{cluster = ClusterName, + free = [], + host = Host, + port = Port, + busy = dict:new(), + max_connections = + 0, %%make dialyzer happy. These are populated from config + min_connections = 0, + num_connected = 0, + num_connecting = Initial, + num_failed_connecting = 0, + reconnect_wait_time = ?RECONNECT_WAIT_TIME, + pool = PoolName, + callback_info = CallBackInfo, + worker_module = WrkModule, + last_connection_attempt = 0}, + timer:send_interval(5000, reload_pool_min_max_settings), + pool_loop(schedule_expiration(reload_pool_min_max_settings(State)), Parent, Deb) end. - %%% @doc Returns the specified PoolName state. -spec state(PoolName :: atom()) -> term(). 
state(PoolName) -> MRef = erlang:monitor(process, PoolName), safe_send(PoolName, {state, {self(), MRef}}), receive - {MRef, State} -> - erlang:demonitor(MRef), - PoolPid = whereis(PoolName), - {links, Links} = process_info(PoolPid, links), - {monitors, Monitors} = process_info(PoolPid, monitors), - [ - process_info(PoolPid, message_queue_len), - {links, length(Links)}, - {monitors, length(Monitors)}, - {free, length(State#pool_st.free)}, - {num_connected, State#pool_st.num_connected}, - {num_connecting, State#pool_st.num_connecting}, - - {num_failed_connecting, State#pool_st.num_failed_connecting} - ]; - {'DOWN', MRef, _, _, _} -> - {error, down} - after ?DEFAULT_TIMEOUT -> - erlang:demonitor(MRef), - {error, timeout} + {MRef, State} -> + erlang:demonitor(MRef), + PoolPid = whereis(PoolName), + {links, Links} = process_info(PoolPid, links), + {monitors, Monitors} = process_info(PoolPid, monitors), + [process_info(PoolPid, message_queue_len), + {links, length(Links)}, + {monitors, length(Monitors)}, + {free, length(State#pool_st.free)}, + {num_connected, State#pool_st.num_connected}, + {num_connecting, State#pool_st.num_connecting}, + {num_failed_connecting, State#pool_st.num_failed_connecting}]; + {'DOWN', MRef, _, _, _} -> + {error, down} + after ?DEFAULT_TIMEOUT -> + erlang:demonitor(MRef), + {error, timeout} end. 
- %%%============================================================================= %%% Internal functions %%%============================================================================= pool_loop(State, Parent, Deb) -> receive - {connect_success, Conn} -> - ?MODULE:pool_loop(connect_success(State, Conn), Parent, Deb); - connect_failed -> - ?MODULE:pool_loop(connect_failed(State), Parent, Deb); - connect -> - NumConnecting = State#pool_st.num_connecting, - Connected = State#pool_st.num_connected, - MaxConns = State#pool_st.max_connections, - case (NumConnecting + Connected) > MaxConns of - true -> - ?MODULE:pool_loop( - State#pool_st{num_connecting = NumConnecting - 1}, Parent, Deb); - false -> - spawn_connect(State#pool_st.cluster, - State#pool_st.pool, - State#pool_st.worker_module, - State#pool_st.host, - State#pool_st.port, - State#pool_st.callback_info), - ?MODULE:pool_loop(State#pool_st{ - last_connection_attempt = erlang:system_time(millisecond)}, - Parent, Deb) - end; - reload_pool_min_max_settings -> - ?MODULE:pool_loop(reload_pool_min_max_settings(State), Parent, Deb); - {checkout, From} -> - ?MODULE:pool_loop(get_connection(State, From), Parent, Deb); - {checkin, Pid, Conn} -> - ?MODULE:pool_loop(checkin(State, Pid, Conn), Parent, Deb); - {checkin_closed, Pid} -> - ?MODULE:pool_loop(checkin_closed_pid(State, Pid), Parent, Deb); - {checkout_cancel, Pid} -> - ?MODULE:pool_loop(checkout_cancel(State, Pid), Parent, Deb); - expire -> - ?MODULE:pool_loop(schedule_expiration(expire_connections(State)), Parent, Deb); - {state, {Pid, Ref}} -> - safe_send(Pid, {Ref, State}), - ?MODULE:pool_loop(State, Parent, Deb); - {'DOWN', _, _, Pid, _} -> - ?MODULE:pool_loop(down(State, Pid), Parent, Deb); - {'EXIT', Parent, Reason} -> - exit(Reason); - %% Assume exit signal from connecting process - {'EXIT', _, Reason} when Reason /= normal -> - ?MODULE:pool_loop(connect_failed(State), Parent, Deb); - {system, From, Msg} -> - sys:handle_system_msg(Msg, From, Parent, ?MODULE, 
Deb, State); - _ -> - ?MODULE:pool_loop(State, Parent, Deb) + {connect_success, Conn} -> + ?MODULE:pool_loop(connect_success(State, Conn), Parent, Deb); + connect_failed -> + ?MODULE:pool_loop(connect_failed(State), Parent, Deb); + connect -> + NumConnecting = State#pool_st.num_connecting, + Connected = State#pool_st.num_connected, + MaxConns = State#pool_st.max_connections, + case NumConnecting + Connected > MaxConns of + true -> + ?MODULE:pool_loop(State#pool_st{num_connecting = NumConnecting - 1}, Parent, Deb); + false -> + spawn_connect(State#pool_st.cluster, + State#pool_st.pool, + State#pool_st.worker_module, + State#pool_st.host, + State#pool_st.port, + State#pool_st.callback_info), + ?MODULE:pool_loop(State#pool_st{last_connection_attempt = + erlang:system_time(millisecond)}, + Parent, + Deb) + end; + reload_pool_min_max_settings -> + ?MODULE:pool_loop(reload_pool_min_max_settings(State), Parent, Deb); + {checkout, From} -> + ?MODULE:pool_loop(get_connection(State, From), Parent, Deb); + {checkin, Pid, Conn} -> + ?MODULE:pool_loop(checkin(State, Pid, Conn), Parent, Deb); + {checkin_closed, Pid} -> + ?MODULE:pool_loop(checkin_closed_pid(State, Pid), Parent, Deb); + {checkout_cancel, Pid} -> + ?MODULE:pool_loop(checkout_cancel(State, Pid), Parent, Deb); + expire -> + ?MODULE:pool_loop(schedule_expiration(expire_connections(State)), Parent, Deb); + {state, {Pid, Ref}} -> + safe_send(Pid, {Ref, State}), + ?MODULE:pool_loop(State, Parent, Deb); + {'DOWN', _, _, Pid, _} -> + ?MODULE:pool_loop(down(State, Pid), Parent, Deb); + {'EXIT', Parent, Reason} -> + exit(Reason); + %% Assume exit signal from connecting process + {'EXIT', _, Reason} when Reason /= normal -> + ?MODULE:pool_loop(connect_failed(State), Parent, Deb); + {system, From, Msg} -> + sys:handle_system_msg(Msg, From, Parent, ?MODULE, Deb, State); + _ -> + ?MODULE:pool_loop(State, Parent, Deb) end. 
- get_connection(#pool_st{free = Free} = State, From) when Free /= [] -> give(State, From); get_connection(State, {Pid, Ref} = _From) -> safe_send(Pid, {Ref, {reject, State}}), maybe_spawn_connect(State). - -maybe_spawn_connect(#pool_st{ - free = Free, - num_connecting = Connecting, - num_connected = Connected, - max_connections = MaxConn, - min_connections = MinConn} = State) -> +maybe_spawn_connect(#pool_st{free = Free, + num_connecting = Connecting, + num_connected = Connected, + max_connections = MaxConn, + min_connections = MinConn} = + State) -> %% Length could be big.. better to not have more than a few dozens of sockets %% May be worth to keep track of the length of the free in a counter. - FreeSockets = length(Free), Needed = max(0, calculate_needed(FreeSockets, Connected, Connecting, MaxConn, MinConn)), maybe_spawn_connect(State, Needed, erlang:system_time(millisecond)). @@ -312,17 +297,18 @@ maybe_spawn_connect(#pool_st{ %% connection %% - There are in-flight connection attempts maybe_spawn_connect(State = #pool_st{min_connection_interval_ms = Min, - last_connection_attempt = Last, - num_connecting = Connecting}, + last_connection_attempt = Last, + num_connecting = Connecting}, Needed, - Now) when Min /= undefined, (Now - Last) < Min; - Connecting > 0; - Needed == 0-> + Now) + when Min /= undefined, Now - Last < Min; Connecting > 0; Needed == 0 -> State; maybe_spawn_connect(State = #pool_st{num_failed_connecting = NumFailed, reconnect_wait_time = WaitTime, - num_connecting = Connecting}, _Needed, _Now) - when NumFailed > 0 -> + num_connecting = Connecting}, + _Needed, + _Now) + when NumFailed > 0 -> %% Wait before reconnection if more than one successive %% connection attempt has failed. Don't open more than %% one connection until an attempt has succeeded again. 
@@ -334,180 +320,171 @@ maybe_spawn_connect(State = #pool_st{num_connecting = Connecting, cluster = ClusterName, host = Host, port = Port, - callback_info = CallbackInfo}, Needed, Now) -> + callback_info = CallbackInfo}, + Needed, + Now) -> spawn_connections(ClusterName, Pool, WrkModule, Host, Port, CallbackInfo, Needed), State#pool_st{num_connecting = Connecting + Needed, last_connection_attempt = Now}. - calculate_needed(FreeSockets, Connected, Connecting, MaxConn, MinConn) -> TotalSockets = Connected + Connecting, MaxAllowed = MaxConn - TotalSockets, IdleSockets = FreeSockets + Connecting, case MinConn - IdleSockets of - MaxNeeded when MaxNeeded > MaxAllowed -> - MaxAllowed; - MaxNeeded -> - MaxNeeded + MaxNeeded when MaxNeeded > MaxAllowed -> + MaxAllowed; + MaxNeeded -> + MaxNeeded end. connect_success(#pool_st{free = Free, num_connected = Num, - num_connecting = NumConnecting} = State, + num_connecting = NumConnecting} = + State, Conn) -> - State#pool_st{free = [Conn|Free], - num_connected = Num + 1, - num_connecting = NumConnecting - 1, - num_failed_connecting = 0}. - + State#pool_st{free = [Conn | Free], + num_connected = Num + 1, + num_connecting = NumConnecting - 1, + num_failed_connecting = 0}. -connect_failed(#pool_st{num_connecting = Num, - num_failed_connecting = NumFailed} = State) -> +connect_failed(#pool_st{num_connecting = Num, num_failed_connecting = NumFailed} = + State) -> maybe_spawn_connect(State#pool_st{num_connecting = Num - 1, num_failed_connecting = NumFailed + 1}). - checkin(#pool_st{busy = Busy, free = Free} = State, Pid, Conn) -> case dict:find(Pid, Busy) of - {ok, {MRef, _}} -> - erlang:demonitor(MRef), - State#pool_st{busy = dict:erase(Pid, Busy), - free = [Conn|Free]}; - error -> - State + {ok, {MRef, _}} -> + erlang:demonitor(MRef), + State#pool_st{busy = dict:erase(Pid, Busy), free = [Conn | Free]}; + error -> + State end. 
- checkin_closed_pid(#pool_st{busy = Busy, num_connected = Num} = State, Pid) -> case dict:find(Pid, Busy) of - {ok, {MRef, _}} -> - erlang:demonitor(MRef), - maybe_spawn_connect(State#pool_st{busy = dict:erase(Pid, Busy), - num_connected = Num - 1 - }); - error -> - State + {ok, {MRef, _}} -> + erlang:demonitor(MRef), + maybe_spawn_connect(State#pool_st{busy = dict:erase(Pid, Busy), num_connected = Num - 1}); + error -> + State end. - -down(#pool_st{busy = Busy, num_connected = Num, callback_info = CallbackInfo} = State, Pid) -> +down(#pool_st{busy = Busy, num_connected = Num, callback_info = CallbackInfo} = State, + Pid) -> case dict:find(Pid, Busy) of - {ok, {_, Conn}} -> - catch close(Conn, down), - ?LOG_EVENT(CallbackInfo, [socket, connect, close]), - NewState = State#pool_st{busy = dict:erase(Pid, Busy), - num_connected = Num - 1}, - maybe_spawn_connect(NewState); - error -> - State + {ok, {_, Conn}} -> + catch close(Conn, down), + ?LOG_EVENT(CallbackInfo, [socket, connect, close]), + NewState = State#pool_st{busy = dict:erase(Pid, Busy), num_connected = Num - 1}, + maybe_spawn_connect(NewState); + error -> + State end. - -give(#pool_st{free = [Conn|Free], - busy = Busy} = State, {Pid, Ref}) -> +give(#pool_st{free = [Conn | Free], busy = Busy} = State, {Pid, Ref}) -> MRef = erlang:monitor(process, Pid), safe_send(Pid, {Ref, Conn}), State#pool_st{busy = dict:store(Pid, {MRef, Conn}, Busy), free = Free}. - spawn_connect(ClusterName, Pool, WrkModule, Host, Port, CallbackInfo) -> MaxConnectionDelayTime = mero_conf:pool_max_connection_delay_time(ClusterName), do_spawn_connect(Pool, WrkModule, Host, Port, CallbackInfo, MaxConnectionDelayTime). 
do_spawn_connect(Pool, WrkModule, Host, Port, CallbackInfo, SleepTime) -> - spawn_link(fun() -> - case (SleepTime > 0) of - true -> - %% Wait before reconnect - timer:sleep(rand:uniform(SleepTime)); - false -> - ignore + spawn_link(fun () -> + case SleepTime > 0 of + true -> + %% Wait before reconnect + timer:sleep(rand:uniform(SleepTime)); + false -> + ignore end, try_connect(Pool, WrkModule, Host, Port, CallbackInfo) end). - spawn_connections(ClusterName, Pool, WrkModule, Host, Port, CallbackInfo, 1) -> spawn_connect(ClusterName, Pool, WrkModule, Host, Port, CallbackInfo); spawn_connections(ClusterName, Pool, WrkModule, Host, Port, CallbackInfo, Number) - when (Number > 0) -> + when Number > 0 -> SleepTime = mero_conf:pool_max_connection_delay_time(ClusterName), - [ do_spawn_connect(Pool, WrkModule, Host, Port, CallbackInfo, SleepTime) - || _Number <- lists:seq(1, Number) ]. - + [do_spawn_connect(Pool, WrkModule, Host, Port, CallbackInfo, SleepTime) + || _Number <- lists:seq(1, Number)]. try_connect(Pool, WrkModule, Host, Port, CallbackInfo) -> case connect(WrkModule, Host, Port, CallbackInfo) of - {ok, Client} -> - case controlling_process(WrkModule, Client, whereis(Pool)) of - ok -> - safe_send(Pool, {connect_success, #conn{worker_module = WrkModule, - client = Client, - pool = Pool, - updated = os:timestamp() - }}); - {error, Reason} -> - safe_send(Pool, connect_failed), - WrkModule:close(Client, Reason) - end; - {error, _Reason} -> - safe_send(Pool, connect_failed) + {ok, Client} -> + case controlling_process(WrkModule, Client, whereis(Pool)) of + ok -> + safe_send(Pool, + {connect_success, + #conn{worker_module = WrkModule, + client = Client, + pool = Pool, + updated = os:timestamp()}}); + {error, Reason} -> + safe_send(Pool, connect_failed), + WrkModule:close(Client, Reason) + end; + {error, _Reason} -> + safe_send(Pool, connect_failed) end. - connect(WrkModule, Host, Port, CallbackInfo) -> WrkModule:connect(Host, Port, CallbackInfo). 
controlling_process(WrkModule, WrkState, Parent) -> WrkModule:controlling_process(WrkState, Parent). - conn_time_to_live(ClusterName) -> case mero_conf:pool_connection_unused_max_time(ClusterName) of - infinity -> infinity; - Milliseconds -> Milliseconds * 1000 + infinity -> + infinity; + Milliseconds -> + Milliseconds * 1000 end. - schedule_expiration(State = #pool_st{cluster = ClusterName}) -> erlang:send_after(mero_conf:pool_expiration_interval(ClusterName), self(), expire), State. - -expire_connections(#pool_st{cluster = ClusterName, free = Conns, num_connected = Num} = State) -> +expire_connections(#pool_st{cluster = ClusterName, free = Conns, num_connected = Num} = + State) -> Now = os:timestamp(), try conn_time_to_live(ClusterName) of - TTL -> - case lists:foldl(fun filter_expired/2, {Now, TTL, [], []}, Conns) of - {_, _, [], _} -> State; - {_, _, ExpConns, ActConns} -> - spawn_link(fun() -> close_connections(ExpConns) end), - maybe_spawn_connect( - State#pool_st{free = ActConns, - num_connected = Num - length(ExpConns)}) - end + TTL -> + case lists:foldl(fun filter_expired/2, {Now, TTL, [], []}, Conns) of + {_, _, [], _} -> + State; + {_, _, ExpConns, ActConns} -> + spawn_link(fun () -> + close_connections(ExpConns) + end), + maybe_spawn_connect(State#pool_st{free = ActConns, + num_connected = Num - length(ExpConns)}) + end catch - error:badarg -> ok + error:badarg -> + ok end. - checkout_cancel(#pool_st{busy = Busy, free = Free} = State, Pid) -> case dict:find(Pid, Busy) of - {ok, {MRef, Conn}} -> - erlang:demonitor(MRef), - State#pool_st{busy = dict:erase(Pid, Busy), - free = [Conn|Free]}; - error -> - State + {ok, {MRef, Conn}} -> + erlang:demonitor(MRef), + State#pool_st{busy = dict:erase(Pid, Busy), free = [Conn | Free]}; + error -> + State end. 
- filter_expired(#conn{updated = Updated} = Conn, {Now, TTL, ExpConns, ActConns}) -> case timer:now_diff(Now, Updated) < TTL of - true -> {Now, TTL, ExpConns, [Conn | ActConns]}; - false -> {Now, TTL, [Conn | ExpConns], ActConns} + true -> + {Now, TTL, ExpConns, [Conn | ActConns]}; + false -> + {Now, TTL, [Conn | ExpConns], ActConns} end. - %% Note: If current # of connections < new min_connections, new ones will be created %% next time we call maybe_spawn_connect/1. %% If current # of connections > new max_connections, no action is taken to @@ -521,7 +498,8 @@ reload_pool_min_max_settings(State = #pool_st{cluster = ClusterName}) -> safe_send(PoolName, Cmd) -> catch PoolName ! Cmd. -close_connections([]) -> ok; +close_connections([]) -> + ok; close_connections([Conn | Conns]) -> catch close(Conn, expire), close_connections(Conns). @@ -530,14 +508,14 @@ is_config_valid(ClusterName) -> Initial = mero_conf:pool_initial_connections(ClusterName), Max = mero_conf:pool_max_connections(ClusterName), Min = mero_conf:pool_min_free_connections(ClusterName), - case (Min =< Initial) andalso (Initial =< Max) of - true -> - true; - false -> - error_logger:error_report( - [{error, invalid_config}, - {min_connections, Min}, - {max_connections, Max}, - {initial_connections, Initial}]), - false + case Min =< Initial andalso Initial =< Max of + true -> + true; + false -> + error_logger:error_report([{error, invalid_config}, + {min_connections, Min}, + {max_connections, Max}, + {initial_connections, Initial}]), + false end. + diff --git a/src/mero_stat.erl b/src/mero_stat.erl index 5db7a9f..4c0e087 100644 --- a/src/mero_stat.erl +++ b/src/mero_stat.erl @@ -1,8 +1,6 @@ -module(mero_stat). --export([ - noop/1, incr/1 - ]). +-export([noop/1, incr/1]). %%%=================================================================== %%% API @@ -15,6 +13,7 @@ incr(Key) -> noop(_Key) -> ok. 
+ %%%=================================================================== %%% Internal Functions %%%=================================================================== diff --git a/src/mero_sup.erl b/src/mero_sup.erl index 7655aec..a1f8fda 100644 --- a/src/mero_sup.erl +++ b/src/mero_sup.erl @@ -30,15 +30,11 @@ -author('Miriam Pena '). --export([ - start_link/1, - restart_child/1, - init/1 -]). +-export([start_link/1, restart_child/1, init/1]). -behaviour(supervisor). --ignore_xref([{mero_cluster, size,0}, +-ignore_xref([{mero_cluster, size, 0}, {mero_cluster, cluster_size, 0}, {mero_cluster, pools, 0}, {mero_cluster, server, 1}]). @@ -53,11 +49,9 @@ start_link(OrigConfig) -> ProcessedConfig = mero_conf:process_server_specs(OrigConfig), ok = mero_cluster:load_clusters(ProcessedConfig), - supervisor:start_link( - {local, ?MODULE}, - ?MODULE, - #{orig_config => OrigConfig, processed_config => ProcessedConfig} - ). + supervisor:start_link({local, ?MODULE}, + ?MODULE, + #{orig_config => OrigConfig, processed_config => ProcessedConfig}). -spec restart_child(ClusterName :: atom()) -> ok. restart_child(ClusterName) -> @@ -66,7 +60,6 @@ restart_child(ClusterName) -> {ok, _} = supervisor:start_child(?MODULE, cluster_sup_spec(ClusterName)), ok. - %%%=================================================================== %%% Supervisor callbacks %%%=================================================================== @@ -77,21 +70,18 @@ init(#{orig_config := OrigConfig, processed_config := ProcessedConfig}) -> {ok, {{one_for_one, 10, 10}, [MonitorSpec | ClusterSupSpecs]}}. cluster_sup_spec(ClusterName) -> - { - ClusterName, - {mero_cluster_sup, start_link, [ClusterName]}, - permanent, - 5000, - supervisor, - [mero_cluster_sup] - }. + {ClusterName, + {mero_cluster_sup, start_link, [ClusterName]}, + permanent, + 5000, + supervisor, + [mero_cluster_sup]}. 
monitor_spec(OrigConfig, ProcessedConfig) -> - { - mero_conf_monitor, - {mero_conf_monitor, start_link, [OrigConfig, ProcessedConfig]}, - permanent, - 5000, - worker, - [mero_conf_monitor] - }. + {mero_conf_monitor, + {mero_conf_monitor, start_link, [OrigConfig, ProcessedConfig]}, + permanent, + 5000, + worker, + [mero_conf_monitor]}. + diff --git a/src/mero_util.erl b/src/mero_util.erl index 712f5c3..1705001 100644 --- a/src/mero_util.erl +++ b/src/mero_util.erl @@ -6,8 +6,7 @@ %%% API %%%=================================================================== --spec foreach(fun((Elem :: term()) -> {break, term()} | continue), - list()) -> term(). +-spec foreach(fun((Elem :: term()) -> {break, term()} | continue), list()) -> term(). foreach(Fun, L) -> foreach(continue, Fun, L). @@ -33,5 +32,6 @@ foreach({break, Reason}, _Fun, _L) -> Reason; foreach(continue, _Fun, []) -> ok; -foreach(continue, Fun, [H|Rest]) -> +foreach(continue, Fun, [H | Rest]) -> foreach(Fun(H), Fun, Rest). + diff --git a/src/mero_wrk_tcp_binary.erl b/src/mero_wrk_tcp_binary.erl index 06f8a96..b48d64c 100755 --- a/src/mero_wrk_tcp_binary.erl +++ b/src/mero_wrk_tcp_binary.erl @@ -35,20 +35,17 @@ -behavior(mero_pool). %%% Start/stop functions --export([connect/3, - controlling_process/2, - transaction/3, - close/2]). - +-export([connect/3, controlling_process/2, transaction/3, close/2]). -record(client, {socket, pool, event_callback :: {module(), atom(), [term()]}}). --define(SOCKET_OPTIONS, [binary, - {packet, raw}, - {recbuf, 1024 * 32}, - {active, false}, - {reuseaddr, true}, - {nodelay, true}]). +-define(SOCKET_OPTIONS, + [binary, + {packet, raw}, + {recbuf, 1024 * 32}, + {active, false}, + {reuseaddr, true}, + {nodelay, true}]). 
%%%============================================================================= %%% External functions @@ -58,219 +55,193 @@ connect(Host, Port, CallbackInfo) -> ?LOG_EVENT(CallbackInfo, [socket, connecting]), case gen_tcp:connect(Host, Port, ?SOCKET_OPTIONS) of - {ok, Socket} -> - ?LOG_EVENT(CallbackInfo, [socket, connect, ok]), - {ok, #client{socket = Socket, event_callback = CallbackInfo}}; - {error, Reason} -> - ?LOG_EVENT(CallbackInfo, [socket, connect, error, {reason, Reason}]), - {error, Reason} + {ok, Socket} -> + ?LOG_EVENT(CallbackInfo, [socket, connect, ok]), + {ok, #client{socket = Socket, event_callback = CallbackInfo}}; + {error, Reason} -> + ?LOG_EVENT(CallbackInfo, [socket, connect, error, {reason, Reason}]), + {error, Reason} end. - controlling_process(Client, Pid) -> case gen_tcp:controlling_process(Client#client.socket, Pid) of - ok -> - ok; - {error, Reason} -> - ?LOG_EVENT( - Client#client.event_callback, - [socket, controlling_process, error, {reason, Reason}]), - {error, Reason} + ok -> + ok; + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, + [socket, controlling_process, error, {reason, Reason}]), + {error, Reason} end. 
transaction(Client, delete, [Key, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_DELETE, {Key}}, TimeLimit) of - {ok, #mero_item{key = <<>>, value = <<>>}} -> - {Client, ok}; - {ok, #mero_item{key = <<>>, value = undefined}} -> - {Client, {error, not_found}}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {error, Reason} -> - {error, Reason} + {ok, #mero_item{key = <<>>, value = <<>>}} -> + {Client, ok}; + {ok, #mero_item{key = <<>>, value = undefined}} -> + {Client, {error, not_found}}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {error, Reason} -> + {error, Reason} end; - - transaction(Client, increment_counter, [Key, Value, Initial, ExpTime, TimeLimit]) -> - case send_receive(Client, {?MEMCACHE_INCREMENT, {Key, Value, Initial, ExpTime}}, TimeLimit) of - {error, Reason} -> - {error, Reason}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {ok, #mero_item{key = <<>>, value = ActualValue}} -> - {Client, {ok, to_integer(ActualValue)}} + case send_receive(Client, + {?MEMCACHE_INCREMENT, {Key, Value, Initial, ExpTime}}, + TimeLimit) + of + {error, Reason} -> + {error, Reason}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {ok, #mero_item{key = <<>>, value = ActualValue}} -> + {Client, {ok, to_integer(ActualValue)}} end; - transaction(Client, get, [Key, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_GET, {[Key]}}, TimeLimit) of - {ok, #mero_item{key = <<>>} = Result} -> - {Client, Result#mero_item{key = Key}}; - {ok, #mero_item{} = Result} -> - {Client, Result}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {error, Reason} -> - {error, Reason} + {ok, #mero_item{key = <<>>} = Result} -> + {Client, Result#mero_item{key = Key}}; + {ok, #mero_item{} = Result} -> + {Client, Result}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, set, [Key, Value, ExpTime, TimeLimit, CAS]) -> case send_receive(Client, {?MEMCACHE_SET, {Key, Value, 
ExpTime, CAS}}, TimeLimit) of - {ok, #mero_item{key = <<>>, value = <<>>}} -> - {Client, ok}; - {ok, #mero_item{key = <<>>, value = undefined}} when CAS /= undefined -> - %% attempt to set a key using CAS, but key wasn't present. - {Client, {error, not_found}}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {error, Reason} -> - {error, Reason} + {ok, #mero_item{key = <<>>, value = <<>>}} -> + {Client, ok}; + {ok, #mero_item{key = <<>>, value = undefined}} when CAS /= undefined -> + %% attempt to set a key using CAS, but key wasn't present. + {Client, {error, not_found}}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, add, [Key, Value, ExpTime, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_ADD, {Key, Value, ExpTime}}, TimeLimit) of - {ok, #mero_item{key = <<>>, value = <<>>}} -> - {Client, ok}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {error, Reason} -> - {error, Reason} + {ok, #mero_item{key = <<>>, value = <<>>}} -> + {Client, ok}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, flush_all, [TimeLimit]) -> case send_receive(Client, {?MEMCACHE_FLUSH_ALL, {}}, TimeLimit) of - {ok, #mero_item{key = <<>>, value = <<>>}} -> - {Client, ok}; - {ok, {error, Reason}} -> - {Client, {error, Reason}}; - {error, Reason} -> - {error, Reason} + {ok, #mero_item{key = <<>>, value = <<>>}} -> + {Client, ok}; + {ok, {error, Reason}} -> + {Client, {error, Reason}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, async_mset, [KVECs]) -> case async_mset(Client, KVECs) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_madd, [KVECs]) -> case async_madd(Client, KVECs) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, 
Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_mget, [Keys]) -> case async_mget(Client, Keys) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_increment, [Keys]) -> case async_increment(Client, Keys) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_delete, [Keys]) -> case async_delete(Client, Keys) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_mget_response, [Keys, Timeout]) -> case async_mget_response(Client, Keys, Timeout) of - {error, Reason} -> - {error, Reason}; - {ok, Results} -> - {Client, Results} + {error, Reason} -> + {error, Reason}; + {ok, Results} -> + {Client, Results} end; - transaction(Client, async_blank_response, [Keys, Timeout]) -> {ok, Results} = async_blank_response(Client, Keys, Timeout), {Client, Results}; - transaction(Client, async_mset_response, [Items, Timeout]) -> case async_mset_response(Client, Items, Timeout) of - {error, Reason} -> - {error, Reason}; - {ok, Results} -> - {Client, Results} + {error, Reason} -> + {error, Reason}; + {ok, Results} -> + {Client, Results} end. - close(Client, Reason) -> ?LOG_EVENT(Client#client.event_callback, [closing_socket, {reason, Reason}]), gen_tcp:close(Client#client.socket). 
- %%%============================================================================= %%% Internal functions %%%============================================================================= send_receive(Client, {Op, _Args} = Cmd, TimeLimit) -> try - Data = pack(Cmd), - ok = send(Client, Data), - {ok, receive_response(Client, Op, <<>>, TimeLimit)} + Data = pack(Cmd), + ok = send(Client, Data), + {ok, receive_response(Client, Op, <<>>, TimeLimit)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. - pack({?MEMCACHE_INCREMENT, {Key, Value, Initial, ExpTime}}) -> IntValue = value_to_integer(Value), IntInitial = value_to_integer(Initial), IntExpTime = value_to_integer(ExpTime), pack(<>, ?MEMCACHE_INCREMENT, Key); - pack({?MEMCACHE_INCREMENTQ, {Key, Value, Initial, ExpTime}}) -> IntValue = value_to_integer(Value), IntInitial = value_to_integer(Initial), IntExpTime = value_to_integer(ExpTime), pack(<>, ?MEMCACHE_INCREMENTQ, Key); - pack({?MEMCACHE_DELETE, {Key}}) -> pack(<<>>, ?MEMCACHE_DELETE, Key); - pack({?MEMCACHE_DELETEQ, {Key}}) -> pack(<<>>, ?MEMCACHE_DELETEQ, Key); - pack({?MEMCACHE_ADD, {Key, Value, ExpTime}}) -> pack({?MEMCACHE_ADD, {0, Key, Value, ExpTime, undefined}}); - pack({?MEMCACHE_SET, {Key, Value, ExpTime, CAS}}) -> pack({?MEMCACHE_SET, {0, Key, Value, ExpTime, CAS}}); - pack({?MEMCACHE_GET, {[Key]}}) -> pack(<<>>, ?MEMCACHE_GET, Key); - -pack({Op, Key}) when Op == ?MEMCACHE_GETKQ; - Op == ?MEMCACHE_GETK -> +pack({Op, Key}) when Op == ?MEMCACHE_GETKQ; Op == ?MEMCACHE_GETK -> pack(<<>>, Op, Key); - -pack({Op, {N, Key, Value, ExpTime, CAS}}) when Op == ?MEMCACHE_SETQ; - Op == ?MEMCACHE_SET; - Op == ?MEMCACHE_ADDQ; - Op == ?MEMCACHE_ADD -> +pack({Op, {N, Key, Value, ExpTime, CAS}}) + when Op == ?MEMCACHE_SETQ; + Op == ?MEMCACHE_SET; + Op == ?MEMCACHE_ADDQ; + Op == ?MEMCACHE_ADD -> IntExpTime = value_to_integer(ExpTime), pack(<<16#DEADBEEF:32, IntExpTime:32>>, Op, Key, Value, CAS, N); - pack({?MEMCACHE_FLUSH_ALL, 
{}}) -> %% Flush inmediately by default ExpirationTime = 16#00, pack(<>, ?MEMCACHE_FLUSH_ALL, <<>>). - pack(Extras, Operator, Key) -> pack(Extras, Operator, Key, <<>>). @@ -280,18 +251,18 @@ pack(Extras, Operator, Key, Value) -> pack(Extras, Operator, Key, Value, CAS) -> pack(Extras, Operator, Key, Value, CAS, 0). -pack(Extras, Operator, Key, Value, CAS, Index) - when is_integer(Index), Index >= 0 -> +pack(Extras, Operator, Key, Value, CAS, Index) when is_integer(Index), Index >= 0 -> KeySize = size(Key), ExtrasSize = size(Extras), Body = <>, BodySize = size(Body), CASValue = case CAS of - undefined -> 16#00; - CAS when is_integer(CAS) -> CAS + undefined -> + 16#00; + CAS when is_integer(CAS) -> + CAS end, - << - 16#80:8, % magic (0) + <<16#80:8, % magic (0) Operator:8, % opcode (1) KeySize:16, % key length (2,3) ExtrasSize:8, % extra length (4) @@ -300,75 +271,67 @@ pack(Extras, Operator, Key, Value, CAS, Index) BodySize:32, % total body (8-11) Index:32, % opaque (12-15) CASValue:64, % CAS (16-23) - Body:BodySize/binary - >>. - + Body:BodySize/binary>>. send(Client, Data) -> case gen_tcp:send(Client#client.socket, Data) of - ok -> - ok; - {error, Reason} -> - ?LOG_EVENT(Client#client.event_callback, [memcached_send_error, {reason, Reason}]), - throw({failed, {send, Reason}}) + ok -> + ok; + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, [memcached_send_error, {reason, Reason}]), + throw({failed, {send, Reason}}) end. - cas_value(16#00) -> undefined; cas_value(Value) when is_integer(Value) andalso Value > 0 -> Value. 
- receive_response(Client, Op, Buffer, TimeLimit) -> case recv_bytes(Client, 24, Buffer, TimeLimit) of - {<< - 16#81:8, % magic (0) - Op:8, % opcode (1) - KeySize:16, % key length (2,3) - ExtrasSize:8, % extra length (4) - _DT:8, % data type (5) - StatusCode:16,% status (6,7) - BodySize:32, % total body (8-11) - _Opq:32, % opaque (12-15) - CAS:64 % CAS (16-23) - >>, Rest} -> - case recv_bytes(Client, BodySize, Rest, TimeLimit) of - {<<_Extras:ExtrasSize/binary, Key:KeySize/binary, Value/binary>>, <<>>} -> - case response_status(StatusCode) of - ok -> - #mero_item{key = Key, value = Value, cas = cas_value(CAS)}; - {error, not_found} -> - #mero_item{key = Key, cas = cas_value(CAS)}; - Error -> - Error - end; - Data -> - throw({failed, {unexpected_body, Data}}) - end; - Data -> - throw({failed, {unexpected_header, Data, {expected, Op}}}) + {<<16#81:8, % magic (0) + Op:8, % opcode (1) + KeySize:16, % key length (2,3) + ExtrasSize:8, % extra length (4) + _DT:8, % data type (5) + StatusCode:16, % status (6,7) + BodySize:32, % total body (8-11) + _Opq:32, % opaque (12-15) + CAS:64>>, % CAS (16-23) + Rest} -> + case recv_bytes(Client, BodySize, Rest, TimeLimit) of + {<<_Extras:ExtrasSize/binary, Key:KeySize/binary, Value/binary>>, <<>>} -> + case response_status(StatusCode) of + ok -> + #mero_item{key = Key, value = Value, cas = cas_value(CAS)}; + {error, not_found} -> + #mero_item{key = Key, cas = cas_value(CAS)}; + Error -> + Error + end; + Data -> + throw({failed, {unexpected_body, Data}}) + end; + Data -> + throw({failed, {unexpected_header, Data, {expected, Op}}}) end. 
recv_bytes(Client, NumBytes, Buffer, TimeLimit) -> case Buffer of - <> -> - {Data, Rest}; - _ -> - Timeout = mero_conf:millis_to(TimeLimit), - case gen_tcp_recv(Client#client.socket, Timeout) of - {ok, Bin} -> - recv_bytes(Client, NumBytes, <>, TimeLimit); - {error, Reason} -> - ?LOG_EVENT( - Client#client.event_callback, [memcached_receive_error, {reason, Reason}]), - throw({failed, {receive_bytes, Reason}}) - end + <> -> + {Data, Rest}; + _ -> + Timeout = mero_conf:millis_to(TimeLimit), + case gen_tcp_recv(Client#client.socket, Timeout) of + {ok, Bin} -> + recv_bytes(Client, NumBytes, <>, TimeLimit); + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, + [memcached_receive_error, {reason, Reason}]), + throw({failed, {receive_bytes, Reason}}) + end end. - - - value_to_integer(Value) -> binary_to_integer(binary:replace(Value, <<0>>, <<>>)). @@ -380,63 +343,64 @@ to_integer(Binary) when is_binary(Binary) -> gen_tcp_recv(Socket, Timeout) -> gen_tcp:recv(Socket, 0, Timeout). - async_mset(Client, NKVECs) -> try - {ok, send_mset(Client, NKVECs)} + {ok, send_mset(Client, NKVECs)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. async_madd(Client, NKVECs) -> try - {ok, send_madd(Client, NKVECs)} + {ok, send_madd(Client, NKVECs)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. async_mget(Client, Keys) -> try - {ok, send_gets(Client, Keys)} + {ok, send_gets(Client, Keys)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. async_delete(Client, Keys) -> try - {ok, lists:foldl(fun(K, ok) -> send(Client, pack({?MEMCACHE_DELETEQ, {K}})) end, ok, Keys)} + {ok, + lists:foldl(fun (K, ok) -> + send(Client, pack({?MEMCACHE_DELETEQ, {K}})) + end, + ok, + Keys)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. 
async_increment(Client, Keys) -> - try { - ok, - lists:foreach( - fun({K, Value, Initial, ExpTime}) -> - send(Client, pack({?MEMCACHE_INCREMENTQ, {K, Value, Initial, ExpTime}})) - end, Keys) - } + try + {ok, + lists:foreach(fun ({K, Value, Initial, ExpTime}) -> + send(Client, + pack({?MEMCACHE_INCREMENTQ, {K, Value, Initial, ExpTime}})) + end, + Keys)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. - - multipack([Item], _QuietOp, NoisyOp) -> [pack({NoisyOp, Item})]; -multipack([Item|Rest], QuietOp, NoisyOp) -> +multipack([Item | Rest], QuietOp, NoisyOp) -> [pack({QuietOp, Item}) | multipack(Rest, QuietOp, NoisyOp)]. send_quietly_butlast(Client, Items, QuietOp, NoisyOp) -> ok = send(Client, multipack(Items, QuietOp, NoisyOp)). - send_mset(Client, Items) -> send_quietly_butlast(Client, Items, ?MEMCACHE_SETQ, ?MEMCACHE_SET). @@ -446,67 +410,64 @@ send_madd(Client, Items) -> send_gets(Client, Keys) -> send_quietly_butlast(Client, Keys, ?MEMCACHE_GETKQ, ?MEMCACHE_GETK). - async_blank_response(_Client, _Keys, _TimeLimit) -> {ok, [ok]}. - async_mget_response(Client, Keys, TimeLimit) -> try - {ok, receive_mget_response(Client, TimeLimit, Keys, <<>>, [])} + {ok, receive_mget_response(Client, TimeLimit, Keys, <<>>, [])} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. 
receive_mget_response(Client, TimeLimit, Keys, Buffer, Acc) -> case recv_bytes(Client, 24, Buffer, TimeLimit) of - {<< - 16#81:8, % magic (0) - Op:8, % opcode (1) - KeySize:16, % key length (2,3) - ExtrasSize:8, % extra length (4) - _DT:8, % data type (5) - Status:16, % status (6,7) - BodySize:32, % total body (8-11) - _Opq:32, % opaque (12-15) - CAS:64 % CAS (16-23) - >>, BufferRest} = Data -> - case recv_bytes(Client, BodySize, BufferRest, TimeLimit) of - { - <<_Extras:ExtrasSize/binary, Key:KeySize/binary, ValueReceived/binary>>, - BufferRest2 - } -> - {Key, Value} = filter_by_status(Status, Op, Key, ValueReceived), - Responses = [#mero_item{key = Key, value = Value, cas = cas_value(CAS)} - | Acc], - NKeys = lists:delete(Key, Keys), - case Op of - %% On silent we expect more values - ?MEMCACHE_GETKQ -> - receive_mget_response(Client, TimeLimit, NKeys, BufferRest2, Responses); - %% This was the last one!. Ensure there is no further data - ?MEMCACHE_GETK when BufferRest2 == <<>> -> - Responses ++ [#mero_item{key = KeyIn} || KeyIn <- NKeys] - end; - Data -> - throw({failed, {unexpected_body, Data}}) - end; - Data -> - throw({failed, {unexpected_header, Data}}) + {<<16#81:8, % magic (0) + Op:8, % opcode (1) + KeySize:16, % key length (2,3) + ExtrasSize:8, % extra length (4) + _DT:8, % data type (5) + Status:16, % status (6,7) + BodySize:32, % total body (8-11) + _Opq:32, % opaque (12-15) + CAS:64>>, % CAS (16-23) + BufferRest} = + Data -> + case recv_bytes(Client, BodySize, BufferRest, TimeLimit) of + {<<_Extras:ExtrasSize/binary, Key:KeySize/binary, ValueReceived/binary>>, + BufferRest2} -> + {Key, Value} = filter_by_status(Status, Op, Key, ValueReceived), + Responses = [#mero_item{key = Key, value = Value, cas = cas_value(CAS)} | Acc], + NKeys = lists:delete(Key, Keys), + case Op of + %% On silent we expect more values + ?MEMCACHE_GETKQ -> + receive_mget_response(Client, TimeLimit, NKeys, BufferRest2, Responses); + %% This was the last one!. 
Ensure there is no further data + ?MEMCACHE_GETK when BufferRest2 == <<>> -> + Responses ++ [#mero_item{key = KeyIn} || KeyIn <- NKeys] + end; + Data -> + throw({failed, {unexpected_body, Data}}) + end; + Data -> + throw({failed, {unexpected_header, Data}}) end. -filter_by_status(?NO_ERROR, _Op, Key, ValueReceived) -> {Key, ValueReceived}; -filter_by_status(?NOT_FOUND, _Op, Key, _ValueReceived) -> {Key, undefined}; -filter_by_status(Status, _Op, _Key, _ValueReceived) -> throw({failed, {response_status, Status}}). - +filter_by_status(?NO_ERROR, _Op, Key, ValueReceived) -> + {Key, ValueReceived}; +filter_by_status(?NOT_FOUND, _Op, Key, _ValueReceived) -> + {Key, undefined}; +filter_by_status(Status, _Op, _Key, _ValueReceived) -> + throw({failed, {response_status, Status}}). async_mset_response(Client, NKVECs, TimeLimit) -> try - {ok, receive_mset_response(Client, TimeLimit, NKVECs, <<>>, [])} + {ok, receive_mset_response(Client, TimeLimit, NKVECs, <<>>, [])} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. receive_mset_response(Client, TimeLimit, NKVECs, Buffer, Acc) -> @@ -523,52 +484,57 @@ receive_mset_response(Client, TimeLimit, NKVECs, Buffer, Acc) -> %% where N was set as opaque data in the request associated with Key. %% case recv_bytes(Client, 24, Buffer, TimeLimit) of - {<< - 16#81:8, % magic (0) - Op:8, % opcode (1) - KeySize:16, % key length (2,3) - ExtrasSize:8, % extra length (4) - _DT:8, % data type (5) - StatusCode:16,% status (6,7) - BodySize:32, % total body (8-11) - Index:32, % opaque (12-15) - _CAS:64 % CAS (16-23) - >>, Rest} -> - case recv_bytes(Client, BodySize, Rest, TimeLimit) of - {<<_Extras:ExtrasSize/binary, _Key:KeySize/binary, _Value/binary>>, Rest2} -> - %% the response to set/add/replace should have no extras, no key, and - %% no value, but may have a body if an error occurred. 
- NAcc = [{Index, response_status(StatusCode)} | Acc], - NItems = lists:keydelete(Index, 1, NKVECs), - case Op of - Op when Op == ?MEMCACHE_SETQ; - Op == ?MEMCACHE_ADDQ -> - %% we are receiving a response for a 'quiet' command, meaning the - %% associated request failed. - receive_mset_response(Client, TimeLimit, NItems, Rest2, NAcc); - - Op when Rest2 == <<>>, Op == ?MEMCACHE_SET; - Rest2 == <<>>, Op == ?MEMCACHE_ADD -> - %% last response. any other request which had no response was - %% successful. - NAcc ++ [{element(1, Item), ok} - || Item <- NItems] - end; - Data -> - throw({failed, {unexpected_body, Data}}) - end; - Data -> - throw({failed, {unexpected_header, Data}}) + {<<16#81:8, % magic (0) + Op:8, % opcode (1) + KeySize:16, % key length (2,3) + ExtrasSize:8, % extra length (4) + _DT:8, % data type (5) + StatusCode:16, % status (6,7) + BodySize:32, % total body (8-11) + Index:32, % opaque (12-15) + _CAS:64>>, % CAS (16-23) + Rest} -> + case recv_bytes(Client, BodySize, Rest, TimeLimit) of + {<<_Extras:ExtrasSize/binary, _Key:KeySize/binary, _Value/binary>>, Rest2} -> + %% the response to set/add/replace should have no extras, no key, and + %% no value, but may have a body if an error occurred. + NAcc = [{Index, response_status(StatusCode)} | Acc], + NItems = lists:keydelete(Index, 1, NKVECs), + case Op of + Op when Op == ?MEMCACHE_SETQ; Op == ?MEMCACHE_ADDQ -> + %% we are receiving a response for a 'quiet' command, meaning the + %% associated request failed. + receive_mset_response(Client, TimeLimit, NItems, Rest2, NAcc); + Op when Rest2 == <<>>, Op == ?MEMCACHE_SET; Rest2 == <<>>, Op == ?MEMCACHE_ADD -> + %% last response. any other request which had no response was + %% successful. + NAcc ++ [{element(1, Item), ok} || Item <- NItems] + end; + Data -> + throw({failed, {unexpected_body, Data}}) + end; + Data -> + throw({failed, {unexpected_header, Data}}) end. 
+response_status(?NO_ERROR) -> + ok; +response_status(?NOT_FOUND) -> + {error, not_found}; +response_status(?KEY_EXISTS) -> + {error, already_exists}; +response_status(?VALUE_TOO_LARGE) -> + {error, value_too_large}; +response_status(?INVALID_ARGUMENTS) -> + {error, invalid_arguments}; +response_status(?NOT_STORED) -> + {error, not_stored}; +response_status(?NON_NUMERIC_INCR) -> + {error, incr_decr_on_non_numeric_value}; +response_status(?UNKNOWN_COMMAND) -> + {error, unknown_command}; +response_status(?OOM) -> + {error, out_of_memory}; +response_status(StatusCode) -> + throw({failed, {response_status, StatusCode}}). -response_status(?NO_ERROR) -> ok; -response_status(?NOT_FOUND) -> {error, not_found}; -response_status(?KEY_EXISTS) -> {error, already_exists}; -response_status(?VALUE_TOO_LARGE) -> {error, value_too_large}; -response_status(?INVALID_ARGUMENTS) -> {error, invalid_arguments}; -response_status(?NOT_STORED) -> {error, not_stored}; -response_status(?NON_NUMERIC_INCR) -> {error, incr_decr_on_non_numeric_value}; -response_status(?UNKNOWN_COMMAND) -> {error, unknown_command}; -response_status(?OOM) -> {error, out_of_memory}; -response_status(StatusCode) -> throw({failed, {response_status, StatusCode}}). diff --git a/src/mero_wrk_tcp_txt.erl b/src/mero_wrk_tcp_txt.erl index 10988fb..92f7d12 100755 --- a/src/mero_wrk_tcp_txt.erl +++ b/src/mero_wrk_tcp_txt.erl @@ -35,260 +35,263 @@ -behavior(mero_pool). %%% Start/stop functions --export([connect/3, - controlling_process/2, - transaction/3, - close/2]). +-export([connect/3, controlling_process/2, transaction/3, close/2]). -record(client, {socket, pool, event_callback}). --define(SOCKET_OPTIONS, [binary, - {packet, raw}, - {active, false}, - {reuseaddr, true}, - {nodelay, true}]). +-define(SOCKET_OPTIONS, + [binary, {packet, raw}, {active, false}, {reuseaddr, true}, {nodelay, true}]). 
%%%============================================================================= %%% External functions %%%============================================================================= - %% API functions connect(Host, Port, CallbackInfo) -> ?LOG_EVENT(CallbackInfo, [socket, connecting]), case gen_tcp:connect(Host, Port, ?SOCKET_OPTIONS) of - {ok, Socket} -> - ?LOG_EVENT(CallbackInfo, [socket, connect, ok]), - {ok, #client{socket = Socket, event_callback = CallbackInfo}}; - {error, Reason} -> - ?LOG_EVENT(CallbackInfo, [socket, connect, error, {reason, Reason}]), - {error, Reason} + {ok, Socket} -> + ?LOG_EVENT(CallbackInfo, [socket, connect, ok]), + {ok, #client{socket = Socket, event_callback = CallbackInfo}}; + {error, Reason} -> + ?LOG_EVENT(CallbackInfo, [socket, connect, error, {reason, Reason}]), + {error, Reason} end. - controlling_process(Client, Pid) -> case gen_tcp:controlling_process(Client#client.socket, Pid) of - ok -> - ok; - {error, Reason} -> - ?LOG_EVENT( - Client#client.event_callback, - [socket, controlling_process, error, {reason, Reason}]), - {error, Reason} + ok -> + ok; + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, + [socket, controlling_process, error, {reason, Reason}]), + {error, Reason} end. 
transaction(Client, increment_counter, [Key, Value, Initial, ExpTime, TimeLimit]) -> %% First attempt case send_receive(Client, {?MEMCACHE_INCREMENT, {Key, Value}}, TimeLimit) of - {error, not_found} -> - %% Key does not exist, create the key - case send_receive(Client, {?MEMCACHE_ADD, {Key, Initial, ExpTime}}, TimeLimit) of - {ok, stored} -> - {Client, {ok, to_integer(Initial)}}; - {error, not_stored} -> - %% Key was already created by other thread, - %% Second attempt (just in case of someone added right at that time) - case send_receive(Client, {?MEMCACHE_INCREMENT, {Key, Value}}, TimeLimit) of - {error, Reason} -> - {error, Reason}; - {ok, NValue} -> - {Client, {ok, to_integer(NValue)}} - end; - {error, Reason} -> - {error, Reason} - end; - {error, Reason} -> - {error, Reason}; - {ok, NValue} -> - {Client, {ok, to_integer(NValue)}} + {error, not_found} -> + %% Key does not exist, create the key + case send_receive(Client, {?MEMCACHE_ADD, {Key, Initial, ExpTime}}, TimeLimit) of + {ok, stored} -> + {Client, {ok, to_integer(Initial)}}; + {error, not_stored} -> + %% Key was already created by other thread, + %% Second attempt (just in case of someone added right at that time) + case send_receive(Client, {?MEMCACHE_INCREMENT, {Key, Value}}, TimeLimit) of + {error, Reason} -> + {error, Reason}; + {ok, NValue} -> + {Client, {ok, to_integer(NValue)}} + end; + {error, Reason} -> + {error, Reason} + end; + {error, Reason} -> + {error, Reason}; + {ok, NValue} -> + {Client, {ok, to_integer(NValue)}} end; - -transaction(Client, delete, [Key, TimeLimit]) -> +transaction(Client, delete, [Key, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_DELETE, {Key}}, TimeLimit) of - {ok, deleted} -> - {Client, ok}; - {error, not_found} -> - {Client, {error, not_found}}; - {error, Reason} -> - {error, Reason} + {ok, deleted} -> + {Client, ok}; + {error, not_found} -> + {Client, {error, not_found}}; + {error, Reason} -> + {error, Reason} end; - -transaction(Client, mdelete, [Keys, 
TimeLimit]) -> - Resp = mero_util:foreach(fun(Key) -> - case send_receive(Client, {?MEMCACHE_DELETE, {Key}}, TimeLimit) of - {ok, deleted} -> - continue; - {error, not_found} -> - continue; - {error, Reason} -> - {break, {error, Reason}} - end - end, Keys), +transaction(Client, mdelete, [Keys, TimeLimit]) -> + Resp = mero_util:foreach(fun (Key) -> + case send_receive(Client, {?MEMCACHE_DELETE, {Key}}, TimeLimit) + of + {ok, deleted} -> + continue; + {error, not_found} -> + continue; + {error, Reason} -> + {break, {error, Reason}} + end + end, + Keys), {Client, Resp}; - transaction(Client, add, [Key, Value, ExpTime, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_ADD, {Key, Value, ExpTime}}, TimeLimit) of - {ok, stored} -> - {Client, ok}; - {error, not_stored} -> - {Client, {error, not_stored}}; - {error, Reason} -> - {error, Reason} + {ok, stored} -> + {Client, ok}; + {error, not_stored} -> + {Client, {error, not_stored}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, get, [Key, TimeLimit]) -> case send_receive(Client, {?MEMCACHE_GET, {[Key]}}, TimeLimit) of - {ok, [Found]} -> - {Client, Found}; - {error, Reason} -> - {error, Reason} + {ok, [Found]} -> + {Client, Found}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, set, [Key, Value, ExpTime, TimeLimit, CAS]) -> case send_receive(Client, {?MEMCACHE_SET, {Key, Value, ExpTime, CAS}}, TimeLimit) of - {ok, stored} -> - {Client, ok}; - {error, already_exists} -> - {Client, {error, already_exists}}; - {error, not_found} -> - {Client, {error, not_found}}; - {error, Reason} -> - {error, Reason} + {ok, stored} -> + {Client, ok}; + {error, already_exists} -> + {Client, {error, already_exists}}; + {error, not_found} -> + {Client, {error, not_found}}; + {error, Reason} -> + {error, Reason} end; - transaction(Client, flush_all, [TimeLimit]) -> case send_receive(Client, {?MEMCACHE_FLUSH_ALL, {}}, TimeLimit) of - {ok, ok} -> - {Client, ok}; - {error, Reason} -> - {error, Reason} + {ok, 
ok} -> + {Client, ok}; + {error, Reason} -> + {error, Reason} end; - %% mset/madd are currently only supported by the binary protocol implementation. transaction(Client, async_mset, _) -> {Client, {error, unsupported_operation}}; - transaction(Client, async_madd, _) -> {Client, {error, unsupported_operation}}; - transaction(Client, async_mget, [Keys]) -> case async_mget(Client, Keys) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_delete, [Keys]) -> case async_delete(Client, Keys) of - {error, Reason} -> - {error, Reason}; - {ok, ok} -> - {Client, ok} + {error, Reason} -> + {error, Reason}; + {ok, ok} -> + {Client, ok} end; - transaction(Client, async_increment, [Keys]) -> async_increment(Client, Keys); - transaction(Client, async_blank_response, [Keys, Timeout]) -> {ok, Results} = async_blank_response(Client, Keys, Timeout), {Client, Results}; - transaction(Client, async_mget_response, [Keys, Timeout]) -> case async_mget_response(Client, Keys, Timeout) of - {error, Reason} -> - {error, Reason}; - {ok, {ok, FoundItems}} -> - FoundKeys = [Key || #mero_item{key = Key} <- FoundItems], - NotFoundKeys = lists:subtract(Keys, FoundKeys), - Result = [#mero_item{key = Key, value = undefined} - || Key <- NotFoundKeys] ++ FoundItems, - {Client, Result} + {error, Reason} -> + {error, Reason}; + {ok, {ok, FoundItems}} -> + FoundKeys = [Key || #mero_item{key = Key} <- FoundItems], + NotFoundKeys = lists:subtract(Keys, FoundKeys), + Result = [#mero_item{key = Key, value = undefined} || Key <- NotFoundKeys] ++ FoundItems, + {Client, Result} end. - close(Client, Reason) -> ?LOG_EVENT(Client#client.event_callback, [closing_socket, {reason, Reason}]), gen_tcp:close(Client#client.socket). 
- %%%============================================================================= %%% Internal functions %%%============================================================================= - send_receive(Client, {_Op, _Args} = Cmd, TimeLimit) -> try - Data = pack(Cmd), - ok = send(Client, Data), - receive_response(Client, Cmd, TimeLimit, <<>>, []) + Data = pack(Cmd), + ok = send(Client, Data), + receive_response(Client, Cmd, TimeLimit, <<>>, []) catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. - pack({?MEMCACHE_DELETE, {Key}}) when is_binary(Key) -> [<<"delete ">>, Key, <<"\r\n">>]; pack({?MEMCACHE_DELETEQ, {Key}}) when is_binary(Key) -> [<<"delete ">>, Key, <<" noreply ">>, <<"\r\n">>]; pack({?MEMCACHE_ADD, {Key, Initial, ExpTime}}) -> NBytes = integer_to_list(size(Initial)), - [<<"add ">>, Key, <<" ">>, <<"00">>, <<" ">>, ExpTime, - <<" ">>, NBytes, <<"\r\n">>, Initial, <<"\r\n">>]; + [<<"add ">>, + Key, + <<" ">>, + <<"00">>, + <<" ">>, + ExpTime, + <<" ">>, + NBytes, + <<"\r\n">>, + Initial, + <<"\r\n">>]; pack({?MEMCACHE_SET, {Key, Initial, ExpTime, undefined}}) -> NBytes = integer_to_list(size(Initial)), - [<<"set ">>, Key, <<" ">>, <<"00">>, <<" ">>, ExpTime, - <<" ">>, NBytes, <<"\r\n">>, Initial, <<"\r\n">>]; + [<<"set ">>, + Key, + <<" ">>, + <<"00">>, + <<" ">>, + ExpTime, + <<" ">>, + NBytes, + <<"\r\n">>, + Initial, + <<"\r\n">>]; pack({?MEMCACHE_SET, {Key, Initial, ExpTime, CAS}}) when is_integer(CAS) -> %% note: CAS should only be supplied if setting a value after looking it up. if the %% value has changed since we looked it up, the result of a cas command will be EXISTS %% (otherwise STORED). 
NBytes = integer_to_list(size(Initial)), - [<<"cas ">>, Key, <<" ">>, <<"00">>, <<" ">>, ExpTime, - <<" ">>, NBytes, <<" ">>, integer_to_binary(CAS), <<"\r\n">>, Initial, <<"\r\n">>]; + [<<"cas ">>, + Key, + <<" ">>, + <<"00">>, + <<" ">>, + ExpTime, + <<" ">>, + NBytes, + <<" ">>, + integer_to_binary(CAS), + <<"\r\n">>, + Initial, + <<"\r\n">>]; pack({?MEMCACHE_INCREMENT, {Key, Value}}) -> [<<"incr ">>, Key, <<" ">>, Value, <<"\r\n">>]; pack({?MEMCACHE_FLUSH_ALL, {}}) -> [<<"flush_all\r\n">>]; pack({?MEMCACHE_GET, {Keys}}) when is_list(Keys) -> - Query = lists:foldr(fun(Key, Acc) -> [Key, <<" ">> | Acc] end, [], Keys), + Query = lists:foldr(fun (Key, Acc) -> + [Key, <<" ">> | Acc] + end, + [], + Keys), [<<"gets ">>, Query, <<"\r\n">>]. - - send(Client, Data) -> case gen_tcp:send(Client#client.socket, Data) of - ok -> ok; - {error, Reason} -> - ?LOG_EVENT(Client#client.event_callback, [socket, send, error, {reason, Reason}]), - throw({failed, Reason}) + ok -> + ok; + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, [socket, send, error, {reason, Reason}]), + throw({failed, Reason}) end. 
- receive_response(Client, Cmd, TimeLimit, AccBinary, AccResult) -> case gen_tcp_recv(Client, 0, TimeLimit) of - {ok, Data} -> - NAcc = <>, - case parse_reply(Client, Cmd, NAcc, TimeLimit, AccResult) of - {ok, Atom, []} when (Atom == stored) or - (Atom == deleted) or - (Atom == ok) -> - {ok, Atom}; - {ok, Binary} when is_binary(Binary) -> - {ok, Binary}; - {ok, finished, Result} -> - {ok, Result}; - {ok, NBuffer, NewCmd, NAccResult} -> - receive_response(Client, NewCmd, TimeLimit, NBuffer, NAccResult); - {error, Reason} -> - ?LOG_EVENT( - Client#client.event_callback, [socket, rcv, error, {reason, Reason}]), - throw({failed, Reason}) - end; - {error, Reason} -> - ?LOG_EVENT(Client#client.event_callback, [socket, rcv, error, {reason, Reason}]), - throw({failed, Reason}) + {ok, Data} -> + NAcc = <>, + case parse_reply(Client, Cmd, NAcc, TimeLimit, AccResult) of + {ok, Atom, []} when (Atom == stored) or (Atom == deleted) or (Atom == ok) -> + {ok, Atom}; + {ok, Binary} when is_binary(Binary) -> + {ok, Binary}; + {ok, finished, Result} -> + {ok, Result}; + {ok, NBuffer, NewCmd, NAccResult} -> + receive_response(Client, NewCmd, TimeLimit, NBuffer, NAccResult); + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, [socket, rcv, error, {reason, Reason}]), + throw({failed, Reason}) + end; + {error, Reason} -> + ?LOG_EVENT(Client#client.event_callback, [socket, rcv, error, {reason, Reason}]), + throw({failed, Reason}) end. process_result({?MEMCACHE_GET, {Keys}}, finished, Result) -> @@ -296,63 +299,62 @@ process_result({?MEMCACHE_GET, {Keys}}, finished, Result) -> process_result(_Cmd, _Status, Result) -> Result. 
- %% This is so extremely shitty that I will do the binary prototol no matter what :( parse_reply(Client, Cmd, Buffer, TimeLimit, AccResult) -> case split_command(Buffer) of - {error, uncomplete} -> - {ok, Buffer, Cmd, AccResult}; - {Command, BinaryRest} -> - case {Cmd, parse_command(Command)} of - {_, {ok, Status}} when is_atom(Status) -> - {ok, Status, process_result(Cmd, Status, AccResult)}; - {{?MEMCACHE_GET, {Keys}}, {ok, {value, Key, Bytes, CAS}}} -> - case parse_value(Client, Key, Bytes, CAS, TimeLimit, BinaryRest) of - {ok, Item, NewBinaryRest} -> - parse_reply(Client, {?MEMCACHE_GET, {lists:delete(Key, Keys)}}, - NewBinaryRest, TimeLimit, [Item | AccResult]); - {error, Reason} -> - {error, Reason} - end; - {_, {ok, Binary}} when is_binary(Binary) -> - {ok, Binary}; - {_, {error, Reason}} -> - {error, Reason} - end + {error, uncomplete} -> + {ok, Buffer, Cmd, AccResult}; + {Command, BinaryRest} -> + case {Cmd, parse_command(Command)} of + {_, {ok, Status}} when is_atom(Status) -> + {ok, Status, process_result(Cmd, Status, AccResult)}; + {{?MEMCACHE_GET, {Keys}}, {ok, {value, Key, Bytes, CAS}}} -> + case parse_value(Client, Key, Bytes, CAS, TimeLimit, BinaryRest) of + {ok, Item, NewBinaryRest} -> + parse_reply(Client, + {?MEMCACHE_GET, {lists:delete(Key, Keys)}}, + NewBinaryRest, + TimeLimit, + [Item | AccResult]); + {error, Reason} -> + {error, Reason} + end; + {_, {ok, Binary}} when is_binary(Binary) -> + {ok, Binary}; + {_, {error, Reason}} -> + {error, Reason} + end end. 
parse_value(Client, Key, Bytes, CAS, TimeLimit, Buffer) -> case Buffer of - <> -> - {ok, #mero_item{key = Key, value = Value, cas = CAS}, - RestAfterValue}; - _ when size(Buffer) < (Bytes + size(<<"\r\n">>)) -> - case gen_tcp_recv(Client, 0, TimeLimit) of - {ok, Data} -> - parse_value(Client, Key, Bytes, CAS, TimeLimit, <>); - {error, Reason} -> - {error, Reason} - end; - _ -> - {error, invalid_value_size} + <> -> + {ok, #mero_item{key = Key, value = Value, cas = CAS}, RestAfterValue}; + _ when size(Buffer) < Bytes + size(<<"\r\n">>) -> + case gen_tcp_recv(Client, 0, TimeLimit) of + {ok, Data} -> + parse_value(Client, Key, Bytes, CAS, TimeLimit, <>); + {error, Reason} -> + {error, Reason} + end; + _ -> + {error, invalid_value_size} end. split_command(Buffer) -> - case binary:split(Buffer, [<<"\r\n">>], []) of - [Line, RemainBuffer] -> - {binary:split(Line, [<<" ">>], [global, trim]), RemainBuffer}; - [_UncompletedLine] -> - {error, uncomplete} + case binary:split(Buffer, [<<"\r\n">>], []) of + [Line, RemainBuffer] -> + {binary:split(Line, [<<" ">>], [global, trim]), RemainBuffer}; + [_UncompletedLine] -> + {error, uncomplete} end. - parse_command([<<"ERROR">> | Reason]) -> {error, Reason}; parse_command([<<"CLIENT_ERROR">> | Reason]) -> {error, Reason}; parse_command([<<"SERVER_ERROR">> | Reason]) -> {error, Reason}; - parse_command([<<"EXISTS">>]) -> {error, already_exists}; parse_command([<<"NOT_FOUND">>]) -> @@ -378,38 +380,39 @@ parse_command([Value]) -> parse_command(Line) -> {error, {unknown, Line}}. - gen_tcp_recv(Client, Bytes, TimeLimit) -> Timeout = mero_conf:millis_to(TimeLimit), case Timeout of 0 -> - {error, timeout}; + {error, timeout}; Timeout -> - gen_tcp:recv(Client#client.socket, Bytes, Timeout) + gen_tcp:recv(Client#client.socket, Bytes, Timeout) end. - to_integer(Value) when is_integer(Value) -> Value; to_integer(Value) when is_binary(Value) -> binary_to_integer(Value). 
- async_mget(Client, Keys) -> try - Data = pack({?MEMCACHE_GET, {Keys}}), - {ok, send(Client, Data)} + Data = pack({?MEMCACHE_GET, {Keys}}), + {ok, send(Client, Data)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. async_delete(Client, Keys) -> try - {ok, lists:foreach(fun(K) -> send(Client, pack({?MEMCACHE_DELETEQ, {K}})) end, Keys)} + {ok, + lists:foreach(fun (K) -> + send(Client, pack({?MEMCACHE_DELETEQ, {K}})) + end, + Keys)} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. async_increment(_Client, _Keys) -> @@ -420,8 +423,9 @@ async_blank_response(_Client, _Keys, _TimeLimit) -> async_mget_response(Client, Keys, TimeLimit) -> try - {ok, receive_response(Client, {?MEMCACHE_GET, {Keys}}, TimeLimit, <<>>, [])} + {ok, receive_response(Client, {?MEMCACHE_GET, {Keys}}, TimeLimit, <<>>, [])} catch - throw:{failed, Reason} -> - {error, Reason} + {failed, Reason} -> + {error, Reason} end. + diff --git a/test/mero_SUITE.erl b/test/mero_SUITE.erl index b7e0eb2..9b4e094 100644 --- a/test/mero_SUITE.erl +++ b/test/mero_SUITE.erl @@ -33,34 +33,12 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --export([ - all/0, - groups/0, - init_per_group/2, - end_per_group/2, - init_per_testcase/2, - end_per_testcase/2, - add/1, - delete/1, - get_undefineds/1, - increase_counter/1, - increase_counter_clustered_key/1, - increment/1, - mdelete/1, - multiget_defineds/1, - multiget_defineds_clustered_keys/1, - multiget_undefineds/1, - set/1, - undefined_counter/1, - mincrease_counter/1, - cas/1, - madd/1, - mset/1, - mcas/1, - state_ok/1, - state_error/1, - state_timeout/1 -]). 
+-export([all/0, groups/0, init_per_group/2, end_per_group/2, init_per_testcase/2, + end_per_testcase/2, add/1, delete/1, get_undefineds/1, increase_counter/1, + increase_counter_clustered_key/1, increment/1, mdelete/1, multiget_defineds/1, + multiget_defineds_clustered_keys/1, multiget_undefineds/1, set/1, undefined_counter/1, + mincrease_counter/1, cas/1, madd/1, mset/1, mcas/1, state_ok/1, state_error/1, + state_timeout/1]). -define(HOST, "127.0.0.1"). -define(PORT, 11911). @@ -69,16 +47,13 @@ %%% common_test callbacks %%%============================================================================= -all() -> [ - {group, text_protocol}, - {group, binary_protocol} - ]. +all() -> + [{group, text_protocol}, {group, binary_protocol}]. groups() -> - [ - {text_protocol, [shuffle, {repeat_until_any_fail, 1}], - [ - add, + [{text_protocol, + [shuffle, {repeat_until_any_fail, 1}], + [add, delete, get_undefineds, increase_counter, @@ -93,12 +68,10 @@ groups() -> cas, state_ok, state_error, - state_timeout - ] - }, - {binary_protocol, [shuffle, {repeat_until_any_fail, 1}], - [ - add, + state_timeout]}, + {binary_protocol, + [shuffle, {repeat_until_any_fail, 1}], + [add, delete, get_undefineds, increase_counter, @@ -117,44 +90,38 @@ groups() -> mcas, state_ok, state_error, - state_timeout - ] - } - ]. + state_timeout]}]. 
init_per_group(text_protocol, Config) -> ClusterConfig = [{cluster, [{servers, [{"localhost", 11298}, {"localhost", 11299}]}, {sharding_algorithm, {mero, shard_phash2}}, {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}] - }, + {pool_worker_module, mero_wrk_tcp_txt}]}, {cluster2, [{servers, [{"localhost", 11300}]}, {sharding_algorithm, {mero, shard_crc32}}, {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}] - }], + {pool_worker_module, mero_wrk_tcp_txt}]}], [{cluster_config, ClusterConfig} | Config]; init_per_group(binary_protocol, Config) -> ClusterConfig = [{cluster, [{servers, [{"localhost", 11298}, {"localhost", 11299}]}, {sharding_algorithm, {mero, shard_phash2}}, {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary}] - }, + {pool_worker_module, mero_wrk_tcp_binary}]}, {cluster2, [{servers, [{"localhost", 11300}]}, {sharding_algorithm, {mero, shard_crc32}}, {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary}] - }], + {pool_worker_module, mero_wrk_tcp_binary}]}], [{cluster_config, ClusterConfig} | Config]. end_per_group(_GroupName, _Config) -> ok. -init_per_testcase(TestCase, Conf) when TestCase == state_error; TestCase == state_timeout -> +init_per_testcase(TestCase, Conf) + when TestCase == state_error; TestCase == state_timeout -> meck:new(mero_pool, [passthrough]), init_per_testcase(default, Conf); init_per_testcase(_TestCase, Conf) -> @@ -165,7 +132,8 @@ init_per_testcase(_TestCase, Conf) -> mero_conf:timeout_read(1000), [{pids, Pids} | Conf]. -end_per_testcase(TestCase, Conf) when TestCase == state_error; TestCase == state_timeout -> +end_per_testcase(TestCase, Conf) + when TestCase == state_error; TestCase == state_timeout -> meck:unload(mero_pool), end_per_testcase(default, Conf); end_per_testcase(_TestCase, Conf) -> @@ -174,7 +142,6 @@ end_per_testcase(_TestCase, Conf) -> ok = application:stop(mero), ok. 
- %%%============================================================================= %%% Tests %%%============================================================================= @@ -193,7 +160,8 @@ cas(_Conf) -> %% CAS with a token other than the token which was returned from %% `gets` should result in an "exists" error: - ?assertEqual({error, already_exists}, mero:cas(cluster, Key, <<"y">>, 1000, 1000, CAS + 123)), + ?assertEqual({error, already_exists}, + mero:cas(cluster, Key, <<"y">>, 1000, 1000, CAS + 123)), await_connected(cluster), ?assertEqual({Key, <<"x">>}, mero:get(cluster, Key, 1000)), @@ -210,7 +178,6 @@ cas(_Conf) -> ?assertNotEqual(CAS1, CAS2), ok. - undefined_counter(_Conf) -> Key = key(), ct:log("state ~p", [mero:state()]), @@ -220,7 +187,6 @@ undefined_counter(_Conf) -> ?assertMatch({Key, undefined}, mero:get(cluster2, Key, 1000)), ok. - increase_counter(_Conf) -> Key = key(), ct:log("state ~p", [mero:state()]), @@ -247,15 +213,13 @@ mincrease_counter(_Conf) -> Ret = mero:mget(cluster, [Key0, Key1]), ?assertMatch(Expected, Ret). - delete(_Conf) -> ?assertMatch({<<"11">>, undefined}, mero:get(cluster, <<"11">>)), ?assertMatch(ok, mero:set(cluster, <<"11">>, <<"Adroll">>, 11111, 1000)), ?assertMatch({<<"11">>, <<"Adroll">>}, mero:get(cluster, <<"11">>)), ?assertMatch(ok, mero:delete(cluster, <<"11">>, 1000)), ?assertMatch({<<"11">>, undefined}, mero:get(cluster, <<"11">>)), - ?assertMatch({error, not_found}, mero:delete(cluster, <<"11">>, 1000)). - + ?assertMatch({error, not_found}, mero:delete(cluster, <<"11">>, 1000)). mdelete(_Conf) -> ?assertMatch({<<"11">>, undefined}, mero:get(cluster, <<"11">>)), @@ -274,8 +238,7 @@ mdelete(_Conf) -> %% mdelete is fire and forget. If this is undesirable an alternate approach %% can be taken but it's Good Enough for the motivating problem. - ?assertMatch(ok, mero:mdelete(cluster, [<<"11">>, <<"22">>], 1000)). - + ?assertMatch(ok, mero:mdelete(cluster, [<<"11">>, <<"22">>], 1000)). 
set(_Conf) -> ct:log("state ~p", [mero:state()]), @@ -286,12 +249,10 @@ set(_Conf) -> ?assertMatch({<<"12">>, <<"Adroll2">>}, mero:get(cluster, <<"12">>)), Resp0 = mero:mget(cluster, [<<"11">>, <<"12">>], 5000), - [{<<"11">>, <<"Adroll">>}, - {<<"12">>, <<"Adroll2">>}] = lists:sort(Resp0), + [{<<"11">>, <<"Adroll">>}, {<<"12">>, <<"Adroll2">>}] = lists:sort(Resp0), Resp1 = mero:mget(cluster2, [<<"11">>, <<"12">>], 5000), - [{<<"11">>, undefined}, - {<<"12">>, undefined}] = lists:sort(Resp1), + [{<<"11">>, undefined}, {<<"12">>, undefined}] = lists:sort(Resp1), ok. @@ -305,19 +266,20 @@ get_undefineds(_Conf) -> {Key3, undefined} = mero:get(cluster, Key3, 1000). multiget_undefineds(_Conf) -> - [] = mero:mget(cluster, [], 1000), + [] = mero:mget(cluster, [], 1000), %% 13, 14 and 15 will go to the same server %% 11, 12 and 16 to a different one - Resp = mero:mget(cluster, [<<"11">>,<<"12">>,<<"13">>,<<"14">>,<<"15">>,<<"16">>], 1000), - + Resp = mero:mget(cluster, + [<<"11">>, <<"12">>, <<"13">>, <<"14">>, <<"15">>, <<"16">>], + 1000), [{<<"11">>, undefined}, {<<"12">>, undefined}, {<<"13">>, undefined}, {<<"14">>, undefined}, {<<"15">>, undefined}, - {<<"16">>, undefined}] = lists:sort(Resp). - + {<<"16">>, undefined}] = + lists:sort(Resp). multiget_defineds(_Conf) -> ?assertMatch({ok, 1}, mero:increment_counter(cluster, <<"11">>)), @@ -339,14 +301,16 @@ multiget_defineds(_Conf) -> {<<"15">>, <<"2">>}, {<<"16">>, <<"3">>}, {<<"17">>, undefined}], - ?assertEqual( - Expected, - lists:sort( - mero:mget(cluster, - [<<"11">>,<<"12">>,<<"13">>,<<"14">>,<<"15">>,<<"16">>,<<"17">>], - 1000) - ) - ). + ?assertEqual(Expected, + lists:sort(mero:mget(cluster, + [<<"11">>, + <<"12">>, + <<"13">>, + <<"14">>, + <<"15">>, + <<"16">>, + <<"17">>], + 1000))). 
multiget_defineds_clustered_keys(_Conf) -> ?assertMatch({ok, 1}, mero:increment_counter(cluster, {<<"1">>, <<"11">>})), @@ -357,17 +321,19 @@ multiget_defineds_clustered_keys(_Conf) -> ?assertMatch({ok, 3}, mero:increment_counter(cluster, {<<"3">>, <<"16">>})), %% 13, 14 and 15 will go to the same server %% 11, 12 and 16 to a different one - Expected = [{<<"11">>, <<"1">>}, - {<<"12">>, <<"1">>}, - {<<"13">>, <<"1">>}, - {<<"16">>, <<"3">>}, - {<<"17">>, undefined}], - ?assertEqual(Expected, lists:sort(mero:mget(cluster, - [{<<"1">>, <<"11">>}, - {<<"2">>, <<"12">>}, - {<<"3">>, <<"13">>}, - {<<"3">>, <<"16">>}, - {<<"3">>, <<"17">>}], 1000))). + Expected = [{<<"11">>, <<"1">>}, + {<<"12">>, <<"1">>}, + {<<"13">>, <<"1">>}, + {<<"16">>, <<"3">>}, + {<<"17">>, undefined}], + ?assertEqual(Expected, + lists:sort(mero:mget(cluster, + [{<<"1">>, <<"11">>}, + {<<"2">>, <<"12">>}, + {<<"3">>, <<"13">>}, + {<<"3">>, <<"16">>}, + {<<"3">>, <<"17">>}], + 1000))). increment(_Conf) -> ?assertMatch({<<"11">>, undefined}, mero:get(cluster, <<"11">>)), @@ -406,24 +372,24 @@ increment(_Conf) -> ?assertMatch({<<"14">>, undefined}, mero:get(cluster2, <<"14">>)), ok. 
- add(_Conf) -> ?assertEqual(ok, mero:add(cluster, <<"11">>, <<"Adroll">>, 11111, 1000)), ct:log("First not stored"), - ?assertEqual({error, already_exists}, mero:add(cluster, <<"11">>, <<"Adroll2">>, 111111, 1000)), + ?assertEqual({error, already_exists}, + mero:add(cluster, <<"11">>, <<"Adroll2">>, 111111, 1000)), await_connected(cluster), ct:log("Second not stored"), - ?assertEqual({error, already_exists}, mero:add(cluster, <<"11">>, <<"Adroll2">>, 111111, 1000)), + ?assertEqual({error, already_exists}, + mero:add(cluster, <<"11">>, <<"Adroll2">>, 111111, 1000)), await_connected(cluster), ?assertEqual({<<"11">>, <<"Adroll">>}, mero:get(cluster, <<"11">>)), - ?assertEqual(ok, mero:delete(cluster, <<"11">>, 1000)), + ?assertEqual(ok, mero:delete(cluster, <<"11">>, 1000)), ?assertEqual({<<"11">>, undefined}, mero:get(cluster, <<"11">>)), ?assertEqual(ok, mero:add(cluster, <<"11">>, <<"Adroll3">>, 11111, 1000)), ?assertEqual({<<"11">>, <<"Adroll3">>}, mero:get(cluster, <<"11">>)). - madd(_) -> %% with one existing key, add new keys repeatedly, moving the %% position of the existing key each time: @@ -436,24 +402,23 @@ madd(_) -> lists:foreach(fun ({Start, N}) -> mero:flush_all(cluster), ok = mero:add(cluster, ExistingKey, ExistingKey, 10000, 1000), - Keys = MakeKeys(Start, N) - ++ [ExistingKey] - ++ MakeKeys(Start + N + 1, Total - N - 1), + Keys = MakeKeys(Start, N) ++ + [ExistingKey] ++ MakeKeys(Start + N + 1, Total - N - 1), Expected = [case Key of - ExistingKey -> {error, already_exists}; - _ -> ok + ExistingKey -> + {error, already_exists}; + _ -> + ok end || Key <- Keys], ?assertEqual(Expected, - mero:madd(cluster, [{Key, Key, 10000} - || Key <- Keys], 5000)), - ?assertEqual(lists:keysort(1, [{Key, Key} - || Key <- Keys]), + mero:madd(cluster, + [{Key, Key, 10000} || Key <- Keys], + 5000)), + ?assertEqual(lists:keysort(1, [{Key, Key} || Key <- Keys]), lists:keysort(1, mero:mget(cluster, Keys, 5000))) end, - [{1, N} - || N <- lists:seq(1, Total - 1)]). 
- + [{1, N} || N <- lists:seq(1, Total - 1)]). mset(_) -> Keys = [key() || _ <- lists:seq(1, 10)], @@ -461,97 +426,81 @@ mset(_) -> Expected = lists:duplicate(length(Updates), ok), ?assertEqual(Expected, mero:mset(cluster, Updates, 5000)). - mcas(_) -> Keys = [key() || _ <- lists:seq(1, 10)], Updates = [{Key, Key, 10000} || Key <- Keys], - ?assertEqual(lists:duplicate(length(Updates), ok), - mero:mset(cluster, Updates, 5000)), + ?assertEqual(lists:duplicate(length(Updates), ok), mero:mset(cluster, Updates, 5000)), await_connected(cluster), KVCs = mero:mgets(cluster, Keys, 5000), FailingKeys = [hd(Keys), lists:nth(length(Keys), Keys)], - {NUpdates, Expected} = - lists:unzip([ - case lists:member(Key, FailingKeys) of - true -> - {{Key, <<"should not update">>, 10000, CAS + 1}, {error, already_exists}}; - false -> - {{Key, <>, 10000, CAS}, ok} - end || {Key, _, CAS} <- KVCs] - ), + {NUpdates, Expected} = lists:unzip([case lists:member(Key, FailingKeys) of + true -> + {{Key, <<"should not update">>, 10000, CAS + 1}, + {error, already_exists}}; + false -> + {{Key, <>, 10000, CAS}, ok} + end + || {Key, _, CAS} <- KVCs]), ?assertEqual(Expected, mero:mcas(cluster, NUpdates, 5000)). state_ok(_) -> State = mero:state(), - ?assertEqual( - [ - {connected, 1}, - {connecting, 0}, - {failed, 0}, - {free, 1}, - {links, 3}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster2, State))), - ?assertEqual( - [ - {connected, 2}, - {connecting, 0}, - {failed, 0}, - {free, 2}, - {links, 6}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster, State))). 
+ ?assertEqual([{connected, 1}, + {connecting, 0}, + {failed, 0}, + {free, 1}, + {links, 3}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster2, State))), + ?assertEqual([{connected, 2}, + {connecting, 0}, + {failed, 0}, + {free, 2}, + {links, 6}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster, State))). state_error(_) -> meck:expect(mero_pool, state, 1, {error, down}), State = mero:state(), - ?assertEqual( - [ - {connected, 0}, - {connecting, 0}, - {failed, 0}, - {free, 0}, - {links, 0}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster2, State))), - ?assertEqual( - [ - {connected, 0}, - {connecting, 0}, - {failed, 0}, - {free, 0}, - {links, 0}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster, State))). + ?assertEqual([{connected, 0}, + {connecting, 0}, + {failed, 0}, + {free, 0}, + {links, 0}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster2, State))), + ?assertEqual([{connected, 0}, + {connecting, 0}, + {failed, 0}, + {free, 0}, + {links, 0}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster, State))). state_timeout(_) -> meck:expect(mero_pool, state, 1, {error, timeout}), State = mero:state(), - ?assertEqual( - [ - {connected, 0}, - {connecting, 0}, - {failed, 0}, - {free, 0}, - {links, 0}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster2, State))), - ?assertEqual( - [ - {connected, 0}, - {connecting, 0}, - {failed, 0}, - {free, 0}, - {links, 0}, - {message_queue_len, 0}, - {monitors, 0} - ], lists:sort(proplists:get_value(cluster, State))). 
- + ?assertEqual([{connected, 0}, + {connecting, 0}, + {failed, 0}, + {free, 0}, + {links, 0}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster2, State))), + ?assertEqual([{connected, 0}, + {connecting, 0}, + {failed, 0}, + {free, 0}, + {links, 0}, + {message_queue_len, 0}, + {monitors, 0}], + lists:sort(proplists:get_value(cluster, State))). %%%============================================================================= %%% Internal functions @@ -562,14 +511,15 @@ key() -> await_connected(Cluster) -> ct:log("waiting for free connections"), - Wait = fun W () -> + Wait = fun W() -> State = mero:state(), case proplists:get_value(connected, proplists:get_value(Cluster, State)) of - N when is_integer(N) andalso N > 1 -> - ok; - _ -> - timer:sleep(100), - W() + N when is_integer(N) andalso N > 1 -> + ok; + _ -> + timer:sleep(100), + W() end end, Wait(). + diff --git a/test/mero_cluster_SUITE.erl b/test/mero_cluster_SUITE.erl index 3380cbf..a88961d 100644 --- a/test/mero_cluster_SUITE.erl +++ b/test/mero_cluster_SUITE.erl @@ -33,47 +33,39 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - load_cluster/1, - shard_phash2/1, - shard_crc32/1, - select_pool/1, - group_by_shards/1, - group_by_shards_clustered_key/1 -]). - -all() -> [ - load_cluster, - shard_phash2, - shard_crc32, - select_pool, - group_by_shards, - group_by_shards_clustered_key -]. +-export([all/0, + load_cluster/1, + shard_phash2/1, + shard_crc32/1, + select_pool/1, + group_by_shards/1, + group_by_shards_clustered_key/1]). + +all() -> + [load_cluster, + shard_phash2, + shard_crc32, + select_pool, + group_by_shards, + group_by_shards_clustered_key]. %% Just tests if the application can be started and when it does that %% the mero_cluster module is generated correctly. 
load_cluster(_Conf) -> - Config = [ - {cluster, [ - {servers, [ - {"localhost", 11996}, - {"localhost", 11997}, - {"localhost", 11998}, - {"localhost", 11999} - ]}, - {sharding_algorithm, {mero, shard_phash2}}, - {workers_per_shard, 3}, - {pool_worker_module, mero_wrk_tcp_txt} - ]}, - {cluster2, [ - {servers, [{"localhost", 11995}]}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 2}, - {pool_worker_module, mero_wrk_tcp_txt} - ]} - ], + Config = [{cluster, + [{servers, + [{"localhost", 11996}, + {"localhost", 11997}, + {"localhost", 11998}, + {"localhost", 11999}]}, + {sharding_algorithm, {mero, shard_phash2}}, + {workers_per_shard, 3}, + {pool_worker_module, mero_wrk_tcp_txt}]}, + {cluster2, + [{servers, [{"localhost", 11995}]}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 2}, + {pool_worker_module, mero_wrk_tcp_txt}]}], mero_cluster:load_clusters(Config), ?assertMatch(4, mero_cluster:cluster_shards(cluster)), ?assertMatch(1, mero_cluster:cluster_shards(cluster2)), @@ -92,146 +84,152 @@ load_cluster(_Conf) -> ct:log("cluster: ~p", [mero_cluster:child_definitions(cluster)]), ct:log("cluster2: ~p", [mero_cluster:child_definitions(cluster2)]), - ?assertEqual( - [ - {"localhost",11996,mero_cluster_localhost_0_0,mero_wrk_tcp_txt}, - {"localhost",11996,mero_cluster_localhost_0_1,mero_wrk_tcp_txt}, - {"localhost",11996,mero_cluster_localhost_0_2,mero_wrk_tcp_txt}, - {"localhost",11997,mero_cluster_localhost_1_0,mero_wrk_tcp_txt}, - {"localhost",11997,mero_cluster_localhost_1_1,mero_wrk_tcp_txt}, - {"localhost",11997,mero_cluster_localhost_1_2,mero_wrk_tcp_txt}, - {"localhost",11998,mero_cluster_localhost_2_0,mero_wrk_tcp_txt}, - {"localhost",11998,mero_cluster_localhost_2_1,mero_wrk_tcp_txt}, - {"localhost",11998,mero_cluster_localhost_2_2,mero_wrk_tcp_txt}, - {"localhost",11999,mero_cluster_localhost_3_0,mero_wrk_tcp_txt}, - {"localhost",11999,mero_cluster_localhost_3_1,mero_wrk_tcp_txt}, - 
{"localhost",11999,mero_cluster_localhost_3_2,mero_wrk_tcp_txt} - ], - mero_cluster:child_definitions(cluster) - ), - ?assertEqual( - [ - {"localhost",11995,mero_cluster2_localhost_0_0,mero_wrk_tcp_txt}, - {"localhost",11995,mero_cluster2_localhost_0_1,mero_wrk_tcp_txt} - ], - mero_cluster:child_definitions(cluster2) - ), + ?assertEqual([{"localhost", 11996, mero_cluster_localhost_0_0, mero_wrk_tcp_txt}, + {"localhost", 11996, mero_cluster_localhost_0_1, mero_wrk_tcp_txt}, + {"localhost", 11996, mero_cluster_localhost_0_2, mero_wrk_tcp_txt}, + {"localhost", 11997, mero_cluster_localhost_1_0, mero_wrk_tcp_txt}, + {"localhost", 11997, mero_cluster_localhost_1_1, mero_wrk_tcp_txt}, + {"localhost", 11997, mero_cluster_localhost_1_2, mero_wrk_tcp_txt}, + {"localhost", 11998, mero_cluster_localhost_2_0, mero_wrk_tcp_txt}, + {"localhost", 11998, mero_cluster_localhost_2_1, mero_wrk_tcp_txt}, + {"localhost", 11998, mero_cluster_localhost_2_2, mero_wrk_tcp_txt}, + {"localhost", 11999, mero_cluster_localhost_3_0, mero_wrk_tcp_txt}, + {"localhost", 11999, mero_cluster_localhost_3_1, mero_wrk_tcp_txt}, + {"localhost", 11999, mero_cluster_localhost_3_2, mero_wrk_tcp_txt}], + mero_cluster:child_definitions(cluster)), + ?assertEqual([{"localhost", 11995, mero_cluster2_localhost_0_0, mero_wrk_tcp_txt}, + {"localhost", 11995, mero_cluster2_localhost_0_1, mero_wrk_tcp_txt}], + mero_cluster:child_definitions(cluster2)), ok. - - shard_phash2(_Conf) -> [[begin - Result = mero:shard_phash2(Key, Shards), - ?assertEqual(Result, mero:shard_phash2(Key, Shards)), - ?assert(Result =< Shards) - end || Shards <- lists:seq(1, 10)] + Result = mero:shard_phash2(Key, Shards), + ?assertEqual(Result, mero:shard_phash2(Key, Shards)), + ?assert(Result =< Shards) + end + || Shards <- lists:seq(1, 10)] || Key <- [<<"Adroll">>, <<"retargetting">>, <<"platform">>]]. 
- shard_crc32(_Conf) -> [[begin - Result = mero:shard_crc32(Key, Shards), - ?assertEqual(Result, mero:shard_crc32(Key, Shards)), - ?assert(Result =< Shards) - end || Shards <- lists:seq(1, 10)] - || Key <- [<<"Adroll">>, <<"retargetting">>, <<"platform">>]]. + Result = mero:shard_crc32(Key, Shards), + ?assertEqual(Result, mero:shard_crc32(Key, Shards)), + ?assert(Result =< Shards) + end + || Shards <- lists:seq(1, 10)] + || Key <- [<<"Adroll">>, <<"retargetting">>, <<"platform">>]]. select_pool(_Conf) -> - Config = [ - {cluster, - [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, - {sharding_algorithm, {mero, shard_phash2}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}]}, - {cluster2, - [{servers, [{"localhost", 11995}, {"localhost", 11998}]}, - {sharding_algorithm, {mero, shard_phash2}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}] - }], + Config = [{cluster, + [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, + {sharding_algorithm, {mero, shard_phash2}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_txt}]}, + {cluster2, + [{servers, [{"localhost", 11995}, {"localhost", 11998}]}, + {sharding_algorithm, {mero, shard_phash2}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_txt}]}], mero_cluster:load_clusters(Config), ct:log("~p", [mero_cluster:child_definitions(cluster)]), ct:log("~p", [mero_cluster:child_definitions(cluster2)]), - ?assertEqual( - [ - {"localhost", 11996, mero_cluster_localhost_0_0, mero_wrk_tcp_txt}, - {"localhost", 11997, mero_cluster_localhost_1_0, mero_wrk_tcp_txt} - ], - mero_cluster:child_definitions(cluster) - ), - ?assertEqual( - [ - {"localhost", 11995, mero_cluster2_localhost_0_0, mero_wrk_tcp_txt}, - {"localhost", 11998, mero_cluster2_localhost_1_0, mero_wrk_tcp_txt} - ], - mero_cluster:child_definitions(cluster2) - ), + ?assertEqual([{"localhost", 11996, mero_cluster_localhost_0_0, mero_wrk_tcp_txt}, + {"localhost", 11997, 
mero_cluster_localhost_1_0, mero_wrk_tcp_txt}], + mero_cluster:child_definitions(cluster)), + ?assertEqual([{"localhost", 11995, mero_cluster2_localhost_0_0, mero_wrk_tcp_txt}, + {"localhost", 11998, mero_cluster2_localhost_1_0, mero_wrk_tcp_txt}], + mero_cluster:child_definitions(cluster2)), ?assertMatch(mero_cluster_localhost_0_0, mero_cluster:server(cluster, <<"Adroll">>)), ?assertMatch(mero_cluster2_localhost_0_0, mero_cluster:server(cluster2, <<"Adroll">>)), ?assertMatch(mero_cluster_localhost_1_0, mero_cluster:server(cluster, <<"Adroll2">>)), ?assertMatch(mero_cluster2_localhost_1_0, mero_cluster:server(cluster2, <<"Adroll2">>)), ok. - group_by_shards(_Conf) -> - Config = [ - {cluster, - [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, + Config = [{cluster, + [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, {sharding_algorithm, {mero, shard_phash2}}, {workers_per_shard, 1}, {pool_worker_module, mero_wrk_tcp_txt}]}], mero_cluster:load_clusters(Config), ?assertEqual([], mero_cluster:group_by_shards(cluster, [])), - ?assertEqual([ - {0, [<<"6">>, <<"13">>, <<"14">>, <<"15">>, <<"17">>]}, - {1, [<<"1">>,<<"2">>,<<"3">>,<<"4">>,<<"5">>,<<"7">>, - <<"8">>,<<"9">>, <<"11">>,<<"12">>,<<"16">>,<<"18">>,<<"19">> - ]}], - mero_cluster:group_by_shards(cluster, - [<<"1">>, <<"2">>, <<"3">>, - <<"4">>, <<"5">>, <<"6">>, - <<"7">>, <<"8">>, <<"9">>, - <<"11">>, <<"12">>, <<"13">>, - <<"14">>, <<"15">>, <<"16">>, - <<"17">>, <<"18">>, <<"19">>])), - ?assertEqual([{0, [{x, <<"6">>}, {y, <<"13">>}]}, - {1, [{a, <<"1">>}, {b, <<"2">>}]}], + ?assertEqual([{0, [<<"6">>, <<"13">>, <<"14">>, <<"15">>, <<"17">>]}, + {1, + [<<"1">>, + <<"2">>, + <<"3">>, + <<"4">>, + <<"5">>, + <<"7">>, + <<"8">>, + <<"9">>, + <<"11">>, + <<"12">>, + <<"16">>, + <<"18">>, + <<"19">>]}], + mero_cluster:group_by_shards(cluster, + [<<"1">>, + <<"2">>, + <<"3">>, + <<"4">>, + <<"5">>, + <<"6">>, + <<"7">>, + <<"8">>, + <<"9">>, + <<"11">>, + <<"12">>, + <<"13">>, + <<"14">>, + <<"15">>, 
+ <<"16">>, + <<"17">>, + <<"18">>, + <<"19">>])), + ?assertEqual([{0, [{x, <<"6">>}, {y, <<"13">>}]}, {1, [{a, <<"1">>}, {b, <<"2">>}]}], mero_cluster:group_by_shards(cluster, - [{a, <<"1">>}, {b, <<"2">>}, - {x, <<"6">>}, {y, <<"13">>}], + [{a, <<"1">>}, + {b, <<"2">>}, + {x, <<"6">>}, + {y, <<"13">>}], 2)), ok. group_by_shards_clustered_key(_Conf) -> - Config = [ - {cluster, - [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, + Config = [{cluster, + [{servers, [{"localhost", 11996}, {"localhost", 11997}]}, {sharding_algorithm, {mero, shard_phash2}}, {workers_per_shard, 1}, {pool_worker_module, mero_wrk_tcp_txt}]}], mero_cluster:load_clusters(Config), - ?assertEqual( - [ - {0, [<<"K6">>]}, - {1, [<<"K1">>,<<"K2">>,<<"K3">>,<<"K4">>,<<"K5">>,<<"K7">>,<<"K8">>,<<"K9">>]} - ], - mero_cluster:group_by_shards(cluster, - [{<<"1">>, <<"K1">>}, {<<"2">>, <<"K2">>}, {<<"3">>, <<"K3">>}, - {<<"4">>, <<"K4">>}, {<<"5">>, <<"K5">>}, {<<"6">>, <<"K6">>}, - {<<"7">>, <<"K7">>}, {<<"8">>, <<"K8">>}, {<<"9">>, <<"K9">>}]) - ), - ?assertEqual( - [{0, [{x, <<"K6">>}, {y, <<"K13">>}]}, {1, [{a, <<"K1">>}, {b, <<"K2">>}]}], - mero_cluster:group_by_shards( - cluster, - [ - {a, {<<"1">>, <<"K1">>}}, - {b, {<<"2">>, <<"K2">>}}, - {x, {<<"6">>, <<"K6">>}}, - {y, {<<"13">>, <<"K13">>}} - ], - 2 - ) - ), + ?assertEqual([{0, [<<"K6">>]}, + {1, + [<<"K1">>, + <<"K2">>, + <<"K3">>, + <<"K4">>, + <<"K5">>, + <<"K7">>, + <<"K8">>, + <<"K9">>]}], + mero_cluster:group_by_shards(cluster, + [{<<"1">>, <<"K1">>}, + {<<"2">>, <<"K2">>}, + {<<"3">>, <<"K3">>}, + {<<"4">>, <<"K4">>}, + {<<"5">>, <<"K5">>}, + {<<"6">>, <<"K6">>}, + {<<"7">>, <<"K7">>}, + {<<"8">>, <<"K8">>}, + {<<"9">>, <<"K9">>}])), + ?assertEqual([{0, [{x, <<"K6">>}, {y, <<"K13">>}]}, {1, [{a, <<"K1">>}, {b, <<"K2">>}]}], + mero_cluster:group_by_shards(cluster, + [{a, {<<"1">>, <<"K1">>}}, + {b, {<<"2">>, <<"K2">>}}, + {x, {<<"6">>, <<"K6">>}}, + {y, {<<"13">>, <<"K13">>}}], + 2)), ok. 
+ diff --git a/test/mero_conf_SUITE.erl b/test/mero_conf_SUITE.erl index dc8afe4..fb009b6 100644 --- a/test/mero_conf_SUITE.erl +++ b/test/mero_conf_SUITE.erl @@ -33,65 +33,58 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - init_per_testcase/2, - end_per_testcase/2, - helper_mfa_config_function/0, - diff/1, - process_server_specs_a_compatible/1, - process_server_specs_a/1, - process_server_specs_a_alternate/1, - process_server_specs_a_b/1, - process_server_specs_a_b_c/1, - process_server_specs_mfa/1, - per_pool_config/1 -]). - -all() -> [ - diff, - process_server_specs_a_compatible, - process_server_specs_a, - process_server_specs_a_alternate, - process_server_specs_a_b, - process_server_specs_a_b_c, - process_server_specs_mfa, - per_pool_config -]. +-export([all/0, + init_per_testcase/2, + end_per_testcase/2, + helper_mfa_config_function/0, + diff/1, + process_server_specs_a_compatible/1, + process_server_specs_a/1, + process_server_specs_a_alternate/1, + process_server_specs_a_b/1, + process_server_specs_a_b_c/1, + process_server_specs_mfa/1, + per_pool_config/1]). + +all() -> + [diff, + process_server_specs_a_compatible, + process_server_specs_a, + process_server_specs_a_alternate, + process_server_specs_a_b, + process_server_specs_a_b_c, + process_server_specs_mfa, + per_pool_config]. 
init_per_testcase(diff, Conf) -> Conf; init_per_testcase(_, Conf) -> meck:new(mero_elasticache, [passthrough]), HostLinea = <<"a1.com|10.100.100.100|11211 ", - "a2.com|10.101.101.00|11211 ", - "a3.com|10.102.00.102|11211\n">>, - - HostLineb = <<"b1.com|10.100.100.100|11211 ", - "b2.com|10.101.101.00|11211\n">>, - + "a2.com|10.101.101.00|11211 ", + "a3.com|10.102.00.102|11211\n">>, + HostLineb = <<"b1.com|10.100.100.100|11211 ", "b2.com|10.101.101.00|11211\n">>, HostLinec = <<"c1.com|10.100.100.100|11211 ", - "c2.com|10.101.101.00|11211 ", - "c3.com|10.102.00.102|11211 c4.com|10.102.00.102|11211\n">>, - - meck:expect(mero_elasticache, request_response, - fun(Type, _, _, _) -> - HostLines = case Type of - a -> HostLinea; - b -> HostLineb; - c -> HostLinec - end, - { - ok, - [ - {banner, <<"CONFIG cluster ...">>}, - {version, <<"version1">>}, - {hosts, HostLines}, - {crlf, <<"\r\n">>}, - {eom, <<"END\r\n">>} - ] - } - end), + "c2.com|10.101.101.00|11211 ", + "c3.com|10.102.00.102|11211 c4.com|10.102.00.102|11211\n">>, + meck:expect(mero_elasticache, + request_response, + fun (Type, _, _, _) -> + HostLines = case Type of + a -> + HostLinea; + b -> + HostLineb; + c -> + HostLinec + end, + {ok, + [{banner, <<"CONFIG cluster ...">>}, + {version, <<"version1">>}, + {hosts, HostLines}, + {crlf, <<"\r\n">>}, + {eom, <<"END\r\n">>}]} + end), Conf. 
end_per_testcase(diff, _Conf) -> @@ -118,16 +111,18 @@ diff(_Conf) -> process_server_specs_a(_Conf) -> Spec = [{default, - [{servers, {elasticache, [{a, 11211, 2}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary}]}], - + [{servers, {elasticache, [{a, 11211, 2}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), - ?assertEqual([ - {"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}, - {"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}], - proplists:get_value(servers, ServerSpecs)), + ?assertEqual([{"a1.com", 11211}, + {"a2.com", 11211}, + {"a3.com", 11211}, + {"a1.com", 11211}, + {"a2.com", 11211}, + {"a3.com", 11211}], + proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_binary, proplists:get_value(pool_worker_module, ServerSpecs)), ?assertEqual(1, proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -135,15 +130,13 @@ process_server_specs_a(_Conf) -> process_server_specs_a_alternate(_Conf) -> Spec = [{default, - [{servers, {elasticache, [{a, 11211}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary}]}], - + [{servers, {elasticache, [{a, 11211}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), - ?assertEqual([ - {"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}], - proplists:get_value(servers, ServerSpecs)), + ?assertEqual([{"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}], + proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_binary, proplists:get_value(pool_worker_module, ServerSpecs)), ?assertEqual(1, 
proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -151,14 +144,13 @@ process_server_specs_a_alternate(_Conf) -> process_server_specs_a_compatible(_Conf) -> Spec = [{default, - [{servers, {elasticache, a, 11211}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary}]}], - + [{servers, {elasticache, a, 11211}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), ?assertEqual([{"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}], - proplists:get_value(servers, ServerSpecs)), + proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_binary, proplists:get_value(pool_worker_module, ServerSpecs)), ?assertEqual(1, proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -166,17 +158,19 @@ process_server_specs_a_compatible(_Conf) -> process_server_specs_a_b(_Conf) -> Spec = [{default, - [{servers, {elasticache, [{a, 11211, 1}, {b, 11211, 2}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 2}, - {pool_worker_module, mero_wrk_tcp_txt}]}], - + [{servers, {elasticache, [{a, 11211, 1}, {b, 11211, 2}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 2}, + {pool_worker_module, mero_wrk_tcp_txt}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), - ?assertEqual([ - {"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}, - {"b1.com", 11211}, {"b2.com", 11211}, - {"b1.com", 11211}, {"b2.com", 11211}], - proplists:get_value(servers, ServerSpecs)), + ?assertEqual([{"a1.com", 11211}, + {"a2.com", 11211}, + {"a3.com", 11211}, + {"b1.com", 11211}, + {"b2.com", 11211}, + {"b1.com", 11211}, + {"b2.com", 11211}], + 
proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_txt, proplists:get_value(pool_worker_module, ServerSpecs)), ?assertEqual(2, proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -184,22 +178,35 @@ process_server_specs_a_b(_Conf) -> process_server_specs_a_b_c(_Conf) -> Spec = [{default, - [{servers, {elasticache, [{a, 11211, 1}, {b, 11211, 2}, {c, 11211, 4}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 20}, - {pool_worker_module, mero_wrk_tcp_txt}]}], - + [{servers, {elasticache, [{a, 11211, 1}, {b, 11211, 2}, {c, 11211, 4}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 20}, + {pool_worker_module, mero_wrk_tcp_txt}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), - ?assertEqual([ - {"a1.com", 11211}, {"a2.com", 11211}, {"a3.com", 11211}, - {"b1.com", 11211}, {"b2.com", 11211}, - {"b1.com", 11211}, {"b2.com", 11211}, - {"c1.com", 11211}, {"c2.com", 11211}, {"c3.com", 11211}, {"c4.com", 11211}, - {"c1.com", 11211}, {"c2.com", 11211}, {"c3.com", 11211}, {"c4.com", 11211}, - {"c1.com", 11211}, {"c2.com", 11211}, {"c3.com", 11211}, {"c4.com", 11211}, - {"c1.com", 11211}, {"c2.com", 11211}, {"c3.com", 11211}, {"c4.com", 11211} - ], - proplists:get_value(servers, ServerSpecs)), + ?assertEqual([{"a1.com", 11211}, + {"a2.com", 11211}, + {"a3.com", 11211}, + {"b1.com", 11211}, + {"b2.com", 11211}, + {"b1.com", 11211}, + {"b2.com", 11211}, + {"c1.com", 11211}, + {"c2.com", 11211}, + {"c3.com", 11211}, + {"c4.com", 11211}, + {"c1.com", 11211}, + {"c2.com", 11211}, + {"c3.com", 11211}, + {"c4.com", 11211}, + {"c1.com", 11211}, + {"c2.com", 11211}, + {"c3.com", 11211}, + {"c4.com", 11211}, + {"c1.com", 11211}, + {"c2.com", 11211}, + {"c3.com", 11211}, + {"c4.com", 11211}], + proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_txt, proplists:get_value(pool_worker_module, 
ServerSpecs)), ?assertEqual(20, proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -207,13 +214,13 @@ process_server_specs_a_b_c(_Conf) -> process_server_specs_mfa(_Conf) -> Spec = [{default, - [{servers, {mfa, {?MODULE, helper_mfa_config_function, []}}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 20}, - {pool_worker_module, mero_wrk_tcp_txt}]}], + [{servers, {mfa, {?MODULE, helper_mfa_config_function, []}}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 20}, + {pool_worker_module, mero_wrk_tcp_txt}]}], [{default, ServerSpecs}] = mero_conf:process_server_specs(Spec), ?assertEqual([{"mfa1.com", 11211}, {"mfa2.com", 11211}], - proplists:get_value(servers, ServerSpecs)), + proplists:get_value(servers, ServerSpecs)), ?assertEqual(mero_wrk_tcp_txt, proplists:get_value(pool_worker_module, ServerSpecs)), ?assertEqual(20, proplists:get_value(workers_per_shard, ServerSpecs)), ?assertEqual({mero, shard_crc32}, proplists:get_value(sharding_algorithm, ServerSpecs)), @@ -225,3 +232,4 @@ per_pool_config(_Conf) -> ?assertEqual(30, mero_conf:pool_initial_connections(pool_1)), ?assertEqual(50, mero_conf:pool_initial_connections(pool_2)), ok. + diff --git a/test/mero_conf_monitor_SUITE.erl b/test/mero_conf_monitor_SUITE.erl index d8de01a..f983d04 100644 --- a/test/mero_conf_monitor_SUITE.erl +++ b/test/mero_conf_monitor_SUITE.erl @@ -31,57 +31,41 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - init_per_testcase/2, - end_per_testcase/2 -]). --export([ - conf_is_periodically_fetched/1, - cluster_is_restarted_when_new_nodes/1, - cluster_is_restarted_when_lost_nodes/1, - cluster_is_not_restarted_when_other_changes/1, - cluster_is_not_restarted_with_bad_info/1, - cluster_is_not_restarted_on_socket_error/1, - non_heartbeat_messages_are_ignored/1 -]). 
- -all() -> [ - conf_is_periodically_fetched, - cluster_is_restarted_when_new_nodes, - cluster_is_restarted_when_lost_nodes, - cluster_is_not_restarted_when_other_changes, - cluster_is_not_restarted_with_bad_info, - cluster_is_not_restarted_on_socket_error, - non_heartbeat_messages_are_ignored -]. +-export([all/0, init_per_testcase/2, end_per_testcase/2]). +-export([conf_is_periodically_fetched/1, + cluster_is_restarted_when_new_nodes/1, + cluster_is_restarted_when_lost_nodes/1, + cluster_is_not_restarted_when_other_changes/1, + cluster_is_not_restarted_with_bad_info/1, + cluster_is_not_restarted_on_socket_error/1, + non_heartbeat_messages_are_ignored/1]). + +all() -> + [conf_is_periodically_fetched, + cluster_is_restarted_when_new_nodes, + cluster_is_restarted_when_lost_nodes, + cluster_is_not_restarted_when_other_changes, + cluster_is_not_restarted_with_bad_info, + cluster_is_not_restarted_on_socket_error, + non_heartbeat_messages_are_ignored]. init_per_testcase(_, Conf) -> meck:new([mero_elasticache, mero_wrk_tcp_binary], [passthrough, nolink]), HostLinea = <<"a1.com|10.100.100.100|11112 ", - "a2.com|10.101.101.00|11112 ", - "a3.com|10.102.00.102|11112\n">>, - - HostLineb = <<"b1.com|10.100.100.100|11212 ", - "b2.com|10.101.101.00|11212\n">>, - + "a2.com|10.101.101.00|11112 ", + "a3.com|10.102.00.102|11112\n">>, + HostLineb = <<"b1.com|10.100.100.100|11212 ", "b2.com|10.101.101.00|11212\n">>, HostLinec = <<"c1.com|10.100.100.100|11112 ", - "c2.com|10.101.101.00|11112 ", - "c3.com|10.102.00.102|11112 c4.com|10.102.00.102|11112\n">>, - - Lines = #{ - a => HostLinea, - b => HostLineb, - c => HostLinec - }, - + "c2.com|10.101.101.00|11112 ", + "c3.com|10.102.00.102|11112 c4.com|10.102.00.102|11112\n">>, + Lines = #{a => HostLinea, b => HostLineb, c => HostLinec}, mock_elasticache(Lines), - meck:expect(mero_wrk_tcp_binary, connect, - fun(_Host, Port, CallbackInfo) -> - meck:passthrough(["localhost", Port, CallbackInfo]) - end), - + meck:expect(mero_wrk_tcp_binary, + 
connect, + fun (_Host, Port, CallbackInfo) -> + meck:passthrough(["localhost", Port, CallbackInfo]) + end), application:load(mero), Conf. @@ -89,7 +73,6 @@ end_per_testcase(_, _Conf) -> application:stop(mero), meck:unload([mero_elasticache, mero_wrk_tcp_binary]). - conf_is_periodically_fetched(_) -> mero_conf:monitor_heartbeat_delay(10, 11), start_server(), @@ -110,32 +93,31 @@ cluster_is_restarted_when_new_nodes(_) -> %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), - + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), %% Cluster1 stays, Cluster2 adds a node - Lines = #{ - a => <<"a1.com|10.100.100.100|11112 ", - "a2.com|10.101.101.00|11112 ", - "a3.com|10.102.00.102|11112\n">>, - b => <<"b1.com|10.100.100.100|11212 ", - "b2.com|10.101.101.00|11212 " - "b3.com|10.102.00.102|11212\n">> - }, + Lines = #{a => + <<"a1.com|10.100.100.100|11112 ", + "a2.com|10.101.101.00|11112 ", + "a3.com|10.102.00.102|11112\n">>, + b => + <<"b1.com|10.100.100.100|11212 ", + "b2.com|10.101.101.00|11212 b3.com|10.102.00.102|11212\n">>}, mock_elasticache(Lines), %% Cluster1 remains the same, Cluster2 is rebuilt send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - NewCluster2Children = supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2)), + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + NewCluster2Children = + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2)), ?assertEqual(3, length(NewCluster2Children)), - lists:foreach( - fun(Child) -> - 
?assertNot(lists:member(Child, Cluster2Children)) - end, NewCluster2Children), + lists:foreach(fun (Child) -> + ?assertNot(lists:member(Child, Cluster2Children)) + end, + NewCluster2Children), ok. cluster_is_restarted_when_lost_nodes(_) -> @@ -149,30 +131,26 @@ cluster_is_restarted_when_lost_nodes(_) -> %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), - + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), %% Cluster2 stays, Cluster1 lost a node - Lines = #{ - a => <<"a1.com|10.100.100.100|11112 ", - "a2.com|10.101.101.00|11112\n">>, - b => <<"b1.com|10.100.100.100|11212 ", - "b2.com|10.101.101.00|11212\n">> - }, + Lines = #{a => <<"a1.com|10.100.100.100|11112 ", "a2.com|10.101.101.00|11112\n">>, + b => <<"b1.com|10.100.100.100|11212 ", "b2.com|10.101.101.00|11212\n">>}, mock_elasticache(Lines), %% Cluster1 remains the same, Cluster2 is rebuilt send_heartbeat(), - NewCluster1Children = supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1)), + NewCluster1Children = + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1)), ?assertEqual(2, length(NewCluster1Children)), - lists:foreach( - fun(Child) -> - ?assertNot(lists:member(Child, Cluster1Children)) - end, NewCluster1Children), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), + lists:foreach(fun (Child) -> + ?assertNot(lists:member(Child, Cluster1Children)) + end, + NewCluster1Children), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), ok. 
cluster_is_not_restarted_when_other_changes(_) -> @@ -186,27 +164,24 @@ cluster_is_not_restarted_when_other_changes(_) -> %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), - + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), %% servers are reordered in both clusters, but that's irrelevant for us - Lines = #{ - a => <<"a2.com|10.101.101.00|11112 ", - "a1.com|10.100.100.100|11112 ", - "a3.com|10.102.00.102|11112\n">>, - b => <<"b2.com|10.101.101.00|11212 ", - "b1.com|10.100.100.100|11212\n">> - }, + Lines = #{a => + <<"a2.com|10.101.101.00|11112 ", + "a1.com|10.100.100.100|11112 ", + "a3.com|10.102.00.102|11112\n">>, + b => <<"b2.com|10.101.101.00|11212 ", "b1.com|10.100.100.100|11212\n">>}, mock_elasticache(Lines), %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), ok. cluster_is_not_restarted_with_bad_info(_) -> @@ -220,23 +195,20 @@ cluster_is_not_restarted_with_bad_info(_) -> %% Nothing Changed... 
send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), - + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), %% bad info is received - Lines = #{ - a => <<"this is wrong\n">> - }, + Lines = #{a => <<"this is wrong\n">>}, mock_elasticache(Lines), %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), ?assertNotEqual(undefined, whereis(mero_conf_monitor)), ok. @@ -251,20 +223,19 @@ cluster_is_not_restarted_on_socket_error(_) -> %% Nothing Changed... send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), - + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), %% socket times out when connecting to elasticache mock_elasticache_timeout(), %% Nothing Changed... 
send_heartbeat(), - ?assertEqual( - Cluster1Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), - ?assertEqual( - Cluster2Children, supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), + ?assertEqual(Cluster1Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster1))), + ?assertEqual(Cluster2Children, + supervisor:which_children(mero_cluster:sup_by_cluster_name(cluster2))), ?assertNotEqual(undefined, whereis(mero_conf_monitor)), ok. @@ -284,40 +255,34 @@ start_server() -> mero_test_util:start_server(cluster_config(), 5, 30, 1000, 5000). cluster_config() -> - [ - {cluster1, [ - {servers, {elasticache, [{a, 11112, 1}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary} - ]}, - {cluster2, [ - {servers, {elasticache, [{b, 11112, 1}]}}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_binary} - ]} - ]. + [{cluster1, + [{servers, {elasticache, [{a, 11112, 1}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}, + {cluster2, + [{servers, {elasticache, [{b, 11112, 1}]}}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}]. send_heartbeat() -> mero_conf_monitor ! heartbeat, {unknown_call, sync} = gen_server:call(mero_conf_monitor, sync). mock_elasticache(Lines) -> - meck:expect(mero_elasticache, request_response, - fun(Type, _, _, _) -> - HostLines = maps:get(Type, Lines), - { - ok, - [ - {banner, <<"CONFIG cluster ...">>}, - {version, <<"version1">>}, - {hosts, HostLines}, - {crlf, <<"\r\n">>}, - {eom, <<"END\r\n">>} - ] - } - end). 
+ meck:expect(mero_elasticache, + request_response, + fun (Type, _, _, _) -> + HostLines = maps:get(Type, Lines), + {ok, + [{banner, <<"CONFIG cluster ...">>}, + {version, <<"version1">>}, + {hosts, HostLines}, + {crlf, <<"\r\n">>}, + {eom, <<"END\r\n">>}]} + end). mock_elasticache_timeout() -> meck:expect(mero_elasticache, request_response, 4, {error, etimedout}). + diff --git a/test/mero_dummy_server.erl b/test/mero_dummy_server.erl index 4646f8d..d271b21 100644 --- a/test/mero_dummy_server.erl +++ b/test/mero_dummy_server.erl @@ -45,17 +45,12 @@ handle_info/2, code_change/3, terminate/2]). - -export([accept/4]). -define(TCP_SEND_TIMEOUT, 15000). -define(FULLSWEEP_AFTER_OPT, {fullsweep_after, 10}). --record(state, {listen_socket, - num_acceptors, - opts, - keys = [] - }). +-record(state, {listen_socket, num_acceptors, opts, keys = []}). %%%----------------------------------------------------------------------------- %%% START/STOP EXPORTS @@ -70,70 +65,62 @@ stop(Pid) when is_pid(Pid) -> MRef = erlang:monitor(process, Pid), gen_server:call(Pid, stop), receive - {'DOWN', MRef, _, Object, Info} -> - ct:pal("server ~p stopped ~p: ~p", [Object, whereis(?MODULE), Info]), - ok + {'DOWN', MRef, _, Object, Info} -> + ct:pal("server ~p stopped ~p: ~p", [Object, whereis(?MODULE), Info]), + ok end; stop(Port) when is_integer(Port) -> Name = name(Port), Pid = whereis(Name), stop(Pid). - reset(Port) -> gen_server:call(name(Port), reset). 
- handle_call({put_key, Port, Key, undefined, _}, _From, #state{keys = Keys} = State) -> ct:log("~p deleting key ~p", [Port, Key]), NKeys = lists:keydelete({Port, Key}, 1, Keys), ct:log("new keys: ~p", [NKeys]), {reply, ok, State#state{keys = NKeys}}; - handle_call({put_key, Port, Key, Value, CAS}, _From, #state{keys = Keys} = State) -> ct:log("~p setting key ~p (cas: ~p)", [Port, Key, CAS]), ActualCAS = case CAS of - undefined -> - {Mega, Sec, Micro} = os:timestamp(), - Mega + Sec + Micro; - _ -> - CAS + undefined -> + {Mega, Sec, Micro} = os:timestamp(), + Mega + Sec + Micro; + _ -> + CAS end, NKeys = lists:keystore({Port, Key}, 1, Keys, {{Port, Key}, {Value, ActualCAS}}), ct:log("new keys: ~p", [NKeys]), {reply, ok, State#state{keys = NKeys}}; - handle_call({get_key, Port, Key}, _From, #state{keys = Keys} = State) -> {reply, {ok, proplists:get_value({Port, Key}, Keys)}, State}; - handle_call({flush_all, Port}, _From, #state{keys = Keys} = State) -> ct:log("~p flushing all keys", [Port]), - NKeys = lists:filter(fun ({{KeyPort, _}, _}) when KeyPort == Port -> false; - (_) -> true - end, Keys), + NKeys = lists:filter(fun ({{KeyPort, _}, _}) when KeyPort == Port -> + false; + (_) -> + true + end, + Keys), ct:log("new keys: ~p", [NKeys]), {reply, ok, State#state{keys = NKeys}}; - handle_call(stop, _From, State) -> {stop, normal, ok, State}. - handle_cast(_Msg, State) -> {noreply, State}. - handle_info(_Info, State) -> {noreply, State}. - terminate(_Reason, State) -> gen_tcp:close(State#state.listen_socket). - code_change(_, _, State) -> {ok, State}. 
- %%%----------------------------------------------------------------------------- %%% INTERNAL EXPORTS %%%----------------------------------------------------------------------------- @@ -141,64 +128,63 @@ code_change(_, _, State) -> init([Port, Opts]) -> process_flag(trap_exit, true), case listen(Port, Opts) of - {ok, ListenSocket} -> - ct:pal("memcached mocked server started on port ~p", [Port]), - start_acceptor([self(), Port, ListenSocket, Opts]), - {ok, #state{listen_socket = ListenSocket, - opts = Opts}}; - {error, Reason} -> - ct:pal("memcached dummy server error: ~p", [Reason]), - {stop, Reason} + {ok, ListenSocket} -> + ct:pal("memcached mocked server started on port ~p", [Port]), + start_acceptor([self(), Port, ListenSocket, Opts]), + {ok, #state{listen_socket = ListenSocket, opts = Opts}}; + {error, Reason} -> + ct:pal("memcached dummy server error: ~p", [Reason]), + {stop, Reason} end. start_acceptor(Args) -> proc_lib:spawn_opt(?MODULE, accept, Args, [?FULLSWEEP_AFTER_OPT]). listen(Port, SockOpts) -> - gen_tcp:listen(Port, [binary, - {packet, 0}, - {active, false}, - {reuseaddr, true}, - {nodelay, true}, - {send_timeout, ?TCP_SEND_TIMEOUT}, - {send_timeout_close, true}, - {keepalive, true} | - SockOpts]). + gen_tcp:listen(Port, + [binary, + {packet, 0}, + {active, false}, + {reuseaddr, true}, + {nodelay, true}, + {send_timeout, ?TCP_SEND_TIMEOUT}, + {send_timeout_close, true}, + {keepalive, true} + | SockOpts]). accept(Parent, Port, ListenSocket, Opts) -> try - link(Parent) + link(Parent) catch - error:noproc -> exit(normal) + error:noproc -> + exit(normal) end, put('$ancestors', tl(get('$ancestors'))), start_accept(Parent, Port, ListenSocket, Opts). 
- start_accept(Parent, Port, ListenSocket, Opts) -> case gen_tcp:accept(ListenSocket) of - {ok, Socket} -> - unlink(Parent), - start_acceptor([Parent, Port, ListenSocket, Opts]), - loop(Parent, Socket, Port, Opts); - {error, closed} -> - unlink(Parent), - exit(normal); - {error, _Reason} -> - start_accept(Parent, Port, ListenSocket, Opts) + {ok, Socket} -> + unlink(Parent), + start_acceptor([Parent, Port, ListenSocket, Opts]), + loop(Parent, Socket, Port, Opts); + {error, closed} -> + unlink(Parent), + exit(normal); + {error, _Reason} -> + start_accept(Parent, Port, ListenSocket, Opts) end. - loop(Parent, Sock, Port, Opts) -> loop(Parent, Sock, Port, Opts, <<>>). loop(Parent, Sock, Port, Opts, Buf) -> case gen_tcp:recv(Sock, 0) of - {ok, Data} -> - handle_data(Parent, Sock, Port, <>), - loop(Parent, Sock, Port, Opts, Buf); - {error, _Reason} = Error -> - Error + {ok, Data} -> + handle_data(Parent, Sock, Port, <>), + loop(Parent, Sock, Port, Opts, Buf); + {error, _Reason} = Error -> + Error end. %%%----------------------------------------------------------------------------- @@ -207,26 +193,25 @@ loop(Parent, Sock, Port, Opts, Buf) -> handle_data(Parent, Sock, Port, Data) -> {Response, Unparsed} = response(Parent, Port, Data), - ct:log("~p sending response ~p~nremaining data ~p", [Port, - iolist_to_binary(Response), - iolist_to_binary(Unparsed)]), + ct:log("~p sending response ~p~nremaining data ~p", + [Port, iolist_to_binary(Response), iolist_to_binary(Unparsed)]), send(Sock, iolist_to_binary(Response)), %% this assumes any remaining buffered data includes only complete requests. case Unparsed of - <<>> -> - ok; - _ -> - handle_data(Parent, Sock, Port, Unparsed) + <<>> -> + ok; + _ -> + handle_data(Parent, Sock, Port, Unparsed) end. %% We send one byte at a time to test that we are handling package split correctly -send(_Sock, <<>>) -> ok; +send(_Sock, <<>>) -> + ok; send(Sock, <>) -> gen_tcp:send(Sock, Byte), timer:sleep(1), send(Sock, Rest). 
- flush_all(Parent, Port) -> gen_server:call(Parent, {flush_all, Port}). @@ -236,10 +221,10 @@ get_key(Parent, Port, Key) -> put_key(Parent, Port, Key, Value, CAS) -> gen_server:call(Parent, {put_key, Port, Key, Value, CAS}). + put_key(Parent, Port, Key, Value) -> put_key(Parent, Port, Key, Value, undefined). - parse(<<16#80:8, _Rest/binary>> = Request) -> ct:log("About to parse request: ~p", [Request]), {Resp, Unparsed} = parse_binary(Request), @@ -256,16 +241,22 @@ parse(Request) -> %%%% Response -canned_responses(text, _Index, _Key, _Op, not_found) -> ["NOT_FOUND", <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, not_stored) -> ["NOT_STORED", <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, stored) -> [<<"STORED">>, <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, already_exists) -> [<<"EXISTS">>, <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, deleted) -> [<<"DELETED">>, <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, flushed) -> [<<"FLUSHED">>, <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, {incr, I}) -> [mero_util:to_bin(I), <<"\r\n">>]; -canned_responses(text, _Index, _Key, _Op, noop) -> []; - - +canned_responses(text, _Index, _Key, _Op, not_found) -> + ["NOT_FOUND", <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, not_stored) -> + ["NOT_STORED", <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, stored) -> + [<<"STORED">>, <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, already_exists) -> + [<<"EXISTS">>, <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, deleted) -> + [<<"DELETED">>, <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, flushed) -> + [<<"FLUSHED">>, <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, {incr, I}) -> + [mero_util:to_bin(I), <<"\r\n">>]; +canned_responses(text, _Index, _Key, _Op, noop) -> + []; canned_responses(binary, Index, _Key, Op, not_found) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -273,10 +264,16 @@ canned_responses(binary, 
Index, _Key, Op, not_found) -> BodyOut = <<>>, BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + BodyOut/binary>>; canned_responses(binary, Index, _Key, Op, not_stored) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -285,10 +282,16 @@ canned_responses(binary, Index, _Key, Op, not_stored) -> BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + BodyOut/binary>>; canned_responses(binary, Index, _Key, Op, stored) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -297,11 +300,21 @@ canned_responses(binary, Index, _Key, Op, stored) -> BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - -canned_responses(binary, Index, _Key, Op, deleted) -> %% same as stored, intentionally + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + BodyOut/binary>>; +canned_responses(binary, + Index, + _Key, + Op, + deleted) -> %% same as stored, intentionally ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), Status = ?NO_ERROR, @@ -309,10 +322,16 @@ canned_responses(binary, Index, _Key, Op, deleted) -> %% same as stored, intenti BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + 
BodyOut/binary>>; canned_responses(binary, Index, _Key, ?MEMCACHE_INCREMENT, {incr, I}) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -321,10 +340,16 @@ canned_responses(binary, Index, _Key, ?MEMCACHE_INCREMENT, {incr, I}) -> BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, ?MEMCACHE_INCREMENT:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - + <<16#81:8, + ?MEMCACHE_INCREMENT:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + BodyOut/binary>>; canned_responses(binary, Index, _Key, Op, already_exists) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -333,10 +358,16 @@ canned_responses(binary, Index, _Key, Op, already_exists) -> BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, (opaque(Index)):32, - 0:64, BodyOut/binary>>; - + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + (opaque(Index)):32, + 0:64, + BodyOut/binary>>; canned_responses(binary, _Index, _Key, Op, flushed) -> ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -345,38 +376,49 @@ canned_responses(binary, _Index, _Key, Op, flushed) -> BodySizeOut = size(BodyOut), KeySize = 0, - <<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, Status:16, - BodySizeOut:32, 16#00:32, - 0:64, BodyOut/binary>>; - -canned_responses(binary, _Index, _Key, _Op, noop) -> []. - + <<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + 16#00:32, + 0:64, + BodyOut/binary>>; +canned_responses(binary, _Index, _Key, _Op, noop) -> + []. opaque(undefined) -> 16#00; opaque(Index) when is_integer(Index) -> Index. 
- text_response_get_keys(_Parent, _Port, [], Acc, _WithCas) -> - [Acc, "END\r\n"]; + [Acc, "END\r\n"]; text_response_get_keys(Parent, Port, [Key | Keys], Acc, WithCas) -> case get_key(Parent, Port, Key) of - undefined -> - text_response_get_keys(Parent, Port, Keys, Acc, WithCas); - {Value, CAS} -> - LValue = mero_util:to_bin(Value), - NBytes = size(LValue), - NAcc = [Acc, "VALUE", " ", mero_util:to_bin(Key), " 00 ", - mero_util:to_bin(NBytes), case WithCas of - true -> - [" ", mero_util:to_bin(CAS)]; - _ -> - "" - end, - "\r\n", - mero_util:to_bin(LValue), "\r\n"], - text_response_get_keys(Parent, Port, Keys, NAcc, WithCas) + undefined -> + text_response_get_keys(Parent, Port, Keys, Acc, WithCas); + {Value, CAS} -> + LValue = mero_util:to_bin(Value), + NBytes = size(LValue), + NAcc = [Acc, + "VALUE", + " ", + mero_util:to_bin(Key), + " 00 ", + mero_util:to_bin(NBytes), + case WithCas of + true -> + [" ", mero_util:to_bin(CAS)]; + _ -> + "" + end, + "\r\n", + mero_util:to_bin(LValue), + "\r\n"], + text_response_get_keys(Parent, Port, Keys, NAcc, WithCas) end. %% NOTE: This is not correct. 
Right now we don't distinguish between multiple @@ -384,10 +426,12 @@ text_response_get_keys(Parent, Port, [Key | Keys], Acc, WithCas) -> binary_response_get_keys(_Parent, _Port, [], Acc, _WithCas) -> Acc; binary_response_get_keys(Parent, Port, [{Op, Key} | Keys], Acc, WithCas) -> - {Status, Value, CAS} = case get_key(Parent, Port, Key) of - undefined -> {?NOT_FOUND, <<>>, undefined}; - {Val, StoredCAS} -> {?NO_ERROR, Val, StoredCAS} - end, + {Status, Value, CAS} = case get_key(Parent, Port, Key) of + undefined -> + {?NOT_FOUND, <<>>, undefined}; + {Val, StoredCAS} -> + {?NO_ERROR, Val, StoredCAS} + end, LValue = mero_util:to_bin(Value), ExtrasOut = <<>>, ExtrasSizeOut = size(ExtrasOut), @@ -395,117 +439,147 @@ binary_response_get_keys(Parent, Port, [{Op, Key} | Keys], Acc, WithCas) -> BodySizeOut = size(BodyOut), KeySize = size(Key), CASValue = case CAS of - undefined -> - 0; - _ -> - CAS + undefined -> + 0; + _ -> + CAS end, - binary_response_get_keys(Parent, Port, Keys, - [<<16#81:8, Op:8, KeySize:16, ExtrasSizeOut:8, 0, - Status:16, BodySizeOut:32, 0:32, CASValue:64, - BodyOut/binary>> | Acc], + binary_response_get_keys(Parent, + Port, + Keys, + [<<16#81:8, + Op:8, + KeySize:16, + ExtrasSizeOut:8, + 0, + Status:16, + BodySizeOut:32, + 0:32, + CASValue:64, + BodyOut/binary>> + | Acc], WithCas). 
%% TODO add stored / not stored responses here response(Parent, Port, Request) -> {Kind, {DeleteKeys, Cmd}, Unparsed} = parse(Request), - lists:foreach(fun(K) -> put_key(Parent, Port, K, undefined) end, DeleteKeys), - Response = - case {Kind, Cmd} of - {Kind, flush_all} -> - flush_all(Parent, Port), - canned_responses(Kind, undefined, undefined, ?MEMCACHE_FLUSH_ALL, flushed); - {Kind, {get, Keys}} -> - case Kind of - text -> - text_response_get_keys(Parent, Port, Keys, [], false); - binary -> - binary_response_get_keys(Parent, Port, Keys, [], false) - end; - {Kind, {gets, Keys}} -> - case Kind of - text -> - R = text_response_get_keys(Parent, Port, Keys, [], true), - ct:log("gets result: ~p", [iolist_to_binary(R)]), - R; - binary -> - binary_response_get_keys(Parent, Port, Keys, [], true) - end; - {_Kind, {set, Key, Bytes, _Index, true = _Quiet}} -> - put_key(Parent, Port, Key, Bytes), - <<>>; - {Kind, {set, Key, Bytes, Index, false}} -> - put_key(Parent, Port, Key, Bytes), - canned_responses(Kind, Index, Key, ?MEMCACHE_SET, stored); - {Kind, {cas, Key, Bytes, CAS, Index, Quiet}} -> - Op = case Quiet of - true -> ?MEMCACHE_SETQ; - false -> ?MEMCACHE_SET - end, - case get_key(Parent, Port, Key) of - undefined -> - ct:log("cas of non-existent key ~p", [Key]), - canned_responses(Kind, Index, Key, Op, not_found); - {_, CAS} -> - ct:log("cas of existing key ~p with correct token ~p", [Key, CAS]), - put_key(Parent, Port, Key, Bytes, CAS + 1), - case Quiet of - true -> <<>>; - false -> canned_responses(Kind, Index, Key, Op, stored) - end; - {_, ExpectedCAS} -> - ct:log("cas of existing key ~p with incorrect token ~p (wanted ~p)", - [Key, CAS, ExpectedCAS]), - canned_responses(Kind, Index, Key, Op, already_exists) - end; - {Kind, {delete, Key}} -> - ct:log("deleting ~p", [Key]), - case get_key(Parent, Port, Key) of - undefined -> - ct:log("was not present"), - canned_responses(Kind, undefined, Key, ?MEMCACHE_DELETE, not_found); - {_Value, _} -> - ct:log("key was present"), 
- put_key(Parent, Port, Key, undefined, undefined), - canned_responses(Kind, undefined, Key, ?MEMCACHE_DELETE, deleted) - end; - {Kind, {add, Key, Bytes, Index, Quiet}} -> - Op = case Quiet of - true -> ?MEMCACHE_ADDQ; - false -> ?MEMCACHE_ADD - end, - case get_key(Parent, Port, Key) of - undefined -> - put_key(Parent, Port, Key, Bytes, undefined), - case Quiet of - true -> <<>>; - false -> canned_responses(Kind, Index, Key, Op, stored) - end; - {_Value, _} -> - canned_responses(Kind, Index, Key, Op, already_exists) - end; - {Kind, {incr, Key, ExpTime, Initial, Bytes}} -> - case get_key(Parent, Port, Key) of - undefined -> - %% Return error - case ExpTime of - 4294967295 -> %% 32 bits, all 1 - canned_responses(Kind, undefined, Key, ?MEMCACHE_INCREMENT, not_found); - _ -> - put_key(Parent, Port, Key, Initial), - canned_responses( - Kind, undefined, Key, ?MEMCACHE_INCREMENT, {incr, Initial}) - end; - {Value, _} -> - Result = mero_util:to_int(Value) + mero_util:to_int(Bytes), - put_key(Parent, Port, Key, Result), - canned_responses(Kind, undefined, Key, ?MEMCACHE_INCREMENT, {incr, Result}) - end - end, + lists:foreach(fun (K) -> + put_key(Parent, Port, K, undefined) + end, + DeleteKeys), + Response = case {Kind, Cmd} of + {Kind, flush_all} -> + flush_all(Parent, Port), + canned_responses(Kind, undefined, undefined, ?MEMCACHE_FLUSH_ALL, flushed); + {Kind, {get, Keys}} -> + case Kind of + text -> + text_response_get_keys(Parent, Port, Keys, [], false); + binary -> + binary_response_get_keys(Parent, Port, Keys, [], false) + end; + {Kind, {gets, Keys}} -> + case Kind of + text -> + R = text_response_get_keys(Parent, Port, Keys, [], true), + ct:log("gets result: ~p", [iolist_to_binary(R)]), + R; + binary -> + binary_response_get_keys(Parent, Port, Keys, [], true) + end; + {_Kind, {set, Key, Bytes, _Index, true = _Quiet}} -> + put_key(Parent, Port, Key, Bytes), + <<>>; + {Kind, {set, Key, Bytes, Index, false}} -> + put_key(Parent, Port, Key, Bytes), + canned_responses(Kind, 
Index, Key, ?MEMCACHE_SET, stored); + {Kind, {cas, Key, Bytes, CAS, Index, Quiet}} -> + Op = case Quiet of + true -> + ?MEMCACHE_SETQ; + false -> + ?MEMCACHE_SET + end, + case get_key(Parent, Port, Key) of + undefined -> + ct:log("cas of non-existent key ~p", [Key]), + canned_responses(Kind, Index, Key, Op, not_found); + {_, CAS} -> + ct:log("cas of existing key ~p with correct token ~p", [Key, CAS]), + put_key(Parent, Port, Key, Bytes, CAS + 1), + case Quiet of + true -> + <<>>; + false -> + canned_responses(Kind, Index, Key, Op, stored) + end; + {_, ExpectedCAS} -> + ct:log("cas of existing key ~p with incorrect token ~p (wanted ~p)", + [Key, CAS, ExpectedCAS]), + canned_responses(Kind, Index, Key, Op, already_exists) + end; + {Kind, {delete, Key}} -> + ct:log("deleting ~p", [Key]), + case get_key(Parent, Port, Key) of + undefined -> + ct:log("was not present"), + canned_responses(Kind, undefined, Key, ?MEMCACHE_DELETE, not_found); + {_Value, _} -> + ct:log("key was present"), + put_key(Parent, Port, Key, undefined, undefined), + canned_responses(Kind, undefined, Key, ?MEMCACHE_DELETE, deleted) + end; + {Kind, {add, Key, Bytes, Index, Quiet}} -> + Op = case Quiet of + true -> + ?MEMCACHE_ADDQ; + false -> + ?MEMCACHE_ADD + end, + case get_key(Parent, Port, Key) of + undefined -> + put_key(Parent, Port, Key, Bytes, undefined), + case Quiet of + true -> + <<>>; + false -> + canned_responses(Kind, Index, Key, Op, stored) + end; + {_Value, _} -> + canned_responses(Kind, Index, Key, Op, already_exists) + end; + {Kind, {incr, Key, ExpTime, Initial, Bytes}} -> + case get_key(Parent, Port, Key) of + undefined -> + %% Return error + case ExpTime of + 4294967295 -> %% 32 bits, all 1 + canned_responses(Kind, + undefined, + Key, + ?MEMCACHE_INCREMENT, + not_found); + _ -> + put_key(Parent, Port, Key, Initial), + canned_responses(Kind, + undefined, + Key, + ?MEMCACHE_INCREMENT, + {incr, Initial}) + end; + {Value, _} -> + Result = mero_util:to_int(Value) + 
mero_util:to_int(Bytes), + put_key(Parent, Port, Key, Result), + canned_responses(Kind, + undefined, + Key, + ?MEMCACHE_INCREMENT, + {incr, Result}) + end + end, {Response, Unparsed}. - %%% Parse parse_text([<<"get">> | Keys]) -> @@ -554,55 +628,89 @@ parse_binary(<<16#80:8, ?MEMCACHE_GETK:8, _/binary>> = Bin) -> {{[], {get, parse_get([], Bin)}}, <<>>}; parse_binary(<<16#80:8, ?MEMCACHE_GETKQ:8, _/binary>> = Bin) -> {{[], {get, parse_get([], Bin)}}, <<>>}; -parse_binary(<<16#80:8, Op:8, KeySize:16, - ExtrasSize:8, 16#00:8, 16#00:16, - BodySize:32, Index:32, CAS:64, +parse_binary(<<16#80:8, + Op:8, + KeySize:16, + ExtrasSize:8, + 16#00:8, + 16#00:16, + BodySize:32, + Index:32, + CAS:64, _Extras:ExtrasSize/binary, - Key:KeySize/binary, Rest/binary>>) - when Op == ?MEMCACHE_SET; - Op == ?MEMCACHE_SETQ -> + Key:KeySize/binary, + Rest/binary>>) + when Op == ?MEMCACHE_SET; Op == ?MEMCACHE_SETQ -> Quiet = Op == ?MEMCACHE_SETQ, ValueSize = BodySize - ExtrasSize - KeySize, <> = Rest, case CAS of - 16#00 -> - {{[], {set, Key, Value, Index, Quiet}}, Remaining}; - _ -> - {{[], {cas, Key, Value, CAS, Index, Quiet}}, Remaining} + 16#00 -> + {{[], {set, Key, Value, Index, Quiet}}, Remaining}; + _ -> + {{[], {cas, Key, Value, CAS, Index, Quiet}}, Remaining} end; -parse_binary(<<16#80:8, Op:8, KeySize:16, - ExtrasSize:8, 16#00:8, 16#00:16, - BodySize:32, Index:32, 16#00:64, +parse_binary(<<16#80:8, + Op:8, + KeySize:16, + ExtrasSize:8, + 16#00:8, + 16#00:16, + BodySize:32, + Index:32, + 16#00:64, _Extras:ExtrasSize/binary, - Key:KeySize/binary, Rest/binary>>) - when Op == ?MEMCACHE_ADD; - Op == ?MEMCACHE_ADDQ -> + Key:KeySize/binary, + Rest/binary>>) + when Op == ?MEMCACHE_ADD; Op == ?MEMCACHE_ADDQ -> Quiet = Op == ?MEMCACHE_ADDQ, ValueSize = BodySize - ExtrasSize - KeySize, <> = Rest, {{[], {add, Key, Value, Index, Quiet}}, Remaining}; -parse_binary(<<16#80:8, ?MEMCACHE_DELETE:8, KeySize:16, - ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, 
+parse_binary(<<16#80:8, + ?MEMCACHE_DELETE:8, + KeySize:16, + ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, _Extras:ExtrasSize/binary, Key:KeySize/binary>>) -> {{[], {delete, Key}}, <<>>}; parse_binary(<<16#80:8, ?MEMCACHE_DELETEQ:8, _/binary>> = Inp) -> parse_multi_delete_binary([], Inp); -parse_binary(<<16#80:8, ?MEMCACHE_INCREMENT:8, KeySize:16, - _ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, - Value:64, Initial:64, ExpTime:32, +parse_binary(<<16#80:8, + ?MEMCACHE_INCREMENT:8, + KeySize:16, + _ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + Value:64, + Initial:64, + ExpTime:32, Key:KeySize/binary>>) -> {{[], {incr, Key, ExpTime, Initial, Value}}, <<>>}. parse_multi_delete_binary(Acc, []) -> {{Acc, undefined}, <<>>}; -parse_multi_delete_binary(Acc, <<16#80:8, ?MEMCACHE_DELETEQ:8, KeySize:16, - ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, - _Extras:ExtrasSize/binary, - Key:KeySize/binary, Rest/binary>>) -> +parse_multi_delete_binary(Acc, + <<16#80:8, + ?MEMCACHE_DELETEQ:8, + KeySize:16, + ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + _Extras:ExtrasSize/binary, + Key:KeySize/binary, + Rest/binary>>) -> parse_multi_delete_binary([Key | Acc], Rest); parse_multi_delete_binary(Acc, Other) -> {{[], Cmd}, Remaining} = parse_binary(Other), @@ -610,23 +718,56 @@ parse_multi_delete_binary(Acc, Other) -> parse_get(Acc, <<>>) -> Acc; -parse_get(Acc, <<16#80:8, ?MEMCACHE_GET:8, KeySize:16, - _ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, - Key:KeySize/binary, Rest/binary>>) -> +parse_get(Acc, + <<16#80:8, + ?MEMCACHE_GET:8, + KeySize:16, + _ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + Key:KeySize/binary, + Rest/binary>>) -> parse_get([{?MEMCACHE_GET, Key} | Acc], Rest); -parse_get(Acc, <<16#80:8, ?MEMCACHE_GETQ:8, KeySize:16, - _ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 
16#00:32, 16#00:64, - Key:KeySize/binary, Rest/binary>>) -> +parse_get(Acc, + <<16#80:8, + ?MEMCACHE_GETQ:8, + KeySize:16, + _ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + Key:KeySize/binary, + Rest/binary>>) -> parse_get([{?MEMCACHE_GETQ, Key} | Acc], Rest); -parse_get(Acc, <<16#80:8, ?MEMCACHE_GETK:8, KeySize:16, - _ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, - Key:KeySize/binary, Rest/binary>>) -> +parse_get(Acc, + <<16#80:8, + ?MEMCACHE_GETK:8, + KeySize:16, + _ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + Key:KeySize/binary, + Rest/binary>>) -> parse_get([{?MEMCACHE_GETK, Key} | Acc], Rest); -parse_get(Acc, <<16#80:8, ?MEMCACHE_GETKQ:8, KeySize:16, - _ExtrasSize:8, 16#00:8, 16#00:16, - _BodySize:32, 16#00:32, 16#00:64, - Key:KeySize/binary, Rest/binary>>) -> +parse_get(Acc, + <<16#80:8, + ?MEMCACHE_GETKQ:8, + KeySize:16, + _ExtrasSize:8, + 16#00:8, + 16#00:16, + _BodySize:32, + 16#00:32, + 16#00:64, + Key:KeySize/binary, + Rest/binary>>) -> parse_get([{?MEMCACHE_GETKQ, Key} | Acc], Rest). + diff --git a/test/mero_pool_SUITE.erl b/test/mero_pool_SUITE.erl index e5fdc80..ed12aca 100644 --- a/test/mero_pool_SUITE.erl +++ b/test/mero_pool_SUITE.erl @@ -33,50 +33,46 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - init_per_testcase/2, - end_per_testcase/2, - start_stop/1, - start_stop_many/1, - checkout_checkin/1, - checkout_checkin_limits/1, - checkout_checkin_closed/1, - conn_failed_checkout_error/1, - checkout_and_die/1, - expire_connections/1, - checkout_timeout/1 -]). - --define(POOL, 'mero_cluster_localhost_0_0'). +-export([all/0, + init_per_testcase/2, + end_per_testcase/2, + start_stop/1, + start_stop_many/1, + checkout_checkin/1, + checkout_checkin_limits/1, + checkout_checkin_closed/1, + conn_failed_checkout_error/1, + checkout_and_die/1, + expire_connections/1, + checkout_timeout/1]). 
+ +-define(POOL, mero_cluster_localhost_0_0). -define(PORT, 11999). -define(TIMELIMIT(Timeout), mero_conf:add_now(Timeout)). -define(CLUSTER_CONFIG, - [{cluster, - [{servers, [{"localhost", ?PORT}]}, - {sharding_algorithm, {mero, shard_phash2}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}]} - ]). - -all() -> [ - start_stop, - start_stop_many, - checkout_checkin, - checkout_checkin_limits, - checkout_checkin_closed, - conn_failed_checkout_error, - checkout_and_die, - expire_connections, - checkout_timeout -]. + [{cluster, + [{servers, [{"localhost", ?PORT}]}, + {sharding_algorithm, {mero, shard_phash2}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_txt}]}]). + +all() -> + [start_stop, + start_stop_many, + checkout_checkin, + checkout_checkin_limits, + checkout_checkin_closed, + conn_failed_checkout_error, + checkout_and_die, + expire_connections, + checkout_timeout]. init_per_testcase(_, Conf) -> - application:load(mero), - Conf. + application:load(mero), + Conf. end_per_testcase(_, _Conf) -> - application:stop(mero). + application:stop(mero). %%%============================================================================= %%% Tests @@ -85,214 +81,198 @@ end_per_testcase(_, _Conf) -> %% Just tests if the application can be started and when it does that %% the mero_cluster module is generated correctly. start_stop(_Conf) -> - mero_test_util:start_server(?CLUSTER_CONFIG, 5, 30, 1000, 5000), - ct:log("~p~n", [mero_cluster:child_definitions(cluster)]), - ?assertMatch( - [{cluster, - [ - {links,7}, % we get +1 for timer:send_interval() - {monitors,0}, - {free,5}, - {connected,5}, - {connecting,0}, - {failed,0}, - {message_queue_len,0} - ]} - ], - mero:state()), - - ok = application:stop(mero), - ok = application:unload(mero). 
- + mero_test_util:start_server(?CLUSTER_CONFIG, 5, 30, 1000, 5000), + ct:log("~p~n", [mero_cluster:child_definitions(cluster)]), + ?assertMatch([{cluster, + [{links, 7}, % we get +1 for timer:send_interval() + {monitors, 0}, + {free, 5}, + {connected, 5}, + {connecting, 0}, + {failed, 0}, + {message_queue_len, 0}]}], + mero:state()), + ok = application:stop(mero), + ok = application:unload(mero). start_stop_many(_Conf) -> - MinConn = 10, - MaxConn = 400, - - ok = mero_conf:cluster_config(?CLUSTER_CONFIG), - ok = mero_conf:min_free_connections_per_pool(MinConn), - ok = mero_conf:initial_connections_per_pool(MinConn), - ok = mero_conf:max_connections_per_pool(MaxConn), - ok = mero_conf:expiration_interval(3000), - ok = mero_conf:connection_unused_max_time(2000), - ok = mero_conf:max_connection_delay_time(100), - - ok = application:start(mero), + MinConn = 10, + MaxConn = 400, - PoolModule = mero_cluster:server(cluster, <<"222">>), + ok = mero_conf:cluster_config(?CLUSTER_CONFIG), + ok = mero_conf:min_free_connections_per_pool(MinConn), + ok = mero_conf:initial_connections_per_pool(MinConn), + ok = mero_conf:max_connections_per_pool(MaxConn), + ok = mero_conf:expiration_interval(3000), + ok = mero_conf:connection_unused_max_time(2000), + ok = mero_conf:max_connection_delay_time(100), - ct:log("all memcached servers are down.. ~p" , [{PoolModule}]), + ok = application:start(mero), - mero_test_util:wait_for_min_connections_failed(PoolModule, - 0, 0, MinConn), + PoolModule = mero_cluster:server(cluster, <<"222">>), - ct:log("starting server on ?PORT ~p", [?PORT]), - {ok, _ServerPid} = mero_dummy_server:start_link(?PORT), + ct:log("all memcached servers are down.. 
~p", [{PoolModule}]), - ct:log("checking that the connections are performed"), - mero_test_util:wait_for_pool_state(PoolModule, MinConn, MinConn, 0, 0), + mero_test_util:wait_for_min_connections_failed(PoolModule, 0, 0, MinConn), + ct:log("starting server on ?PORT ~p", [?PORT]), + {ok, _ServerPid} = mero_dummy_server:start_link(?PORT), - ok = application:stop(mero), - ok = application:unload(mero). + ct:log("checking that the connections are performed"), + mero_test_util:wait_for_pool_state(PoolModule, MinConn, MinConn, 0, 0), + ok = application:stop(mero), + ok = application:unload(mero). %% Test expiration of the connections and re-generation when the server is %% available after expiration expire_connections(_) -> - ct:log("Creating a server configured to renew unused sockets."), - {ok, _Pid} = mero_test_util:start_server(?CLUSTER_CONFIG, 2, 4, 300, 900), - mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), - - ct:log("Let's take two of the connections, no new ones will be created"), - P1 = proc:new(), - P2 = proc:new(), - P3 = proc:new(), - {ok, Conn1} = proc:exec(P1, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - {ok, Conn2} = proc:exec(P2, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - mero_test_util:wait_for_pool_state(?POOL, 0, 2, 0, 0), - - ct:log("Only on reject are new connections minted."), - {error, reject} = proc:exec(P3, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), - - ct:log("Now we can take a new connection."), - {ok, Conn3} = proc:exec(P3, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), - - timer:sleep(500), - ct:log("kill the server so no connection can be stablished from now on! ;)"), - mero_dummy_server:stop(?PORT), - - mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), - ct:log("checkin the sockets. 
We want these sockets to not expire"), - ok = proc:exec(P1, {mero_pool, checkin, [Conn1]}), - ok = proc:exec(P2, {mero_pool, checkin, [Conn2]}), - ok = proc:exec(P3, {mero_pool, checkin, [Conn3]}), - mero_test_util:wait_for_pool_state(?POOL, 4, 4, 0, 0), - - ct:log("After second check we will end up with only 2 connections"), - timer:sleep(300), - mero_test_util:wait_for_pool_state(?POOL, 3, 3, 0, 0), - - ct:log("We take one of the connections"), - {ok, _Conn4} = proc:exec(P1, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0), - - ct:log("And wait for the rest to expire..."), - timer:sleep(601), - mero_test_util:wait_for_min_connections_failed(?POOL, 0, 1, 1), - - ct:log("Finally start server again and expect recover"), - {ok, _ServerPid} = mero_dummy_server:start_link(?PORT), - mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0). + ct:log("Creating a server configured to renew unused sockets."), + {ok, _Pid} = mero_test_util:start_server(?CLUSTER_CONFIG, 2, 4, 300, 900), + mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), + ct:log("Let's take two of the connections, no new ones will be created"), + P1 = proc:new(), + P2 = proc:new(), + P3 = proc:new(), + {ok, Conn1} = proc:exec(P1, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + {ok, Conn2} = proc:exec(P2, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + mero_test_util:wait_for_pool_state(?POOL, 0, 2, 0, 0), + + ct:log("Only on reject are new connections minted."), + {error, reject} = proc:exec(P3, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), + + ct:log("Now we can take a new connection."), + {ok, Conn3} = proc:exec(P3, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), + + timer:sleep(500), + ct:log("kill the server so no connection can be stablished from now " + "on! 
;)"), + mero_dummy_server:stop(?PORT), + + mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), + ct:log("checkin the sockets. We want these sockets to not expire"), + ok = proc:exec(P1, {mero_pool, checkin, [Conn1]}), + ok = proc:exec(P2, {mero_pool, checkin, [Conn2]}), + ok = proc:exec(P3, {mero_pool, checkin, [Conn3]}), + mero_test_util:wait_for_pool_state(?POOL, 4, 4, 0, 0), + + ct:log("After second check we will end up with only 2 connections"), + timer:sleep(300), + mero_test_util:wait_for_pool_state(?POOL, 3, 3, 0, 0), + + ct:log("We take one of the connections"), + {ok, _Conn4} = proc:exec(P1, {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0), + + ct:log("And wait for the rest to expire..."), + timer:sleep(601), + mero_test_util:wait_for_min_connections_failed(?POOL, 0, 1, 1), + ct:log("Finally start server again and expect recover"), + {ok, _ServerPid} = mero_dummy_server:start_link(?PORT), + mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0). %% @doc Basic test for checkout and checkin to pool. %% Test that connections are re-used. 
checkout_checkin(_) -> - mero_test_util:start_server(?CLUSTER_CONFIG, 1, 1, 1000, 1000), - - ct:log("A process is allowed to checkout a new connection"), - {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), - - ct:log("You are rejected simply because the limit has been reached"), - ?assertMatch({error, reject}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - ?assertMatch({error, reject}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - ?assertMatch({error, reject}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - - ct:log("checkin connection"), - ok = mero_pool:checkin(Conn1), - mero_test_util:wait_for_pool_state(?POOL, 1, 1, 0, 0), - - ct:log("Another process is allowed to checkout a new connection"), - ?assertMatch({ok, _}, proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})). - + mero_test_util:start_server(?CLUSTER_CONFIG, 1, 1, 1000, 1000), + + ct:log("A process is allowed to checkout a new connection"), + {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), + + ct:log("You are rejected simply because the limit has been reached"), + ?assertMatch({error, reject}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + ?assertMatch({error, reject}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + ?assertMatch({error, reject}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + ct:log("checkin connection"), + ok = mero_pool:checkin(Conn1), + mero_test_util:wait_for_pool_state(?POOL, 1, 1, 0, 0), + + ct:log("Another process is allowed to checkout a new connection"), + ?assertMatch({ok, _}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})). %% A little more complex than the previous. 
Tests that new connections are created %% as the ones we have are in use checkout_checkin_limits(_) -> - mero_test_util:start_server(?CLUSTER_CONFIG, 2, 4, 1000, 1000), - - mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), + mero_test_util:start_server(?CLUSTER_CONFIG, 2, 4, 1000, 1000), - ct:log("A process is allowed to checkout a new connection"), - {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), - mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0), + mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), - ct:log("A 2nd process is allowed to checkout a new connection"), - ?assertMatch({ok, _}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), + ct:log("A process is allowed to checkout a new connection"), + {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), + mero_test_util:wait_for_pool_state(?POOL, 2, 3, 0, 0), - ct:log("A 3rd process is allowed to checkout a new connection"), - ?assertMatch({ok, _}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), + ct:log("A 2nd process is allowed to checkout a new connection"), + ?assertMatch({ok, _}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), - ct:log("The first one is on use so the second one should be established soon"), - ok = mero_pool:checkin(Conn1), - mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), + ct:log("A 3rd process is allowed to checkout a new connection"), + ?assertMatch({ok, _}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0), - ct:log("A 3rd process is allowed to checkout a new connection"), - ?assertMatch({ok, _}, - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), - mero_test_util:wait_for_pool_state(?POOL, 1, 4, 
0, 0). + ct:log("The first one is on use so the second one should be established " + "soon"), + ok = mero_pool:checkin(Conn1), + mero_test_util:wait_for_pool_state(?POOL, 2, 4, 0, 0), + ct:log("A 3rd process is allowed to checkout a new connection"), + ?assertMatch({ok, _}, + proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]})), + mero_test_util:wait_for_pool_state(?POOL, 1, 4, 0, 0). %% Tests that if a socket is checkined closed a new one will be created checkout_checkin_closed(_) -> - mero_test_util:start_server(?CLUSTER_CONFIG, 2, 2, 1000, 1000), - mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), - - ct:log("A process is allowed to checkout a new connection"), - {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), - mero_test_util:wait_for_pool_state(?POOL, 1, 2, 0, 0), - - ct:log("A 2nd process is allowed to checkout a new connection"), - {ok, _Conn2} = - proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), - mero_test_util:wait_for_pool_state(?POOL, 0, 2, 0, 0), + mero_test_util:start_server(?CLUSTER_CONFIG, 2, 2, 1000, 1000), + mero_test_util:wait_for_pool_state(?POOL, 2, 2, 0, 0), - ct:log("First connection is checkined closed. It will open a new one"), - ok = mero_pool:checkin_closed(Conn1), - mero_test_util:wait_for_pool_state(?POOL, 1, 2, 0, 0). + ct:log("A process is allowed to checkout a new connection"), + {ok, Conn1} = mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), + mero_test_util:wait_for_pool_state(?POOL, 1, 2, 0, 0), + ct:log("A 2nd process is allowed to checkout a new connection"), + {ok, _Conn2} = proc:exec(proc:new(), {mero_pool, checkout, [?POOL, ?TIMELIMIT(1000)]}), + mero_test_util:wait_for_pool_state(?POOL, 0, 2, 0, 0), + ct:log("First connection is checkined closed. It will open a new one"), + ok = mero_pool:checkin_closed(Conn1), + mero_test_util:wait_for_pool_state(?POOL, 1, 2, 0, 0). %% @doc Test that connection failure results in a error %% checkoutting a connection from the pool. 
conn_failed_checkout_error(_) -> - ok = mero_conf:cluster_config(?CLUSTER_CONFIG), - ok = application:start(mero), - - mero_test_util:wait_for_min_connections_failed(?POOL, 0, 0, 2), - ?assertMatch({error, reject}, mero_pool:checkout(?POOL, ?TIMELIMIT(1000))). + ok = mero_conf:cluster_config(?CLUSTER_CONFIG), + ok = application:start(mero), + mero_test_util:wait_for_min_connections_failed(?POOL, 0, 0, 2), + ?assertMatch({error, reject}, mero_pool:checkout(?POOL, ?TIMELIMIT(1000))). %% @doc Test that the pool recovers a connection when %% a using process dies without checkin the connection. checkout_and_die(_) -> - mero_test_util:start_server(?CLUSTER_CONFIG, 1, 1, 1000, 1000), - - ct:log("A process is allowed to checkout a new connection"), - Parent = self(), - - spawn_link(fun() -> - mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), - mero_test_util:wait_for_pool_state(?POOL, 0, 1, 0, 0), - Parent ! done - end), - - receive - done -> - mero_test_util:wait_for_pool_state(?POOL, 1, 1, 0, 0), - ?assertMatch({ok, _}, mero_pool:checkout(?POOL, ?TIMELIMIT(1000))), - mero_test_util:wait_for_pool_state(?POOL, 0, 1, 0, 0) - end. + mero_test_util:start_server(?CLUSTER_CONFIG, 1, 1, 1000, 1000), + + ct:log("A process is allowed to checkout a new connection"), + Parent = self(), + + spawn_link(fun () -> + mero_pool:checkout(?POOL, ?TIMELIMIT(1000)), + mero_test_util:wait_for_pool_state(?POOL, 0, 1, 0, 0), + Parent ! done + end), + receive + done -> + mero_test_util:wait_for_pool_state(?POOL, 1, 1, 0, 0), + ?assertMatch({ok, _}, mero_pool:checkout(?POOL, ?TIMELIMIT(1000))), + mero_test_util:wait_for_pool_state(?POOL, 0, 1, 0, 0) + end. %% @doc Test that checkout the conn timeout, and the process dies. 
checkout_timeout(_) -> diff --git a/test/mero_test_util.erl b/test/mero_test_util.erl index f434f3e..fc65d52 100644 --- a/test/mero_test_util.erl +++ b/test/mero_test_util.erl @@ -33,97 +33,104 @@ -export([start_server/5, stop_servers/1, wait_for_pool_state/5, - wait_for_min_connections_failed/4 - ]). - + wait_for_min_connections_failed/4]). wait_for_pool_state(Pool, Free, Connected, Connecting, NumFailedConnecting) -> wait_for_pool_state(Pool, Free, Connected, Connecting, NumFailedConnecting, 100). wait_for_pool_state(Pool, _Free, _Connected, _Connecting, _NumFailedConnecting, 0) -> - exit({pool_failed_to_start, Pool, mero_pool:state(Pool)}); + exit({pool_failed_to_start, Pool, mero_pool:state(Pool)}); wait_for_pool_state(Pool, Free, Connected, Connecting, NumFailedConnecting, Retries) -> - case mero_pool:state(Pool) of - [ _QueueInfo, - _Links, - _Monitors, - {free, Free}, - {num_connected, Connected}, - {num_connecting, Connecting}, - {num_failed_connecting, NumFailedConnecting}] = State -> - io:format("Pool State is ~p ~p... GOT IT! ~n",[os:timestamp(), State]), - ok; - State -> - io:format("Pool State is ~p ~p... retry ~n",[os:timestamp(), State]), - timer:sleep(30), - wait_for_pool_state(Pool, Free, Connected, Connecting, NumFailedConnecting, Retries - 1) - end. - + case mero_pool:state(Pool) of + [_QueueInfo, + _Links, + _Monitors, + {free, Free}, + {num_connected, Connected}, + {num_connecting, Connecting}, + {num_failed_connecting, NumFailedConnecting}] = + State -> + io:format("Pool State is ~p ~p... GOT IT! ~n", [os:timestamp(), State]), + ok; + State -> + io:format("Pool State is ~p ~p... retry ~n", [os:timestamp(), State]), + timer:sleep(30), + wait_for_pool_state(Pool, Free, Connected, Connecting, NumFailedConnecting, Retries - 1) + end. 
wait_for_min_connections_failed(Pool, Free, Connected, MinFailed) -> - case mero_pool:state(Pool) of - [ _QueueInfo, - _Links, - _Monitors, - {free, Free}, - {num_connected, Connected}, - {num_connecting, _}, - {num_failed_connecting, NumFailed}] = State when MinFailed =< NumFailed -> - io:format("Pool State is ~p ~p... GOT IT! ~n",[os:timestamp(), State]), - ok; - State -> - io:format("Pool State is ~p ~p... retry ~n",[os:timestamp(), State]), - timer:sleep(30), - wait_for_min_connections_failed(Pool, Free, Connected, MinFailed) - end. - + case mero_pool:state(Pool) of + [_QueueInfo, + _Links, + _Monitors, + {free, Free}, + {num_connected, Connected}, + {num_connecting, _}, + {num_failed_connecting, NumFailed}] = + State + when MinFailed =< NumFailed -> + io:format("Pool State is ~p ~p... GOT IT! ~n", [os:timestamp(), State]), + ok; + State -> + io:format("Pool State is ~p ~p... retry ~n", [os:timestamp(), State]), + timer:sleep(30), + wait_for_min_connections_failed(Pool, Free, Connected, MinFailed) + end. 
start_server(ClusterConfig, MinConn, MaxConn, Expiration, MaxTime) -> - ok = mero_conf:cluster_config(ClusterConfig), - ok = mero_conf:initial_connections_per_pool(MinConn), - ok = mero_conf:min_free_connections_per_pool(MinConn), - ok = mero_conf:max_connections_per_pool(MaxConn), - ok = mero_conf:expiration_interval(Expiration), - ok = mero_conf:connection_unused_max_time(MaxTime), - ok = mero_conf:max_connection_delay_time(100), - ok = mero_conf:write_retries(3), - ok = mero_conf:timeout_read(100), - ok = mero_conf:timeout_write(1000), - ok = mero_conf:elasticache_load_config_delay(0), - - ServerPids = lists:foldr( - fun({_, Config}, Acc) -> - HostPortList = proplists:get_value(servers, Config), - lists:foldr(fun({_Host, Port}, Acc2) -> - ct:log("Starting server on Port ~p", [Port]), - ServerPid = - case mero_dummy_server:start_link(Port) of - {ok, Pid} -> Pid; - {error, {already_started, Pid}} -> Pid - end, - [ServerPid | Acc2] - end, Acc, HostPortList) - end, [], process_server_specs(ClusterConfig)), + ok = mero_conf:cluster_config(ClusterConfig), + ok = mero_conf:initial_connections_per_pool(MinConn), + ok = mero_conf:min_free_connections_per_pool(MinConn), + ok = mero_conf:max_connections_per_pool(MaxConn), + ok = mero_conf:expiration_interval(Expiration), + ok = mero_conf:connection_unused_max_time(MaxTime), + ok = mero_conf:max_connection_delay_time(100), + ok = mero_conf:write_retries(3), + ok = mero_conf:timeout_read(100), + ok = mero_conf:timeout_write(1000), + ok = mero_conf:elasticache_load_config_delay(0), - {ok, _} = application:ensure_all_started(mero), + ServerPids = lists:foldr(fun ({_, Config}, Acc) -> + HostPortList = proplists:get_value(servers, Config), + lists:foldr(fun ({_Host, Port}, Acc2) -> + ct:log("Starting server on Port ~p", + [Port]), + ServerPid = case + mero_dummy_server:start_link(Port) + of + {ok, Pid} -> + Pid; + {error, + {already_started, Pid}} -> + Pid + end, + [ServerPid | Acc2] + end, + Acc, + HostPortList) + end, + [], + 
process_server_specs(ClusterConfig)), + {ok, _} = application:ensure_all_started(mero), - %% Wait for the connections - lists:foreach( - fun(Pool) -> - wait_for_pool_state(Pool, MinConn, MinConn, 0, 0) - end, - [ Pool - || {Cluster, _} <- ClusterConfig, {_, _, Pool, _} <- mero_cluster:child_definitions(Cluster)] - ), - {ok, ServerPids}. + %% Wait for the connections + lists:foreach(fun (Pool) -> + wait_for_pool_state(Pool, MinConn, MinConn, 0, 0) + end, + [Pool + || {Cluster, _} <- ClusterConfig, + {_, _, Pool, _} <- mero_cluster:child_definitions(Cluster)]), + {ok, ServerPids}. stop_servers(Pids) -> [mero_dummy_server:stop(Pid) || Pid <- Pids]. process_server_specs(ClusterConfig) -> - try mero_conf:process_server_specs(ClusterConfig) + try + mero_conf:process_server_specs(ClusterConfig) catch - K:E -> - ct:pal("Can't process specs: ~p:~p~n~p~n", [K, E, erlang:get_stacktrace()]), - exit(E) + K:E -> + ct:pal("Can't process specs: ~p:~p~n~p~n", [K, E, erlang:get_stacktrace()]), + exit(E) end. + diff --git a/test/mero_test_with_local_memcached_SUITE.erl b/test/mero_test_with_local_memcached_SUITE.erl index 1acedb2..534c52c 100644 --- a/test/mero_test_with_local_memcached_SUITE.erl +++ b/test/mero_test_with_local_memcached_SUITE.erl @@ -33,39 +33,13 @@ -include_lib("eunit/include/eunit.hrl"). -include_lib("common_test/include/ct.hrl"). --export([ - all/0, - init_per_suite/1, - end_per_suite/1, - init_per_testcase/2, - end_per_testcase/2, - get_undefined_binary/1, - get_undefined_txt/1, - get_set_binary/1, - get_set_txt/1, - flush_binary/1, - flush_txt/1, - delete_binary/1, - mdelete_binary/1, - mdelete_txt/1, - delete_txt/1, - mget_binary/1, - mget_txt/1, - add_binary/1, - add_txt/1, - increment_binary/1, - increment_txt/1, - increment_binary_with_initial/1, - increment_txt_with_initial/1, - mincrement_binary/1, - mincrement_txt/1, - cas_binary/1, - cas_txt/1, - mgets_binary/1, - madd_binary/1, - mset_binary/1, - mcas_binary/1 -]). 
+-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2, + end_per_testcase/2, get_undefined_binary/1, get_undefined_txt/1, get_set_binary/1, + get_set_txt/1, flush_binary/1, flush_txt/1, delete_binary/1, mdelete_binary/1, + mdelete_txt/1, delete_txt/1, mget_binary/1, mget_txt/1, add_binary/1, add_txt/1, + increment_binary/1, increment_txt/1, increment_binary_with_initial/1, + increment_txt_with_initial/1, mincrement_binary/1, mincrement_txt/1, cas_binary/1, + cas_txt/1, mgets_binary/1, madd_binary/1, mset_binary/1, mcas_binary/1]). -define(HOST, "127.0.0.1"). -define(PORT, 11911). @@ -75,48 +49,46 @@ %%%============================================================================= %% TODO: Uncomment these if you want to test agains a specific memcache server -all() -> [ - %% get_undefined_binary, - %% get_undefined_txt, - %% get_set_binary, - %% get_set_txt, - %% flush_binary, - %% flush_txt, - %% delete_binary, - %% mdelete_binary, - %% mdelete_txt, - %% delete_txt, - %% mget_binary, - %% mget_txt, - %% add_binary, - %% add_txt, - %% increment_binary, - %% increment_txt, - %% increment_binary_with_initial, - %% increment_txt_with_initial, - %% mincrement_binary, - %% mincrement_txt, - %% cas_binary, - %% cas_txt, - %% mgets_binary, - %% madd_binary, - %% mset_binary, - %% mcas_binary -]. +all() -> + []. 
%% get_undefined_binary, + %% get_undefined_txt, + %% get_set_binary, + %% get_set_txt, + %% flush_binary, + %% flush_txt, + %% delete_binary, + %% mdelete_binary, + %% mdelete_txt, + %% delete_txt, + %% mget_binary, + %% mget_txt, + %% add_binary, + %% add_txt, + %% increment_binary, + %% increment_txt, + %% increment_binary_with_initial, + %% increment_txt_with_initial, + %% mincrement_binary, + %% mincrement_txt, + %% cas_binary, + %% cas_txt, + %% mgets_binary, + %% madd_binary, + %% mset_binary, + %% mcas_binary init_per_suite(Conf) -> application:load(mero), - ok = mero_conf:cluster_config( - [{cluster_binary, - [{servers, [{"localhost", 11211}]}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module,mero_wrk_tcp_binary}]}, - {cluster_txt, - [{servers, [{"localhost", 11211}]}, - {sharding_algorithm, {mero, shard_crc32}}, - {workers_per_shard, 1}, - {pool_worker_module, mero_wrk_tcp_txt}]}]), + ok = mero_conf:cluster_config([{cluster_binary, + [{servers, [{"localhost", 11211}]}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_binary}]}, + {cluster_txt, + [{servers, [{"localhost", 11211}]}, + {sharding_algorithm, {mero, shard_crc32}}, + {workers_per_shard, 1}, + {pool_worker_module, mero_wrk_tcp_txt}]}]), ok = mero_conf:initial_connections_per_pool(4), ok = mero_conf:min_free_connections_per_pool(1), ok = mero_conf:expiration_interval(3000), @@ -132,18 +104,14 @@ end_per_suite(_Conf) -> ok = application:stop(mero), ok. - init_per_testcase(_Module, Conf) -> ct:log("state ~p", [mero:state()]), - Keys = [key() || _ <- lists:seq(1, 4)], + Keys = [key() || _ <- lists:seq(1, 4)], [{keys, Keys} | Conf]. - end_per_testcase(_Module, _Conf) -> mero:flush_all(cluster_txt). 
- - %%%============================================================================= %%% Tests %%%============================================================================= @@ -151,31 +119,25 @@ end_per_testcase(_Module, _Conf) -> get_undefined_binary(Conf) -> get_undefined(cluster_binary, keys(Conf)). - get_undefined_txt(Conf) -> get_undefined(cluster_txt, keys(Conf)). - get_set_binary(Conf) -> Keys = keys(Conf), get_set(cluster_binary, cluster_txt, Keys). - get_set_txt(Conf) -> Keys = keys(Conf), get_set(cluster_txt, cluster_binary, Keys). - flush_binary(Conf) -> Keys = keys(Conf), flush(cluster_binary, Keys). - flush_txt(Conf) -> Keys = keys(Conf), flush(cluster_txt, Keys). - delete_binary(Conf) -> Keys = keys(Conf), delete(cluster_binary, Keys). @@ -184,7 +146,6 @@ mdelete_binary(Conf) -> Keys = keys(Conf), mdelete(cluster_binary, Keys). - delete_txt(Conf) -> Keys = keys(Conf), delete(cluster_txt, Keys). @@ -197,22 +158,18 @@ mget_binary(Conf) -> Keys = keys(Conf), mget(cluster_binary, cluster_txt, Keys). - mget_txt(Conf) -> Keys = keys(Conf), mget(cluster_txt, cluster_binary, Keys). - add_binary(Conf) -> Keys = keys(Conf), add(cluster_binary, cluster_txt, Keys). - add_txt(Conf) -> Keys = keys(Conf), add(cluster_txt, cluster_binary, Keys). - increment_binary(Conf) -> Keys = keys(Conf), increment(cluster_binary, cluster_txt, Keys). @@ -229,21 +186,18 @@ increment_txt(Conf) -> Keys = keys(Conf), increment(cluster_txt, cluster_binary, Keys). - increment_binary_with_initial(Conf) -> Keys = keys(Conf), increment_with_initial(cluster_binary, cluster_txt, Keys, 10, 2), mero:flush_all(cluster_binary), increment_with_initial(cluster_binary, cluster_txt, Keys, 0, 100). - increment_txt_with_initial(Conf) -> Keys = keys(Conf), increment_with_initial(cluster_binary, cluster_txt, Keys, 10, 2), mero:flush_all(cluster_binary), increment_with_initial(cluster_binary, cluster_txt, Keys, 800, 100). - cas_txt(Conf) -> Keys = keys(Conf), cas(cluster_txt, cluster_binary, Keys). 
@@ -269,7 +223,6 @@ mcas_binary(Conf) -> Keys = keys(Conf), mcas(cluster_binary, cluster_txt, Keys). - %%%============================================================================= %%% Internal functions %%%============================================================================= @@ -277,66 +230,51 @@ mcas_binary(Conf) -> keys(Conf) -> proplists:get_value(keys, Conf). - get_undefined(Cluster, Keys) -> ct:log("Checking empty keys with ~p~n", [Cluster]), ct:log("state ~p", [mero:state()]), [{Key, undefined} = mero:get(Cluster, Key) || Key <- Keys], ct:log("Checking empty keys ok~n"). - get_set(Cluster, ClusterAlt, Keys) -> ct:log("Check set to adroll ~n"), ct:log("state ~p", [mero:state()]), - [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) - || Key <- Keys], + [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) || Key <- Keys], ct:log("Checking keys ~n"), [{Key, <<"Adroll">>} = mero:get(Cluster, Key) || Key <- Keys], [{Key, <<"Adroll">>} = mero:get(ClusterAlt, Key) || Key <- Keys]. - flush(Cluster, Keys) -> ct:log("Check set to adroll ~n"), ct:log("state ~p", [mero:state()]), - [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) - || Key <- Keys], + [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) || Key <- Keys], ct:log("Flushing local memcache ! ~p ~n", [mero:flush_all(Cluster)]), ct:log("Checking empty keys ~n"), [{Key, undefined} = mero:get(Cluster, Key) || Key <- Keys]. - - delete(Cluster, Keys) -> ct:log("Check set to adroll ~n"), ct:log("state ~p", [mero:state()]), - [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) - || Key <- Keys], - + [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) || Key <- Keys], ct:log("Delete ! ~n", []), [ok = mero:delete(Cluster, Key, 1000) || Key <- Keys], ct:log("Checking empty keys ~n"), [{Key, undefined} = mero:get(Cluster, Key) || Key <- Keys]. 
- mdelete(Cluster, Keys) -> ct:log("Check set to adroll ~n"), ct:log("state ~p", [mero:state()]), - [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) - || Key <- Keys], - + [ok = mero:set(Cluster, Key, <<"Adroll">>, 11111, 1000) || Key <- Keys], ct:log("Delete ! ~n", []), ok = mero:mdelete(Cluster, Keys, 1000), ct:log("Checking empty keys ~n"), [{Key, undefined} = mero:get(Cluster, Key) || Key <- Keys]. - mget(Cluster, ClusterAlt, Keys) -> - [?assertMatch(ok, mero:set(Cluster, Key, Key, 11111, 1000)) - || Key <- Keys], - + [?assertMatch(ok, mero:set(Cluster, Key, Key, 11111, 1000)) || Key <- Keys], io:format("Checking get itself ~n"), [?assertMatch({Key, Key}, mero:get(Cluster, Key)) || Key <- Keys], @@ -344,9 +282,10 @@ mget(Cluster, ClusterAlt, Keys) -> ResultsAlt = mero:mget(ClusterAlt, Keys, 10000), io:format("Checking mget ~p ~n", [Results]), [begin - ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, Results)), - ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, ResultsAlt)) - end || Key <- Keys]. + ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, Results)), + ?assertEqual({value, {Key, Key}}, lists:keysearch(Key, 1, ResultsAlt)) + end + || Key <- Keys]. 
mincrement(Cluster = cluster_txt, _ClusterAlt, Keys) -> {error, not_supportable} = mero:mincrement_counter(Cluster, Keys); @@ -359,104 +298,102 @@ mincrement(Cluster = cluster_binary, _ClusterAlt, Keys) -> increment(Cluster, ClusterAlt, Keys) -> io:format("Increment +1 +1 +1 ~n"), - F = fun(Key, Expected) -> - IncrementReturn = element(2, mero:increment_counter(Cluster, Key)), - io:format("Increment return Expected ~p Received ~p~n", [Expected, IncrementReturn]), - {Key, Value2} = mero:get(Cluster, Key), - io:format("Checking get ~p ~p ~n", [Cluster, Value2]), - ?assertMatch(Expected, IncrementReturn), - ?assertMatch(IncrementReturn, binary_to_integer(Value2)), - {Key, Value3} = mero:get(ClusterAlt, Key), - io:format("Checking get ~p ~p ~n", [ClusterAlt, Value3]), - ?assertMatch(IncrementReturn, binary_to_integer(Value3)) + F = fun (Key, Expected) -> + IncrementReturn = element(2, mero:increment_counter(Cluster, Key)), + io:format("Increment return Expected ~p Received ~p~n", + [Expected, IncrementReturn]), + {Key, Value2} = mero:get(Cluster, Key), + io:format("Checking get ~p ~p ~n", [Cluster, Value2]), + ?assertMatch(Expected, IncrementReturn), + ?assertMatch(IncrementReturn, binary_to_integer(Value2)), + {Key, Value3} = mero:get(ClusterAlt, Key), + io:format("Checking get ~p ~p ~n", [ClusterAlt, Value3]), + ?assertMatch(IncrementReturn, binary_to_integer(Value3)) end, - [F(Key, 1) || Key <- Keys], [F(Key, 2) || Key <- Keys], [F(Key, 3) || Key <- Keys]. 
- - increment_with_initial(Cluster, ClusterAlt, Keys, Initial, Steps) -> - io:format("Increment +~p ~p ~n",[Initial, Steps]), - - F = fun(Key, Expected) -> - IncrementReturn = element(2, - mero:increment_counter(Cluster, Key, Steps, Initial, 22222, 3, 1000)), - io:format("Increment return Expected ~p Received ~p~n", [Expected, IncrementReturn]), - {Key, Value2} = mero:get(Cluster, Key), - io:format("Checking get ~p ~p ~n", [Cluster, Value2]), - {Key, Value3} = mero:get(ClusterAlt, Key), - io:format("Checking get ~p ~p ~n", [ClusterAlt, Value3]), - ?assertMatch(Expected, IncrementReturn), - ?assertMatch(IncrementReturn, binary_to_integer(Value2)), - ?assertMatch(IncrementReturn, binary_to_integer(Value3)) - end, - + io:format("Increment +~p ~p ~n", [Initial, Steps]), + + F = fun (Key, Expected) -> + IncrementReturn = element(2, + mero:increment_counter(Cluster, + Key, + Steps, + Initial, + 22222, + 3, + 1000)), + io:format("Increment return Expected ~p Received ~p~n", + [Expected, IncrementReturn]), + {Key, Value2} = mero:get(Cluster, Key), + io:format("Checking get ~p ~p ~n", [Cluster, Value2]), + {Key, Value3} = mero:get(ClusterAlt, Key), + io:format("Checking get ~p ~p ~n", [ClusterAlt, Value3]), + ?assertMatch(Expected, IncrementReturn), + ?assertMatch(IncrementReturn, binary_to_integer(Value2)), + ?assertMatch(IncrementReturn, binary_to_integer(Value3)) + end, [F(Key, Initial) || Key <- Keys], - [F(Key, (Initial + Steps)) || Key <- Keys], - [F(Key, (Initial + 2*Steps)) || Key <- Keys]. - + [F(Key, Initial + Steps) || Key <- Keys], + [F(Key, Initial + 2 * Steps) || Key <- Keys]. 
add(Cluster, ClusterAlt, Keys) -> io:format("Attempt to add sucess to 5000 ~n"), Expected = <<"5000">>, Expected2 = <<"asdf">>, [begin - ?assertEqual(ok, mero:add(Cluster, Key, Expected, 10000, 10000)), - ?assertEqual({error, not_stored}, mero:add(cluster_txt, Key, Expected2, 10000, 10000)), - ?assertEqual( - {error, already_exists}, mero:add(cluster_binary, Key, Expected2, 10000, 10000)), - {Key, Value} = mero:get(Cluster, Key), - {Key, Value2} = mero:get(ClusterAlt, Key), - io:format("Checking get ~p ~p ~n", [Value, Value2]), - ?assertEqual(Expected, Value), - ?assertEqual(Expected, Value2) - end || Key <- Keys]. - + ?assertEqual(ok, mero:add(Cluster, Key, Expected, 10000, 10000)), + ?assertEqual({error, not_stored}, mero:add(cluster_txt, Key, Expected2, 10000, 10000)), + ?assertEqual({error, already_exists}, + mero:add(cluster_binary, Key, Expected2, 10000, 10000)), + {Key, Value} = mero:get(Cluster, Key), + {Key, Value2} = mero:get(ClusterAlt, Key), + io:format("Checking get ~p ~p ~n", [Value, Value2]), + ?assertEqual(Expected, Value), + ?assertEqual(Expected, Value2) + end + || Key <- Keys]. 
cas(Cluster, ClusterAlt, Keys) -> Value1 = <<"asdf">>, Value2 = <<"foo">>, Value3 = <<"bar">>, [begin - ?assertEqual({error, not_found}, mero:cas(Cluster, Key, Value1, 10000, 10000, 12345)), - await_connected(Cluster), - ?assertEqual(ok, mero:set(Cluster, Key, Value1, 10000, 10000)), - ?assertEqual({Key, Value1}, mero:get(ClusterAlt, Key)), - {Key, Value1, CAS} = mero:gets(Cluster, Key), - {Key, Value1, CAS} = mero:gets(ClusterAlt, Key), - ?assertEqual( - {error, already_exists}, mero:cas(Cluster, Key, Value2, 10000, 10000, CAS + 1)), - await_connected(Cluster), - ?assertEqual(ok, mero:cas(Cluster, Key, Value2, 10000, 10000, CAS)), - ?assertEqual( - {error, already_exists}, mero:cas(ClusterAlt, Key, Value2, 10000, 10000, CAS)), - await_connected(ClusterAlt), - ?assertEqual({Key, Value2}, mero:get(ClusterAlt, Key)), - ?assertEqual(ok, mero:set(Cluster, Key, Value3, 10000, 10000)), - {Key, Value3, NCAS} = mero:gets(Cluster, Key), - ?assertNotEqual(CAS, NCAS) + ?assertEqual({error, not_found}, mero:cas(Cluster, Key, Value1, 10000, 10000, 12345)), + await_connected(Cluster), + ?assertEqual(ok, mero:set(Cluster, Key, Value1, 10000, 10000)), + ?assertEqual({Key, Value1}, mero:get(ClusterAlt, Key)), + {Key, Value1, CAS} = mero:gets(Cluster, Key), + {Key, Value1, CAS} = mero:gets(ClusterAlt, Key), + ?assertEqual({error, already_exists}, + mero:cas(Cluster, Key, Value2, 10000, 10000, CAS + 1)), + await_connected(Cluster), + ?assertEqual(ok, mero:cas(Cluster, Key, Value2, 10000, 10000, CAS)), + ?assertEqual({error, already_exists}, + mero:cas(ClusterAlt, Key, Value2, 10000, 10000, CAS)), + await_connected(ClusterAlt), + ?assertEqual({Key, Value2}, mero:get(ClusterAlt, Key)), + ?assertEqual(ok, mero:set(Cluster, Key, Value3, 10000, 10000)), + {Key, Value3, NCAS} = mero:gets(Cluster, Key), + ?assertNotEqual(CAS, NCAS) end || Key <- Keys]. - %% this is needed b/c our test server doesn't emulate a real memcached server with 100% %% accuracy. 
mgets(Cluster, _ClusterAlt, Keys) -> - Expected = lists:keysort(1, [{Key, undefined, undefined} - || Key <- Keys]), + Expected = lists:keysort(1, [{Key, undefined, undefined} || Key <- Keys]), ?assertEqual(Expected, lists:keysort(1, mero:mgets(Cluster, Keys, 1000))). - madd(Cluster, _ClusterAlt, Keys) -> Expected = lists:duplicate(length(Keys), ok) ++ [{error, already_exists}], - KVs = [{Key, <<"xyzzy">>, 1000} - || Key <- Keys] ++ [{hd(Keys), <<"flub">>, 1000}], + KVs = [{Key, <<"xyzzy">>, 1000} || Key <- Keys] ++ [{hd(Keys), <<"flub">>, 1000}], ?assertEqual(Expected, mero:madd(Cluster, KVs, 1000)), ?assertEqual({hd(Keys), <<"xyzzy">>}, mero:get(Cluster, hd(Keys), 1000)). - madd_moving(Cluster, _ClusterAlt, _Keys) -> %% with one existing key, add new keys repeatedly, moving the %% position of the existing key each time: @@ -469,71 +406,67 @@ madd_moving(Cluster, _ClusterAlt, _Keys) -> lists:foreach(fun ({Start, N}) -> mero:flush_all(Cluster), ok = mero:add(Cluster, ExistingKey, ExistingKey, 1000, 1000), - CurKeys = MakeKeys(Start, N) - ++ [ExistingKey] - ++ MakeKeys(Start + N + 1, Total - N - 1), + CurKeys = MakeKeys(Start, N) ++ + [ExistingKey] ++ MakeKeys(Start + N + 1, Total - N - 1), ExpectedResult = [case Key of - ExistingKey -> {error, already_exists}; - _ -> ok + ExistingKey -> + {error, already_exists}; + _ -> + ok end || Key <- CurKeys], ?assertEqual(ExpectedResult, - mero:madd(Cluster, [{Key, Key, 1000} - || Key <- CurKeys], 1000)), - ?assertEqual(lists:keysort(1, [{Key, Key} - || Key <- CurKeys]), + mero:madd(Cluster, + [{Key, Key, 1000} || Key <- CurKeys], + 1000)), + ?assertEqual(lists:keysort(1, [{Key, Key} || Key <- CurKeys]), lists:keysort(1, mero:mget(Cluster, CurKeys, 1000))) end, - [{1, N} - || N <- lists:seq(1, Total - 1)]). - + [{1, N} || N <- lists:seq(1, Total - 1)]). 
mset(Cluster, _ClusterAlt, Keys) -> - KVs = [{Key, Key, 1000} - || Key <- Keys], + KVs = [{Key, Key, 1000} || Key <- Keys], Expected = lists:duplicate(length(Keys), ok), ?assertEqual(Expected, mero:mset(Cluster, KVs, 1000)), - ?assertEqual(lists:keysort(1, [{Key, Key} - || Key <- Keys]), + ?assertEqual(lists:keysort(1, [{Key, Key} || Key <- Keys]), lists:keysort(1, mero:mget(Cluster, Keys, 1000))). - mcas(Cluster, _ClusterAlt, Keys) -> - mero:mset(Cluster, [{Key, Key, 1000} - || Key <- Keys], 1000), + mero:mset(Cluster, [{Key, Key, 1000} || Key <- Keys], 1000), Stored = mero:mgets(Cluster, Keys, 1000), FailedUpdate = {element(1, hd(Stored)), <<"xyzzy">>, 1000, 12345}, - Updates = [FailedUpdate - | [{Key, <<Key/binary, Key/binary>>, 1000, CAS} - || {Key, _, CAS} <- tl(Stored)]] ++ [FailedUpdate], - Expected = [{error, already_exists} - | lists:duplicate(length(Stored) - 1, ok)] ++ [{error, already_exists}], + Updates = [FailedUpdate | [{Key, <<Key/binary, Key/binary>>, 1000, CAS} + || {Key, _, CAS} <- tl(Stored)]] + ++ [FailedUpdate], + Expected = [{error, already_exists} | lists:duplicate(length(Stored) - 1, ok)] ++ + [{error, already_exists}], ?assertEqual(Expected, mero:mcas(Cluster, Updates, 1000)), - ?assertEqual(lists:keysort(1, [{element(1, hd(Stored)), element(1, hd(Stored))} - | [{Key, <<Key/binary, Key/binary>>} - || {Key, _, _} <- tl(Stored)]]), + ?assertEqual(lists:keysort(1, + [{element(1, hd(Stored)), element(1, hd(Stored))} | [{Key, + <<Key/binary, Key/binary>>} + || {Key, _, _} + <- tl(Stored)]]), lists:keysort(1, mero:mget(Cluster, Keys, 1000))). - %%%============================================================================= %%% Internal functions %%%============================================================================= - key() -> base64:encode(crypto:strong_rand_bytes(20)). 
- await_connected(Cluster) -> ct:log("waiting for free connections"), - Wait = fun W () -> + Wait = fun W() -> State = mero:state(), case proplists:get_value(connected, proplists:get_value(Cluster, State)) of - N when is_integer(N) andalso N > 0 -> - ok; - _ -> - timer:sleep(100), - W() + N when is_integer(N) andalso N > 0 -> + ok; + _ -> + timer:sleep(100), + W() end end, Wait(). + diff --git a/test/mero_wrk_tcp_binary_SUITE.erl b/test/mero_wrk_tcp_binary_SUITE.erl index 01643b8..83eb056 100644 --- a/test/mero_wrk_tcp_binary_SUITE.erl +++ b/test/mero_wrk_tcp_binary_SUITE.erl @@ -29,126 +29,139 @@ -module(mero_wrk_tcp_binary_SUITE). -include_lib("mero/include/mero.hrl"). - -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - init_per_testcase/2, - end_per_testcase/2, - mero_get_not_found/1, - mero_get_found/1, - mero_set/1, - mero_mget/1 -]). - +-export([all/0, + init_per_testcase/2, + end_per_testcase/2, + mero_get_not_found/1, + mero_get_found/1, + mero_set/1, + mero_mget/1]). -export([stats/1]). -all() -> [ - mero_get_not_found, - mero_get_found, - mero_set, - mero_mget -]. +all() -> + [mero_get_not_found, mero_get_found, mero_set, mero_mget]. init_per_testcase(_, Conf) -> meck:new(gen_tcp, [unstick]), - meck:expect(gen_tcp, connect, fun(_,_,_) -> {ok, socket} end), - meck:expect(gen_tcp, controlling_process, fun(_,_) -> ok end), - meck:expect(gen_tcp, close, fun(_) -> ok end), - meck:expect(gen_tcp, send, fun(_,_) -> ok end), + meck:expect(gen_tcp, + connect, + fun (_, _, _) -> + {ok, socket} + end), + meck:expect(gen_tcp, + controlling_process, + fun (_, _) -> + ok + end), + meck:expect(gen_tcp, + close, + fun (_) -> + ok + end), + meck:expect(gen_tcp, + send, + fun (_, _) -> + ok + end), Conf. end_per_testcase(_, _Conf) -> meck:unload(gen_tcp), ok. 
- %%%============================================================================= %%% Tests %%%============================================================================= --define(GET_NOT_FOUND_RESPONSE, <<129,0,0,0,0,0,0,1,0,0,0,9,0,0,0,0,0,0,0,0,0,0,0,0, - 78,111,116,32,102,111,117,110,100>>). --define(GET_FOUND_RESPONSE, <<129,0,0,0,4,0,0,0,0,0,0,21,0,0,0,0,0,0,0,0,0,0,121,222, - 222,173,190,239,115,111,109,101,32,99,97,99,104,101,100,32, - 118,97,108,117,101>>). --define(SET_RESPONSE, <<129,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,121,221>>). --define(MGET_RESPONSE, <<129,13,0,2,4,0,0,0,0,0,0,23,0,0,0,0,0,0,0,0,0,0,121,222, - 222,173,190,239,97,97,115,111,109,101,32,99,97,99,104,101, - 100,32,118,97,108,117,101, - 129,12,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0, 0,0,0,0,0, - 99>>). +-define(GET_NOT_FOUND_RESPONSE, + <<129, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 78, 111, 116, + 32, 102, 111, 117, 110, 100>>). +-define(GET_FOUND_RESPONSE, + <<129, 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 222, 222, 173, + 190, 239, 115, 111, 109, 101, 32, 99, 97, 99, 104, 101, 100, 32, 118, 97, 108, 117, + 101>>). +-define(SET_RESPONSE, + <<129, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 221>>). +-define(MGET_RESPONSE, + <<129, 13, 0, 2, 4, 0, 0, 0, 0, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 121, 222, 222, + 173, 190, 239, 97, 97, 115, 111, 109, 101, 32, 99, 97, 99, 104, 101, 100, 32, 118, 97, + 108, 117, 101, 129, 12, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 99>>). %% Return up to L of the remaining bytes in buffer in each call. fake_network_recv(Buffer, L) -> receive - {read, Pid} -> - case Buffer of - <<B:L/binary, Rest/binary>> -> - Pid ! {ok, B}, - fake_network_recv(Rest, L); - _ -> - Pid ! {ok, Buffer}, - fake_network_recv(<<>>, L) - end + {read, Pid} -> + case Buffer of + <<B:L/binary, Rest/binary>> -> + Pid ! {ok, B}, + fake_network_recv(Rest, L); + _ -> + Pid ! 
{ok, Buffer}, + fake_network_recv(<<>>, L) + end end. network_read(Pid) -> Pid ! {read, self()}, receive - {ok, B} -> - {ok, B} + {ok, B} -> + {ok, B} end. stats(Metric) -> ct:log("~p", [Metric]). - test_response_parsing(Buffer, ExpectedResult, {MemcachedOp, MemcachedOpArgs}) -> %% Reads from the buffer in different chunk sizes, to exercise the buffering done %% on mero_wrk_tcp_binary. Check that the parsed result is the expected one. - lists:foreach(fun(ReadSize) -> - FakeNetwork = spawn_link(fun() -> fake_network_recv(Buffer, ReadSize) end), - meck:expect(gen_tcp, recv, fun(_, 0, _Timeout) -> network_read(FakeNetwork) end), - {ok, Client} = mero_wrk_tcp_binary:connect("localhost", 5000, {?MODULE, stats, []}), - ?assertMatch({Client, ExpectedResult}, - mero_wrk_tcp_binary:transaction(Client, MemcachedOp, MemcachedOpArgs)) - end, [10, 2, 1024]). + lists:foreach(fun (ReadSize) -> + FakeNetwork = spawn_link(fun () -> + fake_network_recv(Buffer, ReadSize) + end), + meck:expect(gen_tcp, + recv, + fun (_, 0, _Timeout) -> + network_read(FakeNetwork) + end), + {ok, Client} = mero_wrk_tcp_binary:connect("localhost", + 5000, + {?MODULE, stats, []}), + ?assertMatch({Client, ExpectedResult}, + mero_wrk_tcp_binary:transaction(Client, + MemcachedOp, + MemcachedOpArgs)) + end, + [10, 2, 1024]). mero_get_not_found(_Conf) -> - test_response_parsing( - ?GET_NOT_FOUND_RESPONSE, - #mero_item{key = <<"aa">>, value = undefined}, - {get, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?GET_NOT_FOUND_RESPONSE, + #mero_item{key = <<"aa">>, value = undefined}, + {get, [<<"aa">>, mero_conf:add_now(100)]}). mero_get_found(_Conf) -> - test_response_parsing( - ?GET_FOUND_RESPONSE, - #mero_item{key = <<"aa">>, value = <<"some cached value">>, cas = 31198}, - {get, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?GET_FOUND_RESPONSE, + #mero_item{key = <<"aa">>, value = <<"some cached value">>, cas = 31198}, + {get, [<<"aa">>, mero_conf:add_now(100)]}). 
mero_set(_Conf) -> - test_response_parsing( - ?SET_RESPONSE, - ok, - {set, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100), 31198]} - ). + test_response_parsing(?SET_RESPONSE, + ok, + {set, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100), + 31198]}). mero_mget(_Conf) -> - test_response_parsing( - ?MGET_RESPONSE, - [ - #mero_item{key = <<"c">>, value = undefined}, - #mero_item{key = <<"aa">>, value = <<"some cached value">>, cas = 31198}, - #mero_item{key = <<"b">>, value = undefined} - ], - {async_mget_response, [[<<"b">>, <<"aa">>,<<"c">>], mero_conf:add_now(100)]} - ). + test_response_parsing(?MGET_RESPONSE, + [#mero_item{key = <<"c">>, value = undefined}, + #mero_item{key = <<"aa">>, value = <<"some cached value">>, cas = 31198}, + #mero_item{key = <<"b">>, value = undefined}], + {async_mget_response, + [[<<"b">>, <<"aa">>, <<"c">>], mero_conf:add_now(100)]}). diff --git a/test/mero_wrk_tcp_txt_SUITE.erl b/test/mero_wrk_tcp_txt_SUITE.erl index 4abbc0b..de85fab 100644 --- a/test/mero_wrk_tcp_txt_SUITE.erl +++ b/test/mero_wrk_tcp_txt_SUITE.erl @@ -29,201 +29,220 @@ -module(mero_wrk_tcp_txt_SUITE). -include_lib("mero/include/mero.hrl"). - -include_lib("common_test/include/ct.hrl"). -include_lib("eunit/include/eunit.hrl"). --export([ - all/0, - init_per_testcase/2, - end_per_testcase/2, - mero_get_not_found/1, - mero_get_found/1, - mero_set/1, - mero_set_cas/1, - mero_set_cas_exists/1, - mero_set_cas_not_found/1, - mero_add/1, - mero_add_not_stored/1, - mero_delete/1, - mero_delete_not_found/1, - mero_flush_all/1, - mero_mget/1 -]). - +-export([all/0, + init_per_testcase/2, + end_per_testcase/2, + mero_get_not_found/1, + mero_get_found/1, + mero_set/1, + mero_set_cas/1, + mero_set_cas_exists/1, + mero_set_cas_not_found/1, + mero_add/1, + mero_add_not_stored/1, + mero_delete/1, + mero_delete_not_found/1, + mero_flush_all/1, + mero_mget/1]). -export([stats/1]). 
-all() -> [ - mero_get_not_found, - mero_get_found, - mero_set, - mero_set_cas, - mero_set_cas_exists, - mero_set_cas_not_found, - mero_add, - mero_add_not_stored, - mero_delete, - mero_delete_not_found, - mero_flush_all, - mero_mget -]. +all() -> + [mero_get_not_found, + mero_get_found, + mero_set, + mero_set_cas, + mero_set_cas_exists, + mero_set_cas_not_found, + mero_add, + mero_add_not_stored, + mero_delete, + mero_delete_not_found, + mero_flush_all, + mero_mget]. init_per_testcase(_, Conf) -> meck:new(gen_tcp, [unstick]), - meck:expect(gen_tcp, connect, fun(_,_,_) -> {ok, socket} end), - meck:expect(gen_tcp, controlling_process, fun(_,_) -> ok end), - meck:expect(gen_tcp, close, fun(_) -> ok end), - meck:expect(gen_tcp, send, fun(_,_) -> ok end), + meck:expect(gen_tcp, + connect, + fun (_, _, _) -> + {ok, socket} + end), + meck:expect(gen_tcp, + controlling_process, + fun (_, _) -> + ok + end), + meck:expect(gen_tcp, + close, + fun (_) -> + ok + end), + meck:expect(gen_tcp, + send, + fun (_, _) -> + ok + end), Conf. end_per_testcase(_, _Conf) -> meck:unload(gen_tcp), ok. - %%%============================================================================= %%% Tests %%%============================================================================= -define(GET_NOT_FOUND_RESPONSE, <<"END\r\n">>). - -define(GET_FOUND_RESPONSE, <<"VALUE aa 0 19\r\nsome cached\r\n value\r\nEND\r\n">>). - -define(STORED_RESPONSE, <<"STORED\r\n">>). - -define(EXISTS_RESPONSE, <<"EXISTS\r\n">>). - -define(NOT_STORED_RESPONSE, <<"NOT_STORED\r\n">>). - -define(NOT_FOUND_RESPONSE, <<"NOT_FOUND\r\n">>). - -define(DELETED_RESPONSE, <<"DELETED\r\n">>). - -define(OK_RESPONSE, <<"OK\r\n">>). - --define(MGET_RESPONSE, <<"VALUE aa 0 19 31198\r\nsome cached\r\n value\r\n" - "VALUE b 0 9\r\nsomething\r\nEND\r\n">>). +-define(MGET_RESPONSE, + <<"VALUE aa 0 19 31198\r\nsome cached\r\n value\r\nVALUE b 0 9\r\nsomet" + "hing\r\nEND\r\n">>). %% Return up to L of the remaining bytes in buffer in each call. 
fake_network_recv(Buffer, L) -> receive - {read, Pid} -> - case Buffer of - <<B:L/binary, Rest/binary>> -> - Pid ! {ok, B}, - fake_network_recv(Rest, L); - _ -> - Pid ! {ok, Buffer}, - fake_network_recv(<<>>, L) - end + {read, Pid} -> + case Buffer of + <<B:L/binary, Rest/binary>> -> + Pid ! {ok, B}, + fake_network_recv(Rest, L); + _ -> + Pid ! {ok, Buffer}, + fake_network_recv(<<>>, L) + end end. network_read(Pid) -> Pid ! {read, self()}, receive - {ok, B} -> - {ok, B} + {ok, B} -> + {ok, B} end. stats(Metric) -> ct:log("~p", [Metric]). - test_response_parsing(Buffer, ExpectedResult, {MemcachedOp, MemcachedOpArgs}) -> %% Reads from the buffer in different chunk sizes, to exercise the buffering done %% on mero_wrk_tcp_binary. Check that the parsed result is the expected one. - lists:foreach(fun(ReadSize) -> - FakeNetwork = spawn_link(fun() -> fake_network_recv(Buffer, ReadSize) end), - meck:expect(gen_tcp, recv, fun(_, 0, _Timeout) -> network_read(FakeNetwork) end), - {ok, Client} = mero_wrk_tcp_txt:connect("localhost", 5000, {?MODULE, stats, []}), - ?assertMatch({Client, ExpectedResult}, - mero_wrk_tcp_txt:transaction(Client, MemcachedOp, MemcachedOpArgs)) - end, [10, 2, 1024]). + lists:foreach(fun (ReadSize) -> + FakeNetwork = spawn_link(fun () -> + fake_network_recv(Buffer, ReadSize) + end), + meck:expect(gen_tcp, + recv, + fun (_, 0, _Timeout) -> + network_read(FakeNetwork) + end), + {ok, Client} = mero_wrk_tcp_txt:connect("localhost", + 5000, + {?MODULE, stats, []}), + ?assertMatch({Client, ExpectedResult}, + mero_wrk_tcp_txt:transaction(Client, + MemcachedOp, + MemcachedOpArgs)) + end, + [10, 2, 1024]). mero_get_not_found(_Conf) -> - test_response_parsing( - ?GET_NOT_FOUND_RESPONSE, - #mero_item{key = <<"aa">>, value = undefined}, - {get, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?GET_NOT_FOUND_RESPONSE, + #mero_item{key = <<"aa">>, value = undefined}, + {get, [<<"aa">>, mero_conf:add_now(100)]}). 
mero_get_found(_Conf) -> - test_response_parsing( - ?GET_FOUND_RESPONSE, - #mero_item{key = <<"aa">>, value = <<"some cached\r\n value">>, cas = undefined}, - {get, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?GET_FOUND_RESPONSE, + #mero_item{key = <<"aa">>, + value = <<"some cached\r\n value">>, + cas = undefined}, + {get, [<<"aa">>, mero_conf:add_now(100)]}). mero_set(_Conf) -> - test_response_parsing( - ?STORED_RESPONSE, - ok, - {set, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100), undefined]} - ). + test_response_parsing(?STORED_RESPONSE, + ok, + {set, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100), + undefined]}). mero_set_cas(_Conf) -> - test_response_parsing( - ?STORED_RESPONSE, - ok, - {set, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100), 31198]} - ). + test_response_parsing(?STORED_RESPONSE, + ok, + {set, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100), + 31198]}). mero_set_cas_exists(_Conf) -> - test_response_parsing( - ?EXISTS_RESPONSE, - {error, already_exists}, - {set, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100), 31198]} - ). + test_response_parsing(?EXISTS_RESPONSE, + {error, already_exists}, + {set, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100), + 31198]}). mero_set_cas_not_found(_Conf) -> - test_response_parsing( - ?NOT_FOUND_RESPONSE, - {error, not_found}, - {set, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100), 31198]} - ). + test_response_parsing(?NOT_FOUND_RESPONSE, + {error, not_found}, + {set, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100), + 31198]}). mero_add(_Conf) -> - test_response_parsing( - ?STORED_RESPONSE, - ok, - {add, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100)]} - ). 
+ test_response_parsing(?STORED_RESPONSE, + ok, + {add, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100)]}). mero_add_not_stored(_Conf) -> - test_response_parsing( - ?NOT_STORED_RESPONSE, - {error, not_stored}, - {add, [<<"aa">>, <<"some cached value">>, <<"1000">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?NOT_STORED_RESPONSE, + {error, not_stored}, + {add, + [<<"aa">>, + <<"some cached value">>, + <<"1000">>, + mero_conf:add_now(100)]}). mero_delete(_Conf) -> - test_response_parsing( - ?DELETED_RESPONSE, - ok, - {delete, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?DELETED_RESPONSE, + ok, + {delete, [<<"aa">>, mero_conf:add_now(100)]}). mero_delete_not_found(_Conf) -> - test_response_parsing( - ?NOT_FOUND_RESPONSE, - {error, not_found}, - {delete, [<<"aa">>, mero_conf:add_now(100)]} - ). + test_response_parsing(?NOT_FOUND_RESPONSE, + {error, not_found}, + {delete, [<<"aa">>, mero_conf:add_now(100)]}). mero_flush_all(_Conf) -> - test_response_parsing( - ?OK_RESPONSE, - ok, - {flush_all, [mero_conf:add_now(100)]} - ). + test_response_parsing(?OK_RESPONSE, ok, {flush_all, [mero_conf:add_now(100)]}). mero_mget(_Conf) -> - test_response_parsing( - ?MGET_RESPONSE, - [ - #mero_item{key = <<"b">>, value = <<"something">>}, - #mero_item{key = <<"aa">>, value = <<"some cached\r\n value">>, cas = 31198}, - #mero_item{key = <<"c">>, value = undefined} - ], - {async_mget_response, [[<<"b">>, <<"aa">>,<<"c">>], mero_conf:add_now(100)]} - ). + test_response_parsing(?MGET_RESPONSE, + [#mero_item{key = <<"b">>, value = <<"something">>}, + #mero_item{key = <<"aa">>, + value = <<"some cached\r\n value">>, + cas = 31198}, + #mero_item{key = <<"c">>, value = undefined}], + {async_mget_response, + [[<<"b">>, <<"aa">>, <<"c">>], mero_conf:add_now(100)]}). +