
Commit

use retry_timeout for (re)connects and added max_retries as an XML option
Sebastian Cohnen committed Aug 6, 2014
1 parent 9c2aa86 commit 5fece5b
Showing 3 changed files with 18 additions and 14 deletions.
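
The new option is set the same way as the existing retry_timeout option. A minimal illustrative snippet for a scenario file (the values are arbitrary examples, and the <options> wrapper is the usual Tsung placement, not something shown in this commit):

  <options>
    <option name="max_retries" value="3"/>
    <option name="retry_timeout" value="10"/>
  </options>
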
7 changes: 4 additions & 3 deletions include/ts_profile.hrl
@@ -29,7 +29,7 @@
 { regexp,
   subst = false,
   'when' = false,
-  name,
+  name,
   do = continue, %(continue | loop | abort | log )
   sleep_loop, % in seconds
   apply_to_content,
@@ -58,9 +58,10 @@
   bosh_path = "/http-bind/", % for bosh only
   websocket_path = "/chat", % for websocket only
   websocket_frame = "binary", % for websocket only
-  retry_timeout = 10, % retry sending in microsec
+  retry_timeout = 10, % retry sending in milliseconds
+  max_retries = 0, % maximum number of retries
   idle_timeout = 600000, % timeout for local ack
-  connect_timeout = infinity, % timeout for gen_tcp:connect/4
+  connect_timeout = infinity, % timeout for gen_tcp:connect/4 (infinity OR time in milliseconds)
   global_ack_timeout = infinity, % timeout for global ack
   tcp_rcv_size = 32768, % tcp buffers size
   tcp_snd_size = 32768,
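
Note the behavioural shift these defaults encode: every retry path in ts_client.erl below is guarded by retries < max_retries, and the new default of 0 therefore disables retries unless the option is set explicitly (the removed ?MAX_RETRIES hard-coded 3 retries, and the removed ?RETRY_TIMEOUT hard-coded 10000 ms, versus the 10 ms record default). A minimal sketch of the guard, with a hypothetical helper name:

  %% Sketch only; should_retry/2 is illustrative, not part of the commit.
  %% With the new default max_retries = 0 the guard can never succeed.
  should_retry(Retries, MaxRetries) -> Retries < MaxRetries.
  %% should_retry(0, 0) =:= false  (default: no retries at all)
  %% should_retry(0, 3) =:= true   (opt in via the max_retries option)
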
19 changes: 8 additions & 11 deletions src/tsung/ts_client.erl
@@ -38,9 +38,6 @@
 -include("ts_config.hrl").
 -include("ts_profile.hrl").
 
--define(MAX_RETRIES,3). % max number of connection retries
--define(RETRY_TIMEOUT,10000). % waiting time between retries (msec)
-
 %% External exports
 -export([start/1, next/1]).
 
@@ -820,7 +817,7 @@ handle_next_request(Request, State) ->
             _ ->
                 {next_state, wait_ack, NewState}
         end;
-        {error, closed} when State#state_rcv.retries < ?MAX_RETRIES ->
+        {error, closed} when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
             ?LOG("connection close while sending message !~n", ?WARN),
             Retries = State#state_rcv.retries +1,
             handle_close_while_sending(State#state_rcv{socket=NewSocket,
@@ -829,14 +826,14 @@ handle_next_request(Request, State) ->
                                                        session=NewSession,
                                                        retries=Retries,
                                                        port=Port});
-        {error, Reason} when State#state_rcv.retries < ?MAX_RETRIES ->
+        {error, Reason} when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
            %% LOG only at INFO level since we report also an error to ts_mon
            ?LOGF("Error: Unable to send data, reason: ~p~n",[Reason],?INFO),
            CountName="error_send_"++atom_to_list(Reason),
            ts_mon:add({ count, list_to_atom(CountName) }),
            Retries = State#state_rcv.retries +1,
            handle_timeout_while_sending(State#state_rcv{session=NewSession,retries=Retries});
-        {'EXIT', {noproc, _Rest}} when State#state_rcv.retries < ?MAX_RETRIES ->
+        {'EXIT', {noproc, _Rest}} when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
            ?LOG("EXIT from ssl app while sending message !~n", ?WARN),
            Retries = State#state_rcv.retries +1,
            handle_close_while_sending(State#state_rcv{socket=NewSocket,
@@ -845,7 +842,7 @@ handle_next_request(Request, State) ->
                                                        retries=Retries,
                                                        host=Host,
                                                        port=Port});
-        Exit when State#state_rcv.retries < ?MAX_RETRIES ->
+        Exit when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
            ?LOGF("EXIT Error: Unable to send data, reason: ~p~n",
                  [Exit], ?ERR),
            ts_mon:add({ count, error_send }),
@@ -855,12 +852,12 @@ handle_next_request(Request, State) ->
            {stop, normal, State}
     end;
 
-        {error, timeout} when State#state_rcv.retries < ?MAX_RETRIES ->
+        {error, timeout} when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
            ts_mon:add({count, error_connect_timeout}),
 
            handle_reconnect_issue(State#state_rcv{session=NewSession});
 
-        {error, _Reason} when State#state_rcv.retries < ?MAX_RETRIES ->
+        {error, _Reason} when State#state_rcv.retries < ProtoOpts#proto_opts.max_retries ->
            handle_reconnect_issue(State#state_rcv{session=NewSession});
 
        {error, Reason} ->
@@ -906,13 +903,13 @@ finish_session(State) ->
 %% Purpose: there was an issue (re)opening the connection. Retry with
 %% backoff in a moment.
 %%----------------------------------------------------------------------
-handle_reconnect_issue(State) ->
+handle_reconnect_issue(#state_rcv{proto_opts = PO} = State) ->
     Retries = State#state_rcv.retries + 1,
 
     % simplified exponential backoff algorithm: we increase
     % the timeout when the number of retries increase, with a
     % simple rule: number of retries * retry_timeout
-    set_thinktime(?RETRY_TIMEOUT * Retries),
+    set_thinktime(PO#proto_opts.retry_timeout * Retries),
     {next_state, think, State#state_rcv{retries=Retries}}.
 
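
Despite the "exponential backoff" wording in the comment, the rule set_thinktime(retry_timeout * Retries) grows linearly with the retry count. A standalone sketch of that rule (module and function names are illustrative, not part of the commit):

  -module(backoff_sketch).
  -export([wait_ms/2]).

  %% Wait time before the Nth reconnect attempt, in milliseconds,
  %% mirroring handle_reconnect_issue/1: N * retry_timeout.
  wait_ms(Retry, RetryTimeoutMs) when Retry >= 1 ->
      Retry * RetryTimeoutMs.

  %% With the default retry_timeout of 10 ms this yields
  %% [10,20,30,40,50] for retries 1..5.
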
6 changes: 6 additions & 0 deletions src/tsung_controller/ts_config.erl
@@ -841,6 +841,12 @@ parse(Element = #xmlElement{name=option, attributes=Attrs},
             NewProto = OldProto#proto_opts{global_ack_timeout=Timeout},
             lists:foldl( fun parse/2, Conf#config{proto_opts=NewProto},
                          Element#xmlElement.content);
+        "max_retries" ->
+            MaxRetries = getAttr(integer,Attrs, value, ?config(max_retries)),
+            OldProto = Conf#config.proto_opts,
+            NewProto = OldProto#proto_opts{max_retries=MaxRetries},
+            lists:foldl( fun parse/2, Conf#config{proto_opts=NewProto},
+                         Element#xmlElement.content);
         "retry_timeout" ->
             Timeout = getAttr(integer,Attrs, value, ?config(client_retry_timeout)),
             OldProto = Conf#config.proto_opts,
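
Design note: the new clause deliberately mirrors the retry_timeout clause that follows it: read the integer value attribute, fall back to a built-in default via ?config(max_retries), store the result in #proto_opts{}, and keep folding over the remaining option elements. Both retry knobs are thus resolved per scenario at parse time instead of being hard-coded constants in ts_client.erl.
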
