Permalink
Browse files

Making scheduler polling optional.

Closes issue #3

The scheduler polling will be disabled by default to avoid a few
potential bugs with the scheduler. To enable it, setting the
env variable 'sched_time' to 'true' is required.
  • Loading branch information...
1 parent b5ec8d8 commit 365896d6d870f1a8acb6492030dae39e86f0675d @ferd committed Nov 13, 2012
Showing with 58 additions and 37 deletions.
  1. +10 −2 README.markdown
  2. +3 −2 src/vmstats.app.src
  3. +20 −8 src/vmstats_server.erl
  4. +4 −4 test/statsderl.erl
  5. +21 −21 test/vmstats_server_tests.erl
View
@@ -8,16 +8,24 @@ The different fields include:
- the number of processes
- the process limit
- the length of the run queue
- - the scheduler usage as a percentage
+ - the scheduler usage as a percentage (disabled by default)
- memory used for ETS tables, atoms, processes, binaries and the total memory
## How to build ##
`$ ./rebar compile`
+or
+
+ `$ make`
+
## Other Stuff
-It is recommended to leave the interval at 1000ms (1 second) as graphite seems to dampen missing data points on intervals larger than that, or to accumulate them when they're smaller. At roughly 1 second, the values seem to represent what the Erlang VM outputs the best.
Although it was recommended to leave the interval at 1000ms (1 second), as graphite seems to dampen missing data points on intervals larger than that, the newer version of this application allows more precise or longer delays thanks to the use of gauges instead of increments.
+
+## I want to use newer versions but disable scheduler wall time statistics ##
+
Scheduler wall time statistics are now disabled by default to keep in line with 0.1.0 behaviour, after bugs were reported in R15B01 where the Erlang scheduler would lock up on such calls a few times a day, never to unlock again. People who want to take the risk of running these statistics can do so by setting the `vmstats` env variable `sched_time` to `true`.
## I was basing myself on 'master' and stuff started breaking!
View
@@ -1,6 +1,6 @@
{application, vmstats, [
{description, "Tiny application to gather VM statistics for StatsD client"},
- {vsn, "0.2.0"},
+ {vsn, "0.2.1"},
{registered, [vmstats_sup, vmstats_server]},
{applications, [
kernel,
@@ -11,6 +11,7 @@
{applications, [statsderl]},
{modules, [vmstats, vmstats_sup, vmstats_server]},
{env, [
- {delay, 1000} % in milliseconds
+ {delay, 1000}, % in milliseconds
+ {sched_time, false}
]}
]}.
@@ -12,7 +12,7 @@
-define(TIMER_MSG, '#delay').
-record(state, {key :: string(),
- sched_time :: enabled | disabled,
+ sched_time :: enabled | disabled | unavailable,
prev_sched :: [{integer(), integer(), integer()}],
timer_ref :: reference(),
delay :: integer()}). % milliseconds
@@ -27,19 +27,23 @@ start_link(BaseKey) ->
init(BaseKey) ->
{ok, Delay} = application:get_env(vmstats, delay),
Ref = erlang:start_timer(Delay, self(), ?TIMER_MSG),
- try erlang:system_flag(scheduler_wall_time, true) of
- _ ->
+ case {sched_time_available(), application:get_env(vmstats, sched_time)} of
+ {true, {ok,true}} ->
{ok, #state{key = [BaseKey,$.],
timer_ref = Ref,
delay = Delay,
sched_time = enabled,
- prev_sched = lists:sort(erlang:statistics(scheduler_wall_time))}}
- catch
- error:badarg ->
+ prev_sched = lists:sort(erlang:statistics(scheduler_wall_time))}};
+ {true, _} ->
+ {ok, #state{key = [BaseKey,$.],
+ timer_ref = Ref,
+ delay = Delay,
+ sched_time = disabled}};
+ {false, _} ->
{ok, #state{key = [BaseKey,$.],
timer_ref = Ref,
delay = Delay,
- sched_time = disabled}}
+ sched_time = unavailable}}
end.
handle_call(_Msg, _From, State) ->
@@ -86,7 +90,7 @@ handle_info({timeout, R, ?TIMER_MSG}, S = #state{key=K, delay=D, timer_ref=R}) -
|| {Sid, Active, Total} <- wall_time_diff(PrevSched, NewSched)],
{noreply, S#state{timer_ref=erlang:start_timer(D, self(), ?TIMER_MSG),
prev_sched=NewSched}};
- disabled ->
+ _ -> % disabled or unavailable
{noreply, S#state{timer_ref=erlang:start_timer(D, self(), ?TIMER_MSG)}}
end;
handle_info(_Msg, {state, _Key, _TimerRef, _Delay}) ->
@@ -105,3 +109,11 @@ terminate(_Reason, _State) ->
wall_time_diff(T1, T2) ->
[{I, Active2-Active1, Total2-Total1}
|| {{I, Active1, Total1}, {I, Active2, Total2}} <- lists:zip(T1,T2)].
+
+sched_time_available() ->
+ try erlang:system_flag(scheduler_wall_time, true) of
+ _ -> true
+ catch
+ error:badarg -> false
+ end.
+
View
@@ -1,11 +1,11 @@
-module(statsderl).
--export([start_link/0, increment/3, called/0, stop/0]).
+-export([start_link/0, gauge/3, called/0, stop/0]).
start_link() ->
spawn_link(fun() -> init() end).
-increment(Key, Data, Freq) ->
- call({incr, Key, Data, Freq}).
+gauge(Key, Value, SampleRate) ->
+ call({gauge, Key, Value, SampleRate}).
called() -> call(called).
@@ -17,7 +17,7 @@ init() ->
loop(Stack) ->
receive
- {From, {incr, K, D, F}} ->
+ {From, {gauge, K, D, F}} ->
reply(From, ok),
loop([{K,D,F}|Stack]);
{From, called} ->
@@ -7,39 +7,39 @@
timer_500ms_test() ->
application:set_env(vmstats, delay, 500),
- Key = "",
+ Key = "key",
statsderl:start_link(),
{ok, Pid} = vmstats_server:start_link(Key),
unlink(Pid),
timer:sleep(750),
%% First match works
?assertMatch(
- [{"error_logger_queue_len", _, 1.00},
- {"memory.atom_used", _, 1.00},
- {"memory.binary", _, 1.00},
- {"memory.ets", _, 1.00},
- {"memory.procs_used", _, 1.00},
- {"memory.total", _, 1.00},
- {"modules", _, 1.00},
- {"proc_count", _, 1.00},
- {"proc_limit", _, 1.00},
- {"run_queue", _, 1.00}],
+ [{"key.error_logger_queue_len", _, 1.00},
+ {"key.memory.atom_used", _, 1.00},
+ {"key.memory.binary", _, 1.00},
+ {"key.memory.ets", _, 1.00},
+ {"key.memory.procs_used", _, 1.00},
+ {"key.memory.total", _, 1.00},
+ {"key.modules", _, 1.00},
+ {"key.proc_count", _, 1.00},
+ {"key.proc_limit", _, 1.00},
+ {"key.run_queue", _, 1.00}],
lists:sort([{lists:flatten(K), V, Freq} || {K, V, Freq} <- statsderl:called()])
),
timer:sleep(600),
exit(Pid, shutdown),
%% Done, we know it loops!
?assertMatch(
- [{"error_logger_queue_len", _, 1.00},
- {"memory.atom_used", _, 1.00},
- {"memory.binary", _, 1.00},
- {"memory.ets", _, 1.00},
- {"memory.procs_used", _, 1.00},
- {"memory.total", _, 1.00},
- {"modules", _, 1.00},
- {"proc_count", _, 1.00},
- {"proc_limit", _, 1.00},
- {"run_queue", _, 1.00}],
+ [{"key.error_logger_queue_len", _, 1.00},
+ {"key.memory.atom_used", _, 1.00},
+ {"key.memory.binary", _, 1.00},
+ {"key.memory.ets", _, 1.00},
+ {"key.memory.procs_used", _, 1.00},
+ {"key.memory.total", _, 1.00},
+ {"key.modules", _, 1.00},
+ {"key.proc_count", _, 1.00},
+ {"key.proc_limit", _, 1.00},
+ {"key.run_queue", _, 1.00}],
lists:sort([{lists:flatten(K), V, Freq} || {K, V, Freq} <- statsderl:called()])
),
?assertEqual([], lists:sort(statsderl:called())),

0 comments on commit 365896d

Please sign in to comment.