This repository has been archived by the owner on Nov 17, 2020. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 166
/
rabbit_mgmt_db_cache.erl
139 lines (114 loc) · 4.77 KB
/
rabbit_mgmt_db_cache.erl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
%% The contents of this file are subject to the Mozilla Public License
%% Version 1.1 (the "License"); you may not use this file except in
%% compliance with the License. You may obtain a copy of the License
%% at http://www.mozilla.org/MPL/
%%
%% Software distributed under the License is distributed on an "AS IS"
%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
%% the License for the specific language governing rights and
%% limitations under the License.
%%
%% Copyright (c) 2016-2018 Pivotal Software, Inc. All rights reserved.
%% Implements an adaptive cache that times the value generating fun
%% and uses the return value as the cached value for the time it took
%% to produce multiplied by some factor (defaults to 5).
%% There is one cache process per key. New processes are started as
%% required. The cache is invalidated if the arguments to the fetch
%% fun have changed.
-module(rabbit_mgmt_db_cache).
-behaviour(gen_server).

%% API functions
-export([start_link/1]).
-export([process_name/1,
         fetch/2,
         fetch/3,
         fetch/4]).

%% gen_server callbacks
-export([init/1,
         handle_call/3,
         handle_cast/2,
         handle_info/2,
         terminate/2,
         code_change/3]).

-record(state, {data :: any() | none,                   %% cached value, or 'none' when empty/purged
                args :: [any()],                        %% fetch-fun args the cached value was built with
                timer_ref :: undefined | timer:tref(),  %% pending purge timer, if any
                multiplier :: integer()}).              %% cache TTL = fetch time (ms) * multiplier

%% Ways a fetch can fail; {throw, _} wraps a throw from the fetch fun.
-type error_desc() :: key_not_found | timeout | {throw, atom()}.
-type fetch_fun() :: fun((_) -> any()) | fun(() -> any()).
-type fetch_ret() :: {ok, any()} | {error, error_desc()}.

%% Default TTL multiplier and default gen_server:call timeout (ms).
-define(DEFAULT_MULT, 5).
-define(DEFAULT_TIMEOUT, 60000).

%% Child spec for a per-key cache worker under rabbit_mgmt_db_cache_sup.
-define(CHILD(Key), {rabbit_mgmt_db_cache:process_name(Key),
                     {rabbit_mgmt_db_cache, start_link, [Key]},
                     permanent, 5000, worker,
                     [rabbit_mgmt_db_cache]}).

%% Drops the cached value and its arguments from State.
-define(RESET_STATE(State), State#state{data = none, args = []}).
%%%===================================================================
%%% API functions
%%%===================================================================
%% @doc Fetch the cached value for Key using a zero-argument fetch fun,
%% with no fun arguments and the default call timeout.
-spec fetch(atom(), fetch_fun()) -> fetch_ret().
fetch(Key, FetchFun) ->
    fetch(Key, FetchFun, [], ?DEFAULT_TIMEOUT).
%% @doc Fetch the cached value for Key, passing FunArgs to the fetch fun.
%% Uses the default call timeout.
-spec fetch(atom(), fetch_fun(), [any()]) -> fetch_ret().
fetch(Key, FetchFun, FunArgs) when is_list(FunArgs) ->
    fetch(Key, FetchFun, FunArgs, ?DEFAULT_TIMEOUT).
%% @doc Fetch a value for Key, starting the per-key cache process on
%% demand. FetchFun is applied to FunArgs by the cache process; Timeout
%% bounds the gen_server call in milliseconds.
-spec fetch(atom(), fetch_fun(), [any()], integer()) -> fetch_ret().
fetch(Key, FetchFun, FunArgs, Timeout) ->
    ProcName = process_name(Key),
    Pid = case whereis(ProcName) of
              undefined ->
                  %% Start the cache process lazily. A concurrent caller
                  %% can win the whereis/start_child race, in which case
                  %% the supervisor reports the already-running child -
                  %% use its pid rather than crashing on a failed
                  %% {ok, P} match (the original did the latter).
                  case supervisor:start_child(rabbit_mgmt_db_cache_sup,
                                              ?CHILD(Key)) of
                      {ok, P} -> P;
                      {error, {already_started, P}} -> P
                  end;
              P -> P
          end,
    gen_server:call(Pid, {fetch, FetchFun, FunArgs}, Timeout).
%% @doc Derive the registered name of the cache process for a key,
%% e.g. 'rabbit_mgmt_db_cache_queues' for the key 'queues'. Keys come
%% from code (atoms), so atom creation here is bounded.
-spec process_name(atom()) -> atom().
process_name(Key) ->
    list_to_atom(lists:concat([?MODULE, "_", Key])).
%% @doc Start and locally register the cache process for Key.
-spec start_link(atom()) -> {ok, pid()} | ignore | {error, any()}.
start_link(Key) ->
    Name = process_name(Key),
    gen_server:start_link({local, Name}, ?MODULE, [], []).
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
%% gen_server init: read the cache-duration multiplier from the
%% rabbitmq_management application environment, defaulting to
%% ?DEFAULT_MULT (5) when unset.
%% NOTE: the original read the env of the misspelled application
%% 'rabbitmg_management', so a configured
%% 'management_db_cache_multiplier' was silently ignored and the
%% default was always used.
init([]) ->
    Mult = application:get_env(rabbitmq_management,
                               management_db_cache_multiplier,
                               ?DEFAULT_MULT),
    {ok, #state{data = none,
                args = [],
                multiplier = Mult}}.
%% Cache miss: nothing is cached yet, or the fetch-fun arguments differ
%% from those the cached value was built with. Run the fun, time it,
%% and cache the result for (elapsed ms * multiplier) milliseconds.
handle_call({fetch, FetchFun, FunArgs}, _From,
            #state{data = CachedData, args = Args,
                   multiplier = Mult, timer_ref = Ref} = State) when
      CachedData =:= none orelse Args =/= FunArgs ->
    %% Cancel any pending purge timer; when Ref is 'undefined' the
    %% cancel just returns an error tuple, deliberately ignored.
    _ = timer:cancel(Ref),
    try timer:tc(FetchFun, FunArgs) of
        {Time, Data} ->
            %% timer:tc reports microseconds; convert to ms and scale
            %% by the multiplier to get the cache TTL.
            case trunc(Time / 1000 * Mult) of
                0 -> {reply, {ok, Data}, ?RESET_STATE(State)}; % no need to cache that
                T ->
                    %% 'purge_cache' arrives later as an info message
                    %% (see handle_info/2).
                    {ok, TimerRef} = timer:send_after(T, self(), purge_cache),
                    {reply, {ok, Data}, State#state{data = Data,
                                                    timer_ref = TimerRef,
                                                    args = FunArgs}}
            end
    catch
        %% Only throws are caught (matching {throw, atom()} in
        %% error_desc()); errors/exits crash the cache process as usual.
        Throw -> {reply, {error, {throw, Throw}}, State}
    end;
%% Cache hit: data present and same args - reply without re-running the fun.
handle_call({fetch, _FetchFun, _}, _From, #state{data = Data} = State) ->
    Reply = {ok, Data},
    {reply, Reply, State};
%% Synchronous purge request (the timer-driven purge goes via handle_info/2).
handle_call(purge_cache, _From, State) ->
    {reply, ok, ?RESET_STATE(State)}.
%% No asynchronous requests are part of this server's protocol; any
%% cast is dropped.
handle_cast(_Request, State) ->
    {noreply, State}.
%% The send_after timer delivers 'purge_cache' when the cached value
%% has outlived its adaptive TTL: drop the data and its arguments.
handle_info(purge_cache, State) ->
    {noreply, ?RESET_STATE(State)};
%% Drain any other stray message so the mailbox cannot grow.
handle_info(_Msg, State) ->
    {noreply, State}.
%% Nothing to clean up: any outstanding timer dies with the process.
terminate(_Reason, _State) -> ok.
%% No state migration is needed between code versions.
code_change(_OldVsn, State, _Extra) -> {ok, State}.