Skip to content

Commit

Permalink
Replace khash with maps in ddoc_cache_lru
Browse files Browse the repository at this point in the history
This is a companion PR to the previous one for couch_event_server [1].

Implementation notes:

 * Some functionality was moved to helper functions (do_refresh, get_entries)

 * The main concern was to make sure to return the updated map object,
   whereas before we just returned `ok`.

 * ddoc_cache_lru already had almost 100% test coverage, so we opted to
   rely on those tests.

 * Performance with maps seems to be at least as good as, or better than,
   with the khash NIF [2].

[1] #4977
[2] https://gist.github.com/nickva/06f1511b7f9d0bbc2d9a0dfc2d36779e
  • Loading branch information
nickva committed Feb 19, 2024
1 parent a0a9998 commit 2064f21
Show file tree
Hide file tree
Showing 16 changed files with 78 additions and 2,649 deletions.
1 change: 0 additions & 1 deletion mix.exs
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,6 @@ defmodule CouchDBTest.Mixfile do
"proper",
"mochiweb",
"meck",
"khash",
"hyper",
"fauxton"
]
Expand Down
1 change: 0 additions & 1 deletion rebar.config.script
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,6 @@ SubDirs = [
"src/couch_epi",
"src/config",
"src/couch_log",
"src/khash",
"src/b64url",
"src/exxhash",
"src/ets_lru",
Expand Down
2 changes: 0 additions & 2 deletions rel/reltool.config
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,6 @@
jiffy,
jwtf,
ken,
khash,
mango,
mem3,
mochiweb,
Expand Down Expand Up @@ -117,7 +116,6 @@
{app, jiffy, [{incl_cond, include}]},
{app, jwtf, [{incl_cond, include}]},
{app, ken, [{incl_cond, include}]},
{app, khash, [{incl_cond, include}]},
{app, mango, [{incl_cond, include}]},
{app, mem3, [{incl_cond, include}]},
{app, mochiweb, [{incl_cond, include}]},
Expand Down
201 changes: 78 additions & 123 deletions src/ddoc_cache/src/ddoc_cache_lru.erl
Original file line number Diff line number Diff line change
Expand Up @@ -88,15 +88,13 @@ init(_) ->
] ++ BaseOpts,
ets:new(?CACHE, CacheOpts),
ets:new(?LRU, [ordered_set, {write_concurrency, true}] ++ BaseOpts),
{ok, Pids} = khash:new(),
{ok, Dbs} = khash:new(),
{ok, Evictor} = couch_event:link_listener(
?MODULE, handle_db_event, nil, [all_dbs]
),
?EVENT(lru_init, nil),
{ok, #st{
pids = Pids,
dbs = Dbs,
pids = #{},
dbs = #{},
evictor = Evictor
}}.

Expand All @@ -107,25 +105,21 @@ terminate(_Reason, St) ->
end,
ok.

handle_call({start, Key, Default}, _From, St) ->
#st{
pids = Pids,
dbs = Dbs
} = St,
handle_call({start, Key, Default}, _From, #st{} = St) ->
case ets:lookup(?CACHE, Key) of
[] ->
MaxSize = config:get_integer("ddoc_cache", "max_size", 104857600),
case trim(St, max(0, MaxSize)) of
ok ->
{ok, #st{pids = Pids, dbs = Dbs} = St1} ->
true = ets:insert_new(?CACHE, #entry{key = Key}),
{ok, Pid} = ddoc_cache_entry:start_link(Key, Default),
true = ets:update_element(?CACHE, Key, {#entry.pid, Pid}),
ok = khash:put(Pids, Pid, Key),
store_key(Dbs, Key, Pid),
{reply, {ok, Pid}, St};
full ->
Pids1 = Pids#{Pid => Key},
Dbs1 = store_key(Dbs, Key, Pid),
{reply, {ok, Pid}, St1#st{dbs = Dbs1, pids = Pids1}};
{full, #st{} = St1} ->
?EVENT(full, Key),
{reply, full, St}
{reply, full, St1}
end;
[#entry{pid = Pid}] ->
{reply, {ok, Pid}, St}
Expand All @@ -139,81 +133,35 @@ handle_cast({evict, DbName}, St) ->
handle_cast({refresh, DbName, DDocIds}, St) ->
gen_server:abcast(mem3:nodes(), ?MODULE, {do_refresh, DbName, DDocIds}),
{noreply, St};
handle_cast({do_evict, DbName}, St) ->
#st{
dbs = Dbs
} = St,
ToRem =
case khash:lookup(Dbs, DbName) of
{value, DDocIds} ->
AccOut = khash:fold(
DDocIds,
fun(_, Keys, Acc1) ->
khash:to_list(Keys) ++ Acc1
end,
[]
),
?EVENT(evicted, DbName),
AccOut;
not_found ->
?EVENT(evict_noop, DbName),
[]
end,
lists:foreach(
fun({Key, Pid}) ->
remove_entry(St, Key, Pid)
end,
ToRem
),
khash:del(Dbs, DbName),
{noreply, St};
handle_cast({do_refresh, DbName, DDocIdList}, St) ->
#st{
dbs = Dbs
} = St,
handle_cast({do_evict, DbName}, #st{dbs = Dbs} = St) ->
ToRem = get_entries(DbName, Dbs),
case ToRem of
[] -> ?EVENT(evict_noop, DbName);
[_ | _] -> ?EVENT(evicted, DbName)
end,
St1 = lists:foldl(fun remove_entry/2, St, ToRem),
{noreply, St1#st{dbs = maps:remove(DbName, Dbs)}};
handle_cast({do_refresh, DbName, DDocIdList}, #st{dbs = Dbs} = St) ->
% We prepend no_ddocid to the DDocIdList below
% so that we refresh all custom and validation
% function entries which load data from all
% design documents.
case khash:lookup(Dbs, DbName) of
{value, DDocIds} ->
lists:foreach(
fun(DDocId) ->
case khash:lookup(DDocIds, DDocId) of
{value, Keys} ->
khash:fold(
Keys,
fun(_, Pid, _) ->
ddoc_cache_entry:refresh(Pid)
end,
nil
);
not_found ->
ok
end
end,
[no_ddocid | DDocIdList]
);
not_found ->
ok
case Dbs of
#{DbName := DDocIds} -> do_refresh(DDocIds, [no_ddocid | DDocIdList]);
_ -> ok
end,
{noreply, St};
handle_cast(Msg, St) ->
{stop, {invalid_cast, Msg}, St}.

handle_info({'EXIT', Pid, Reason}, #st{evictor = Pid} = St) ->
{stop, Reason, St};
handle_info({'EXIT', Pid, normal}, St) ->
handle_info({'EXIT', Pid, normal}, #st{pids = Pids, dbs = Dbs} = St) ->
% This clause handles when an entry starts
% up but encounters an error or uncacheable
% response from its recover call.
#st{
pids = Pids
} = St,
{value, Key} = khash:lookup(Pids, Pid),
khash:del(Pids, Pid),
remove_key(St, Key),
{noreply, St};
{Key, Pids1} = maps:take(Pid, Pids),
{noreply, St#st{pids = Pids1, dbs = remove_key(Dbs, Key)}};
handle_info(Msg, St) ->
{stop, {invalid_info, Msg}, St}.

Expand Down Expand Up @@ -241,80 +189,87 @@ lru_start(Key, DoInsert) ->
ddoc_cache_entry:recover(Key)
end.

trim(_, 0) ->
full;
trim(St, MaxSize) ->
trim(#st{} = St, 0) ->
{full, St};
trim(#st{} = St, MaxSize) ->
CurSize = ets:info(?CACHE, memory) * erlang:system_info(wordsize),
if
CurSize =< MaxSize ->
ok;
{ok, St};
true ->
case ets:first(?LRU) of
{_Ts, Key, Pid} ->
remove_entry(St, Key, Pid),
trim(St, MaxSize);
St1 = remove_entry({Key, Pid}, St),
trim(St1, MaxSize);
'$end_of_table' ->
full
{full, St}
end
end.

remove_entry(St, Key, Pid) ->
#st{
pids = Pids
} = St,
get_entries(DbName, #{} = Dbs) ->
case Dbs of
#{DbName := DDocIds} ->
Fun = fun(_, Keys, Acc1) -> maps:to_list(Keys) ++ Acc1 end,
maps:fold(Fun, [], DDocIds);
_ ->
[]
end.

do_refresh(#{} = DDocIdsMap, [_ | _] = DDocIdList) ->
Fun = fun(DDocId) ->
case DDocIdsMap of
#{DDocId := Keys} ->
maps:foreach(fun(_, Pid) -> ddoc_cache_entry:refresh(Pid) end, Keys);
_ ->
ok
end
end,
lists:foreach(Fun, [no_ddocid | DDocIdList]).

remove_entry({Key, Pid}, #st{pids = Pids, dbs = Dbs} = St) ->
unlink_and_flush(Pid),
ddoc_cache_entry:shutdown(Pid),
khash:del(Pids, Pid),
remove_key(St, Key).
St#st{pids = maps:remove(Pid, Pids), dbs = remove_key(Dbs, Key)}.

store_key(Dbs, Key, Pid) ->
store_key(#{} = Dbs, Key, Pid) ->
DbName = ddoc_cache_entry:dbname(Key),
DDocId = ddoc_cache_entry:ddocid(Key),
case khash:lookup(Dbs, DbName) of
{value, DDocIds} ->
case khash:lookup(DDocIds, DDocId) of
{value, Keys} ->
khash:put(Keys, Key, Pid);
not_found ->
{ok, Keys} = khash:from_list([{Key, Pid}]),
khash:put(DDocIds, DDocId, Keys)
case Dbs of
#{DbName := DDocIds} ->
case DDocIds of
#{DDocId := Keys} ->
Dbs#{DbName := DDocIds#{DDocId := Keys#{Key => Pid}}};
_ ->
Dbs#{DbName := DDocIds#{DDocId => #{Key => Pid}}}
end;
not_found ->
{ok, Keys} = khash:from_list([{Key, Pid}]),
{ok, DDocIds} = khash:from_list([{DDocId, Keys}]),
khash:put(Dbs, DbName, DDocIds)
_ ->
Dbs#{DbName => #{DDocId => #{Key => Pid}}}
end.

remove_key(St, Key) ->
#st{
dbs = Dbs
} = St,
remove_key(#{} = Dbs, Key) ->
DbName = ddoc_cache_entry:dbname(Key),
DDocId = ddoc_cache_entry:ddocid(Key),

% For non-existent ddocs, a new ddoc_cache_entry is spawned for
% each call to ddoc_cache:open. Multiple calls to open the same
% non-existent ddoc will create multiple cache entries with the
% same Key but different PIDs. This can result in the following
% khash lookups returning not_found, so handle those corner cases.
case khash:lookup(Dbs, DbName) of
{value, DDocIds} ->
case khash:lookup(DDocIds, DDocId) of
{value, Keys} ->
ok = khash:del(Keys, Key),
case khash:size(Keys) of
0 -> khash:del(DDocIds, DDocId);
_ -> ok
end,
case khash:size(DDocIds) of
0 -> khash:del(Dbs, DbName);
_ -> ok
% map lookups not finding results, so handle those corner cases.
case Dbs of
#{DbName := DDocIds = #{DDocId := Keys = #{Key := _Pid}}} ->
Keys1 = maps:remove(Key, Keys),
case map_size(Keys1) of
0 ->
DDocIds1 = maps:remove(DDocId, DDocIds),
case map_size(DDocIds1) of
0 -> maps:remove(DbName, Dbs);
_ -> Dbs#{DbName := DDocIds1}
end;
not_found ->
ok
_ ->
Dbs#{DbName := DDocIds#{DDocId := Keys1}}
end;
not_found ->
ok
_ ->
Dbs
end.

unlink_and_flush(Pid) ->
Expand Down
10 changes: 0 additions & 10 deletions src/khash/.gitignore

This file was deleted.

0 comments on commit 2064f21

Please sign in to comment.