
Commit

Merge pull request basho#16 from basho/az629-use-lager
Convert to lager
rzezeski committed Aug 31, 2011
2 parents d0a1542 + 975f75d commit 199acb0
Showing 7 changed files with 32 additions and 18 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -1,3 +1,4 @@
 ebin
 .eunit
 doc
+deps
7 changes: 6 additions & 1 deletion rebar.config
@@ -1,2 +1,7 @@
 {cover_enabled, true}.
-{erl_opts, [debug_info]}.
+{erl_opts, [debug_info, {parse_transform, lager_transform}]}.
+
+{deps, [
+        {lager, "0.9.*",
+         {git, "git://github.com/basho/lager", {branch, "master"}}}
+       ]}.
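
The new {parse_transform, lager_transform} entry in erl_opts is what makes the lager:info/lager:warning/lager:error calls introduced below work: lager's parse transform rewrites them at compile time, attaching module and line metadata and honoring the configured log level. A minimal sketch of a module built against this rebar.config (module and function names are hypothetical, not part of this commit):

    %% lager_demo.erl -- hypothetical illustration, not code from this commit.
    %% Assumes lager is on the code path and erl_opts includes
    %% {parse_transform, lager_transform}, as configured above.
    -module(lager_demo).
    -compile([{parse_transform, lager_transform}]).
    -export([open/1]).

    open(Filename) ->
        %% Rewritten by lager_transform at compile time into a call that
        %% carries module/function/line metadata.
        lager:info("opened '~s'", [Filename]),
        ok.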
10 changes: 6 additions & 4 deletions src/mi_buffer.erl
@@ -60,6 +60,7 @@ new(Filename) ->
     open_inner(FH, Table),
     {ok, Size} = file:position(FH, cur),
 
+    lager:info("opened buffer '~s'", [Filename]),
     %% Return the buffer.
     #buffer { filename=Filename, handle=FH, table=Table, size=Size }.

@@ -75,11 +76,12 @@ open_inner(FH, Table) ->
 filename(Buffer) ->
     Buffer#buffer.filename.
 
-delete(Buffer) ->
-    ets:delete(Buffer#buffer.table),
+delete(Buffer=#buffer{table=Table, filename=Filename}) ->
+    ets:delete(Table),
     close_filehandle(Buffer),
-    file:delete(Buffer#buffer.filename),
-    file:delete(Buffer#buffer.filename ++ ".deleted"),
+    file:delete(Filename),
+    file:delete(Filename ++ ".deleted"),
+    lager:info("deleted buffer '~s'", [Filename]),
     ok.
 
 close_filehandle(Buffer) ->
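The delete/1 rewrite above also binds the record fields in the function head, so Filename is available both for the file:delete calls and the new log line, instead of repeating Buffer#buffer.filename. A generic sketch of the idiom (hypothetical module and record, not from this commit):

    -module(head_match).
    -record(buffer, {filename, table}).
    -export([name/1]).

    %% Matching fields in the head binds them once, avoiding repeated
    %% Record#record.field access in the body.
    name(#buffer{filename=Filename}) ->
        Filename.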
6 changes: 3 additions & 3 deletions src/mi_buffer_converter.erl
@@ -140,9 +140,9 @@ handle_cast({convert, Root, Buffer}, #state{mi_root=Root}=State) ->
             {noreply, State}
     catch
         error:badarg ->
-            error_logger:warning_msg("`convert` attempted to work with a"
-                                     " nonexistent buffer, probably because"
-                                     " drop was called~n"),
+            lager:warning("`convert` attempted to work with a"
+                          " nonexistent buffer, probably because"
+                          " drop was called"),
             {noreply, State}
     end;
 handle_cast(_Msg, State) ->
9 changes: 4 additions & 5 deletions src/mi_scheduler.erl
@@ -98,7 +98,7 @@ handle_info({worker_ready, WorkerPid}, #state { queue = Q } = State) ->
             {noreply, NewState}
     end;
 handle_info({'EXIT', WorkerPid, Reason}, #state { worker = WorkerPid } = State) ->
-    error_logger:error_msg("Compaction worker ~p exited: ~p\n", [WorkerPid, Reason]),
+    lager:error("Compaction worker ~p exited: ~p", [WorkerPid, Reason]),
     %% Start a new worker.
     Self=self(),
     NewWorkerPid = spawn_link(fun() -> worker_loop(Self) end),
@@ -130,16 +130,15 @@ worker_loop(Parent) ->
         {ok, OldSegments, OldBytes} ->
             case ElapsedSecs > 1 of
                 true ->
-                    error_logger:info_msg(
-                      "Pid ~p compacted ~p segments for ~p bytes in ~p seconds, ~.2f MB/sec\n",
+                    lager:info(
+                      "Pid ~p compacted ~p segments for ~p bytes in ~p seconds, ~.2f MB/sec",
                       [Pid, OldSegments, OldBytes, ElapsedSecs, OldBytes/ElapsedSecs/(1024*1024)]);
                 false ->
                     ok
             end;
 
         {Error, Reason} when Error == error; Error == 'EXIT' ->
-            error_logger:error_msg("Failed to compact ~p: ~p\n",
-                                   [Pid, Reason])
+            lager:error("Failed to compact ~p: ~p", [Pid, Reason])
     end,
     ?MODULE:worker_loop(Parent);
 _ ->
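The ~.2f MB/sec figure in the compaction log above is OldBytes/ElapsedSecs/(1024*1024); Erlang's / operator always yields a float, which ~.2f then formats to two decimal places. A worked example with hypothetical numbers:

    %% Hypothetical figures: 52428800 bytes compacted in 4 seconds.
    io:format("~.2f MB/sec~n", [52428800 / 4 / (1024*1024)]).
    %% prints: 12.50 MB/sec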
2 changes: 2 additions & 0 deletions src/mi_segment.erl
@@ -66,6 +66,7 @@ open_read(Root) ->
     {ok, FileInfo} = file:read_file_info(data_file(Root)),
 
     OffsetsTable = read_offsets(Root),
+    lager:info("opened segment '~s' for read", [Root]),
     #segment {
         root=Root,
         offsets_table=OffsetsTable,
@@ -86,6 +87,7 @@ open_write(Root) ->
     %% TODO: Do we really need to go through the trouble of writing empty files here?
     file:write_file(data_file(Root), <<"">>),
     file:write_file(offsets_file(Root), <<"">>),
+    lager:info("opened segment '~s' for write", [Root]),
     #segment {
         root = Root,
         offsets_table = ets:new(segment_offsets, [ordered_set, public])
15 changes: 10 additions & 5 deletions src/mi_server.erl
@@ -161,6 +161,7 @@ stop(Server) ->
 %%%===================================================================
 
 init([Root]) ->
+    lager:info("loading merge_index '~s'", [Root]),
     %% Seed the random generator...
     random:seed(now()),

@@ -188,6 +189,8 @@ init([Root]) ->
         buffer_rollover_size=fuzzed_rollover_size()
     },
 
+    lager:info("finished loading merge_index '~s' with rollover size ~p",
+               [Root, State#state.buffer_rollover_size]),
     {ok, State}.


@@ -480,9 +483,9 @@ handle_cast({buffer_to_segment, Buffer, SegmentWO}, State) ->
             end,
             {noreply, NewState};
         false ->
-            error_logger:warning_msg("`buffer_to_segment` cast received"
-                                     " for nonexistent buffer, probably"
-                                     " because drop was called~n"),
+            lager:warning("`buffer_to_segment` cast received"
+                          " for nonexistent buffer, probably"
+                          " because drop was called"),
             {noreply, State}
     end;

@@ -537,8 +540,7 @@ handle_info({'EXIT', Pid, Reason},
         normal ->
             SR#stream_range.caller ! {eof, SR#stream_range.ref};
         _ ->
-            error_logger:error_msg("lookup/range failure: ~p~n",
-                                   [Reason]),
+            lager:error("lookup/range failure: ~p", [Reason]),
             SR#stream_range.caller
                 ! {error, SR#stream_range.ref, Reason}
     end,
@@ -586,6 +588,7 @@ read_buf_and_seg(Root) ->
     F1 = fun(Filename) ->
                  Basename = filename:basename(Filename, ?DELETEME_FLAG),
                  Basename1 = filename:join(Root, Basename ++ ".*"),
+                 lager:info("deleting '~s'", [Basename1]),
                  [ok = file:delete(X) || X <- filelib:wildcard(Basename1)]
          end,
     [F1(X) || X <- filelib:wildcard(join(Root, "*.deleted"))],
@@ -609,6 +612,7 @@ read_buf_and_seg(Root) ->
 read_segments([], _Segments) -> [];
 read_segments([SName|Rest], Segments) ->
     %% Read the segment from disk...
+    lager:info("opening segment: '~s'", [SName]),
     Segment = mi_segment:open_read(SName),
     [Segment|read_segments(Rest, Segments)].

@@ -625,6 +629,7 @@ read_buffers(_Root, [{_BNum, BName}], NextID, Segments) ->
 
 read_buffers(Root, [{BNum, BName}|Rest], NextID, Segments) ->
     %% Multiple buffers exist... convert them into segments...
+    lager:info("converting buffer: '~s' to segment", [BName]),
     SName = join(Root, "segment." ++ integer_to_list(BNum)),
     set_deleteme_flag(SName),
     Buffer = mi_buffer:new(BName),
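Taken together, the conversion is mechanical: each error_logger call becomes the lager call of the same severity, and the trailing ~n/\n is dropped from format strings, since lager frames every message as a single line. A hypothetical before/after pair illustrating the pattern (not code from this commit):

    -module(log_style).
    -compile([{parse_transform, lager_transform}]).
    -export([old/1, new/1]).

    old(Free) ->
        %% pre-commit style: error_logger with a trailing newline
        error_logger:warning_msg("low disk: ~p bytes free~n", [Free]).

    new(Free) ->
        %% post-commit style: same severity via lager, no trailing newline
        lager:warning("low disk: ~p bytes free", [Free]).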
