Permalink
Browse files

Convert to lager

az629

Convert all logging to use lager. Also resurrect some logging that was removed
in commit `7c6d74f76b00abb8f5c7e46d999c75416967222d`, as it is potentially
useful.
  • Loading branch information...
1 parent d0a1542 commit 975f75d73fe44046f36e96600d0a88de8f0f3dce @rzezeski rzezeski committed Aug 30, 2011
Showing with 32 additions and 18 deletions.
  1. +1 −0 .gitignore
  2. +6 −1 rebar.config
  3. +6 −4 src/mi_buffer.erl
  4. +3 −3 src/mi_buffer_converter.erl
  5. +4 −5 src/mi_scheduler.erl
  6. +2 −0 src/mi_segment.erl
  7. +10 −5 src/mi_server.erl
View
1 .gitignore
@@ -1,3 +1,4 @@
ebin
.eunit
doc
+deps
View
7 rebar.config
@@ -1,2 +1,7 @@
{cover_enabled, true}.
-{erl_opts, [debug_info]}.
+{erl_opts, [debug_info, {parse_transform, lager_transform}]}.
+
+{deps, [
+ {lager, "0.9.*",
+ {git, "git://github.com/basho/lager", {branch, "master"}}}
+ ]}.
View
10 src/mi_buffer.erl
@@ -60,6 +60,7 @@ new(Filename) ->
open_inner(FH, Table),
{ok, Size} = file:position(FH, cur),
+ lager:info("opened buffer '~s'", [Filename]),
%% Return the buffer.
#buffer { filename=Filename, handle=FH, table=Table, size=Size }.
@@ -75,11 +76,12 @@ open_inner(FH, Table) ->
filename(Buffer) ->
Buffer#buffer.filename.
-delete(Buffer) ->
- ets:delete(Buffer#buffer.table),
+delete(Buffer=#buffer{table=Table, filename=Filename}) ->
+ ets:delete(Table),
close_filehandle(Buffer),
- file:delete(Buffer#buffer.filename),
- file:delete(Buffer#buffer.filename ++ ".deleted"),
+ file:delete(Filename),
+ file:delete(Filename ++ ".deleted"),
+ lager:info("deleted buffer '~s'", [Filename]),
ok.
close_filehandle(Buffer) ->
View
6 src/mi_buffer_converter.erl
@@ -140,9 +140,9 @@ handle_cast({convert, Root, Buffer}, #state{mi_root=Root}=State) ->
{noreply, State}
catch
error:badarg ->
- error_logger:warning_msg("`convert` attempted to work with a"
- " nonexistent buffer, probably because"
- " drop was called~n"),
+ lager:warning("`convert` attempted to work with a"
+ " nonexistent buffer, probably because"
+ " drop was called"),
{noreply, State}
end;
handle_cast(_Msg, State) ->
View
9 src/mi_scheduler.erl
@@ -98,7 +98,7 @@ handle_info({worker_ready, WorkerPid}, #state { queue = Q } = State) ->
{noreply, NewState}
end;
handle_info({'EXIT', WorkerPid, Reason}, #state { worker = WorkerPid } = State) ->
- error_logger:error_msg("Compaction worker ~p exited: ~p\n", [WorkerPid, Reason]),
+ lager:error("Compaction worker ~p exited: ~p", [WorkerPid, Reason]),
%% Start a new worker.
Self=self(),
NewWorkerPid = spawn_link(fun() -> worker_loop(Self) end),
@@ -130,16 +130,15 @@ worker_loop(Parent) ->
{ok, OldSegments, OldBytes} ->
case ElapsedSecs > 1 of
true ->
- error_logger:info_msg(
- "Pid ~p compacted ~p segments for ~p bytes in ~p seconds, ~.2f MB/sec\n",
+ lager:info(
+ "Pid ~p compacted ~p segments for ~p bytes in ~p seconds, ~.2f MB/sec",
[Pid, OldSegments, OldBytes, ElapsedSecs, OldBytes/ElapsedSecs/(1024*1024)]);
false ->
ok
end;
{Error, Reason} when Error == error; Error == 'EXIT' ->
- error_logger:error_msg("Failed to compact ~p: ~p\n",
- [Pid, Reason])
+ lager:error("Failed to compact ~p: ~p", [Pid, Reason])
end,
?MODULE:worker_loop(Parent);
_ ->
View
2 src/mi_segment.erl
@@ -66,6 +66,7 @@ open_read(Root) ->
{ok, FileInfo} = file:read_file_info(data_file(Root)),
OffsetsTable = read_offsets(Root),
+ lager:info("opened segment '~s' for read", [Root]),
#segment {
root=Root,
offsets_table=OffsetsTable,
@@ -86,6 +87,7 @@ open_write(Root) ->
%% TODO: Do we really need to go through the trouble of writing empty files here?
file:write_file(data_file(Root), <<"">>),
file:write_file(offsets_file(Root), <<"">>),
+ lager:info("opened segment '~s' for write", [Root]),
#segment {
root = Root,
offsets_table = ets:new(segment_offsets, [ordered_set, public])
View
15 src/mi_server.erl
@@ -161,6 +161,7 @@ stop(Server) ->
%%%===================================================================
init([Root]) ->
+ lager:info("loading merge_index '~s'", [Root]),
%% Seed the random generator...
random:seed(now()),
@@ -188,6 +189,8 @@ init([Root]) ->
buffer_rollover_size=fuzzed_rollover_size()
},
+ lager:info("finished loading merge_index '~s' with rollover size ~p",
+ [Root, State#state.buffer_rollover_size]),
{ok, State}.
@@ -480,9 +483,9 @@ handle_cast({buffer_to_segment, Buffer, SegmentWO}, State) ->
end,
{noreply, NewState};
false ->
- error_logger:warning_msg("`buffer_to_segment` cast received"
- " for nonexistent buffer, probably"
- " because drop was called~n"),
+ lager:warning("`buffer_to_segment` cast received"
+ " for nonexistent buffer, probably"
+ " because drop was called"),
{noreply, State}
end;
@@ -537,8 +540,7 @@ handle_info({'EXIT', Pid, Reason},
normal ->
SR#stream_range.caller ! {eof, SR#stream_range.ref};
_ ->
- error_logger:error_msg("lookup/range failure: ~p~n",
- [Reason]),
+ lager:error("lookup/range failure: ~p", [Reason]),
SR#stream_range.caller
! {error, SR#stream_range.ref, Reason}
end,
@@ -586,6 +588,7 @@ read_buf_and_seg(Root) ->
F1 = fun(Filename) ->
Basename = filename:basename(Filename, ?DELETEME_FLAG),
Basename1 = filename:join(Root, Basename ++ ".*"),
+ lager:info("deleting '~s'", [Basename1]),
[ok = file:delete(X) || X <- filelib:wildcard(Basename1)]
end,
[F1(X) || X <- filelib:wildcard(join(Root, "*.deleted"))],
@@ -609,6 +612,7 @@ read_buf_and_seg(Root) ->
read_segments([], _Segments) -> [];
read_segments([SName|Rest], Segments) ->
%% Read the segment from disk...
+ lager:info("opening segment: '~s'", [SName]),
Segment = mi_segment:open_read(SName),
[Segment|read_segments(Rest, Segments)].
@@ -625,6 +629,7 @@ read_buffers(_Root, [{_BNum, BName}], NextID, Segments) ->
read_buffers(Root, [{BNum, BName}|Rest], NextID, Segments) ->
%% Multiple buffers exist... convert them into segments...
+ lager:info("converting buffer: '~s' to segment", [BName]),
SName = join(Root, "segment." ++ integer_to_list(BNum)),
set_deleteme_flag(SName),
Buffer = mi_buffer:new(BName),

0 comments on commit 975f75d

Please sign in to comment.