
Fix bug when merge result is an empty file

This happens when all entries have expired, or when all entries would have been tombstones.
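
Why can a merge produce zero entries? Entries whose TTL has passed are dropped while merging, and tombstones can be discarded once there is no older data left for them to shadow. The sketch below is a standalone illustration, not hanoidb's actual merger; the module name, tuple shape, and the tombstone atom are made up for this example:

    -module(merge_sketch).
    -export([merge/3]).

    %% A and B are lists of {Key, Value, Expiry} sorted by Key; when a key
    %% appears in both, lists:ukeymerge/3 keeps the entry from A (the newer
    %% run). Tombstones erase their key; expired entries are dropped.
    merge(A, B, Now) ->
        [{K, V} || {K, V, Expiry} <- lists:ukeymerge(1, A, B),
                   V =/= tombstone,
                   Expiry > Now].

    %% merge([{a, tombstone, infinity}], [{b, 1, 100}], 200) yields [] --
    %% an empty merge result, which is exactly the case this commit fixes.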
commit 1b42172cbe556eaa567b9bc4f9f8ed6b7e1b64ba
1 parent 6172006
authored by @krestenkrab
Showing 38 additions and 10 deletions.
  1. +20 −5 src/hanoidb_level.erl
  2. +18 −5 src/hanoidb_writer.erl
src/hanoidb_level.erl (25 changed lines)
@@ -533,6 +533,19 @@ main_loop(State = #state{ next=Next }) ->
     %% The outcome of merging resulted in a file with less than
     %% level #entries, so we keep it at this level
     %%
+    ?CAST(_From,{merge_done, 0, OutFileName}) ->
+        ok = file:delete(OutFileName),
+        {ok, State2} = close_and_delete_a_and_b(State),
+        case State#state.c of
+            undefined ->
+                main_loop(State2#state{ merge_pid=undefined });
+            CFile ->
+                ok = hanoidb_reader:close(CFile),
+                ok = file:rename(filename("C", State2), filename("A", State2)),
+                {ok, AFile} = hanoidb_reader:open(filename("A", State2), [random|State#state.opts]),
+                main_loop(State2#state{ a = AFile, c = undefined, merge_pid=undefined })
+        end;
+
     ?CAST(_From,{merge_done, Count, OutFileName}) when Count =< ?BTREE_SIZE(State#state.level) ->
         ?log("merge_done, out:~w~n -> self", [Count]),
@@ -547,16 +560,18 @@ main_loop(State = #state{ next=Next }) ->
             % then, rename M to A, and open it
             AFileName = filename("A",State2),
             ok = file:rename(MFileName, AFileName),
-            {ok, BT} = hanoidb_reader:open(AFileName, [random|State#state.opts]),
+            {ok, AFile} = hanoidb_reader:open(AFileName, [random|State#state.opts]),
             % iff there is a C file, then move it to B position
             % TODO: consider recovery for this
             case State#state.c of
                 undefined ->
-                    main_loop(State2#state{ a=BT, b=undefined, merge_pid=undefined });
-                TreeFile ->
-                    file:rename(filename("C",State2), filename("B", State2)),
-                    check_begin_merge_then_loop(State2#state{ a=BT, b=TreeFile, c=undefined,
+                    main_loop(State2#state{ a=AFile, b=undefined, merge_pid=undefined });
+                CFile ->
+                    ok = hanoidb_reader:close(CFile),
+                    ok = file:rename(filename("C", State2), filename("B", State2)),
+                    {ok, BFile} = hanoidb_reader:open(filename("B", State2), [random|State#state.opts]),
+                    check_begin_merge_then_loop(State2#state{ a=AFile, b=BFile, c=undefined,
                                                               merge_pid=undefined })
             end;
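
The new zero-count clause above parallels the existing small-result clause: the empty output file is deleted, A and B are closed and removed via close_and_delete_a_and_b/1, and a pending C file, if present, is renamed into the A slot and reopened. A standalone sketch of that promotion step, using hypothetical paths rather than hanoidb's filename/2 helper:

    %% After an empty merge: drop the output file, then promote C to A if
    %% one is pending. Dir and the "A"/"C" names are illustrative only.
    handle_empty_merge(Dir, OutFileName, true) ->
        ok = file:delete(OutFileName),
        ok = file:rename(filename:join(Dir, "C"), filename:join(Dir, "A"));
    handle_empty_merge(_Dir, OutFileName, false) ->
        ok = file:delete(OutFileName).   % no pending C; the level is now empty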
src/hanoidb_writer.erl (23 changed lines)
@@ -150,13 +150,17 @@ code_change(_OldVsn, State, _Extra) ->
 %%%%% INTERNAL FUNCTIONS

-serialize(#state{ bloom=Bloom, index_file=File }=State) ->
+serialize(#state{ bloom=Bloom, index_file=File, index_file_pos=Position }=State) ->
     % io:format("serializing ~p @ ~p~n", [State#state.name,
     %           State#state.index_file_pos]),
     %% assert that we're on track
-    Position = State#state.index_file_pos,
-    {ok, Position} = file:position(File, cur),
+    case file:position(File, {eof, 0}) of
+        {ok, Position} ->
+            ok;
+        {ok, WrongPosition} ->
+            exit({bad_position, Position, WrongPosition})
+    end,
     ok = file:close(File),
     erlang:term_to_binary( { State#state{ index_file=closed }, ebloom:serialize(Bloom) } ).
@@ -183,16 +187,25 @@ flush_nodes(#state{ nodes=[], last_node_pos=LastNodePos, last_node_size=_LastNod
     Bloom = zlib:zip(ebloom:serialize(Ref)),
     BloomSize = byte_size(Bloom),
-    Trailer = << 0:32, Bloom/binary, BloomSize:32/unsigned, LastNodePos:64/unsigned >>,
     IdxFile = State#state.index_file,
+    if LastNodePos =:= undefined ->
+            %% store contains no entries!
+            ok = file:write(IdxFile, <<0:32,0:16>>),
+            RootPos = ?FIRST_BLOCK_POS;
+       true ->
+            RootPos = LastNodePos
+    end,
+
+    Trailer = << 0:32, Bloom/binary, BloomSize:32/unsigned, RootPos:64/unsigned >>,
+
     ok = file:write(IdxFile, Trailer),
     ok = file:datasync(IdxFile),
     ok = file:close(IdxFile),
-    {ok, State#state{ index_file=undefined }};
+    {ok, State#state{ index_file=undefined, index_file_pos=undefined }};

 %% stack consists of one node with one {pos,len} member. Just ignore this node.
 flush_nodes(State=#state{ nodes=[#node{level=N, members=[{_,{Pos,_Len}}]}], last_node_pos=Pos }) when N>0 ->
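
With this change the trailer always carries a valid root position: for an empty store, a dummy <<0:32,0:16>> block is written first and RootPos points at ?FIRST_BLOCK_POS, presumably so a reader finds a well-formed (empty) block there, instead of the undefined LastNodePos that previously made the Trailer binary construction fail with badarg. Since the last 12 bytes of the file are BloomSize:32 followed by RootPos:64, the trailer can be decoded from the tail. A minimal decoding sketch (a hypothetical helper; hanoidb's reader works from positioned file reads, not a whole-file binary):

    %% Recover the compressed bloom filter (written with zlib:zip above)
    %% and the root position from the trailer at the end of an index file
    %% loaded as one binary.
    decode_trailer(FileBin) when byte_size(FileBin) >= 12 ->
        TailStart = byte_size(FileBin) - 12,
        <<_:TailStart/binary, BloomSize:32/unsigned, RootPos:64/unsigned>> = FileBin,
        BloomStart = TailStart - BloomSize,
        <<_:BloomStart/binary, Bloom:BloomSize/binary, _/binary>> = FileBin,
        {zlib:unzip(Bloom), RootPos}.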