Skip to content

Commit

Permalink
Misc. dialyzer unmatched return fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
norton committed Jan 5, 2011
1 parent b69463d commit c109249
Show file tree
Hide file tree
Showing 8 changed files with 183 additions and 174 deletions.
4 changes: 2 additions & 2 deletions src/brick.erl
Original file line number Diff line number Diff line change
Expand Up @@ -73,11 +73,11 @@ prep_stop(State) ->
brick_shepherd:add_do_not_restart_brick(Br, node()),
brick_shepherd:stop_brick(Br)
end,
[catch Fstop(Br) || Br <- FirstBricks],
_ = [catch Fstop(Br) || Br <- FirstBricks],
timer:sleep(2000),

%% Finally, shut down any bootstrap bricks.
[catch Fstop(Br) || Br <- BootstrapBricks],
_ = [catch Fstop(Br) || Br <- BootstrapBricks],
State.

config_change(_Changed, _New, _Removed) ->
Expand Down
93 changes: 49 additions & 44 deletions src/brick_ets.erl
Original file line number Diff line number Diff line change
Expand Up @@ -478,10 +478,10 @@ handle_info({sync_done, Pid, LastLogSerial}, State)
?DBG_TLOGx({sync_done, State#state.name, logging_op_q, State#state.logging_op_q}),
{LQI_List, State2} =
pull_items_out_of_logging_queue(LastLogSerial, State),
[ok = map_mods_into_ets(DoOpList, State2) ||
#log_q{thisdo_mods = DoOpList} <- LQI_List],
[ok = clear_dirty_tab(DoOpList, State2) ||
#log_q{thisdo_mods = DoOpList} <- LQI_List],
_ = [ok = map_mods_into_ets(DoOpList, State2) ||
#log_q{thisdo_mods = DoOpList} <- LQI_List],
_ = [ok = clear_dirty_tab(DoOpList, State2) ||
#log_q{thisdo_mods = DoOpList} <- LQI_List],
ToDos = [{chain_send_downstream, LQI#log_q.logging_serial,
LQI#log_q.doflags, LQI#log_q.from, LQI#log_q.reply,
LQI#log_q.thisdo_mods}
Expand Down Expand Up @@ -578,11 +578,11 @@ handle_info(do_init_second_half, State) ->
%% But if we're in standalone mode, then there's no one to repair
%% the missing keys, which could make us regret deleting
%% as many keys as we are doing here.
[begin {Purged, _S} = purge_recs_by_seqnum(SeqNum, true, State),
?E_INFO("~s: purged ~p keys from sequence ~p\n",
[State#state.name, Purged, SeqNum]),
Purged
end || SeqNum <- read_external_bad_sequence_file(State#state.name)],
_ = [begin {Purged, _S} = purge_recs_by_seqnum(SeqNum, true, State),
?E_INFO("~s: purged ~p keys from sequence ~p\n",
[State#state.name, Purged, SeqNum]),
Purged
end || SeqNum <- read_external_bad_sequence_file(State#state.name)],

%% For recently bigdata_dir files, sync them all in a big safety blatt.
%% ... Except that it also seems to have the potential to slam the
Expand Down Expand Up @@ -1858,13 +1858,13 @@ checkpoint_start(S_ro, DoneLogSeq, ParentPid, Options) ->
DelAll = term_to_binary([{delete_all_table_items}]),
{_, Bin1} = (S_ro#state.wal_mod):create_hunk(?LOGTYPE_METADATA,
[DelAll], []),
file:write(CheckFH, Bin1),
ok = file:write(CheckFH, Bin1),

%% Dump data from the private metadata table.
MDs = term_to_binary([{md_insert, T} ||
T <- ets:tab2list(S_ro#state.mdtab)]),
{_, Bin2} = (S_ro#state.wal_mod):create_hunk(?LOGTYPE_METADATA, [MDs], []),
file:write(CheckFH, Bin2),
ok = file:write(CheckFH, Bin2),

%% Dump all the "normal" data.
ThrottleSvr = case proplists:get_value(throttle_bytes, Options) of
Expand Down Expand Up @@ -1923,20 +1923,22 @@ checkpoint_start(S_ro, DoneLogSeq, ParentPid, Options) ->
timer:sleep(5*1000),
?DBG_TLOGx({checkpoint, S_ro#state.name, async_delete,
OldSeqs}),
[_ = file:delete((S_ro#state.wal_mod):log_file_path(
Dir, X, Suffix)) ||
X <- OldSeqs, Suffix <- ["HLOG"]],
_ = [_ = file:delete((S_ro#state.wal_mod):log_file_path(
Dir, X, Suffix))
|| X <- OldSeqs, Suffix <- ["HLOG"]],
unlink(ParentPid),
exit(normal)
end);
end),
ok;
true ->
Finfolog("checkpoint: ~p: moving ~p log "
"files to long-term archive\n",
[S_ro#state.name, length(OldSeqs)]),
[(S_ro#state.wal_mod):move_seq_to_longterm(S_ro#state.log, X) ||
X <- OldSeqs],
_ = [(S_ro#state.wal_mod):move_seq_to_longterm(S_ro#state.log, X)
|| X <- OldSeqs],
_ = file:delete((S_ro#state.wal_mod):log_file_path(
Dir,S_ro#state.check_lastseqnum))
Dir,S_ro#state.check_lastseqnum)),
ok
end,

%% All "bad-sequence" processing is done at brick startup. (Bad
Expand All @@ -1949,7 +1951,7 @@ checkpoint_start(S_ro, DoneLogSeq, ParentPid, Options) ->
%% notification. If that race happens, it's possible to forget
%% about that sequence file, but someday we'll re-discover the
%% error ourselves at some future time.
delete_external_bad_sequence_file(S_ro#state.name),
ok = delete_external_bad_sequence_file(S_ro#state.name),

if is_pid(ThrottleSvr) ->
brick_ticket:stop(ThrottleSvr);
Expand All @@ -1973,14 +1975,14 @@ dump_items(Tab, WalMod, Log) ->
dump_items2('$end_of_table', _Tab, WalMod, LogFH, _AccNum, Acc) ->
{_, Bin} = WalMod:create_hunk(?LOGTYPE_METADATA,
[term_to_binary(lists:reverse(Acc))], []),
file:write(LogFH, Bin),
ok = file:write(LogFH, Bin),
ok;
dump_items2(Key, Tab, WalMod, LogFH, AccNum, Acc)
when AccNum > 200 ->
{Bytes, Bin} = WalMod:create_hunk(?LOGTYPE_METADATA,
[term_to_binary(lists:reverse(Acc))], []),
ok = get_bw_ticket(Bytes),
file:write(LogFH, Bin),
ok = file:write(LogFH, Bin),
dump_items2(Key, Tab, WalMod, LogFH, 0, []);
dump_items2(Key, Tab, WalMod, LogFH, AccNum, Acc) ->
[ST] = ets:lookup(Tab, Key),
Expand Down Expand Up @@ -2039,7 +2041,8 @@ sync_pid_loop(SPA) ->
{syncpid_stats, SPA#syncpid_arg.name, DiffMS, ms, length(L)},
?DBG_GEN("DBG: SPA ~p sync_done at ~p,~p, my last serial = ~p\n",
[SPA#syncpid_arg.name, _X, _Y, LastSerial]),
SPA#syncpid_arg.parent_pid ! {sync_done, self(), LastSerial};
SPA#syncpid_arg.parent_pid ! {sync_done, self(), LastSerial},
ok;
true ->
ok
end,
Expand Down Expand Up @@ -2949,18 +2952,20 @@ accumulate_maybe(Key, ST, Acc) ->
squidflash_doit(KsRaws, DoOp, From, ParentPid, FakeS) ->
Me = self(),
KRV_Refs = [{X, make_ref()} || X <- KsRaws],
[catch gmt_parallel_limit:enqueue(
brick_primer_limit,
fun() ->
catch squidflash_prime1(Key, RawVal, ValLen, Me, FakeS),
Me ! Ref,
exit(normal)
end) || {{Key, RawVal, ValLen}, Ref} <- KRV_Refs],
[receive
Ref -> ok
after 10000 -> % should be impossible, but...
ok
end || {_, Ref} <- KRV_Refs],
_ = [catch gmt_parallel_limit:enqueue(
brick_primer_limit,
fun() ->
catch squidflash_prime1(Key, RawVal, ValLen, Me, FakeS),
Me ! Ref,
exit(normal)
end)
|| {{Key, RawVal, ValLen}, Ref} <- KRV_Refs],
_ = [receive
Ref -> ok
after 10000 -> % should be impossible, but...
ok
end
|| {_, Ref} <- KRV_Refs],
%% TODO: This is also an ugly kludge ... hide it somewhere at
%% least? The alternative is to have a handle_call() ->
%% handle_cast() conversion doodad ... this is lazier.
Expand Down Expand Up @@ -3219,9 +3224,9 @@ scavenge_one_seq_file_fun(TempDir, SA, Fread_blob, Finfolog) ->
({live_bytes, _}, Acc) ->
Acc
end, {0, 0, 0}, DInLog),
file:close(FH),
disk_log:close(DInLog),
disk_log:close(DOutLog),
ok = file:close(FH),
ok = disk_log:close(DInLog),
ok = disk_log:close(DOutLog),
?E_INFO("SCAV: ~p middle of step 10\n", [SA#scav.name]),
if Errs == 0, SA#scav.destructive == true ->
{ok, DOutLog} = disk_log:open([{name, DOutPath},
Expand All @@ -3244,7 +3249,7 @@ scavenge_one_seq_file_fun(TempDir, SA, Fread_blob, Finfolog) ->
true ->
ok
end,
disk_log:close(DOutLog),
ok = disk_log:close(DOutLog),
{Hs + Hunks, Bs + Bytes, Es + Errs}
end.

Expand Down Expand Up @@ -3307,16 +3312,16 @@ file_output_fun(Log) ->

sort_test0() ->
{ok, TmpLog} = disk_log:open([{name, foo}, {file, "/tmp/footest"}]),
[disk_log:log(TmpLog, {xo, X, Y}) || X <- lists:seq(1, 50),
Y <- lists:seq(1,100)],
disk_log:close(TmpLog),
_ = [disk_log:log(TmpLog, {xo, X, Y})
|| X <- lists:seq(1, 50), Y <- lists:seq(1,100)],
ok = disk_log:close(TmpLog),
{ok, InLog} = disk_log:open([{name, in}, {file, "/tmp/footest"}, {mode, read_only}]),
{ok, OutLog} = disk_log:open([{name, out}, {file, "/tmp/footest.out"}]),
X = file_sorter:sort(file_input_fun(InLog, start), file_output_fun(OutLog),
[{format, term},
{order, fun({_,_,A}, {_,_,B}) -> A < B end}]),
disk_log:close(InLog),
disk_log:close(OutLog),
ok = disk_log:close(InLog),
ok = disk_log:close(OutLog),
X.

%%
Expand Down Expand Up @@ -3777,5 +3782,5 @@ X = WalMod:fold(
blah
end
end, unused),
file:close(OutFH),
ok = file:close(OutFH),
X.
4 changes: 2 additions & 2 deletions src/brick_hash.erl
Original file line number Diff line number Diff line change
Expand Up @@ -785,7 +785,7 @@ verify_chain_list(ChainList, ZeroLengthOK) ->

verify_chain_list_2(ChainList, ZeroLengthOK)
when is_list(ChainList), length(ChainList) > 0 ->
lists:map(
lists:foreach(
fun({ChainName, ChainMembers}) when is_atom(ChainName) ->
SortedChainMembers = lists:sort(ChainMembers),
if length(ChainMembers) < 1, not ZeroLengthOK ->
Expand All @@ -798,7 +798,7 @@ verify_chain_list_2(ChainList, ZeroLengthOK)
_ -> exit({error, duplicate_bricks_in_chain})
end,
%% Check for valid 2-tuples for brick name.
lists:map(
lists:foreach(
fun({Br, Nd}) when is_atom(Br), is_atom(Nd) ->
ok;
(X) ->
Expand Down
14 changes: 7 additions & 7 deletions src/brick_itimer.erl
Original file line number Diff line number Diff line change
Expand Up @@ -140,10 +140,10 @@ handle_call({cancel, MRef}, _From, State) ->
%% @end
%%--------------------------------------------------------------------
handle_cast(dump, State) ->
[begin
?E_INFO("~s: dump: interval ~p, pid ~p\n", [?MODULE, Interval, Pid]),
Pid ! dump
end || {Interval, Pid} <- dict:to_list(State#state.interval_d)],
_ = [begin
?E_INFO("~s: dump: interval ~p, pid ~p\n", [?MODULE, Interval, Pid]),
Pid ! dump
end || {Interval, Pid} <- dict:to_list(State#state.interval_d)],
{noreply, State};
handle_cast(_Msg, State) ->
?E_ERROR("~s: handle_cast: ~P\n", [?MODULE, _Msg, 20]),
Expand Down Expand Up @@ -175,7 +175,7 @@ handle_info(_Info, State) ->
%% @end
%%--------------------------------------------------------------------
terminate(_Reason, State) ->
[Pid ! stop || {_I, Pid} <- dict:to_list(State#state.interval_d)],
_ = [Pid ! stop || {_I, Pid} <- dict:to_list(State#state.interval_d)],
ok.

%%--------------------------------------------------------------------
Expand Down Expand Up @@ -212,14 +212,14 @@ start_interval_server(Interval) ->
-spec start_interval_loop(integer()) -> no_return().
start_interval_loop(Interval) ->
put(my_interval, Interval),
timer:send_interval(Interval, tick),
{ok, _} = timer:send_interval(Interval, tick),
interval_loop([]).

-spec interval_loop([{pid(), term(), term()}]) -> no_return().
interval_loop(Clients) ->
receive
tick ->
[catch (Pid ! Msg) || {Pid, Msg, _MRef} <- Clients],
_ = [catch (Pid ! Msg) || {Pid, Msg, _MRef} <- Clients],
?MODULE:interval_loop(Clients);
{send_interval, From, Pid, Msg} ->
case gmt_util:make_monitor(Pid) of
Expand Down
18 changes: 9 additions & 9 deletions src/brick_mboxmon.erl
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ is_pid_repairing(Pid) when is_pid(Pid) ->
%%--------------------------------------------------------------------
init([]) ->
process_flag(trap_exit, true),
net_kernel:monitor_nodes(true, [{node_type, visible}, nodedown_reason]),
ok = net_kernel:monitor_nodes(true, [{node_type, visible}, nodedown_reason]),
{ok, RepairHigh} = application:get_env(gdss, brick_mbox_repair_high_water),
{ok, High} = application:get_env(gdss, brick_mbox_high_water),
{ok, Low} = application:get_env(gdss, brick_mbox_low_water),
Expand Down Expand Up @@ -227,12 +227,12 @@ check_mboxes(Bricks, S) ->
[];
Rs ->
BrickMap = make_brick_map(),
[set_repair_overload(Brick, N, S#state.repair_high_water) ||
{above, Brick, true, N} <- Rs],
[report_mbox_above_water(Brick, RepairingP, BrickMap, N) ||
{above, Brick, RepairingP, N} <- Rs],
[report_mbox_below_water(Brick, BrickMap, N) ||
{below, Brick, N} <- Rs],
_ = [set_repair_overload(Brick, N, S#state.repair_high_water)
|| {above, Brick, true, N} <- Rs],
_ = [report_mbox_above_water(Brick, RepairingP, BrickMap, N)
|| {above, Brick, RepairingP, N} <- Rs],
_ = [report_mbox_below_water(Brick, BrickMap, N)
|| {below, Brick, N} <- Rs],
Rs
end.

Expand Down Expand Up @@ -394,8 +394,8 @@ set_repair_overload(Brick, N, RepairHigh) ->
repair_overload ->
?E_ERROR("~p: Change brick ~p to pre_init status\n",
[ResumeName, Brick]),
brick_server:chain_set_my_repair_state(
Brick, node(), pre_init);
ok = brick_server:chain_set_my_repair_state(
Brick, node(), pre_init);
_ ->
ok
end,
Expand Down

0 comments on commit c109249

Please sign in to comment.