Skip to content

Commit

Permalink
MB-6957 Retry file operations on Windows
Browse files Browse the repository at this point in the history
Because some operations provided by the Erlang 'file'
module open files without all the share flags, and
because external processes (Windows services, antivirus
software, etc.) may also open files without those flags,
some concurrent operations against the same file can fail
with a Windows share violation error, which the Erlang
file driver maps to the POSIX error 'eacces'.

When this happens, just retry the failed operations for
a limited period of time, after which we give up.

Change-Id: Iaecc6d520169d8b84bfcb354066e42b533c435cd
Reviewed-on: http://review.couchbase.org/22042
Tested-by: Filipe David Borba Manana <fdmanana@gmail.com>
Tested-by: Damien Katz <damien@couchbase.com>
Reviewed-by: Damien Katz <damien@couchbase.com>
  • Loading branch information
fdmanana authored and steveyen committed Oct 29, 2012
1 parent 1a5f629 commit e6bcd6e
Show file tree
Hide file tree
Showing 15 changed files with 133 additions and 34 deletions.
2 changes: 2 additions & 0 deletions etc/couchdb/default.ini.tpl.in
Expand Up @@ -24,6 +24,8 @@ max_parallel_replica_indexers = 2
btree_implementation = native
consistency_check_precompacted = false
consistency_check_compacted = false
; Maximum period for which we attempt to retry file operations on Windows.
windows_file_op_retry_period = 5000

[database_compaction]
; larger buffer sizes can originate smaller files
Expand Down
6 changes: 3 additions & 3 deletions src/couch_set_view/src/couch_set_view_compactor.erl
Expand Up @@ -83,7 +83,7 @@ compact_group(Group0, EmptyGroup, LogFilePath, UpdaterPid, Owner, UserStatus) ->
type = Type
} = Group0,

case file:delete(LogFilePath) of
case file2:delete(LogFilePath) of
ok ->
ok;
{error, enoent} ->
Expand Down Expand Up @@ -224,7 +224,7 @@ maybe_retry_compact(CompactResult0, StartTime, LogFilePath, LogOffsetStart, Owne
ok = couch_file:flush(Fd),
case gen_server:call(Owner, {compact_done, CompactResult}, infinity) of
ok ->
_ = file:delete(LogFilePath),
_ = file2:delete(LogFilePath),
ok = couch_file:sync(Fd);
{update, MissingCount} ->
{ok, LogEof} = gen_server:call(Owner, log_eof, infinity),
Expand All @@ -241,7 +241,7 @@ maybe_retry_compact(CompactResult0, StartTime, LogFilePath, LogOffsetStart, Owne
{progress, (TotalChanges * 100) div TotalChanges2},
{retry_number, Retries}
]),
{ok, LogFd} = file:open(LogFilePath, [read, raw, binary]),
{ok, LogFd} = file2:open(LogFilePath, [read, raw, binary]),
{ok, LogOffsetStart} = file:position(LogFd, LogOffsetStart),
ok = couch_set_view_util:open_raw_read_fd(NewGroup),
NewGroup2 = apply_log(NewGroup, LogFd, 0, nil,LogOffsetStart, LogEof),
Expand Down
8 changes: 4 additions & 4 deletions src/couch_set_view/src/couch_set_view_group.erl
Expand Up @@ -1300,7 +1300,7 @@ prepare_group({RootDir, SetName, #set_view_group{sig = Sig, type = Type} = Group
case (not ForceReset) andalso (Type =:= main) of
true ->
% initializing main view group
catch delete_index_file(RootDir, Group, replica);
ok = delete_index_file(RootDir, Group, replica);
false ->
ok
end,
Expand All @@ -1312,11 +1312,11 @@ prepare_group({RootDir, SetName, #set_view_group{sig = Sig, type = Type} = Group
[SetName, Type, Group#set_view_group.name]),
Error;
Error ->
catch delete_index_file(RootDir, Group, Type),
ok = delete_index_file(RootDir, Group, Type),
case (not ForceReset) andalso (Type =:= main) of
true ->
% initializing main view group
catch delete_index_file(RootDir, Group, replica);
ok = delete_index_file(RootDir, Group, replica);
false ->
ok
end,
Expand Down Expand Up @@ -1385,7 +1385,7 @@ delete_index_file(RootDir, Group, Type) ->
SetDir = couch_set_view:set_index_dir(RootDir, Group#set_view_group.set_name),
BaseName = filename:join([SetDir, base_index_file_name(Group, Type)]),
lists:foreach(
fun(F) -> couch_file:delete(RootDir, F) end,
fun(F) -> ok = couch_file:delete(RootDir, F) end,
filelib:wildcard(BaseName ++ ".[0-9]*")).


Expand Down
10 changes: 5 additions & 5 deletions src/couch_set_view/src/couch_set_view_updater.erl
Expand Up @@ -624,13 +624,13 @@ flush_writes(#writer_acc{initial_build = true} = WriterAcc) ->
{ok, NewIdBtreeRoot} = couch_btree_copy:from_sorted_file(
IdBtree, IdsSortedFile, GroupFd, fun file_sorter_format_function/1),
NewIdBtree = IdBtree#btree{root = NewIdBtreeRoot},
ok = file:delete(IdsSortedFile),
ok = file2:delete(IdsSortedFile),
NewViews = lists:map(
fun(#set_view{id_num = Id, btree = Bt} = View) ->
[{_, KvSortedFile}] = dict:fetch(Id, NewSortFiles2),
{ok, NewBtRoot} = couch_btree_copy:from_sorted_file(
Bt, KvSortedFile, GroupFd, fun file_sorter_format_function/1),
ok = file:delete(KvSortedFile),
ok = file2:delete(KvSortedFile),
View#set_view{
btree = Bt#btree{root = NewBtRoot}
}
Expand Down Expand Up @@ -696,7 +696,7 @@ maybe_flush_merge_buffers(BuffersDict, WriterAcc) ->
true ->
SortFiles = dict:fetch(Id, AccFiles),
FileName = new_sort_file_name(WriterAcc),
{ok, Fd} = file:open(FileName, [raw, append, binary]),
{ok, Fd} = file2:open(FileName, [raw, append, binary]),
ok = file:write(Fd, Buf),
ok = file:close(Fd),
AccBuffers2 = dict:store(Id, {[], 0}, AccBuffers),
Expand Down Expand Up @@ -1035,7 +1035,7 @@ write_header(#set_view_group{fd = Fd} = Group, DoFsync) ->
%% Open the compaction log file at Path for raw, binary, append-mode
%% writes, going through file2 so the open is retried on Windows share
%% violations. Returns the file descriptor, or nil when no path is given.
open_log_file(nil) ->
    nil;
open_log_file(Path) when is_list(Path) ->
    {ok, Fd} = file2:open(Path, [raw, binary, append]),
    Fd.


Expand Down Expand Up @@ -1175,7 +1175,7 @@ spawn_merge_worker(LessFun, TmpDir, Workers, FilesToMerge, DestFile) ->
exit({sort_worker_died, Reason})
end,
lists:foreach(fun(F) ->
case file:delete(F) of
case file2:delete(F) of
ok ->
ok;
{error, Reason2} ->
Expand Down
4 changes: 2 additions & 2 deletions src/couch_set_view/src/couch_set_view_util.erl
Expand Up @@ -246,7 +246,7 @@ open_raw_read_fd(Group) ->
type = Type,
name = DDocId
} = Group,
case file:open(FileName, [read, raw, binary]) of
case file2:open(FileName, [read, raw, binary]) of
{ok, RawReadFd} ->
erlang:put({FilePid, fast_fd_read}, RawReadFd),
ok;
Expand Down Expand Up @@ -358,7 +358,7 @@ new_sort_file_path(RootDir, GroupSig) ->
%% Best-effort removal of every file directly under RootDir (temporary
%% sort files). Deletions go through file2 so they are retried on
%% Windows share violations; individual failures are ignored.
delete_sort_files(RootDir) ->
    Pattern = filename:join([RootDir, "*"]),
    Files = filelib:wildcard(Pattern),
    DeleteFun = fun(FileName) ->
        _ = file2:delete(FileName)
    end,
    lists:foreach(DeleteFun, Files).


Expand Down
2 changes: 2 additions & 0 deletions src/couchdb/Makefile.am
Expand Up @@ -28,6 +28,7 @@ CLEANFILES = $(compiled_files) $(doc_base)

source_files = \
erl_diag.erl \
file2.erl \
couch.erl \
couch_api_wrap.erl \
couch_api_wrap_httpc.erl \
Expand Down Expand Up @@ -91,6 +92,7 @@ EXTRA_DIST = $(source_files) couch_api_wrap.hrl couch_db.hrl couch_js_functions.

compiled_files = \
erl_diag.beam \
file2.beam \
couch.app \
couch.beam \
couch_api_wrap.beam \
Expand Down
2 changes: 1 addition & 1 deletion src/couchdb/couch_btree_copy.erl
Expand Up @@ -63,7 +63,7 @@ from_sorted_file(EmptyBtree, SortedFileName, DestFd, BinToKvFun) ->
kv_chunk_threshold = EmptyBtree#btree.kv_chunk_threshold,
kp_chunk_threshold = EmptyBtree#btree.kp_chunk_threshold
},
{ok, SourceFd} = file:open(SortedFileName, [read, raw, binary, read_ahead]),
{ok, SourceFd} = file2:open(SortedFileName, [read, raw, binary, read_ahead]),
{ok, Acc2} = try
sorted_file_fold(SourceFd, SortedFileName, BinToKvFun, 0, 0, Acc)
after
Expand Down
2 changes: 1 addition & 1 deletion src/couchdb/couch_config.erl
Expand Up @@ -184,7 +184,7 @@ code_change(_OldVsn, State, _Extra) ->
parse_ini_file(IniFile) ->
IniFilename = couch_util:abs_pathname(IniFile),
IniBin =
case file:read_file(IniFilename) of
case file2:read_file(IniFilename) of
{ok, IniBin0} ->
IniBin0;
{error, eacces} ->
Expand Down
4 changes: 2 additions & 2 deletions src/couchdb/couch_config_writer.erl
Expand Up @@ -27,15 +27,15 @@
%% File::filename()) -> ok
%% @doc Saves a Section/Key/Value triple to the ini file File::filename()
save_to_file({{Section, Key}, Value}, File) ->
{ok, OldFileContents} = file:read_file(File),
{ok, OldFileContents} = file2:read_file(File),
Lines = re:split(OldFileContents, "\r\n|\n|\r|\032", [{return, list}]),

SectionLine = "[" ++ Section ++ "]",
{ok, Pattern} = re:compile(["^(", Key, "\\s*=)|\\[[a-zA-Z0-9\_-]*\\]"]),

NewLines = process_file_lines(Lines, [], SectionLine, Pattern, Key, Value),
NewFileContents = reverse_and_add_newline(strip_empty_lines(NewLines), []),
case file:write_file(File, NewFileContents) of
case file2:write_file(File, NewFileContents) of
ok ->
ok;
{error, eacces} ->
Expand Down
6 changes: 3 additions & 3 deletions src/couchdb/couch_db.erl
Expand Up @@ -71,7 +71,7 @@ open_db_file(Filepath, Options) ->
end, MatchingFiles),
case MatchingFiles2 of
[] ->
[file:delete(F) || F <- CompactFiles],
[file2:delete(F) || F <- CompactFiles],
case lists:member(create, Options) of
true ->
% we had some compaction files hanging around, now retry
Expand All @@ -88,7 +88,7 @@ open_db_file(Filepath, Options) ->
[{fd_close_after, ?FD_CLOSE_TIMEOUT_MS} | Options]) of
{ok, Fd} ->
% delete the old files
[file:delete(F) || F <- RestOld ++ CompactFiles],
[file2:delete(F) || F <- RestOld ++ CompactFiles],
{ok, Fd, NewestFile};
Error ->
Error
Expand Down Expand Up @@ -435,7 +435,7 @@ update_docs(Db, Docs) ->
% docs_since or enum_docs, it's often faster as it avoids the messaging
% overhead with couch_file.
fast_reads(Db, Fun) ->
case file:open(Db#db.filepath, [binary, read, raw]) of
case file2:open(Db#db.filepath, [binary, read, raw]) of
{ok, FastReadFd} ->
put({Db#db.fd, fast_fd_read}, FastReadFd),
try
Expand Down
4 changes: 2 additions & 2 deletions src/couchdb/couch_db_updater.erl
Expand Up @@ -43,7 +43,7 @@ init({MainPid, DbName, Filepath, Fd, Options}) ->
ok = couch_file:write_header_bin(Fd,
db_header_to_header_bin(Header)),
% delete any old compaction files that might be hanging around
file:delete(Filepath ++ ".compact")
file2:delete(Filepath ++ ".compact")
end
end,

Expand Down Expand Up @@ -896,7 +896,7 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
ok
end,
% Compact it
NewDb = case file:read_file_info(CompactFile) of
NewDb = case file2:read_file_info(CompactFile) of
{ok, _} -> % Catch up
{ok, TargetDB} = make_target_db(Db, CompactFile),
copy_compact(Db, TargetDB, true);
Expand Down
16 changes: 8 additions & 8 deletions src/couchdb/couch_file.erl
Expand Up @@ -223,13 +223,13 @@ delete(RootDir, Filepath) ->

delete(RootDir, Filepath, Async) ->
DelFile = filename:join([RootDir,".delete", ?b2l(couch_uuids:random())]),
case file:rename(Filepath, DelFile) of
case file2:rename(Filepath, DelFile) of
ok ->
if (Async) ->
spawn(file, delete, [DelFile]),
spawn(file2, delete, [DelFile]),
ok;
true ->
file:delete(DelFile)
file2:delete(DelFile)
end;
Error ->
Error
Expand All @@ -246,7 +246,7 @@ init_delete_dir(RootDir) ->
filelib:ensure_dir(filename:join(Dir,"foo")),
filelib:fold_files(Dir, ".*", true,
fun(Filename, _) ->
ok = file:delete(Filename)
ok = file2:delete(Filename)
end, ok).


Expand Down Expand Up @@ -320,7 +320,7 @@ maybe_create_file(Filepath, Options) ->
case lists:member(create, Options) of
true ->
filelib:ensure_dir(Filepath),
case file:open(Filepath, [read, write, binary]) of
case file2:open(Filepath, [read, write, binary]) of
{ok, Fd} ->
{ok, Length} = file:position(Fd, eof),
case Length > 0 of
Expand Down Expand Up @@ -596,9 +596,9 @@ try_open_fd(FilePath, Options, _Timewait, TotalTimeRemain)
when TotalTimeRemain < 0 ->
% Out of retry time.
% Try one last time and whatever we get is the returned result.
file:open(FilePath, Options);
file2:open(FilePath, Options);
try_open_fd(FilePath, Options, Timewait, TotalTimeRemain) ->
case file:open(FilePath, Options) of
case file2:open(FilePath, Options) of
{ok, Fd} ->
{ok, Fd};
{error, emfile} ->
Expand Down Expand Up @@ -689,7 +689,7 @@ handle_write_message(Msg, Fd, FilePath, Eof, CloseTimeout) ->
From ! {ok, self()},
writer_loop(Fd, FilePath, Eof, NewCloseTimeout);
{rename, NewFilepath, From} ->
ok = file:rename(FilePath, NewFilepath),
ok = file2:rename(FilePath, NewFilepath),
ok = couch_file_write_guard:remove(self()),
ok = couch_file_write_guard:add(NewFilepath, self()),
From ! {ok, self()},
Expand Down
2 changes: 1 addition & 1 deletion src/couchdb/couch_log.erl
Expand Up @@ -176,7 +176,7 @@ read(Bytes, Offset) ->
LogFileName = couch_config:get("log", "file"),
LogFileSize = filelib:file_size(LogFileName),

{ok, Fd} = file:open(LogFileName, [read]),
{ok, Fd} = file2:open(LogFileName, [read]),
Start = lists:max([LogFileSize - Bytes, 0]) + Offset,

% TODO: truncate chopped first line
Expand Down
4 changes: 2 additions & 2 deletions src/couchdb/couch_server_sup.erl
Expand Up @@ -44,7 +44,7 @@ couch_config_start_link_wrapper(IniFiles, FirstConfigPid) ->
start_server(IniFiles) ->
case init:get_argument(pidfile) of
{ok, [PidFile]} ->
case file:write_file(PidFile, os:getpid()) of
case file2:write_file(PidFile, os:getpid()) of
ok -> ok;
Error -> io:format("Failed to write PID file ~s, error: ~p", [PidFile, Error])
end;
Expand Down Expand Up @@ -125,7 +125,7 @@ start_server(IniFiles) ->
undefined -> [];
Uri -> io_lib:format("~s~n", [Uri])
end end || Uri <- Uris],
file:write_file(UriFile, Lines)
file2:write_file(UriFile, Lines)
end,

{ok, Pid}.
Expand Down

0 comments on commit e6bcd6e

Please sign in to comment.