Skip to content

Commit

Permalink
Tagging 1.0.2
Browse files Browse the repository at this point in the history
git-svn-id: https://svn.apache.org/repos/asf/couchdb/tags/1.0.2@1054834 13f79535-47bb-0310-9956-ffa450edef68
  • Loading branch information
davisp committed Jan 3, 2011
2 parents 0f810b4 + c13fc05 commit 79bea41
Show file tree
Hide file tree
Showing 26 changed files with 473 additions and 286 deletions.
17 changes: 14 additions & 3 deletions CHANGES
Original file line number Diff line number Diff line change
Expand Up @@ -4,11 +4,10 @@ Apache CouchDB CHANGES
Version 1.0.2
-------------

Note: This version has not been released yet.

Futon:

* Make test suite work with Safari and Chrome.
* Fixed animated progress spinner.

Storage System:

Expand All @@ -17,6 +16,13 @@ Storage System:
* Fix occasional timeout errors after successfully compacting large databases.
* Fix occasional error when writing to a database that has just been compacted.
* Fix occasional timeout errors on systems with slow or heavily loaded IO.
* Fix for OOME when compactions include documents with many conflicts.
* Fix for missing attachment compression when MIME types included parameters.
* Fix for frequently edited documents in multi-master deployments being
 duplicated in _changes and _all_docs. See COUCHDB-968 for details on how
to repair.
* Significantly higher read and write throughput against database and
view index files.

Log System:

Expand All @@ -30,7 +36,9 @@ HTTP Interface:

Replicator:

* Updated ibrowse library to 2.1.0 fixing numerous replication issues.
* Updated ibrowse library to 2.1.2 fixing numerous replication issues.
* Make sure that the replicator respects HTTP settings defined in the config.
* Fix error when the ibrowse connection closes unexpectedly.
* Fix authenticated replication (with HTTP basic auth) of design documents
with attachments.
* Various fixes to make replication more resilient for edge-cases.
Expand All @@ -41,6 +49,9 @@ View Server:
* Fix for circular references in CommonJS requires.
* Made isArray() function available to functions executed in the query server.
* Documents are now sealed before being passed to map functions.
* Force view compaction failure when duplicated document data exists. When
this error is seen in the logs users should rebuild their views from
scratch to fix the issue. See COUCHDB-999 for details.

Version 1.0.1
-------------
Expand Down
6 changes: 3 additions & 3 deletions NEWS
Original file line number Diff line number Diff line change
Expand Up @@ -10,19 +10,19 @@ Each release section notes when backwards incompatible changes have been made.
Version 1.0.2
-------------

Note: This version has not been released yet.

* Make test suite work with Safari and Chrome.
* Fix leaking file handles after compacting databases and views.
* Fix databases forgetting their validation function after compaction.
* Fix occasional timeout errors.
* Reduce lengthy stack traces.
* Allow logging of native <xml> types.
* Updated ibrowse library to 2.1.0 fixing numerous replication issues.
* Updated ibrowse library to 2.1.2 fixing numerous replication issues.
* Fix authenticated replication of design documents with attachments.
* Various fixes to make replication more resilient for edge-cases.
* Don't trigger a view update when requesting `_design/doc/_info`.
* Fix for circular references in CommonJS requires.
* Fix for frequently edited documents in multi-master deployments being
duplicated in _changes and _all_docs.

Version 1.0.1
-------------
Expand Down
2 changes: 1 addition & 1 deletion acinclude.m4.in
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ m4_define([LOCAL_PACKAGE_NAME], [Apache CouchDB])
m4_define([LOCAL_BUG_URI], [https://issues.apache.org/jira/browse/COUCHDB])
m4_define([LOCAL_VERSION_MAJOR], [1])
m4_define([LOCAL_VERSION_MINOR], [0])
m4_define([LOCAL_VERSION_REVISION], [2])
m4_define([LOCAL_VERSION_REVISION], [1])
m4_define([LOCAL_VERSION_STAGE], [])
m4_define([LOCAL_VERSION_RELEASE], [])
m4_define([LOCAL_VERSION_PRIMARY],
Expand Down
10 changes: 8 additions & 2 deletions etc/couchdb/default.ini.tpl.in
Original file line number Diff line number Diff line change
Expand Up @@ -120,5 +120,11 @@ compression_level = 8 ; from 1 (lowest, fastest) to 9 (highest, slowest), 0 to d
compressible_types = text/*, application/javascript, application/json, application/xml

[replicator]
max_http_sessions = 10
max_http_pipeline_size = 10
max_http_sessions = 20
max_http_pipeline_size = 50
; set to true to validate peer certificates
verify_ssl_certificates = false
; file containing a list of peer trusted certificates (PEM format)
; ssl_trusted_certificates_file = /etc/ssl/certs/ca-certificates.crt
; maximum peer certificate depth (must be set even if certificate validation is off)
ssl_certificate_max_depth = 3
Binary file modified share/www/image/spinner.gif
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion share/www/script/test/replication.js
Original file line number Diff line number Diff line change
Expand Up @@ -667,7 +667,7 @@ couchTests.replication = function(debug) {
);
T(false, "replication should have failed");
} catch(x) {
T(x.error === "db_not_found");
T(x.error === "unauthorized");
}

atts_ddoc_copy = dbB.open(atts_ddoc._id);
Expand Down
3 changes: 2 additions & 1 deletion src/couchdb/couch_db.erl
Original file line number Diff line number Diff line change
Expand Up @@ -555,7 +555,8 @@ prep_and_validate_replicated_updates(Db, [Bucket|RestBuckets], [OldInfo|RestOldI
{ok, #full_doc_info{rev_tree=OldTree}} ->
NewRevTree = lists:foldl(
fun(NewDoc, AccTree) ->
{NewTree, _} = couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]),
{NewTree, _} = couch_key_tree:merge(AccTree,
couch_db:doc_to_tree(NewDoc), Db#db.revs_limit),
NewTree
end,
OldTree, Bucket),
Expand Down
8 changes: 1 addition & 7 deletions src/couchdb/couch_db.hrl
Original file line number Diff line number Diff line change
Expand Up @@ -264,13 +264,7 @@
body = nil,
options = [
{response_format,binary},
{inactivity_timeout, 30000},
{max_sessions, list_to_integer(
couch_config:get("replicator", "max_http_sessions", "10")
)},
{max_pipeline_size, list_to_integer(
couch_config:get("replicator", "max_http_pipeline_size", "10")
)}
{inactivity_timeout, 30000}
],
retries = 10,
pause = 500,
Expand Down
69 changes: 31 additions & 38 deletions src/couchdb/couch_db_updater.erl
Original file line number Diff line number Diff line change
Expand Up @@ -489,16 +489,17 @@ send_result(Client, Id, OriginalRevs, NewResult) ->
% used to send a result to the client
catch(Client ! {result, self(), {{Id, OriginalRevs}, NewResult}}).

merge_rev_trees(_MergeConflicts, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
merge_rev_trees(_Limit, _Merge, [], [], AccNewInfos, AccRemoveSeqs, AccSeq) ->
{ok, lists:reverse(AccNewInfos), AccRemoveSeqs, AccSeq};
merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
merge_rev_trees(Limit, MergeConflicts, [NewDocs|RestDocsList],
[OldDocInfo|RestOldInfo], AccNewInfos, AccRemoveSeqs, AccSeq) ->
#full_doc_info{id=Id,rev_tree=OldTree,deleted=OldDeleted,update_seq=OldSeq}
= OldDocInfo,
NewRevTree = lists:foldl(
fun({Client, #doc{revs={Pos,[_Rev|PrevRevs]}}=NewDoc}, AccTree) ->
if not MergeConflicts ->
case couch_key_tree:merge(AccTree, [couch_db:doc_to_tree(NewDoc)]) of
case couch_key_tree:merge(AccTree, couch_db:doc_to_tree(NewDoc),
Limit) of
{_NewTree, conflicts} when (not OldDeleted) ->
send_result(Client, Id, {Pos-1,PrevRevs}, conflict),
AccTree;
Expand Down Expand Up @@ -529,7 +530,7 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
NewDoc#doc{revs={OldPos, [OldRev]}}),
NewDoc2 = NewDoc#doc{revs={OldPos + 1, [NewRevId, OldRev]}},
{NewTree2, _} = couch_key_tree:merge(AccTree,
[couch_db:doc_to_tree(NewDoc2)]),
couch_db:doc_to_tree(NewDoc2), Limit),
% we changed the rev id, this tells the caller we did
send_result(Client, Id, {Pos-1,PrevRevs},
{ok, {OldPos + 1, NewRevId}}),
Expand All @@ -543,24 +544,24 @@ merge_rev_trees(MergeConflicts, [NewDocs|RestDocsList],
end;
true ->
{NewTree, _} = couch_key_tree:merge(AccTree,
[couch_db:doc_to_tree(NewDoc)]),
couch_db:doc_to_tree(NewDoc), Limit),
NewTree
end
end,
OldTree, NewDocs),
if NewRevTree == OldTree ->
% nothing changed
merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo, AccNewInfos,
AccRemoveSeqs, AccSeq);
merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
AccNewInfos, AccRemoveSeqs, AccSeq);
true ->
% we have updated the document, give it a new seq #
NewInfo = #full_doc_info{id=Id,update_seq=AccSeq+1,rev_tree=NewRevTree},
RemoveSeqs = case OldSeq of
0 -> AccRemoveSeqs;
_ -> [OldSeq | AccRemoveSeqs]
end,
merge_rev_trees(MergeConflicts, RestDocsList, RestOldInfo,
[NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
merge_rev_trees(Limit, MergeConflicts, RestDocsList, RestOldInfo,
[NewInfo|AccNewInfos], RemoveSeqs, AccSeq+1)
end.


Expand All @@ -583,7 +584,8 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
#db{
fulldocinfo_by_id_btree = DocInfoByIdBTree,
docinfo_by_seq_btree = DocInfoBySeqBTree,
update_seq = LastSeq
update_seq = LastSeq,
revs_limit = RevsLimit
} = Db,
Ids = [Id || [{_Client, #doc{id=Id}}|_] <- DocsList],
% lookup up the old documents, if they exist.
Expand All @@ -596,11 +598,9 @@ update_docs_int(Db, DocsList, NonRepDocs, MergeConflicts, FullCommit) ->
end,
Ids, OldDocLookups),
% Merge the new docs into the revision trees.
{ok, NewDocInfos0, RemoveSeqs, NewSeq} = merge_rev_trees(
{ok, NewFullDocInfos, RemoveSeqs, NewSeq} = merge_rev_trees(RevsLimit,
MergeConflicts, DocsList, OldDocInfos, [], [], LastSeq),

NewFullDocInfos = stem_full_doc_infos(Db, NewDocInfos0),

% All documents are now ready to write.

{ok, Db2} = update_local_docs(Db, NonRepDocs),
Expand Down Expand Up @@ -765,36 +765,24 @@ copy_doc_attachments(#db{fd=SrcFd}=SrcDb, {Pos,_RevId}, SrcSp, DestFd) ->
end, BinInfos),
{BodyData, NewBinInfos}.

copy_rev_tree_attachments(SrcDb, DestFd, Tree) ->
couch_key_tree:map(
fun(Rev, {IsDel, Sp, Seq}, leaf) ->
DocBody = copy_doc_attachments(SrcDb, Rev, Sp, DestFd),
{IsDel, DocBody, Seq};
(_, _, branch) ->
?REV_MISSING
end, Tree).


copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq, Retry) ->
copy_docs(Db, #db{fd=DestFd}=NewDb, InfoBySeq0, Retry) ->
% COUCHDB-968, make sure we prune duplicates during compaction
InfoBySeq = lists:usort(fun(#doc_info{id=A}, #doc_info{id=B}) -> A =< B end,
InfoBySeq0),
Ids = [Id || #doc_info{id=Id} <- InfoBySeq],
LookupResults = couch_btree:lookup(Db#db.fulldocinfo_by_id_btree, Ids),

% write out the attachments
NewFullDocInfos0 = lists:map(
fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
Info#full_doc_info{rev_tree=copy_rev_tree_attachments(Db, DestFd, RevTree)}
end, LookupResults),
% write out the docs
% we do this in 2 stages so the docs are written out contiguously, making
% view indexing and replication faster.
NewFullDocInfos1 = lists:map(
fun(#full_doc_info{rev_tree=RevTree}=Info) ->
Info#full_doc_info{rev_tree=couch_key_tree:map_leafs(
fun(_Key, {IsDel, DocBody, Seq}) ->
fun({ok, #full_doc_info{rev_tree=RevTree}=Info}) ->
Info#full_doc_info{rev_tree=couch_key_tree:map(
fun(Rev, {IsDel, Sp, Seq}, leaf) ->
DocBody = copy_doc_attachments(Db, Rev, Sp, DestFd),
{ok, Pos} = couch_file:append_term_md5(DestFd, DocBody),
{IsDel, Pos, Seq}
{IsDel, Pos, Seq};
(_, _, branch) ->
?REV_MISSING
end, RevTree)}
end, NewFullDocInfos0),
end, LookupResults),

NewFullDocInfos = stem_full_doc_infos(Db, NewFullDocInfos1),
NewDocInfos = [couch_doc:to_doc_info(Info) || Info <- NewFullDocInfos],
Expand Down Expand Up @@ -866,7 +854,12 @@ start_copy_compact(#db{name=Name,filepath=Filepath}=Db) ->
{ok, Fd} ->
couch_task_status:add_task(<<"Database Compaction">>, <<Name/binary, " retry">>, <<"Starting">>),
Retry = true,
{ok, Header} = couch_file:read_header(Fd);
case couch_file:read_header(Fd) of
{ok, Header} ->
ok;
no_valid_header ->
ok = couch_file:write_header(Fd, Header=#db_header{})
end;
{error, enoent} ->
couch_task_status:add_task(<<"Database Compaction">>, Name, <<"Starting">>),
{ok, Fd} = couch_file:open(CompactFile, [create]),
Expand Down
23 changes: 15 additions & 8 deletions src/couchdb/couch_file.erl
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,19 @@ pread_binary(Fd, Pos) ->


pread_iolist(Fd, Pos) ->
gen_server:call(Fd, {pread_iolist, Pos}, infinity).
case gen_server:call(Fd, {pread_iolist, Pos}, infinity) of
{ok, IoList, <<>>} ->
{ok, IoList};
{ok, IoList, Md5} ->
case couch_util:md5(IoList) of
Md5 ->
{ok, IoList};
_ ->
exit({file_corruption, <<"file corruption">>})
end;
Error ->
Error
end.

%%----------------------------------------------------------------------
%% Purpose: The length of a file, in bytes.
Expand Down Expand Up @@ -298,15 +310,10 @@ handle_call({pread_iolist, Pos}, _From, File) ->
<<1:1/integer,Len:31/integer>> -> % an MD5-prefixed term
{Md5AndIoList, _} = read_raw_iolist_int(File, NextPos, Len+16),
{Md5, IoList} = extract_md5(Md5AndIoList),
case couch_util:md5(IoList) of
Md5 ->
{reply, {ok, IoList}, File};
_ ->
{stop, file_corruption, {error,file_corruption}, File}
end;
{reply, {ok, IoList, Md5}, File};
<<0:1/integer,Len:31/integer>> ->
{Iolist, _} = read_raw_iolist_int(File, NextPos, Len),
{reply, {ok, Iolist}, File}
{reply, {ok, Iolist, <<>>}, File}
end;
handle_call({pread, Pos, Bytes}, _From, #file{fd=Fd,tail_append_begin=TailAppendBegin}=File) ->
{ok, Bin} = file:pread(Fd, Pos, Bytes),
Expand Down
4 changes: 3 additions & 1 deletion src/couchdb/couch_httpd_misc_handlers.erl
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,9 @@ handle_replicate_req(#httpd{method='POST'}=Req) ->
end
catch
throw:{db_not_found, Msg} ->
send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]})
send_json(Req, 404, {[{error, db_not_found}, {reason, Msg}]});
throw:{unauthorized, Msg} ->
send_json(Req, 404, {[{error, unauthorized}, {reason, Msg}]})
end;
handle_replicate_req(Req) ->
send_method_not_allowed(Req, "POST").
Expand Down
Loading

0 comments on commit 79bea41

Please sign in to comment.