Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

remove collection-level logging (SERVER-1455)

  • Loading branch information...
commit 1f8a29ada3d54e0e46e62f0189c992fe21be55d9 1 parent d7a2002
@erh erh authored
View
270 db/cloner.cpp
@@ -37,11 +37,10 @@ namespace mongo {
auto_ptr< DBClientWithCommands > conn;
void copy(const char *from_ns, const char *to_ns, bool isindex, bool logForRepl,
bool masterSameProcess, bool slaveOk, Query q = Query());
- void replayOpLog( DBClientCursor *c, const BSONObj &query );
struct Fun;
public:
Cloner() { }
-
+
/* slaveOk - if true it is ok if the source of the data is !ismaster.
useReplAuth - use the credentials we normally use as a replication slave for the cloning
snapshot - use $snapshot mode for copying collections. note this should not be used when it isn't required, as it will be slower.
@@ -49,8 +48,8 @@ namespace mongo {
*/
void setConnection( DBClientWithCommands *c ) { conn.reset( c ); }
bool go(const char *masterHost, string& errmsg, const string& fromdb, bool logForRepl, bool slaveOk, bool useReplAuth, bool snapshot);
- bool startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, string& errmsg, bool logForRepl, bool copyIndexes, int logSizeMb, long long &cursorId );
- bool finishCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, long long cursorId, string &errmsg );
+
+ bool copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes = true );
};
/* for index info object:
@@ -197,6 +196,36 @@ namespace mongo {
}
}
}
+
+ bool Cloner::copyCollection( const string& from , const string& ns , const BSONObj& query , string& errmsg , bool copyIndexes ){
+ auto_ptr<DBClientConnection> myconn;
+ myconn.reset( new DBClientConnection() );
+ if ( ! myconn->connect( from , errmsg ) )
+ return false;
+
+ conn.reset( myconn.release() );
+
+ writelock lk(ns); // TODO: make this lower down
+ Client::Context ctx(ns);
+
+ { // config
+ string temp = ctx.db()->name + ".system.namespaces";
+ BSONObj config = conn->findOne( temp , BSON( "name" << ns ) );
+ if ( config["options"].isABSONObj() )
+ if ( ! userCreateNS( ns.c_str() , config["options"].Obj() , errmsg, true , 0 ) )
+ return false;
+ }
+
+ { // main data
+ copy( ns.c_str() , ns.c_str() , false , true , false , true , Query(query).snapshot() );
+ }
+
+ { // indexes
+ string temp = ctx.db()->name + ".system.indexes";
+ copy( temp.c_str() , temp.c_str() , true , true , false , true , BSON( "ns" << ns ) );
+ }
+ return true;
+ }
extern bool inDBRepair;
void ensureIdIndexForNewNs(const char *ns);
@@ -335,121 +364,6 @@ namespace mongo {
return true;
}
-
- bool Cloner::startCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, string &errmsg, bool logForRepl, bool copyIndexes, int logSizeMb, long long &cursorId ) {
- char db[256];
- nsToDatabase( ns, db );
-
- NamespaceDetails *nsd = nsdetails( ns );
- if ( nsd ){
- /** note: its ok to clone into a collection, but only if the range you're copying
- doesn't exist on this server */
- string err;
- if ( runCount( ns , BSON( "query" << query ) , err ) > 0 ){
- log() << "WARNING: data already exists for: " << ns << " in range : " << query << " deleting..." << endl;
- deleteObjects( ns , query , false , logForRepl , false );
- }
- }
-
- {
- dbtemprelease r;
- auto_ptr< DBClientConnection > c( new DBClientConnection() );
- if ( !c->connect( fromhost, errmsg ) )
- return false;
- if( !replAuthenticate(c.get()) )
- return false;
- conn = c;
-
- // Start temporary op log
- BSONObjBuilder cmdSpec;
- cmdSpec << "logCollection" << ns << "start" << 1;
- if ( logSizeMb != INT_MIN )
- cmdSpec << "logSizeMb" << logSizeMb;
- BSONObj info;
- if ( !conn->runCommand( db, cmdSpec.done(), info ) ) {
- errmsg = "logCollection failed: " + info.toString();
- return false;
- }
- }
-
- if ( ! nsd ) {
- BSONObj spec = conn->findOne( string( db ) + ".system.namespaces", BSON( "name" << ns ) );
- if ( !userCreateNS( ns, spec.getObjectField( "options" ), errmsg, true ) )
- return false;
- }
-
- copy( ns, ns, false, logForRepl, false, false, query );
-
- if ( copyIndexes ) {
- string indexNs = string( db ) + ".system.indexes";
- copy( indexNs.c_str(), indexNs.c_str(), true, logForRepl, false, false, BSON( "ns" << ns << "name" << NE << "_id_" ) );
- }
-
- auto_ptr< DBClientCursor > c;
- {
- dbtemprelease r;
- string logNS = "local.temp.oplog." + string( ns );
- c = conn->query( logNS.c_str(), Query(), 0, 0, 0, QueryOption_CursorTailable );
- }
- if ( c->more() ) {
- replayOpLog( c.get(), query );
- cursorId = c->getCursorId();
- massert( 10291 , "Expected valid tailing cursor", cursorId != 0 );
- } else {
- massert( 10292 , "Did not expect valid cursor for empty query result", c->getCursorId() == 0 );
- cursorId = 0;
- }
- c->decouple();
- return true;
- }
-
- void Cloner::replayOpLog( DBClientCursor *c, const BSONObj &query ) {
- Matcher matcher( query );
- while( 1 ) {
- BSONObj op;
- {
- dbtemprelease t;
- if ( !c->more() )
- break;
- op = c->next();
- }
- // For sharding v1.0, we don't allow shard key updates -- so just
- // filter each insert by value.
- if ( op.getStringField( "op" )[ 0 ] != 'i' || matcher.matches( op.getObjectField( "o" ) ) )
- ReplSource::applyOperation( op );
- }
- }
-
- bool Cloner::finishCloneCollection( const char *fromhost, const char *ns, const BSONObj &query, long long cursorId, string &errmsg ) {
- char db[256];
- nsToDatabase( ns, db );
-
- auto_ptr< DBClientCursor > cur;
- {
- dbtemprelease r;
- auto_ptr< DBClientConnection > c( new DBClientConnection() );
- if ( !c->connect( fromhost, errmsg ) )
- return false;
- if( !replAuthenticate(c.get()) )
- return false;
- conn = c;
- string logNS = "local.temp.oplog." + string( ns );
- if ( cursorId != 0 )
- cur = conn->getMore( logNS.c_str(), cursorId );
- else
- cur = conn->query( logNS.c_str(), Query() );
- }
- replayOpLog( cur.get(), query );
- {
- dbtemprelease t;
- BSONObj info;
- if ( !conn->runCommand( db, BSON( "logCollection" << ns << "validateComplete" << 1 ), info ) ) {
- errmsg = "logCollection failed: " + info.toString();
- return false;
- }
- }
- return true;
- }
/* slaveOk - if true it is ok if the source of the data is !ismaster.
useReplAuth - use the credentials we normally use as a replication slave for the cloning
@@ -494,7 +408,7 @@ namespace mongo {
virtual bool slaveOk() const {
return false;
}
- virtual LockType locktype() const { return WRITE; }
+ virtual LockType locktype() const { return NONE; }
CmdCloneCollection() : Command("cloneCollection") { }
virtual void help( stringstream &help ) const {
help << "{ cloneCollection: <namespace>, from: <host> [,query: <query_filter>] [,copyIndexes:<bool>] }"
@@ -512,7 +426,7 @@ namespace mongo {
{
HostAndPort h(fromhost);
if( h.isSelf() ) {
- errmsg = "can't copy from self";
+ errmsg = "can't cloneCollection from self";
return false;
}
}
@@ -524,124 +438,18 @@ namespace mongo {
BSONObj query = cmdObj.getObjectField("query");
if ( query.isEmpty() )
query = BSONObj();
+
BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
- // Will not be used if doesn't exist.
- int logSizeMb = cmdObj.getIntField( "logSizeMb" );
-
- /* replication note: we must logOp() not the command, but the cloned data -- if the slave
- were to clone it would get a different point-in-time and not match.
- */
- Client::Context ctx( collection );
- log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost << " query: " << query << " logSizeMb: " << logSizeMb << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
+ log() << "cloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost
+ << " query: " << query << " " << ( copyIndexes ? "" : ", not copying indexes" ) << endl;
Cloner c;
- long long cursorId;
- if ( !c.startCloneCollection( fromhost.c_str(), collection.c_str(), query, errmsg, !fromRepl, copyIndexes, logSizeMb, cursorId ) )
- return false;
- return c.finishCloneCollection( fromhost.c_str(), collection.c_str(), query, cursorId, errmsg);
+ return c.copyCollection( fromhost , collection , query, errmsg , copyIndexes );
}
} cmdclonecollection;
- class CmdStartCloneCollection : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
- virtual LockType locktype() const { return WRITE; }
- CmdStartCloneCollection() : Command("startCloneCollection") { }
- virtual void help( stringstream &help ) const {
- help << " example: { startCloneCollection: <collection ns>, from: <hostname>, query: <query> }";
- help << ", returned object includes a finishToken field, the value of which may be passed to the finishCloneCollection command";
- }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- string fromhost = cmdObj.getStringField("from");
- if ( fromhost.empty() ) {
- errmsg = "missing from spec";
- return false;
- }
- string collection = cmdObj.getStringField("startCloneCollection");
- if ( collection.empty() ) {
- errmsg = "missing startCloneCollection spec";
- return false;
- }
- BSONObj query = cmdObj.getObjectField("query");
- if ( query.isEmpty() )
- query = BSONObj();
- BSONElement copyIndexesSpec = cmdObj.getField("copyindexes");
- bool copyIndexes = copyIndexesSpec.isBoolean() ? copyIndexesSpec.boolean() : true;
- // Will not be used if doesn't exist.
- int logSizeMb = cmdObj.getIntField( "logSizeMb" );
-
- /* replication note: we must logOp() not the command, but the cloned data -- if the slave
- were to clone it would get a different point-in-time and not match.
- */
- Client::Context ctx(collection);
-
- log() << "startCloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost << " query: " << query << endl;
-
- Cloner c;
- long long cursorId;
- bool res = c.startCloneCollection( fromhost.c_str(), collection.c_str(), query, errmsg, !fromRepl, copyIndexes, logSizeMb, cursorId );
-
- if ( res ) {
- BSONObjBuilder b;
- b << "fromhost" << fromhost;
- b << "collection" << collection;
- b << "query" << query;
- b.appendDate( "cursorId", cursorId );
- BSONObj token = b.done();
- result << "finishToken" << token;
- }
- return res;
- }
- } cmdstartclonecollection;
-
- class CmdFinishCloneCollection : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
- virtual LockType locktype() const { return WRITE; }
- CmdFinishCloneCollection() : Command("finishCloneCollection") { }
- virtual void help( stringstream &help ) const {
- help << " example: { finishCloneCollection: <finishToken> }";
- }
- virtual bool run(const string& dbname , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- BSONObj fromToken = cmdObj.getObjectField("finishCloneCollection");
- if ( fromToken.isEmpty() ) {
- errmsg = "missing finishCloneCollection finishToken spec";
- return false;
- }
- string fromhost = fromToken.getStringField( "fromhost" );
- if ( fromhost.empty() ) {
- errmsg = "missing fromhost spec";
- return false;
- }
- string collection = fromToken.getStringField("collection");
- if ( collection.empty() ) {
- errmsg = "missing collection spec";
- return false;
- }
- BSONObj query = fromToken.getObjectField("query");
- if ( query.isEmpty() ) {
- query = BSONObj();
- }
- long long cursorId = 0;
- BSONElement cursorIdToken = fromToken.getField( "cursorId" );
- if ( cursorIdToken.type() == Date ) {
- cursorId = cursorIdToken._numberLong();
- }
-
- Client::Context ctx( collection );
-
- log() << "finishCloneCollection. db:" << dbname << " collection:" << collection << " from: " << fromhost << " query: " << query << endl;
-
- Cloner c;
- return c.finishCloneCollection( fromhost.c_str(), collection.c_str(), query, cursorId, errmsg );
- }
- } cmdfinishclonecollection;
thread_specific_ptr< DBClientConnection > authConn_;
/* Usage:
View
36 db/namespace.cpp
@@ -743,42 +743,6 @@ namespace mongo {
i.next().keyPattern().getFieldNames(_indexKeys);
}
- void NamespaceDetailsTransient::cllStart( int logSizeMb ) {
- assertInWriteLock();
- _cll_ns = "local.temp.oplog." + _ns;
- _cll_enabled = true;
- stringstream spec;
- // 128MB
- spec << "{size:" << logSizeMb * 1024 * 1024 << ",capped:true,autoIndexId:false}";
- Client::Context ct( _cll_ns );
- string err;
- massert( 10347 , "Could not create log ns", userCreateNS( _cll_ns.c_str(), fromjson( spec.str() ), err, false ) );
- NamespaceDetails *d = nsdetails( _cll_ns.c_str() );
- d->cappedDisallowDelete();
- }
-
- void NamespaceDetailsTransient::cllInvalidate() {
- assertInWriteLock();
- cllDrop();
- _cll_enabled = false;
- }
-
- bool NamespaceDetailsTransient::cllValidateComplete() {
- assertInWriteLock();
- cllDrop();
- bool ret = _cll_enabled;
- _cll_enabled = false;
- _cll_ns = "";
- return ret;
- }
-
- void NamespaceDetailsTransient::cllDrop() {
- assertInWriteLock();
- if ( !_cll_enabled )
- return;
- Client::Context ctx( _cll_ns );
- dropNS( _cll_ns );
- }
/* ------------------------------------------------------------------------- */
View
15 db/namespace.h
@@ -476,7 +476,7 @@ namespace mongo {
void reset();
static std::map< string, shared_ptr< NamespaceDetailsTransient > > _map;
public:
- NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount(), _cll_enabled() { }
+ NamespaceDetailsTransient(const char *ns) : _ns(ns), _keysComputed(false), _qcWriteCount(){ }
/* _get() is not threadsafe -- see get_inlock() comments */
static NamespaceDetailsTransient& _get(const char *ns);
/* use get_w() when doing write operations */
@@ -556,19 +556,6 @@ namespace mongo {
_qcCache[ pattern ] = make_pair( indexKey, nScanned );
}
- /* for collection-level logging -- see CmdLogCollection ----------------- */
- /* assumed to be in write lock for this */
- private:
- string _cll_ns; // "local.temp.oplog." + _ns;
- bool _cll_enabled;
- void cllDrop(); // drop _cll_ns
- public:
- string cllNS() const { return _cll_ns; }
- bool cllEnabled() const { return _cll_enabled; }
- void cllStart( int logSizeMb = 256 ); // begin collection level logging
- void cllInvalidate();
- bool cllValidateComplete();
-
}; /* NamespaceDetailsTransient */
inline NamespaceDetailsTransient& NamespaceDetailsTransient::_get(const char *ns) {
View
61 db/oplog.cpp
@@ -293,15 +293,6 @@ namespace mongo {
}
logOpForSharding( opstr , ns , obj , patt );
-
- NamespaceDetailsTransient &t = NamespaceDetailsTransient::get_w( ns );
- if ( t.cllEnabled() ) {
- try {
- _logOpOld(opstr, ns, t.cllNS().c_str(), obj, patt, b);
- } catch ( const DBException & ) {
- t.cllInvalidate();
- }
- }
}
void createOplog() {
@@ -380,58 +371,6 @@ namespace mongo {
log() << "******" << endl;
}
- class CmdLogCollection : public Command {
- public:
- virtual bool slaveOk() const {
- return false;
- }
- virtual LockType locktype() const { return WRITE; }
- CmdLogCollection() : Command( "logCollection" ) {}
- virtual void help( stringstream &help ) const {
- help << "examples: { logCollection: <collection ns>, start: 1 }, "
- << "{ logCollection: <collection ns>, validateComplete: 1 }";
- }
- virtual bool run(const string& dbname, BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
- string logCollection = cmdObj.getStringField( "logCollection" );
- if ( logCollection.empty() ) {
- errmsg = "missing logCollection spec";
- return false;
- }
- bool start = !cmdObj.getField( "start" ).eoo();
- bool validateComplete = !cmdObj.getField( "validateComplete" ).eoo();
- if ( start ? validateComplete : !validateComplete ) {
- errmsg = "Must specify exactly one of start:1 or validateComplete:1";
- return false;
- }
- int logSizeMb = cmdObj.getIntField( "logSizeMb" );
- NamespaceDetailsTransient &t = NamespaceDetailsTransient::get_w( logCollection.c_str() );
- if ( start ) {
- if ( t.cllNS().empty() ) {
- if ( logSizeMb == INT_MIN ) {
- t.cllStart();
- } else {
- t.cllStart( logSizeMb );
- }
- } else {
- errmsg = "Log already started for ns: " + logCollection;
- return false;
- }
- } else {
- if ( t.cllNS().empty() ) {
- errmsg = "No log to validateComplete for ns: " + logCollection;
- return false;
- } else {
- if ( !t.cllValidateComplete() ) {
- errmsg = "Oplog failure, insufficient space allocated";
- return false;
- }
- }
- }
- log() << "started logCollection with cmd obj: " << cmdObj << endl;
- return true;
- }
- } cmdlogcollection;
-
// -------------------------------------
struct TestOpTime {
View
147 jstests/clone/clonecollection.js
@@ -2,65 +2,6 @@
var baseName = "jstests_clonecollection";
-parallel = function() {
- return t.parallelStatus;
-}
-
-resetParallel = function() {
- parallel().drop();
-}
-
-doParallel = function( work ) {
- resetParallel();
- startMongoProgramNoConnect( "mongo", "--port", ports[ 1 ], "--eval", work + "; db.parallelStatus.save( {done:1} );", baseName );
-}
-
-doneParallel = function() {
- return !!parallel().findOne();
-}
-
-waitParallel = function() {
- assert.soon( function() { return doneParallel(); }, "parallel did not finish in time", 300000, 1000 );
-}
-
-cloneNo = -1;
-startstartclone = function( spec ) {
- spec = spec || "";
- cloneNo++;
- doParallel( "z = db.runCommand( {startCloneCollection:\"jstests_clonecollection.a\", from:\"localhost:" + ports[ 0 ] + "\"" + spec + " } ); print( \"clone_clone_clone_commandResult::" + cloneNo + "::\" + tojson( z , '' , true ) + \":::::\" );" );
-}
-
-finishstartclone = function() {
- waitParallel();
- // even after parallel shell finished, must wait for finishToken line to appear in log
- assert.soon( function() {
- raw = rawMongoProgramOutput().replace( /[\r\n]/gm , " " )
- ret = raw.match( new RegExp( "clone_clone_clone_commandResult::" + cloneNo + "::(.*):::::" ) );
- if ( ret == null ) {
- return false;
- }
- ret = ret[ 1 ];
- return true;
- } );
-
- eval( "ret = " + ret );
-
- assert.commandWorked( ret );
- return ret;
-}
-
-dofinishclonecmd = function( ret ) {
- finishToken = ret.finishToken;
- // Round-tripping through JS can corrupt the cursor ids we store as BSON
- // Date elements. Date( 0 ) will correspond to a cursorId value of 0, which
- // makes the db start scanning from the beginning of the collection.
- finishToken.cursorId = new Date( 0 );
- return t.runCommand( {finishCloneCollection:finishToken} );
-}
-
-finishclone = function( ret ) {
- assert.commandWorked( dofinishclonecmd( ret ) );
-}
ports = allocatePorts( 2 );
@@ -105,92 +46,4 @@ assert( f.a.isCapped() );
assert.commandWorked( t.cloneCollection( "localhost:" + ports[ 0 ], "a" ) );
assert( t.a.isCapped(), "cloned collection not capped" );
-// Now test insert + delete + update during clone
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 100000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 100000, f.a.count() );
-
-startstartclone( ", query:{i:{$gte:0}}" );
-
-sleep( 200 );
-f.a.save( { i: 200000 } );
-f.a.save( { i: -1 } );
-f.a.remove( { i: 0 } );
-f.a.update( { i: 99998 }, { i: 99998, x: "y" } );
-assert.eq( 100001, f.a.count() );
-ret = finishstartclone();
-finishclone( ret );
-
-
-assert.eq( 100000, t.a.find().count() , "D1" );
-assert.eq( 1, t.a.find( { i: 200000 } ).count() , "D2" );
-assert.eq( 0, t.a.find( { i: -1 } ).count() , "D3" );
-assert.eq( 0, t.a.find( { i: 0 } ).count() , "D4" );
-assert.eq( 1, t.a.find( { i: 99998, x: "y" } ).count() , "D5" );
-
-
-// Now test oplog running out of space -- specify small size clone oplog for test.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 200000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 200000, f.a.count() , "E1" );
-
-startstartclone( ", logSizeMb:1" );
-ret = finishstartclone();
-
-for( i = 200000; i < 250000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 250000, f.a.count() );
-
-assert.commandFailed( dofinishclonecmd( ret ) );
-
-// Make sure the same works with standard size op log.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 200000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 200000, f.a.count() , "F1" );
-
-startstartclone();
-ret = finishstartclone();
-
-for( i = 200000; i < 250000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 250000, f.a.count() , "F2" );
-
-finishclone( ret );
-assert.eq( 250000, t.a.find().count() , "F3" );
-
-// Test startCloneCollection and finishCloneCollection commands.
-f.a.drop();
-t.a.drop();
-
-for( i = 0; i < 100000; ++i ) {
- f.a.save( { i: i } );
-}
-assert.eq( 100000, f.a.count() , "G1" );
-
-startstartclone();
-
-sleep( 200 );
-f.a.save( { i: -1 } );
-assert.eq( 100001, f.a.count() );
-
-ret = finishstartclone();
-assert.eq( 100001, t.a.find().count() , "G2" );
-f.a.save( { i: -2 } );
-assert.eq( 100002, f.a.find().count() , "G3" );
-finishclone( ret );
-assert.eq( 100002, t.a.find().count() , "G4" );
Please sign in to comment.
Something went wrong with that request. Please try again.