Skip to content

Commit

Permalink
Merge branch 'master' of github.com:mongodb/mongo
Browse files Browse the repository at this point in the history
  • Loading branch information
astaple committed Jul 21, 2010
2 parents 7782698 + 77a8475 commit d7a2002
Show file tree
Hide file tree
Showing 12 changed files with 1,927 additions and 1,768 deletions.
5 changes: 5 additions & 0 deletions bson/bsonobj.h
Expand Up @@ -227,6 +227,11 @@ namespace mongo {
bool considerFieldName=true) const;


/** Relational operators defined in terms of woCompare() (field-order-sensitive,
    field-name-considering comparison — see woCompare() above for the exact
    semantics of the underlying total ordering). */
bool operator<( const BSONObj& other ) const { return woCompare( other ) < 0; }
bool operator<=( const BSONObj& other ) const { return woCompare( other ) <= 0; }
bool operator>( const BSONObj& other ) const { return woCompare( other ) > 0; }
bool operator>=( const BSONObj& other ) const { return woCompare( other ) >= 0; }

/**
* @param useDotted whether to treat sort key fields as possibly dotted and expand into them
*/
Expand Down
2 changes: 1 addition & 1 deletion client/distlock.cpp
Expand Up @@ -100,7 +100,7 @@ namespace mongo {

} distLockPinger;

DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , int takeoverMinutes )
DistributedLock::DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes )
: _conn(conn),_name(name),_takeoverMinutes(takeoverMinutes){
_id = BSON( "_id" << name );
_ns = "config.locks";
Expand Down
4 changes: 2 additions & 2 deletions client/distlock.h
Expand Up @@ -34,7 +34,7 @@ namespace mongo {
/**
* @param takeoverMinutes how long before we steal lock in minutes
*/
DistributedLock( const ConnectionString& conn , const string& name , int takeoverMinutes = 10 );
DistributedLock( const ConnectionString& conn , const string& name , unsigned takeoverMinutes = 10 );

int getState(){
return _state.get();
Expand All @@ -50,7 +50,7 @@ namespace mongo {
private:
ConnectionString _conn;
string _name;
int _takeoverMinutes;
unsigned _takeoverMinutes;

string _ns;
BSONObj _id;
Expand Down
6 changes: 3 additions & 3 deletions db/repl/health.cpp
Expand Up @@ -258,12 +258,12 @@ namespace mongo {
/* self row */
s << tr() << td(_self->fullName() + " (me)") <<
td(_self->id()) <<
td("1") <<
td("1") << //up
td(ago(started)) <<
td("") <<
td("") << // last heartbeat
td(ToString(_self->config().votes)) <<
td(stateAsHtml(_myState));
s << td( _self->lhb() );
s << td( _hbmsg );
stringstream q;
q << "/_replSetOplog?" << _self->id();
s << td( a(q.str(), "", theReplSet->lastOpTimeWritten.toString()) );
Expand Down
69 changes: 69 additions & 0 deletions db/repl/rs_rollback.cpp
@@ -0,0 +1,69 @@
/* @file rs_rollback.cpp
*
* Copyright (C) 2008 10gen Inc.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License, version 3,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "pch.h"
#include "../client.h"
#include "../../client/dbclient.h"
#include "rs.h"
#include "../repl.h"

/* Scenarios
We went offline with ops not replicated out.
F = node that failed and coming back.
P = node that took over, new primary
#1:
F : a b c d e f g
P : a b c d q
The design is "keep P". One could argue here that "keep F" has some merits, however, in most cases P
will have significantly more data. Also note that P may have a proper subset of F's stream if there were
no subsequent writes!
For now the model is simply : get F back in sync with P. If P was really behind or something, we should have
just chosen not to fail over anyway.
#2:
F : a b c d e f g -> a b c d
P : a b c d
#3:
F : a b c d e f g -> a b c d q r s t u v w x z
P : a b c d q r s t u v w x z
Steps
find an event in common. 'd'.
undo our events beyond that by:
(1) taking copy from other server of those objects
(2) do not consider copy valid until we pass the original end point (e.g. g) in time
-- i.e., reset minvalid.
(3) skip operations on objects that are previous in time to our capture of the object.
a b c d e f
a b c
a b c d e.g
*/

namespace mongo {

    /* Entry point for replica-set rollback (scenarios described in the file
       comment above).  Currently an empty placeholder — no rollback logic is
       implemented yet.
       NOTE(review): the trailing C-style "..." varargs looks like a
       provisional signature; confirm intended parameters before filling in
       the implementation. */
void ReplSetImpl::syncRollback(OplogReader&r, ...) {
}

}
4 changes: 2 additions & 2 deletions db/repl/rs_sync.cpp
Expand Up @@ -69,11 +69,11 @@ namespace mongo {
long long h = o["h"].numberLong();
if( ts != lastOpTimeWritten || h != lastH ) {
if( lastOpTimeWritten < ts ) {
log() << "replSet ERROR too stale to catch up, at least from primary " << hn << rsLog;
log() << "replSet error too stale to catch up, at least from primary " << hn << rsLog;
log() << "replSet our last optime : " << lastOpTimeWritten.toStringPretty() << rsLog;
log() << "replSet oldest at " << hn << " : " << ts.toStringPretty() << rsLog;
log() << "replSet See http://www.mongodb.org/display/DOCS/Resyncing+a+Very+Stale+Replica+Set+Member" << rsLog;
sethbmsg("sync exception too stale to catch up");
sethbmsg("error too stale to catch up");
sleepsecs(120);
return;
}
Expand Down
22 changes: 22 additions & 0 deletions dbtests/jsobjtests.cpp
Expand Up @@ -1606,6 +1606,27 @@ namespace JsobjTests {
}
};

class CompareOps {
public:
void run(){

BSONObj a = BSON("a"<<1);
BSONObj b = BSON("a"<<1);
BSONObj c = BSON("a"<<2);
BSONObj d = BSON("a"<<3);
BSONObj e = BSON("a"<<4);
BSONObj f = BSON("a"<<4);

ASSERT( ! ( a < b ) );
ASSERT( a <= b );
ASSERT( a < c );

ASSERT( f > d );
ASSERT( f >= e );
ASSERT( ! ( f > e ) );
}
};

class All : public Suite {
public:
All() : Suite( "jsobj" ){
Expand Down Expand Up @@ -1712,6 +1733,7 @@ namespace JsobjTests {
add< BSONFieldTests >();
add< BSONForEachTest >();
add< StringDataTest >();
add< CompareOps >();
}
} myall;

Expand Down
11 changes: 11 additions & 0 deletions jstests/repl/basic1.js
Expand Up @@ -126,6 +126,17 @@ assert.eq( { _id : "fun" , a : { b : { c : { x : 6848 , y : 911 } } } } , as.b.f
check( "b 4" );


// lots of indexes: verify that a burst of ensureIndex() calls on the
// master (am) all replicate to the slave (as)

am.lotOfIndexes.insert( { x : 1 } );
for ( i = 0; i < 200; i++ ){
    var spec = {};
    spec[ "x" + i ] = 1;
    am.lotOfIndexes.ensureIndex( spec );
}

assert.eq( am.lotOfIndexes.getIndexes().length , as.lotOfIndexes.getIndexes().length , "lots of indexes" );


rt.stop();

Expand Down
38 changes: 28 additions & 10 deletions s/chunk.cpp
Expand Up @@ -120,19 +120,23 @@ namespace mongo {
return _manager->getShardKey().extractKey( end );
}

BSONObj cmd = BSON( "medianKey" << _manager->getns()
<< "keyPattern" << _manager->getShardKey().key()
<< "min" << getMin()
<< "max" << getMax() );

ScopedDbConnection conn( getShard().getConnString() );
BSONObj result;
if ( ! conn->runCommand( "admin" , BSON( "medianKey" << _manager->getns()
<< "keyPattern" << _manager->getShardKey().key()
<< "min" << getMin()
<< "max" << getMax()
) , result ) ){
if ( ! conn->runCommand( "admin" , cmd , result ) ){
stringstream ss;
ss << "medianKey command failed: " << result;
uassert( 10164 , ss.str() , 0 );
}

BSONObj median = result.getObjectField( "median" );
BSONObj median = result.getObjectField( "median" ).getOwned();
conn.done();


if (median == getMin()){
Query q;
q.minKey(_min).maxKey(_max);
Expand All @@ -141,10 +145,16 @@ namespace mongo {
median = conn->findOne(_manager->getns(), q);
median = _manager->getShardKey().extractKey( median );
}

conn.done();

return median.getOwned();
if ( median < getMin() || median >= getMax() ){
stringstream ss;
ss << "medianKey returned value out of range. "
<< " cmd: " << cmd
<< " result: " << result;
uasserted( 13394 , ss.str() );
}

return median;
}

void Chunk::pickSplitVector( vector<BSONObj>* splitPoints ) const {
Expand Down Expand Up @@ -215,6 +225,14 @@ namespace mongo {
log(4) << "splitPoint: " << splitPoint << endl;
nextPoint = (++i != m.end()) ? i->getOwned() : _max.getOwned();
log(4) << "nextPoint: " << nextPoint << endl;

if ( nextPoint <= splitPoint) {
stringstream ss;
ss << "multiSplit failing because keys min: " << splitPoint << " and max: " << nextPoint
<< " do not define a valid chunk";
uasserted( 13395, ss.str() );
}

ChunkPtr s( new Chunk( _manager, splitPoint , nextPoint , _shard) );
s->_markModified();
newChunks.push_back(s);
Expand Down Expand Up @@ -349,7 +367,7 @@ namespace mongo {
if ( size < myMax )
return false;

log() << "autosplitting " << _manager->getns() << " size: " << size << " shard: " << toString() << endl;
log() << "autosplitting " << _manager->getns() << " size: " << size << " shard: " << toString() << " on: " << splitPoint << endl;
vector<BSONObj> splitPoints;
splitPoints.push_back( splitPoint );
ChunkPtr newShard = multiSplit( splitPoints );
Expand Down
20 changes: 19 additions & 1 deletion s/d_split.cpp
Expand Up @@ -80,7 +80,25 @@ namespace mongo {
return false;
}

result.append( "median", c.prettyKey( c.currKey() ) );
BSONObj median = c.prettyKey( c.currKey() );
result.append( "median", median );

int x = median.woCompare( min , BSONObj() , false );
int y = median.woCompare( max , BSONObj() , false );
if ( x == 0 || y == 0 ){
// its on an edge, ok
}
else if ( x < 0 && y < 0 ){
log( LL_ERROR ) << "median error (1) min: " << min << " max: " << max << " median: " << median << endl;
errmsg = "median error 1";
return false;
}
else if ( x > 0 && y > 0 ){
log( LL_ERROR ) << "median error (2) min: " << min << " max: " << max << " median: " << median << endl;
errmsg = "median error 2";
return false;
}

return true;
}
} cmdMedianKey;
Expand Down

0 comments on commit d7a2002

Please sign in to comment.