SERVER-3067 Add killop support for foreground index builds.
astaple committed Nov 9, 2012
1 parent f5d6b28 commit 6a51b6b
Showing 31 changed files with 1,294 additions and 209 deletions.
53 changes: 53 additions & 0 deletions jstests/slowNightly/index_killop.js
@@ -0,0 +1,53 @@
// Both foreground and background index builds can be aborted using killop. SERVER-3067

t = db.jstests_slownightly_index_killop;
t.drop();

// Insert a large number of documents, enough to ensure that an index build on these documents
// will be interrupted before it completes.
for( i = 0; i < 1e6; ++i ) {
t.save( { a:i } );
}
db.getLastError();

function debug( x ) {
// printjson( x );
}

/** @return the op id for the running index build, or -1 if there is no current index build. */
function getIndexBuildOpId() {
inprog = db.currentOp().inprog;
debug( inprog );
indexBuildOpId = -1;
inprog.forEach( function( op ) {
// Identify the index build as an insert into the 'test.system.indexes'
// namespace. It is assumed that no other clients are concurrently
// accessing the 'test' database.
if ( op.op == 'insert' && op.ns == 'test.system.indexes' ) {
debug( op.opid );
indexBuildOpId = op.opid;
}
} );
return indexBuildOpId;
}

/** Test that building an index with @param 'options' can be aborted using killop. */
function testAbortIndexBuild( options ) {

// Create an index asynchronously by using a new connection.
new Mongo( db.getMongo().host ).getCollection( t.toString() ).createIndex( { a:1 }, options );

// When the index build starts, find its op id.
assert.soon( function() { return ( opId = getIndexBuildOpId() ) != -1; } );
// Kill the index build.
db.killOp( opId );

// Wait for the index build to stop.
assert.soon( function() { return getIndexBuildOpId() == -1; } );
// Check that no new index has been created. This verifies that the index build was aborted
// rather than successfully completed.
assert.eq( [ { _id:1 } ], t.getIndexKeys() );
}

testAbortIndexBuild( { background:false } );
testAbortIndexBuild( { background:true } );
2 changes: 1 addition & 1 deletion src/mongo/db/btree.cpp
@@ -1433,7 +1433,7 @@ namespace mongo {
template< class V >
DiskLoc BtreeBucket<V>::addBucket(const IndexDetails& id) {
string ns = id.indexNamespace();
- DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, true);
+ DiskLoc loc = theDataFileMgr.insert(ns.c_str(), 0, V::BucketSize, false, true);
BtreeBucket *b = BTREEMOD(loc);
b->init();
return loc;
28 changes: 6 additions & 22 deletions src/mongo/db/btreebuilder.cpp
@@ -28,6 +28,7 @@
#include "stats/counters.h"
#include "dur_commitjob.h"
#include "btreebuilder.h"
+ #include "mongo/db/kill_current_op.h"

namespace mongo {

@@ -92,7 +93,7 @@ namespace mongo {
}

template<class V>
- void BtreeBuilder<V>::buildNextLevel(DiskLoc loc) {
+ void BtreeBuilder<V>::buildNextLevel(DiskLoc loc, bool mayInterrupt) {
int levels = 1;
while( 1 ) {
if( loc.btree<V>()->tempNext().isNull() ) {
@@ -108,6 +109,8 @@

DiskLoc xloc = loc;
while( !xloc.isNull() ) {
+ killCurrentOp.checkForInterrupt( !mayInterrupt );
+
if ( getDur().commitIfNeeded() ) {
b = cur.btreemod<V>();
up = upLoc.btreemod<V>();
@@ -154,30 +157,11 @@

/** when all addKeys are done, we then build the higher levels of the tree */
template<class V>
- void BtreeBuilder<V>::commit() {
- buildNextLevel(first);
+ void BtreeBuilder<V>::commit(bool mayInterrupt) {
+ buildNextLevel(first, mayInterrupt);
committed = true;
}

- template<class V>
- BtreeBuilder<V>::~BtreeBuilder() {
- DESTRUCTOR_GUARD(
- if( !committed ) {
- LOG(2) << "Rolling back partially built index space" << endl;
- DiskLoc x = first;
- while( !x.isNull() ) {
- DiskLoc next = x.btree<V>()->tempNext();
- string ns = idx.indexNamespace();
- theDataFileMgr._deleteRecord(nsdetails(ns.c_str()), ns.c_str(), x.rec(), x);
- x = next;
- getDur().commitIfNeeded();
- }
- verify( idx.head.isNull() );
- LOG(2) << "done rollback" << endl;
- }
- )
- }

template class BtreeBuilder<V0>;
template class BtreeBuilder<V1>;
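
The new killCurrentOp.checkForInterrupt( !mayInterrupt ) call above polls the current operation's kill state on every pass of the level-building loop, with a boolean derived from whether this particular build is allowed to be interrupted (the precise meaning of that argument is not visible in this excerpt). A minimal standalone sketch of the general pattern (poll a kill flag inside the hot loop and unwind with an exception) follows; the names killRequested, checkForInterrupt and buildLevel, and the "ignore interrupts" reading of the argument, are hypothetical stand-ins rather than the mongod API.

    // Sketch of the interrupt-polling pattern, assuming a killop-style command
    // merely sets a flag that long-running loops check periodically.
    #include <atomic>
    #include <cstdio>
    #include <stdexcept>

    static std::atomic<bool> killRequested(false);   // set by the kill command

    // Throws to unwind the current operation unless the caller asked to ignore kills.
    static void checkForInterrupt(bool ignoreInterrupt) {
        if (!ignoreInterrupt && killRequested.load())
            throw std::runtime_error("operation was killed");
    }

    // Long-running work loop; mayInterrupt is threaded in from the entry point.
    static void buildLevel(int buckets, bool mayInterrupt) {
        for (int i = 0; i < buckets; ++i) {
            checkForInterrupt(!mayInterrupt);    // same call shape as in the diff above
            // ... write one bucket of the next btree level ...
        }
    }

    int main() {
        killRequested = true;                      // simulate a kill arriving mid-build
        try {
            buildLevel(1000, /*mayInterrupt=*/true);
        } catch (const std::exception& e) {
            std::printf("index build aborted: %s\n", e.what());
        }
        buildLevel(1000, /*mayInterrupt=*/false);  // non-interruptible path runs to completion
        return 0;
    }
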
6 changes: 2 additions & 4 deletions src/mongo/db/btreebuilder.h
@@ -43,12 +43,10 @@ namespace mongo {
BtreeBucket<V> *b;

void newBucket();
- void buildNextLevel(DiskLoc);
+ void buildNextLevel(DiskLoc loc, bool mayInterrupt);
void mayCommitProgressDurably();

public:
- ~BtreeBuilder();
-
BtreeBuilder(bool _dupsAllowed, IndexDetails& _idx);

/**
@@ -61,7 +59,7 @@ namespace mongo {
* commit work. if not called, destructor will clean up partially completed work
* (in case exception has happened).
*/
- void commit();
+ void commit(bool mayInterrupt);

unsigned long long getn() { return n; }
};
5 changes: 4 additions & 1 deletion src/mongo/db/cap.cpp
@@ -484,7 +484,10 @@
}

for ( unsigned i=0; i<indexes.size(); i++ ) {
- theDataFileMgr.insertWithObjMod( Namespace( ns ).getSisterNS( "system.indexes" ).c_str() , indexes[i] , true );
+ theDataFileMgr.insertWithObjMod(Namespace( ns ).getSisterNS( "system.indexes" ).c_str(),
+ indexes[i],
+ false,
+ true);
}

}
2 changes: 0 additions & 2 deletions src/mongo/db/cloner.cpp
@@ -32,8 +32,6 @@ namespace mongo {

BSONElement getErrField(const BSONObj& o);

- void ensureHaveIdIndex(const char *ns);
-
bool replAuthenticate(DBClientBase *);

/** Selectively release the mutex based on a parameter. */
2 changes: 1 addition & 1 deletion src/mongo/db/commands/mr.cpp
@@ -579,7 +579,7 @@
*/
void State::_insertToInc( BSONObj& o ) {
verify( _onDisk );
- theDataFileMgr.insertWithObjMod( _config.incLong.c_str() , o , true );
+ theDataFileMgr.insertWithObjMod( _config.incLong.c_str(), o, false, true );
getDur().commitIfNeeded();
}
4 changes: 1 addition & 3 deletions src/mongo/db/compact.cpp
@@ -37,8 +37,6 @@

namespace mongo {

- void addRecordToRecListInExtent(Record *r, DiskLoc loc);
- DiskLoc allocateSpaceForANewRecord(const char *ns, NamespaceDetails *d, int lenWHdr, bool god);
void freeExtents(DiskLoc firstExt, DiskLoc lastExt);

/* this should be done in alloc record not here, but doing here for now.
@@ -123,7 +121,7 @@
{
// extract keys for all indexes we will be rebuilding
for( int x = 0; x < nidx; x++ ) {
- phase1[x].addKeys(indexSpecs[x], objOld, loc);
+ phase1[x].addKeys(indexSpecs[x], objOld, loc, false);
}
}
}
4 changes: 2 additions & 2 deletions src/mongo/db/compact.h
@@ -32,15 +32,15 @@ namespace mongo {
unsigned long long nkeys;
bool multi; // multikey index

- void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc) {
+ void addKeys(const IndexSpec& spec, const BSONObj& o, DiskLoc loc, bool mayInterrupt) {
BSONObjSet keys;
spec.getKeys(o, keys);
int k = 0;
for ( BSONObjSet::iterator i=keys.begin(); i != keys.end(); i++ ) {
if( ++k == 2 ) {
multi = true;
}
- sorter->add(*i, loc);
+ sorter->add(*i, loc, mayInterrupt);
nkeys++;
}
n++;
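
The addKeys() change above illustrates the plumbing style used throughout this commit: each layer gains a trailing bool mayInterrupt parameter and forwards it unchanged to the next layer (here into sorter->add()), so the decision made at the entry point reaches the code that actually performs the interrupt check. A toy sketch of that forwarding follows; ExternalSorter, addKeys and the key type are hypothetical stand-ins, not the mongod classes.

    #include <cstdio>
    #include <string>
    #include <vector>

    struct ExternalSorter {
        std::vector<std::string> keys;
        void add(const std::string& key, bool mayInterrupt) {
            // A real sorter would consult the kill flag here when mayInterrupt is true.
            (void)mayInterrupt;
            keys.push_back(key);
        }
    };

    // Middle layer: extracts keys for one document and forwards the flag unchanged.
    void addKeys(ExternalSorter& sorter, const std::vector<std::string>& docKeys,
                 bool mayInterrupt) {
        for (const std::string& k : docKeys)
            sorter.add(k, mayInterrupt);
    }

    int main() {
        ExternalSorter sorter;
        // Mirrors the compact.cpp call site above, which passes false:
        // a compact-driven rebuild is not meant to be interruptible.
        addKeys(sorter, {"a", "b", "c"}, /*mayInterrupt=*/false);
        std::printf("%zu keys queued\n", sorter.keys.size());
        return 0;
    }
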
6 changes: 4 additions & 2 deletions src/mongo/db/dbcommands.cpp
@@ -722,7 +722,9 @@
for ( list<BSONObj>::iterator i=all.begin(); i!=all.end(); i++ ) {
BSONObj o = *i;
LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
- theDataFileMgr.insertWithObjMod( Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" ).c_str() , o , true );
+ string systemIndexesNs =
+ Namespace( toDeleteNs.c_str() ).getSisterNS( "system.indexes" );
+ theDataFileMgr.insertWithObjMod( systemIndexesNs.c_str(), o, false, true );
}

result.append( "nIndexes" , (int)all.size() );
@@ -1469,7 +1471,7 @@
{
Lock::DBWrite lk(ns);
Client::Context ctx( ns );
- theDataFileMgr.insertWithObjMod( ns.c_str(), obj, true );
+ theDataFileMgr.insertWithObjMod( ns.c_str(), obj, false, true );
}
return true;
}
20 changes: 11 additions & 9 deletions src/mongo/db/extsort.cpp
@@ -40,13 +40,13 @@
HLMutex BSONObjExternalSorter::_extSortMutex("s");
IndexInterface *BSONObjExternalSorter::extSortIdxInterface;
Ordering BSONObjExternalSorter::extSortOrder( Ordering::make(BSONObj()) );
+ bool BSONObjExternalSorter::extSortMayInterrupt( false );
unsigned long long BSONObjExternalSorter::_compares = 0;
unsigned long long BSONObjExternalSorter::_uniqueNumber = 0;
static SimpleMutex _uniqueNumberMutex( "uniqueNumberMutex" );

/*static*/
int BSONObjExternalSorter::_compare(IndexInterface& i, const Data& l, const Data& r, const Ordering& order) {
- RARELY killCurrentOp.checkForInterrupt();
_compares++;
int x = i.keyCompare(l.first, r.first, order);
if ( x )
@@ -59,6 +59,7 @@
DEV RARELY {
_extSortMutex.dassertLocked(); // must be as we use a global var
}
+ RARELY killCurrentOp.checkForInterrupt(!extSortMayInterrupt);
Data * l = (Data*)lv;
Data * r = (Data*)rv;
return _compare(*extSortIdxInterface, *l, *r, extSortOrder);
@@ -97,28 +98,29 @@
wassert( removed == 1 + _files.size() );
}

- void BSONObjExternalSorter::_sortInMem() {
+ void BSONObjExternalSorter::_sortInMem( bool mayInterrupt ) {
// extSortComp needs to use globals
// qsort_r only seems available on bsd, which is what I really want to use
HLMutex::scoped_lock lk(_extSortMutex);
extSortIdxInterface = &_idxi;
extSortOrder = Ordering::make(_order);
+ extSortMayInterrupt = mayInterrupt;
_cur->sort( BSONObjExternalSorter::extSortComp );
}

- void BSONObjExternalSorter::sort() {
+ void BSONObjExternalSorter::sort( bool mayInterrupt ) {
uassert( 10048 , "already sorted" , ! _sorted );

_sorted = true;

if ( _cur && _files.size() == 0 ) {
- _sortInMem();
+ _sortInMem( mayInterrupt );
LOG(1) << "\t\t not using file. size:" << _curSizeSoFar << " _compares:" << _compares << endl;
return;
}

if ( _cur ) {
- finishMap();
+ finishMap( mayInterrupt );
}

if ( _cur ) {
@@ -131,7 +133,7 @@

}

- void BSONObjExternalSorter::add( const BSONObj& o , const DiskLoc & loc ) {
+ void BSONObjExternalSorter::add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt ) {
uassert( 10049 , "sorted already" , ! _sorted );

if ( ! _cur ) {
@@ -146,20 +148,20 @@
_curSizeSoFar += size + sizeof( DiskLoc ) + sizeof( BSONObj );

if ( _cur->hasSpace() == false || _curSizeSoFar > _maxFilesize ) {
- finishMap();
+ finishMap( mayInterrupt );
LOG(1) << "finishing map" << endl;
}

}

- void BSONObjExternalSorter::finishMap() {
+ void BSONObjExternalSorter::finishMap( bool mayInterrupt ) {
uassert( 10050 , "bad" , _cur );

_curSizeSoFar = 0;
if ( _cur->size() == 0 )
return;

- _sortInMem();
+ _sortInMem( mayInterrupt );

stringstream ss;
ss << _root.string() << "/file." << _files.size();
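
The comment kept above ("extSortComp needs to use globals ... qsort_r only seems available on bsd") explains why the new flag is a static member rather than a parameter: the in-memory sort takes a plain int (*)(const void*, const void*) comparator, which cannot carry per-sort state, so extSortMayInterrupt is stashed in a static that is set under _extSortMutex before sorting, next to the existing extSortIdxInterface and extSortOrder globals. A standalone sketch of that idiom with std::qsort follows; unlike the real code, which throws from inside the comparator via killCurrentOp.checkForInterrupt, this sketch only records a pending kill and reports it after the sort so it stays well defined with the C qsort interface. All names below are hypothetical.

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>
    #include <mutex>

    static std::mutex g_sortMutex;          // serialises use of the globals below
    static bool g_mayInterrupt = false;     // per-sort flag, set before sorting
    static bool g_killRequested = false;    // would be flipped by a kill command
    static bool g_sawInterrupt = false;     // comparator records a pending kill here

    static int compareInts(const void* lv, const void* rv) {
        if (g_mayInterrupt && g_killRequested)
            g_sawInterrupt = true;          // noted here, acted on by the caller
        int l = *static_cast<const int*>(lv);
        int r = *static_cast<const int*>(rv);
        return (l > r) - (l < r);
    }

    // Returns false if the sort noticed a pending kill and should be abandoned.
    static bool sortInts(int* data, std::size_t n, bool mayInterrupt) {
        std::lock_guard<std::mutex> lk(g_sortMutex);   // comparator reads the globals
        g_mayInterrupt = mayInterrupt;
        g_sawInterrupt = false;
        std::qsort(data, n, sizeof(int), compareInts);
        return !g_sawInterrupt;
    }

    int main() {
        int values[] = {3, 1, 2};
        if (sortInts(values, 3, /*mayInterrupt=*/true))
            std::printf("%d %d %d\n", values[0], values[1], values[2]);
        return 0;
    }
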
12 changes: 5 additions & 7 deletions src/mongo/db/extsort.h
@@ -58,6 +58,7 @@

static IndexInterface *extSortIdxInterface;
static Ordering extSortOrder;
+ static bool extSortMayInterrupt;
static int extSortComp( const void *lv, const void *rv );

class FileIterator : boost::noncopyable {
@@ -96,13 +97,10 @@

};

- void add( const BSONObj& o , const DiskLoc & loc );
- void add( const BSONObj& o , int a , int b ) {
- add( o , DiskLoc( a , b ) );
- }
+ void add( const BSONObj& o, const DiskLoc& loc, bool mayInterrupt );

/* call after adding values, and before fetching the iterator */
- void sort();
+ void sort( bool mayInterrupt );

auto_ptr<Iterator> iterator() {
uassert( 10052 , "not sorted" , _sorted );
@@ -122,10 +120,10 @@

private:

- void _sortInMem();
+ void _sortInMem( bool mayInterrupt );

void sort( const std::string& file );
- void finishMap();
+ void finishMap( bool mayInterrupt );

BSONObj _order;
long _maxFilesize;
(The remaining 19 of the 31 changed files are not shown here.)
