Skip to content

Commit

Permalink
SERVER-6450 - Use ps::Rolling to provide a working-set size estimate
Browse files Browse the repository at this point in the history
db.adminCommand( { serverStatus : 1 , workingSet : 1 } ).workingSet
  • Loading branch information
erh committed Oct 14, 2012
1 parent b184c11 commit bdd3900
Show file tree
Hide file tree
Showing 3 changed files with 76 additions and 4 deletions.
9 changes: 9 additions & 0 deletions src/mongo/db/dbcommands.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -493,6 +493,7 @@ namespace mongo {
}

bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {

long long start = Listener::getElapsedTimeMillis();
BSONObjBuilder timeBuilder(128);

Expand Down Expand Up @@ -680,6 +681,12 @@ namespace mongo {

timeBuilder.appendNumber( "after dur" , Listener::getElapsedTimeMillis() - start );

if ( cmdObj["workingSet"].trueValue() ) {
BSONObjBuilder bb( result.subobjStart( "workingSet" ) );
Record::appendWorkingSetInfo( bb );
bb.done();
}

{
RamLog* rl = RamLog::get( "warnings" );
massert(15880, "no ram log for warnings?" , rl);
Expand All @@ -695,6 +702,8 @@ namespace mongo {
}
}

// ----- cleaning up stuff

if ( ! authed )
result.append( "note" , "run against admin for more info" );

Expand Down
2 changes: 2 additions & 0 deletions src/mongo/db/pdfile.h
Original file line number Diff line number Diff line change
Expand Up @@ -284,6 +284,8 @@ namespace mongo {
* and how many times we throw a PageFaultException
*/
static void appendStats( BSONObjBuilder& b );

static void appendWorkingSetInfo( BSONObjBuilder& b );
private:

int _netLength() const { return _lengthWithHeaders - HeaderSize; }
Expand Down
69 changes: 65 additions & 4 deletions src/mongo/db/record.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
#include "mongo/db/pagefault.h"
#include "mongo/db/pdfile.h"
#include "mongo/db/record.h"
#include "mongo/platform/bits.h"
#include "mongo/platform/unordered_set.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/processinfo.h"
#include "mongo/util/stack_introspect.h"
Expand Down Expand Up @@ -78,7 +80,7 @@ namespace mongo {
if ( ! e )
return Unk;

return ( e->value & ( ((unsigned long long)1) << offset ) ) ? In : Out;
return ( e->value & ( 1ULL << offset ) ) ? In : Out;
}

/**
Expand All @@ -91,10 +93,26 @@ namespace mongo {
if ( ! e )
return false;

e->value |= ((unsigned long long)1) << offset;
e->value |= 1ULL << offset;
return true;
}


/**
 * Decodes every page recorded in this slice into the caller's set.
 * @param pages OUT — each page id tracked in _data is inserted here
 */
void addPages( unordered_set<size_t>* pages ) {
    for ( int idx = 0; idx < SliceSize; idx++ ) {
        unsigned long long bits = _data[idx].value;

        // peel off the set bits one at a time, lowest first
        while ( bits ) {
            int bitPos = firstBitSet( bits ) - 1;

            // page id = region base (64 pages per entry) plus the bit offset
            pages->insert( ( _data[idx].region << 6 ) | bitPos );

            bits &= ~( 1ULL << bitPos );
        }
    }
}

private:

Entry* _get( int start , size_t region , bool add ) {
Expand Down Expand Up @@ -147,7 +165,7 @@ namespace mongo {
SimpleMutex::scoped_lock lk( _lock );

static int rarely_count = 0;
if ( rarely_count++ % 2048 == 0 ) {
if ( rarely_count++ % ( 2048 / BigHashSize ) == 0 ) {
long long now = Listener::getElapsedTimeMillis();
RARELY if ( now == 0 ) {
tlog() << "warning Listener::getElapsedTimeMillis returning 0ms" << endl;
Expand Down Expand Up @@ -179,7 +197,24 @@ namespace mongo {
}
return false;
}


/**
 * Copies this Rolling's slices out under the lock, then decodes them into
 * the caller's page set outside the lock.
 * @param pages OUT — every tracked page id is added to this set
 * @param mySlices caller-provided scratch space, at least NumSlices entries,
 *                 used as the destination of the copy
 */
void addPages( unordered_set<size_t>* pages, Slice* mySlices ) {
    {
        // Snapshot the slices under the lock and do the (potentially slow)
        // set insertion afterwards: per the author, this roughly halves the
        // time spent holding the lock versus inserting in place (measured
        // with a small data set). With a large data set, inserting into the
        // set presumably gets more expensive, so copying first keeps the
        // time under the lock essentially constant.
        SimpleMutex::scoped_lock lk( _lock );
        memcpy( mySlices, _slices, NumSlices * sizeof(Slice) );
    }
    // Decode the snapshot without holding the lock.
    for ( int i = 0; i < NumSlices; i++ ) {
        mySlices[i].addPages( pages );
    }
}
private:

void _rotate() {
Expand Down Expand Up @@ -290,6 +325,23 @@ namespace mongo {
Data* getData();

};

/**
 * Gathers the distinct in-memory pages seen across all hash buckets and
 * appends a working-set estimate ("note", "pagesInMemory",
 * "computationTimeMicros") to the given builder.
 */
void appendWorkingSetInfo( BSONObjBuilder& b ) {
    // one scratch buffer, reused for each bucket's copy-under-lock
    boost::scoped_array<Slice> scratch( new Slice[NumSlices] );

    unordered_set<size_t> pages;
    Timer timer;

    for ( int bucket = 0; bucket < BigHashSize; bucket++ ) {
        rolling[bucket].addPages( &pages, scratch.get() );
    }

    b.append( "note", "thisIsAnEstimate" );
    b.appendNumber( "pagesInMemory", pages.size() );
    b.appendNumber( "computationTimeMicros", static_cast<long long>(timer.micros()) );
}

}

Expand Down Expand Up @@ -337,6 +389,15 @@ namespace mongo {

const bool blockSupported = ProcessInfo::blockCheckSupported();

/**
 * Appends working-set info to the builder, or an explanatory note when the
 * platform cannot report page residency.
 */
void Record::appendWorkingSetInfo( BSONObjBuilder& b ) {
    if ( blockSupported ) {
        ps::appendWorkingSetInfo( b );
    }
    else {
        b.append( "info", "not supported" );
    }
}

// Thin wrapper: delegates the capability check to the platform layer.
bool Record::blockCheckSupported() {
    const bool supported = ProcessInfo::blockCheckSupported();
    return supported;
}
Expand Down

0 comments on commit bdd3900

Please sign in to comment.