
Merge branch 'master' of github.com:mongodb/mongo

2 parents e2972cb + 20deda9 commit c64d1eddda87d4c6fe164191753740efe8577db7 @dwight committed May 5, 2010
Showing with 73 additions and 52 deletions.
  1. +33 −23 s/chunk.cpp
  2. +16 −5 s/chunk.h
  3. +13 −13 s/commands_public.cpp
  4. +11 −11 s/strategy_shard.cpp
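
Beyond the merge itself, the diff below switches query routing from raw Chunk* vectors to shared_ptr<ChunkRange>, gives ChunkRange the same getFilter/countObjects interface as Chunk, and factors Chunk::countObjects into a template helper both types can share. A minimal sketch of that helper pattern, using hypothetical simplified stand-in types rather than the real classes in s/chunk.h:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for a chunk-like type; the real Chunk and
    // ChunkRange expose getShard(), getFilter(), and getManager() instead.
    struct FakeChunk {
        std::string ns;
        // Pretend "count on the shard" call; a fixed placeholder value here.
        long countOnShard(const std::string& /*filter*/) const { return 42; }
    };

    // One helper serves any chunk-like type with the same accessors,
    // mirroring the countObjectsHelper introduced in s/chunk.cpp below.
    template <typename ChunkType>
    long countObjectsHelper(const ChunkType* chunk, const std::string& filter) {
        return chunk->countOnShard(filter);
    }

    int main() {
        FakeChunk c{"test.foo"};
        std::cout << countObjectsHelper(&c, "{}") << std::endl;  // prints 42
        return 0;
    }
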
s/chunk.cpp
@@ -342,20 +342,24 @@ namespace mongo {
return (long)result["size"].number();
}
-
- long Chunk::countObjects( const BSONObj& filter ) const{
- ShardConnection conn( getShard() );
+
+ template <typename ChunkType>
+ inline long countObjectsHelper(const ChunkType* chunk, const BSONObj& filter){
+ ShardConnection conn( chunk->getShard() );
- BSONObj f = getFilter();
+ BSONObj f = chunk->getFilter();
if ( ! filter.isEmpty() )
f = ClusteredCursor::concatQuery( f , filter );
BSONObj result;
- unsigned long long n = conn->count( _ns , f );
+ unsigned long long n = conn->count( chunk->getManager()->getns() , f );
conn.done();
return (long)n;
}
+
+ long Chunk::countObjects( const BSONObj& filter ) const { return countObjectsHelper(this, filter); }
+ long ChunkRange::countObjects( const BSONObj& filter ) const { return countObjectsHelper(this, filter); }
void Chunk::appendShortVersion( const char * name , BSONObjBuilder& b ){
BSONObjBuilder bb( b.subobjStart( name ) );
@@ -372,7 +376,10 @@ namespace mongo {
}
void Chunk::getFilter( BSONObjBuilder& b ) const{
- _manager->_key.getFilter( b , _min , _max );
+ _manager->getShardKey().getFilter( b , _min , _max );
+ }
+ void ChunkRange::getFilter( BSONObjBuilder& b ) const{
+ _manager->getShardKey().getFilter( b , _min , _max );
}
void Chunk::serialize(BSONObjBuilder& to){
@@ -491,7 +498,8 @@ namespace mongo {
_chunks.push_back( c );
_chunkMap[c->getMax()] = c;
-
+ _chunkRanges.reloadAll(_chunkMap);
+
log() << "no chunks for:" << ns << " so creating first: " << c->toString() << endl;
}
@@ -592,7 +600,7 @@ namespace mongo {
return 0;
}
- int ChunkManager::_getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query ){
+ int ChunkManager::_getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query ){
rwlock lk( _lock , false );
FieldRangeSet ranges(_ns.c_str(), query, false);
@@ -606,12 +614,12 @@ namespace mongo {
return 0;
} else if (range.equality()) {
- chunks.push_back(&findChunk(BSON(field.fieldName() << range.min())));
+ chunks.push_back( _chunkRanges.upper_bound(BSON(field.fieldName() << range.min()))->second );
return 1;
} else if (!range.nontrivial()) {
return -1; // all chunks
} else {
- set<Chunk*, ChunkCmp> chunkSet;
+ set<shared_ptr<ChunkRange>, ChunkCmp> chunkSet;
for (vector<FieldInterval>::const_iterator it=range.intervals().begin(), end=range.intervals().end();
it != end;
@@ -621,16 +629,17 @@ namespace mongo {
assert(fi.valid());
BSONObj minObj = BSON(field.fieldName() << fi.lower_.bound_);
BSONObj maxObj = BSON(field.fieldName() << fi.upper_.bound_);
- ChunkMap::iterator min = (fi.lower_.inclusive_ ? _chunkMap.upper_bound(minObj) : _chunkMap.lower_bound(minObj));
- ChunkMap::iterator max = (fi.upper_.inclusive_ ? _chunkMap.upper_bound(maxObj) : _chunkMap.lower_bound(maxObj));
+ ChunkRangeMap::const_iterator min, max;
+ min = (fi.lower_.inclusive_ ? _chunkRanges.upper_bound(minObj) : _chunkRanges.lower_bound(minObj));
+ max = (fi.upper_.inclusive_ ? _chunkRanges.upper_bound(maxObj) : _chunkRanges.lower_bound(maxObj));
- assert(min != _chunkMap.end());
+ assert(min != _chunkRanges.ranges().end());
// make max non-inclusive like end iterators
- if(max != _chunkMap.end())
+ if(max != _chunkRanges.ranges().end())
++max;
- for (ChunkMap::iterator it=min; it != max; ++it){
+ for (ChunkRangeMap::const_iterator it=min; it != max; ++it){
chunkSet.insert(it->second);
}
}
@@ -640,27 +649,28 @@ namespace mongo {
}
}
- int ChunkManager::getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query ){
+ int ChunkManager::getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query ){
int ret = _getChunksForQuery(chunks, query);
if (ret == -1){
- chunks = _chunks;
- return chunks.size();
+ for (ChunkRangeMap::const_iterator it=_chunkRanges.ranges().begin(), end=_chunkRanges.ranges().end(); it != end; ++it){
+ chunks.push_back(it->second);
+ }
}
-
- return ret;
+ return chunks.size();
+ //return ret;
}
int ChunkManager::getShardsForQuery( set<Shard>& shards , const BSONObj& query ){
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
int ret = _getChunksForQuery(chunks, query);
if (ret == -1){
getAllShards(shards);
}
else {
- for ( vector<Chunk*>::iterator it=chunks.begin(), end=chunks.end(); it != end; ++it ){
- Chunk* c = *it;
+ for ( vector<shared_ptr<ChunkRange> >::iterator it=chunks.begin(), end=chunks.end(); it != end; ++it ){
+ shared_ptr<ChunkRange> c = *it;
shards.insert(c->getShard());
}
}
s/chunk.h
@@ -132,7 +132,7 @@ namespace mongo {
static string genID( const string& ns , const BSONObj& min );
- const ChunkManager* getManager() { return _manager; }
+ const ChunkManager* getManager() const { return _manager; }
private:
@@ -169,7 +169,11 @@ namespace mongo {
const BSONObj& getMin() const { return _min; }
const BSONObj& getMax() const { return _max; }
+ // clones of Chunk methods
bool contains(const BSONObj& obj) const;
+ void getFilter( BSONObjBuilder& b ) const;
+ BSONObj getFilter() const{ BSONObjBuilder b; getFilter( b ); return b.obj(); }
+ long countObjects( const BSONObj& filter = BSONObj() ) const;
ChunkRange(ChunkMap::const_iterator begin, const ChunkMap::const_iterator end)
: _manager(begin->second->getManager())
@@ -244,7 +248,7 @@ namespace mongo {
ChunkManager( DBConfig * config , string ns , ShardKeyPattern pattern , bool unique );
virtual ~ChunkManager();
- string getns(){
+ string getns() const {
return _ns;
}
@@ -267,7 +271,7 @@ namespace mongo {
/**
* @return number of Chunk added to the vector
*/
- int getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query );
+ int getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query );
/**
* @return number of Shards added to the set
@@ -323,7 +327,7 @@ namespace mongo {
/**
* @return number of Chunk matching the query or -1 for all chunks.
*/
- int _getChunksForQuery( vector<Chunk*>& chunks , const BSONObj& query );
+ int _getChunksForQuery( vector<shared_ptr<ChunkRange> >& chunks , const BSONObj& query );
};
// like BSONObjCmp. for use as an STL comparison functor
@@ -334,10 +338,17 @@ namespace mongo {
bool operator()( const Chunk &l, const Chunk &r ) const {
return _cmp(l.getMin(), r.getMin());
}
-
bool operator()( const Chunk *l, const Chunk *r ) const {
return operator()(*l, *r);
}
+
+ // Also support ChunkRanges
+ bool operator()( const ChunkRange &l, const ChunkRange &r ) const {
+ return _cmp(l.getMin(), r.getMin());
+ }
+ bool operator()( const shared_ptr<ChunkRange> l, const shared_ptr<ChunkRange> r ) const {
+ return operator()(*l, *r);
+ }
private:
BSONObjCmp _cmp;
};
s/commands_public.cpp
@@ -187,12 +187,12 @@ namespace mongo {
ChunkManager * cm = conf->getChunkManager( fullns );
massert( 10419 , "how could chunk manager be null!" , cm );
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
cm->getChunksForQuery( chunks , filter );
unsigned long long total = 0;
- for ( vector<Chunk*>::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
+ shared_ptr<ChunkRange> c = *i;
total += c->countObjects( filter );
}
@@ -279,7 +279,7 @@ namespace mongo {
ChunkManager * cm = conf->getChunkManager( fullns );
massert( 13002 , "how could chunk manager be null!" , cm );
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
cm->getChunksForQuery( chunks , filter );
BSONObj sort = cmdObj.getObjectField("sort");
@@ -311,8 +311,8 @@ namespace mongo {
std::sort(chunks.begin(), chunks.end(), ChunkCmp(sort));
}
- for ( vector<Chunk*>::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
+ shared_ptr<ChunkRange> c = *i;
ShardConnection conn( c->getShard() );
BSONObj res;
@@ -329,7 +329,7 @@ namespace mongo {
}
private:
- BSONObj fixCmdObj(const BSONObj& cmdObj, const Chunk* chunk){
+ BSONObj fixCmdObj(const BSONObj& cmdObj, const shared_ptr<ChunkRange> chunk){
assert(chunk);
BSONObjBuilder b;
@@ -393,14 +393,14 @@ namespace mongo {
ChunkManager * cm = conf->getChunkManager( fullns );
massert( 10420 , "how could chunk manager be null!" , cm );
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
cm->getChunksForQuery( chunks , BSONObj() );
set<BSONObj,BSONObjCmp> all;
int size = 32;
- for ( vector<Chunk*>::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
+ shared_ptr<ChunkRange> c = *i;
ShardConnection conn( c->getShard() );
BSONObj res;
@@ -531,7 +531,7 @@ namespace mongo {
q = cmdObj["query"].embeddedObjectUserCheck();
}
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
cm->getChunksForQuery( chunks , q );
const string shardedOutputCollection = getTmpName( collection );
@@ -544,8 +544,8 @@ namespace mongo {
list< shared_ptr<Future::CommandResult> > futures;
- for ( vector<Chunk*>::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = chunks.begin() ; i != chunks.end() ; i++ ){
+ shared_ptr<ChunkRange> c = *i;
futures.push_back( Future::spawnCommand( c->getShard().getConnString() , dbName , shardedCommand ) );
}
s/strategy_shard.cpp
@@ -44,12 +44,12 @@ namespace mongo {
Query query( q.query );
- vector<Chunk*> shards;
+ vector<shared_ptr<ChunkRange> > shards;
info->getChunksForQuery( shards , query.getFilter() );
set<ServerAndQuery> servers;
- for ( vector<Chunk*>::iterator i = shards.begin(); i != shards.end(); i++ ){
- Chunk* c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = shards.begin(); i != shards.end(); i++ ){
+ shared_ptr<ChunkRange> c = *i;
//servers.insert( ServerAndQuery( c->getShard() , BSONObj() ) );
servers.insert( ServerAndQuery( c->getShard().getConnString() , c->getFilter() ) );
}
@@ -77,8 +77,8 @@ namespace mongo {
if ( shardKeyOrder ){
// 2. sort on shard key, can do in serial intelligently
set<ServerAndQuery> buckets;
- for ( vector<Chunk*>::iterator i = shards.begin(); i != shards.end(); i++ ){
- Chunk * s = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i = shards.begin(); i != shards.end(); i++ ){
+ shared_ptr<ChunkRange> s = *i;
buckets.insert( ServerAndQuery( s->getShard().getConnString() , s->getFilter() , s->getMin() ) );
}
cursor = new SerialServerClusteredCursor( buckets , q , shardKeyOrder );
@@ -211,11 +211,11 @@ namespace mongo {
}
if ( multi ){
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
manager->getChunksForQuery( chunks , chunkFinder );
set<Shard> seen;
- for ( vector<Chunk*>::iterator i=chunks.begin(); i!=chunks.end(); i++){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
+ shared_ptr<ChunkRange> c = *i;
if ( seen.count( c->getShard() ) )
continue;
doWrite( dbUpdate , r , c->getShard() );
@@ -238,7 +238,7 @@ namespace mongo {
uassert( 10203 , "bad delete message" , d.moreJSObjs() );
BSONObj pattern = d.nextJsObj();
- vector<Chunk*> chunks;
+ vector<shared_ptr<ChunkRange> > chunks;
manager->getChunksForQuery( chunks , pattern );
cout << "delete : " << pattern << " \t " << chunks.size() << " justOne: " << justOne << endl;
if ( chunks.size() == 1 ){
@@ -250,8 +250,8 @@ namespace mongo {
throw UserException( 8015 , "can only delete with a non-shard key pattern if can delete as many as we find" );
set<Shard> seen;
- for ( vector<Chunk*>::iterator i=chunks.begin(); i!=chunks.end(); i++){
- Chunk * c = *i;
+ for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
+ shared_ptr<ChunkRange> c = *i;
if ( seen.count( c->getShard() ) )
continue;
seen.insert( c->getShard() );
