Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

Fixed bug SERVER-118: multikey index traversal could return duplicate results from queries

  • Loading branch information...
commit 85e8f71c1cf11524af71c1ba1c4cb6db7ce439eb 1 parent 6b38f5e
@dwight dwight authored
View
22 db/btree.h
@@ -214,11 +214,14 @@ namespace mongo {
class BtreeCursor : public Cursor {
friend class BtreeBucket;
+ NamespaceDetails *d;
+ int idxNo;
BSONObj startKey;
BSONObj endKey;
bool endKeyInclusive_;
+ bool multikey; // note this must be updated every getmore batch in case someone added a multikey...
public:
- BtreeCursor( const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
+ BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails&, const BSONObj &startKey, const BSONObj &endKey, bool endKeyInclusive, int direction );
virtual bool ok() {
return !bucket.isNull();
}
@@ -230,6 +233,23 @@ namespace mongo {
virtual void noteLocation(); // updates keyAtKeyOfs...
virtual void checkLocation();
+ /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches().
+ if a multikey index traversal:
+ if loc has already been sent, returns true.
+ otherwise, marks loc as sent.
+ @param deep - match was against an array, so we know it is multikey. this is legacy and kept
+ for backwards datafile compatibility. 'deep' can be eliminated next time we
+ force a data file conversion. 7Jul09
+ */
+ set<DiskLoc> dups; // DiskLocs already returned during this traversal (only populated when multikey)
+ virtual bool getsetdup(bool deep, DiskLoc loc) {
+ if( deep || multikey ) {
+ // set::insert returns pair<iterator,bool>; second == false means loc was
+ // already present, i.e. this record was seen before -> report it as a dup
+ pair<set<DiskLoc>::iterator, bool> p = dups.insert(loc);
+ return !p.second;
+ }
+ return false;
+ }
+
_KeyNode& _currKeyNode() {
assert( !bucket.isNull() );
_KeyNode& kn = bucket.btree()->k(keyOfs);
View
22 db/btreecursor.cpp
@@ -28,13 +28,19 @@ namespace mongo {
DiskLoc maxDiskLoc(0x7fffffff, 0x7fffffff);
DiskLoc minDiskLoc(0, 1);
- BtreeCursor::BtreeCursor( const IndexDetails &_id, const BSONObj &_startKey, const BSONObj &_endKey, bool endKeyInclusive, int _direction ) :
- startKey( _startKey ),
- endKey( _endKey ),
- endKeyInclusive_( endKeyInclusive ),
- indexDetails( _id ),
- order( _id.keyPattern() ),
- direction( _direction ) {
+ BtreeCursor::BtreeCursor( NamespaceDetails *_d, int _idxNo, const IndexDetails &_id,
+ const BSONObj &_startKey, const BSONObj &_endKey, bool endKeyInclusive, int _direction ) :
+ d(_d), idxNo(_idxNo),
+ startKey( _startKey ),
+ endKey( _endKey ),
+ endKeyInclusive_( endKeyInclusive ),
+ indexDetails( _id ),
+ order( _id.keyPattern() ),
+ direction( _direction )
+ {
+ dassert( d->idxNo((IndexDetails&) indexDetails) == idxNo );
+ multikey = d->isMultikey(idxNo);
+
bool found;
if ( otherTraceLevel >= 12 ) {
if ( otherTraceLevel >= 200 ) {
@@ -122,6 +128,8 @@ namespace mongo {
if ( eof() )
return;
+ multikey = d->isMultikey(idxNo);
+
if ( keyOfs >= 0 ) {
BtreeBucket *b = bucket.btree();
View
14 db/cursor.h
@@ -90,15 +90,24 @@ namespace mongo {
return "abstract?";
}
- /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches() */
+ /* used for multikey index traversal to avoid sending back dups. see JSMatcher::matches().
+ if a multikey index traversal:
+ if loc has already been sent, returns true.
+ otherwise, marks loc as sent.
+ @param deep - match was against an array, so we know it is multikey. this is legacy and kept
+ for backwards datafile compatibility. 'deep' can be eliminated next time we
+ force a data file conversion. 7Jul09
+ */
+ virtual bool getsetdup(bool deep, DiskLoc loc) = 0;
+/*
set<DiskLoc> dups;
bool getsetdup(DiskLoc loc) {
- /* to save mem only call this when there is risk of dups (e.g. when 'deep'/multikey) */
if ( dups.count(loc) > 0 )
return true;
dups.insert(loc);
return false;
}
+*/
virtual BSONObj prettyStartKey() const { return BSONObj(); }
virtual BSONObj prettyEndKey() const { return BSONObj(); }
@@ -178,6 +187,7 @@ namespace mongo {
virtual bool tailable() {
return tailable_;
}
+ // presumably a plain (non-multikey-index) scan that visits each record at most
+ // once, so there is never a dup to suppress and 'deep' is ignored -- TODO confirm
+ virtual bool getsetdup(bool deep, DiskLoc loc) { return false; }
};
/* used for order { $natural: -1 } */
View
38 db/dbcommands.cpp
@@ -486,6 +486,23 @@ namespace mongo {
}
} cmdoplogging;
/* Remove bit x from bit set b: bits below x are kept, bits above x shift down
   one position to fill the gap.  e.g. removeBit(0b10101, 2) == 0b1001.
   Used to keep multiKeyIndexBits aligned when an index is deleted and the
   remaining index slots slide down by one.
   @param b the bit set
   @param x zero-based position of the bit to remove (0 <= x <= 31)
   @return b with bit x removed and all higher bits shifted down one
*/
unsigned removeBit(unsigned b, int x) {
    // 1u: unsigned shift -- (1 << 31) overflows a signed int (undefined behavior)
    unsigned low = b & ((1u << x) - 1);
    // bits above x, shifted down one to close the gap; guard x == 31 because
    // b >> 32 is undefined for a 32-bit operand
    unsigned high = x < 31 ? (b >> (x + 1)) << x : 0;
    return low | high;
}
+
+ struct DBCommandsUnitTest {
+ DBCommandsUnitTest() {
+ assert( removeBit(1, 0) == 0 );
+ assert( removeBit(2, 0) == 1 );
+ assert( removeBit(2, 1) == 0 );
+ assert( removeBit(255, 1) == 127 );
+ assert( removeBit(21, 2) == 9 );
+ }
+ } dbc_unittest;
+
bool deleteIndexes( NamespaceDetails *d, const char *ns, const char *name, string &errmsg, BSONObjBuilder &anObjBuilder, bool mayDeleteIdIndex ) {
d->aboutToDeleteAnIndex();
@@ -513,6 +530,8 @@ namespace mongo {
d->indexes[ 0 ] = *idIndex;
d->nIndexes = 1;
}
+ /* assuming here that id index is not multikey: */
+ d->multiKeyIndexBits = 0;
}
else {
// delete just one index
@@ -531,7 +550,7 @@ namespace mongo {
return false;
}
d->indexes[x].kill();
-
+ d->multiKeyIndexBits = removeBit(d->multiKeyIndexBits, x);
d->nIndexes--;
for ( int i = x; i < d->nIndexes; i++ )
d->indexes[i] = d->indexes[i+1];
@@ -823,7 +842,7 @@ namespace mongo {
}
} cmdFileMD5;
- const IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
+ IndexDetails *cmdIndexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
if ( ns[ 0 ] == '\0' || min.isEmpty() || max.isEmpty() ) {
errmsg = "invalid command syntax (note: min and max are required)";
return 0;
@@ -845,15 +864,17 @@ namespace mongo {
BSONObj max = jsobj.getObjectField( "max" );
BSONObj keyPattern = jsobj.getObjectField( "keyPattern" );
- const IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
+ IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
if ( id == 0 )
return false;
Timer t;
int num = 0;
- for( BtreeCursor c( *id, min, max, false, 1 ); c.ok(); c.advance(), ++num );
+ NamespaceDetails *d = nsdetails(ns);
+ int idxNo = d->idxNo(*id);
+ for( BtreeCursor c( d, idxNo, *id, min, max, false, 1 ); c.ok(); c.advance(), ++num );
num /= 2;
- BtreeCursor c( *id, min, max, false, 1 );
+ BtreeCursor c( d, idxNo, *id, min, max, false, 1 );
for( ; num; c.advance(), --num );
int ms = t.millis();
if ( ms > 100 ) {
@@ -892,10 +913,11 @@ namespace mongo {
errmsg = "only one of min or max specified";
return false;
} else {
- const IndexDetails *id = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
- if ( id == 0 )
+ IndexDetails *idx = cmdIndexDetailsForRange( ns, errmsg, min, max, keyPattern );
+ if ( idx == 0 )
return false;
- c.reset( new BtreeCursor( *id, min, max, false, 1 ) );
+ NamespaceDetails *d = nsdetails(ns);
+ c.reset( new BtreeCursor( d, d->idxNo(*idx), *idx, min, max, false, 1 ) );
}
Timer t;
View
35 db/namespace.h
@@ -238,6 +238,8 @@ namespace mongo {
// For capped case, signal that we are doing initial extent allocation.
if ( capped )
deletedList[ 1 ].setInvalid();
+ version = 0;
+ multiKeyIndexBits = 0;
memset(reserved, 0, sizeof(reserved));
}
DiskLoc firstExtent;
@@ -254,13 +256,44 @@ namespace mongo {
int flags;
DiskLoc capExtent;
DiskLoc capFirstNewRecord;
- char reserved[108];
+
+ /* NamespaceDetails version. So we can do backward compatibility in the future.
+ */
+ unsigned version;
+
+ unsigned multiKeyIndexBits;
+
+ char reserved[100];
enum NamespaceFlags {
Flag_HaveIdIndex = 1 << 0, // set when we have _id index (ONLY if ensureIdIndex was called -- 0 if that has never been called)
Flag_CappedDisallowDelete = 1 << 1 // set when deletes not allowed during capped table allocation.
};
+ /* hackish: linear scan for the slot number of idx within this namespace's
+    indexes[] array, matching by pointer identity -- so idx must be a reference
+    to one of this NamespaceDetails' own IndexDetails objects.
+    fires massert E12000 if idx is not found; the trailing return -1 is then
+    presumably unreachable (assuming massert throws -- TODO confirm). */
+ int idxNo(IndexDetails& idx) {
+ for( int i = 0; i < nIndexes; i++ )
+ if( &indexes[i] == &idx )
+ return i;
+ massert("E12000 idxNo fails", false);
+ return -1;
+ }
+
+ /* multikey indexes are indexes where there are more than one key in the index
+ for a single document. see multikey in wiki.
+ */
+ bool isMultikey(int i) {
+ return (multiKeyIndexBits & (1 << i)) != 0;
+ }
+ void setIndexIsMultikey(int i) {
+ dassert( i < 32 && i <MaxIndexes );
+ multiKeyIndexBits |= (1 << i);
+ }
+ void clearIndexIsMultikey(int i) {
+ dassert( i < 32 && i <MaxIndexes );
+ multiKeyIndexBits &= ~(1 << i);
+ }
+
/* you MUST call when adding an index. see pdfile.cpp */
void addingIndex(const char *thisns, IndexDetails& details);
View
28 db/pdfile.cpp
@@ -868,6 +868,8 @@ assert( !eloc.isNull() );
IndexChanges& ch = v[i];
idx.getKeysFromObject(oldObj, ch.oldkeys);
idx.getKeysFromObject(newObj, ch.newkeys);
+ if( ch.newkeys.size() > 1 )
+ d.setIndexIsMultikey(i);
setDifference(ch.oldkeys, ch.newkeys, ch.removed);
setDifference(ch.newkeys, ch.oldkeys, ch.added);
}
@@ -987,11 +989,16 @@ assert( !eloc.isNull() );
int deb=0;
/* add keys to indexes for a new record */
- inline void _indexRecord(IndexDetails& idx, BSONObj& obj, DiskLoc newRecordLoc, bool dupsAllowed) {
+ inline void _indexRecord(NamespaceDetails *d, int idxNo, BSONObj& obj, DiskLoc newRecordLoc, bool dupsAllowed) {
+ IndexDetails& idx = d->indexes[idxNo];
BSONObjSetDefaultOrder keys;
idx.getKeysFromObject(obj, keys);
BSONObj order = idx.keyPattern();
+ int n = 0;
for ( BSONObjSetDefaultOrder::iterator i=keys.begin(); i != keys.end(); i++ ) {
+ if( ++n == 2 ) {
+ d->setIndexIsMultikey(idxNo);
+ }
assert( !newRecordLoc.isNull() );
try {
idx.head.btree()->bt_insert(idx.head, newRecordLoc,
@@ -1009,7 +1016,7 @@ assert( !eloc.isNull() );
/* note there are faster ways to build an index in bulk, that can be
done eventually */
- void addExistingToIndex(const char *ns, IndexDetails& idx) {
+ void addExistingToIndex(const char *ns, NamespaceDetails *d, IndexDetails& idx, int idxNo) {
bool dupsAllowed = !idx.unique();
Timer t;
@@ -1021,7 +1028,7 @@ assert( !eloc.isNull() );
while ( c->ok() ) {
BSONObj js = c->current();
try {
- _indexRecord(idx, js, c->currLoc(),dupsAllowed);
+ _indexRecord(d, idxNo, js, c->currLoc(),dupsAllowed);
} catch( AssertionException& e ) {
l << endl;
log(2) << "addExistingToIndex exception " << e.what() << endl;
@@ -1042,7 +1049,7 @@ assert( !eloc.isNull() );
for ( int i = 0; i < d->nIndexes; i++ ) {
try {
bool unique = d->indexes[i].unique();
- _indexRecord(d->indexes[i], obj, newRecordLoc, /*dupsAllowed*/!unique);
+ _indexRecord(d, i, obj, newRecordLoc, /*dupsAllowed*/!unique);
}
catch( DBException& ) {
// try to roll back previously added index entries
@@ -1293,15 +1300,16 @@ assert( !eloc.isNull() );
NamespaceDetailsTransient::get( ns ).registerWriteOp();
if ( tableToIndex ) {
- IndexDetails& idxinfo = tableToIndex->indexes[tableToIndex->nIndexes];
- idxinfo.info = loc;
- idxinfo.head = BtreeBucket::addHead(idxinfo);
- tableToIndex->addingIndex(tabletoidxns.c_str(), idxinfo);
+ int idxNo = tableToIndex->nIndexes;
+ IndexDetails& idx = tableToIndex->indexes[idxNo];
+ idx.info = loc;
+ idx.head = BtreeBucket::addHead(idx);
+ tableToIndex->addingIndex(tabletoidxns.c_str(), idx);
try {
- addExistingToIndex(tabletoidxns.c_str(), idxinfo);
+ addExistingToIndex(tabletoidxns.c_str(), tableToIndex, idx, idxNo);
} catch( DBException& ) {
// roll back this index
- string name = idxinfo.indexName();
+ string name = idx.indexName();
BSONObjBuilder b;
string errmsg;
bool ok = deleteIndexes(tableToIndex, tabletoidxns.c_str(), name.c_str(), errmsg, b, true);
View
10 db/query.cpp
@@ -71,7 +71,7 @@ namespace mongo {
bool deep;
if ( matcher_->matches(c_->currKey(), rloc, &deep) ) {
- if ( !deep || !c_->getsetdup(rloc) )
+ if ( !c_->getsetdup(deep, rloc) )
++count_;
}
@@ -140,7 +140,7 @@ namespace mongo {
}
else {
c->advance(); // must advance before deleting as the next ptr will die
- assert( !deep || !c->getsetdup(rloc) ); // can't be a dup, we deleted it!
+ assert( !c->getsetdup(deep, rloc) ); // can't be a dup, we deleted it!
if ( !justOne )
c->noteLocation();
@@ -822,7 +822,7 @@ namespace mongo {
}
else {
//out() << "matches " << c->currLoc().toString() << ' ' << deep << '\n';
- if ( deep && c->getsetdup(c->currLoc()) ) {
+ if( c->getsetdup(deep, c->currLoc()) ) {
//out() << " but it's a dup \n";
}
else {
@@ -914,7 +914,7 @@ namespace mongo {
bool deep;
if ( !matcher_->matches(c_->currKey(), c_->currLoc(), &deep) ) {
}
- else if ( !deep || !c_->getsetdup(c_->currLoc()) ) { // i.e., check for dups on deep items only
+ else if( !c_->getsetdup(deep, c_->currLoc()) ) {
bool match = true;
if ( !fields_.empty() ) {
BSONObj js = c_->current();
@@ -1065,7 +1065,7 @@ namespace mongo {
}
else {
DiskLoc cl = c_->currLoc();
- if ( !deep || !c_->getsetdup(cl) ) { // i.e., check for dups on deep items only
+ if( !c_->getsetdup(deep, cl) ) {
BSONObj js = c_->current();
// got a match.
assert( js.objsize() >= 0 ); //defensive for segfaults
View
41 db/queryoptimizer.cpp
@@ -31,7 +31,10 @@ namespace mongo {
return 1;
}
- QueryPlan::QueryPlan( const FieldBoundSet &fbs, const BSONObj &order, const IndexDetails *index, const BSONObj &startKey, const BSONObj &endKey ) :
+ QueryPlan::QueryPlan(
+ NamespaceDetails *_d, int _idxNo,
+ const FieldBoundSet &fbs, const BSONObj &order, const IndexDetails *index, const BSONObj &startKey, const BSONObj &endKey ) :
+ d(_d), idxNo(_idxNo),
fbs_( fbs ),
order_( order ),
index_( index ),
@@ -140,7 +143,7 @@ namespace mongo {
return findTableScan( fbs_.ns(), order_, startLoc );
massert( "newCursor() with start location not implemented for indexed plans", startLoc.isNull() );
//TODO This constructor should really take a const ref to the index details.
- return auto_ptr< Cursor >( new BtreeCursor( *const_cast< IndexDetails* >( index_ ), startKey_, endKey_, endKeyInclusive_, direction_ >= 0 ? 1 : -1 ) );
+ return auto_ptr< Cursor >( new BtreeCursor( d, idxNo, *const_cast< IndexDetails* >( index_ ), startKey_, endKey_, endKeyInclusive_, direction_ >= 0 ? 1 : -1 ) );
}
auto_ptr< Cursor > QueryPlan::newReverseCursor() const {
@@ -166,8 +169,9 @@ namespace mongo {
NamespaceDetailsTransient::get( ns() ).registerIndexForPattern( fbs_.pattern( order_ ), indexKey(), nScanned );
}
- QueryPlanSet::QueryPlanSet( const char *ns, const BSONObj &query, const BSONObj &order, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max ) :
- fbs_( ns, query ),
+ QueryPlanSet::QueryPlanSet( const char *_ns, const BSONObj &query, const BSONObj &order, const BSONElement *hint, bool honorRecordedPlan, const BSONObj &min, const BSONObj &max ) :
+ ns(_ns),
+ fbs_( _ns, query ),
mayRecordPlan_( true ),
usingPrerecordedPlan_( false ),
hint_( BSONObj() ),
@@ -184,14 +188,15 @@ namespace mongo {
init();
}
- void QueryPlanSet::addHint( const IndexDetails &id ) {
+ void QueryPlanSet::addHint( IndexDetails &id ) {
if ( !min_.isEmpty() || !max_.isEmpty() ) {
string errmsg;
BSONObj keyPattern = id.keyPattern();
// This reformats min_ and max_ to be used for index lookup.
massert( errmsg, indexDetailsForRange( fbs_.ns(), errmsg, min_, max_, keyPattern ) );
}
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_, &id, min_, max_ ) ) );
+ NamespaceDetails *d = nsdetails(ns);
+ plans_.push_back( PlanPtr( new QueryPlan( d, d->idxNo(id), fbs_, order_, &id, min_, max_ ) ) );
}
void QueryPlanSet::init() {
@@ -203,7 +208,7 @@ namespace mongo {
NamespaceDetails *d = nsdetails( ns );
if ( !d || !fbs_.matchPossible() ) {
// Table scan plan, when no matches are possible
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_ ) ) );
+ plans_.push_back( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ) );
return;
}
@@ -226,7 +231,7 @@ namespace mongo {
if ( !strcmp( hintobj.firstElement().fieldName(), "$natural" ) ) {
massert( "natural order cannot be specified with $min/$max", min_.isEmpty() && max_.isEmpty() );
// Table scan plan
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_ ) ) );
+ plans_.push_back( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ) );
return;
}
for (int i = 0; i < d->nIndexes; i++ ) {
@@ -243,9 +248,9 @@ namespace mongo {
if ( !min_.isEmpty() || !max_.isEmpty() ) {
string errmsg;
BSONObj keyPattern;
- const IndexDetails *id = indexDetailsForRange( ns, errmsg, min_, max_, keyPattern );
- massert( errmsg, id );
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_, id, min_, max_ ) ) );
+ IndexDetails *idx = indexDetailsForRange( ns, errmsg, min_, max_, keyPattern );
+ massert( errmsg, idx );
+ plans_.push_back( PlanPtr( new QueryPlan( d, d->idxNo(*idx), fbs_, order_, idx, min_, max_ ) ) );
return;
}
@@ -257,13 +262,13 @@ namespace mongo {
oldNScanned_ = NamespaceDetailsTransient::get( ns ).nScannedForPattern( fbs_.pattern( order_ ) );
if ( !strcmp( bestIndex.firstElement().fieldName(), "$natural" ) ) {
// Table scan plan
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_ ) ) );
+ plans_.push_back( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ) );
return;
}
for (int i = 0; i < d->nIndexes; i++ ) {
IndexDetails& ii = d->indexes[i];
if( ii.keyPattern().woCompare(bestIndex) == 0 ) {
- plans_.push_back( PlanPtr( new QueryPlan( fbs_, order_, &ii ) ) );
+ plans_.push_back( PlanPtr( new QueryPlan( d, i, fbs_, order_, &ii ) ) );
return;
}
}
@@ -284,13 +289,13 @@ namespace mongo {
if ( ( fbs_.nNontrivialBounds() == 0 && order_.isEmpty() ) ||
( !order_.isEmpty() && !strcmp( order_.firstElement().fieldName(), "$natural" ) ) ) {
// Table scan plan
- addPlan( PlanPtr( new QueryPlan( fbs_, order_ ) ), checkFirst );
+ addPlan( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ), checkFirst );
return;
}
PlanSet plans;
for( int i = 0; i < d->nIndexes; ++i ) {
- PlanPtr p( new QueryPlan( fbs_, order_, &d->indexes[ i ] ) );
+ PlanPtr p( new QueryPlan( d, i, fbs_, order_, &d->indexes[ i ] ) );
if ( p->optimal() ) {
addPlan( p, checkFirst );
return;
@@ -302,7 +307,7 @@ namespace mongo {
addPlan( *i, checkFirst );
// Table scan plan
- addPlan( PlanPtr( new QueryPlan( fbs_, order_ ) ), checkFirst );
+ addPlan( PlanPtr( new QueryPlan( d, -1, fbs_, order_ ) ), checkFirst );
}
shared_ptr< QueryOp > QueryPlanSet::runOp( QueryOp &op ) {
@@ -495,14 +500,14 @@ namespace mongo {
}
// NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
- const IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
+ IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern ) {
if ( min.isEmpty() && max.isEmpty() ) {
errmsg = "one of min or max must be specified";
return 0;
}
setClient( ns );
- const IndexDetails *id = 0;
+ IndexDetails *id = 0;
NamespaceDetails *d = nsdetails( ns );
if ( !d ) {
errmsg = "ns not found";
View
10 db/queryoptimizer.h
@@ -27,7 +27,8 @@ namespace mongo {
class IndexDetails;
class QueryPlan {
public:
- QueryPlan( const FieldBoundSet &fbs,
+ QueryPlan(NamespaceDetails *_d, int _idxNo,
+ const FieldBoundSet &fbs,
const BSONObj &order,
const IndexDetails *index = 0,
const BSONObj &startKey = BSONObj(),
@@ -56,6 +57,8 @@ namespace mongo {
const FieldBound &bound( const char *fieldName ) const { return fbs_.bound( fieldName ); }
void registerSelf( long long nScanned ) const;
private:
+ NamespaceDetails *d;
+ int idxNo;
const FieldBoundSet &fbs_;
const BSONObj &order_;
const IndexDetails *index_;
@@ -129,7 +132,7 @@ namespace mongo {
plans_.push_back( plan );
}
void init();
- void addHint( const IndexDetails &id );
+ void addHint( IndexDetails &id );
struct Runner {
Runner( QueryPlanSet &plans, QueryOp &op );
shared_ptr< QueryOp > run();
@@ -138,6 +141,7 @@ namespace mongo {
static void initOp( QueryOp &op );
static void nextOp( QueryOp &op );
};
+ const char *ns;
FieldBoundSet fbs_;
PlanSet plans_;
bool mayRecordPlan_;
@@ -151,6 +155,6 @@ namespace mongo {
};
// NOTE min, max, and keyPattern will be updated to be consistent with the selected index.
- const IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
+ IndexDetails *indexDetailsForRange( const char *ns, string &errmsg, BSONObj &min, BSONObj &max, BSONObj &keyPattern );
} // namespace mongo
View
10 jstests/indexc.js
@@ -12,9 +12,9 @@ for ( var i=1; i<100; i++ ){
assert.eq( 50 , t.find( { ts : { $lt : mid } } ).itcount() , "A" );
assert.eq( 50 , t.find( { ts : { $lt : mid } } ).sort( { ts : 1 } ).itcount() , "B" );
-//t.ensureIndex( { ts : 1 , cats : 1 } );
-//t.ensureIndex( { cats : 1 } );
+t.ensureIndex( { ts : 1 , cats : 1 } );
+t.ensureIndex( { cats : 1 } );
-// multi-key bug
-//assert.eq( 50 , t.find( { ts : { $lt : mid } } ).itcount() , "C" );
-//assert.eq( 50 , t.find( { ts : { $lt : mid } } ).sort( { ts : 1 } ).itcount() , "D" );
+// multi-key bug was firing here (related to getsetdup()):
+assert.eq( 50 , t.find( { ts : { $lt : mid } } ).itcount() , "C" );
+assert.eq( 50 , t.find( { ts : { $lt : mid } } ).sort( { ts : 1 } ).itcount() , "D" );
Please sign in to comment.
Something went wrong with that request. Please try again.