Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

4150 explain implementation checkpoint

  • Loading branch information...
commit afc4e7fe813a753bf24414ff1385226d8ebbbd86 1 parent cb40560
astaple astaple authored
2  jstests/explain2.js
View
@@ -9,7 +9,7 @@ for ( i=1; i<10; i++ ){
}
function go( q , c , b , o ){
- var e = t.find( q ).explain();
+ var e = t.find( q ).hint( {a:1,b:1} ).explain();
assert.eq( c , e.n , "count " + tojson( q ) )
assert.eq( b , e.nscanned , "nscanned " + tojson( q ) )
assert.eq( o , e.nscannedObjects , "nscannedObjects " + tojson( q ) )
54 jstests/explain4.js
View
@@ -0,0 +1,54 @@
+// Basic validation of explain output fields
+
+t = db.jstests_explain4;
+t.drop();
+
+function checkField( name, value ) {
+ assert( explain.hasOwnProperty( name ) );
+ if ( value != null ) {
+ assert.eq( value, explain[ name ], name );
+ }
+}
+
+function checkPlanFields( explain, matches, n ) {
+ checkField( "cursor", "BasicCursor" );
+ checkField( "n", n );
+ checkField( "nscannedObjects", matches );
+ checkField( "nscanned", matches );
+ checkField( "indexBounds", {} );
+}
+
+function checkFields( matches, sort, limit ) {
+ it = t.find();
+ if ( sort ) {
+ it.sort({a:1});
+ }
+ if ( limit ) {
+ it.limit( limit );
+ }
+ explain = it.explain( true );
+ printjson( explain );
+ checkPlanFields( explain, matches, matches > 0 ? 1 : 0 );
+ checkField( "scanAndOrder", sort );
+ checkField( "millis" );
+ checkField( "nYields" );
+ checkField( "nChunkSkips", 0 );
+ checkField( "isMultiKey", false );
+// checkField( "indexOnly", false );
+ checkField( "server" );
+ checkField( "allPlans" );
+ explain.allPlans.forEach( function( x ) { checkPlanFields( x, matches ); } );
+}
+
+checkFields( 0, false );
+checkFields( 0, true );
+
+t.save( {} );
+checkFields( 1, false );
+checkFields( 1, true );
+
+t.save( {} );
+checkFields( 1, false, 1 );
+//checkFields( 2, true, 1 );
+//
+//// check all other fields, eg oldPlan
28 jstests/explain5.js
View
@@ -0,0 +1,28 @@
+// Check that the explain result count does proper deduping.
+
+t = db.jstests_explain5;
+t.drop();
+
+t.ensureIndex( {a:1} );
+t.ensureIndex( {b:1} );
+
+t.save( {a:[1,2,3],b:[4,5,6]} );
+for( i = 0; i < 10; ++i ) {
+ t.save( {} );
+}
+
+explain = t.find( {a:{$gt:0},b:{$gt:0}} ).explain( true );
+assert.eq( 1, explain.n );
+assert.eq( 1, explain.allPlans[ 0 ].n );
+assert.eq( 1, explain.allPlans[ 1 ].n );
+
+explain = t.find( {$or:[{a:{$gt:0},b:{$gt:0}},{a:{$gt:-1},b:{$gt:-1}}]} ).explain( true );
+assert.eq( 1, explain.n );
+
+assert.eq( 1, explain.clauses[ 0 ].n );
+assert.eq( 1, explain.clauses[ 0 ].allPlans[ 0 ].n );
+assert.eq( 1, explain.clauses[ 0 ].allPlans[ 1 ].n );
+
+assert.eq( 0, explain.clauses[ 1 ].n );
+assert.eq( 0, explain.clauses[ 1 ].allPlans[ 0 ].n );
+assert.eq( 0, explain.clauses[ 1 ].allPlans[ 1 ].n );
23 jstests/explain6.js
View
@@ -0,0 +1,23 @@
+// Test explain result count when a skip parameter is used.
+
+t = db.jstests_explain6;
+t.drop();
+
+t.save( {} );
+explain = t.find().skip( 1 ).explain( true );
+assert.eq( 0, explain.n );
+// With only one plan, the skip information is known for the plan. This is an arbitrary
+// implementation detail, but it changes the way n is calculated.
+assert.eq( 0, explain.allPlans[ 0 ].n );
+
+t.ensureIndex( {a:1} );
+explain = t.find( {a:null,b:null} ).skip( 1 ).explain( true );
+assert.eq( 0, explain.n );
+// With multiple plans, the skip information is not known to the plan.
+assert.eq( 1, explain.allPlans[ 0 ].n );
+
+t.dropIndexes();
+explain = t.find().skip( 1 ).sort({a:1}).explain( true );
+// Skip is applied for an in memory sort.
+assert.eq( 0, explain.n );
+assert.eq( 1, explain.allPlans[ 0 ].n );
25 jstests/in4.js
View
@@ -2,17 +2,6 @@ t = db.jstests_in4;
function checkRanges( a, b ) {
assert.eq( a, b );
-// expectedCount = a;
-// r = b;
-//// printjson( r );
-// assert.eq.automsg( "expectedCount", "r.a.length" );
-// for( i in r.a ) {
-// assert.eq.automsg( "r.a[ i ][ 0 ]", "r.a[ i ][ 1 ]" );
-// }
-// assert.eq.automsg( "expectedCount", "r.b.length" );
-// for( i in r.b ) {
-// assert.eq.automsg( "r.b[ i ][ 0 ]", "r.b[ i ][ 1 ]" );
-// }
}
t.drop();
@@ -27,9 +16,9 @@ checkRanges( {a:[[2,2],[3,3]],b:[[4,10]]}, t.find( {a:{$in:[2,3]},b:{$gt:4,$lt:1
t.save( {a:1,b:1} );
t.save( {a:2,b:4.5} );
t.save( {a:2,b:4} );
-assert.eq.automsg( "2", "t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).explain().nscanned" );
-assert.eq.automsg( "2", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).a" );
-assert.eq.automsg( "4", "t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).b" );
+assert.eq( 2, t.find( {a:{$in:[2,3]},b:{$in:[4,5]}} ).hint( {a:1,b:1} ).explain().nscanned );
+assert.eq( 2, t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).a );
+assert.eq( 4, t.findOne( {a:{$in:[2,3]},b:{$in:[4,5]}} ).b );
t.drop();
t.ensureIndex( {a:1,b:1,c:1} );
@@ -37,17 +26,17 @@ checkRanges( {a:[[2,2]],b:[[3,3],[4,4]],c:[[5,5]]}, t.find( {a:2,b:{$in:[3,4]},c
t.save( {a:2,b:3,c:5} );
t.save( {a:2,b:3,c:4} );
-assert.eq.automsg( "1", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
+assert.eq( 1, t.find( {a:2,b:{$in:[3,4]},c:5} ).hint( {a:1,b:1,c:1} ).explain().nscanned );
t.remove();
t.save( {a:2,b:4,c:5} );
t.save( {a:2,b:4,c:4} );
-assert.eq.automsg( "2", "t.find( {a:2,b:{$in:[3,4]},c:5} ).explain().nscanned" );
+assert.eq( 2, t.find( {a:2,b:{$in:[3,4]},c:5} ).hint( {a:1,b:1,c:1} ).explain().nscanned );
t.drop();
t.ensureIndex( {a:1,b:-1} );
ib = t.find( {a:2,b:{$in:[3,4]}} ).explain().indexBounds;
checkRanges( {a:[[2,2]],b:[[4,4],[3,3]]}, ib );
-assert.automsg( "ib.b[ 0 ][ 0 ] > ib.b[ 1 ][ 0 ]" );
+assert( ib.b[ 0 ][ 0 ] > ib.b[ 1 ][ 0 ] );
ib = t.find( {a:2,b:{$in:[3,4]}} ).sort( {a:-1,b:1} ).explain().indexBounds;
checkRanges( {a:[[2,2]],b:[[3,3],[4,4]]}, ib );
-assert.automsg( "ib.b[ 0 ][ 0 ] < ib.b[ 1 ][ 0 ]" );
+assert( ib.b[ 0 ][ 0 ] < ib.b[ 1 ][ 0 ] );
71 jstests/index_check6.js
View
@@ -12,12 +12,12 @@ for ( var age=10; age<50; age++ ){
assert.eq( 10 , t.find( { age : 30 } ).explain().nscanned , "A" );
assert.eq( 20 , t.find( { age : { $gte : 29 , $lte : 30 } } ).explain().nscanned , "B" );
-assert.eq( 18 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).explain().nscanned , "C1" );
-assert.eq( 23 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } } ).explain().nscanned , "C2" );
-assert.eq( 28 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } } ).explain().nscanned , "C3" );
+assert.eq( 18 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,9] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C1" );
+assert.eq( 23 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [0,8] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C2" );
+assert.eq( 28 , t.find( { age : { $gte : 25 , $lte : 30 }, rating: {$in: [1,8] } } ).hint( {age:1,rating:1} ).explain().nscanned , "C3" );
-assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).explain().nscanned , "C" ); // SERVER-371
-assert.eq( 6 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).explain().nscanned , "D" ); // SERVER-371
+assert.eq( 4 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : 5 } ).hint( {age:1,rating:1} ).explain().nscanned , "C" ); // SERVER-371
+assert.eq( 6 , t.find( { age : { $gte : 29 , $lte : 30 } , rating : { $gte : 4 , $lte : 5 } } ).hint( {age:1,rating:1} ).explain().nscanned , "D" ); // SERVER-371
assert.eq.automsg( "2", "t.find( { age:30, rating:{ $gte:4, $lte:5} } ).explain().nscanned" );
@@ -31,37 +31,40 @@ for ( var a=1; a<10; a++ ){
}
}
-function doTest( s ) {
- sort = s;
-assert.eq.automsg( "1", "t.find( { a:5, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "2", "t.find( { a:5, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5.5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "1", "t.find( { a:5, b:5, c:{$gte:5,$lte:5.5} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "3", "t.find( { a:5, b:5, c:{$gte:5,$lte:7} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "4", "t.find( { a:5, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
- if ( s.b > 0 ) {
- assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
+function doQuery( count, query, sort, index ) {
+ assert.eq( count, t.find( query ).hint( index ).sort( sort ).explain().nscanned );
+}
+
+function doTest( sort, index ) {
+ doQuery( 1, { a:5, b:5, c:5 }, sort, index );
+ doQuery( 2, { a:5, b:5, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery( 1, { a:5, b:5, c:{$gte:5.5,$lte:6} }, sort, index );
+ doQuery( 1, { a:5, b:5, c:{$gte:5,$lte:5.5} }, sort, index );
+ doQuery( 3, { a:5, b:5, c:{$gte:5,$lte:7} }, sort, index );
+ doQuery( 4, { a:5, b:{$gte:5,$lte:6}, c:5 }, sort, index );
+ if ( sort.b > 0 ) {
+ doQuery( 2, { a:5, b:{$gte:5.5,$lte:6}, c:5 }, sort, index );
+ doQuery( 2, { a:5, b:{$gte:5,$lte:5.5}, c:5 }, sort, index );
} else {
- assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5.5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "2", "t.find( { a:5, b:{$gte:5,$lte:5.5}, c:5 } ).sort( sort ).explain().nscanned" );
+ doQuery( 2, { a:5, b:{$gte:5.5,$lte:6}, c:5 }, sort, index );
+ doQuery( 2, { a:5, b:{$gte:5,$lte:5.5}, c:5 }, sort, index );
}
-assert.eq.automsg( "7", "t.find( { a:5, b:{$gte:5,$lte:7}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "4", "t.find( { a:{$gte:5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
- if ( s.a > 0 ) {
- assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "3", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+ doQuery( 7, { a:5, b:{$gte:5,$lte:7}, c:5 }, sort, index );
+ doQuery( 4, { a:{$gte:5,$lte:6}, b:5, c:5 }, sort, index );
+ if ( sort.a > 0 ) {
+ doQuery( 2, { a:{$gte:5.5,$lte:6}, b:5, c:5 }, sort, index );
+ doQuery( 2, { a:{$gte:5,$lte:5.5}, b:5, c:5 }, sort, index );
+ doQuery( 3, { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
} else {
- assert.eq.automsg( "2", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "2", "t.find( { a:{$gte:5,$lte:5.5}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
- assert.eq.automsg( "3", "t.find( { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+ doQuery( 2, { a:{$gte:5.5,$lte:6}, b:5, c:5 }, sort, index );
+ doQuery( 2, { a:{$gte:5,$lte:5.5}, b:5, c:5 }, sort, index );
+ doQuery( 3, { a:{$gte:5.5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
}
-assert.eq.automsg( "7", "t.find( { a:{$gte:5,$lte:7}, b:5, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "6", "t.find( { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "6", "t.find( { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "10", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 } ).sort( sort ).explain().nscanned" );
-assert.eq.automsg( "14", "t.find( { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} } ).sort( sort ).explain().nscanned" );
+ doQuery( 7, { a:{$gte:5,$lte:7}, b:5, c:5 }, sort, index );
+ doQuery( 6, { a:{$gte:5,$lte:6}, b:5, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery( 6, { a:5, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} }, sort, index );
+ doQuery( 10, { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:5 }, sort, index );
+ doQuery( 14, { a:{$gte:5,$lte:6}, b:{$gte:5,$lte:6}, c:{$gte:5,$lte:6} }, sort, index );
}
for ( var a = -1; a <= 1; a += 2 ) {
@@ -70,8 +73,8 @@ for ( var a = -1; a <= 1; a += 2 ) {
t.dropIndexes();
var spec = {a:a,b:b,c:c};
t.ensureIndex( spec );
- doTest( spec );
- doTest( {a:-a,b:-b,c:-c} );
+ doTest( spec, spec );
+ doTest( {a:-a,b:-b,c:-c}, spec );
}
}
}
13 jstests/index_elemmatch1.js
View
@@ -22,7 +22,18 @@ assert.eq( 30 , t.find( q ).itcount() , "A3" )
q.arr = { $elemMatch : { x : 5 , y : 5 } }
assert.eq( 10 , t.find( q ).itcount() , "A4" )
-assert.eq( t.find(q).itcount() , t.find(q).explain().nscanned , "A5" )
+function nscannedForCursor( explain, cursor ) {
+ plans = explain.allPlans;
+ for( i in plans ) {
+ if ( plans[ i ].cursor == cursor ) {
+ return plans[ i ].nscanned;
+ }
+ }
+ return -1;
+}
+
+assert.eq( t.find(q).itcount(),
+ nscannedForCursor( t.find(q).explain(true), 'BtreeCursor arr.x_1_a_1' ), "A5" );
16 jstests/indexj.js
View
@@ -28,17 +28,17 @@ t.save( { a:1,b:2 } );
t.save( { a:2,b:1 } );
t.save( { a:2,b:2 } );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
t.save( {a:1,b:1} );
t.save( {a:1,b:1} );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
-assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
+assert.eq( 2, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
-assert.eq( 1, t.find( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} } ).explain().nscanned );
-assert.eq( 1, t.find( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} } ).sort( {a:-1,b:-1} ).explain().nscanned );
+assert.eq( 1, t.find( { a:{$in:[1,1.9]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned );
+assert.eq( 1, t.find( { a:{$in:[1.1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).sort( {a:-1,b:-1} ).explain().nscanned );
t.save( { a:1,b:1.5} );
-assert.eq( 3, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).explain().nscanned, "F" );
+assert.eq( 3, t.find( { a:{$in:[1,2]}, b:{$gt:1,$lt:2} } ).hint( {a:1,b:1} ).explain().nscanned, "F" );
25 jstests/orf.js
View
@@ -9,7 +9,28 @@ for( var i = 0; i < 200; ++i ) {
}
a.forEach( function( x ) { t.save( x ); } );
-explain = t.find( {$or:a} ).explain();
+explain = t.find( {$or:a} ).explain( true );
+printjson( explain );
assert.eq( 200, explain.n );
-assert.eq( 200, explain.clauses.length );
+clauses = explain.clauses;
+for( i = 0; i < clauses.length; ++i ) {
+ c = clauses[ i ];
+ assert.eq( 'BtreeCursor _id_', c.cursor );
+ assert.eq( false, c.isMultiKey );
+ assert.eq( 1, c.n, 'n' );
+ assert.eq( 1, c.nscannedObjects, 'nscannedObjects' );
+ assert.eq( 1, c.nscanned, 'nscanned' );
+ assert.eq( false, c.scanAndOrder );
+ assert.eq( false, c.indexOnly );
+ assert.eq( {_id:[[i,i]]}, c.indexBounds );
+ allPlans = c.allPlans;
+ assert.eq( 1, allPlans.length );
+ plan = allPlans[ 0 ];
+ assert.eq( 'BtreeCursor _id_', plan.cursor );
+ assert.eq( 1, plan.n, 'n' );
+ assert.eq( 1, plan.nscannedObjects, 'nscannedObjects' );
+ assert.eq( 1, plan.nscanned, 'nscanned' );
+ assert.eq( {_id:[[i,i]]}, plan.indexBounds );
+}
+assert.eq( 200, clauses.length );
assert.eq( 200, t.count( {$or:a} ) );
3  jstests/sharding/shard3.js
View
@@ -67,7 +67,8 @@ doCounts( "after wrong save" , total , true )
e = a.find().explain();
assert.eq( 3 , e.n , "ex1" )
assert.eq( 4 , e.nscanned , "ex2" )
-assert.eq( 1 , e.nChunkSkips , "ex3" )
+assert.eq( 4 , e.nscannedObjects , "ex3" )
+assert.eq( 1 , e.nChunkSkips , "ex4" )
// --- move all to 1 ---
print( "MOVE ALL TO 1" );
2  jstests/sortg.js
View
@@ -33,7 +33,7 @@ noMemoryException( {$natural:1} );
t.ensureIndex( {a:1} );
t.ensureIndex( {b:1} );
-
+//
// These sorts are now indexed.
noMemoryException( {a:1} );
noMemoryException( {b:1} );
1  src/mongo/SConscript
View
@@ -254,6 +254,7 @@ serverOnlyFiles = [ "db/curop.cpp",
"db/extsort.cpp",
"db/index.cpp",
"db/scanandorder.cpp",
+ "db/explain.cpp",
"db/geo/2d.cpp",
"db/geo/haystack.cpp",
"db/ops/count.cpp",
1  src/mongo/client/examples/first.cpp
View
@@ -82,5 +82,4 @@ int main( int argc, const char **argv ) {
cout << res.isEmpty() << "\t" << res.jsonString() << endl;
}
-
}
263 src/mongo/db/explain.cpp
View
@@ -0,0 +1,263 @@
+// @file explain.cpp - Helper classes for generating query explain output.
+
+/* Copyright 2012 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "explain.h"
+
+#include "cmdline.h"
+#include "../util/net/sock.h"
+#include "../util/mongoutils/str.h"
+
+namespace mongo {
+
+ // TODO get rid of const casts
+
+ ExplainPlanInfo::ExplainPlanInfo() :
+ _isMultiKey(),
+ _n(),
+ _nscannedObjects(),
+ _nscanned(),
+ _scanAndOrder(),
+ _nYields(),
+ _nChunkSkips(),
+ _picked(),
+ _done() {
+ }
+
+ void ExplainPlanInfo::notePlan( const Cursor &cursor, bool scanAndOrder ) {
+ _cursorName = const_cast<Cursor&>(cursor).toString();
+ _indexBounds = cursor.prettyIndexBounds();
+ _scanAndOrder = scanAndOrder;
+ noteCursorUpdate( cursor );
+ }
+
+ void ExplainPlanInfo::noteIterate( bool match, bool loadedObject, bool chunkSkip,
+ const Cursor &cursor ) {
+ if ( match ) {
+ ++_n;
+ }
+ if ( loadedObject ) {
+ ++_nscannedObjects;
+ }
+ if ( chunkSkip ) {
+ ++_nChunkSkips;
+ }
+ noteCursorUpdate( cursor );
+ }
+
+ void ExplainPlanInfo::noteYield() { ++_nYields; }
+
+ void ExplainPlanInfo::noteDone( const Cursor &cursor ) {
+ _done = true;
+ noteCursorUpdate( cursor );
+ }
+
+ void ExplainPlanInfo::notePicked() {
+ _picked = true;
+ }
+
+ BSONObj ExplainPlanInfo::bson() const {
+ return BSON(
+ "cursor" << _cursorName <<
+ "n" << _n <<
+ "nscannedObjects" << _nscannedObjects <<
+ "nscanned" << _nscanned <<
+ "indexBounds" << _indexBounds
+ );
+ }
+
+ BSONObj ExplainPlanInfo::pickedPlanBson( const ExplainClauseInfo &clauseInfo ) const {
+ return BSON(
+ "cursor" << _cursorName <<
+ "isMultiKey" << _isMultiKey <<
+ "n" << clauseInfo.n() <<
+ "nscannedObjects" << clauseInfo.nscannedObjects() <<
+ "nscanned" << clauseInfo.nscanned() <<
+ "scanAndOrder" << _scanAndOrder <<
+ "indexOnly" << false << // TODO
+ "nYields" << _nYields <<
+ "nChunkSkips" << clauseInfo.nChunkSkips() <<
+ "millis" << clauseInfo.millis() <<
+ "indexBounds" << _indexBounds
+ );
+ }
+
+ void ExplainPlanInfo::noteCursorUpdate( const Cursor &cursor ) {
+ _isMultiKey = cursor.isMultiKey();
+ _nscanned = const_cast<Cursor&>(cursor).nscanned();
+ }
+
+ ExplainClauseInfo::ExplainClauseInfo() :
+ _n(),
+ _nscannedObjects(),
+ _nChunkSkips() {
+ }
+
+ shared_ptr<ExplainClauseInfo>
+ ExplainClauseInfo::fromSinglePlan( const shared_ptr<ExplainPlanInfo> &info ) {
+ shared_ptr<ExplainClauseInfo> clauseInfo( new ExplainClauseInfo() );
+ clauseInfo->addPlanInfo( info );
+ clauseInfo->_n = info->n();
+ clauseInfo->_nscannedObjects = info->nscannedObjects();
+ clauseInfo->_nChunkSkips = info->nChunkSkips();
+ clauseInfo->_timer = info->timer();
+ return clauseInfo;
+ }
+
+ BSONObj ExplainClauseInfo::bson() const {
+ BSONObjBuilder bb;
+ bb.appendElements( virtualPickedPlan().pickedPlanBson( *this ) );
+ // TODO won't include plans w/ no cursor iterates.
+ BSONArrayBuilder allPlans( bb.subarrayStart( "allPlans" ) );
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ allPlans << (*i)->bson();
+ }
+ allPlans.done();
+ return bb.obj();
+ }
+
+ void ExplainClauseInfo::addPlanInfo( const shared_ptr<ExplainPlanInfo> &info ) {
+ _plans.push_back( info );
+ }
+
+ void ExplainClauseInfo::noteIterate( bool match, bool loadedObject, bool chunkSkip ) {
+ if ( match ) {
+ ++_n;
+ }
+ if ( loadedObject ) {
+ ++_nscannedObjects;
+ }
+ if ( chunkSkip ) {
+ ++_nChunkSkips;
+ }
+ }
+
+ void ExplainClauseInfo::reviseN( long long n ) {
+ _n = n;
+ }
+
+ void ExplainClauseInfo::stopTimer() {
+ _timer.stop();
+ }
+
+ long long ExplainClauseInfo::nscanned() const {
+ long long ret = 0;
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ ret += (*i)->nscanned();
+ }
+ return ret;
+ }
+
+ const ExplainPlanInfo &ExplainClauseInfo::virtualPickedPlan() const {
+ // Return a picked plan if possible.
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ if ( (*i)->picked() ) {
+ return **i;
+ }
+ }
+ // Return a done plan if possible.
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ if ( (*i)->done() ) {
+ return **i;
+ }
+ }
+ // Return a plan with the highest match count.
+ int maxN = 0;
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ if ( (*i)->n() > maxN ) {
+ maxN = (*i)->n();
+ }
+ }
+ for( list<shared_ptr<const ExplainPlanInfo> >::const_iterator i = _plans.begin();
+ i != _plans.end(); ++i ) {
+ if ( (*i)->n() == maxN ) {
+ return **i;
+ }
+ }
+ verify( 16062, false );
+ return *(new ExplainPlanInfo()); // TODO better
+ }
+
+ shared_ptr<ExplainQueryInfo>
+ ExplainQueryInfo::fromSinglePlan( const shared_ptr<ExplainPlanInfo> &info ) {
+ shared_ptr<ExplainClauseInfo> clauseInfo = ExplainClauseInfo::fromSinglePlan( info );
+ shared_ptr<ExplainQueryInfo> queryInfo( new ExplainQueryInfo() );
+ queryInfo->addClauseInfo( clauseInfo );
+ return queryInfo;
+ }
+
+ void ExplainQueryInfo::noteIterate( bool match, bool loadedObject, bool chunkSkip ) {
+ verify( 16063, !_clauses.empty() );
+ _clauses.back()->noteIterate( match, loadedObject, chunkSkip );
+ }
+
+ void ExplainQueryInfo::reviseN( long long n ) {
+ verify( 16064, !_clauses.empty() );
+ _clauses.back()->reviseN( n );
+ }
+
+ void ExplainQueryInfo::setAncillaryInfo( const AncillaryInfo &ancillaryInfo ) {
+ _ancillaryInfo = ancillaryInfo;
+ }
+
+ BSONObj ExplainQueryInfo::bson() const {
+ BSONObjBuilder bb;
+ if ( _clauses.size() == 1 ) {
+ bb.appendElements( _clauses.front()->bson() );
+ }
+ else {
+ long long n = 0;
+ long long nscannedObjects = 0;
+ long long nscanned = 0;
+ BSONArrayBuilder clauseArray( bb.subarrayStart( "clauses" ) );
+ for( list<shared_ptr<ExplainClauseInfo> >::const_iterator i = _clauses.begin();
+ i != _clauses.end(); ++i ) {
+ clauseArray << (*i)->bson();
+ n += (*i)->n();
+ nscannedObjects += (*i)->nscannedObjects();
+ nscanned += (*i)->nscanned();
+ }
+ clauseArray.done();
+ bb
+ << "n" << n
+ << "nscannedObjects" << nscannedObjects
+ << "nscanned" << nscanned
+ << "millis" << _timer.duration();
+ }
+
+ if ( !_ancillaryInfo._oldPlan.isEmpty() ) {
+ bb << "oldPlan" << _ancillaryInfo._oldPlan;
+ }
+ bb
+ << "server"
+ << (string)( mongoutils::str::stream() << getHostNameCached() << ":" << cmdLine.port );
+
+ return bb.obj();
+ }
+
+ void ExplainQueryInfo::addClauseInfo( const shared_ptr<ExplainClauseInfo> &info ) {
+ if ( !_clauses.empty() ) {
+ _clauses.back()->stopTimer();
+ }
+ _clauses.push_back( info );
+ }
+
+} // namespace mongo
125 src/mongo/db/explain.h
View
@@ -0,0 +1,125 @@
+// @file explain.h - Helper classes for generating query explain output.
+
+/* Copyright 2012 10gen Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "cursor.h"
+#include "../util/timer.h"
+
+namespace mongo {
+
+ class DurationTimer {
+ public:
+ DurationTimer() : _running( true ), _duration() {}
+ void stop() { _running = false; _duration = _timer.millis(); }
+ int duration() const { return _running ? _timer.millis() : _duration; }
+ private:
+ Timer _timer;
+ bool _running;
+ int _duration;
+ };
+
+ class ExplainClauseInfo;
+
+ class ExplainPlanInfo {
+ public:
+ ExplainPlanInfo();
+
+ void notePlan( const Cursor &cursor, bool scanAndOrder );
+ void noteIterate( bool match, bool loadedObject, bool chunkSkip, const Cursor &cursor );
+ void noteYield();
+ void noteDone( const Cursor &cursor );
+ void notePicked();
+
+ BSONObj bson() const;
+ BSONObj pickedPlanBson( const ExplainClauseInfo &clauseInfo ) const;
+
+ bool picked() const { return _picked; }
+ bool done() const { return _done; }
+ long long n() const { return _n; }
+ long long nscannedObjects() const { return _nscannedObjects; }
+ long long nscanned() const { return _nscanned; }
+ long long nChunkSkips() const { return _nChunkSkips; }
+ DurationTimer timer() const { return _timer; }
+
+ private:
+ void noteCursorUpdate( const Cursor &cursor );
+ string _cursorName;
+ bool _isMultiKey;
+ long long _n;
+ long long _nscannedObjects;
+ long long _nscanned;
+ bool _scanAndOrder;
+ int _nYields;
+ long long _nChunkSkips;
+ BSONObj _indexBounds;
+ bool _picked;
+ bool _done;
+ DurationTimer _timer;
+ };
+
+ class ExplainClauseInfo {
+ public:
+ ExplainClauseInfo();
+ static shared_ptr<ExplainClauseInfo>
+ fromSinglePlan( const shared_ptr<ExplainPlanInfo> &info );
+
+ void noteIterate( bool match, bool loadedObject, bool chunkSkip );
+ void reviseN( long long n );
+ void stopTimer();
+
+ void addPlanInfo( const shared_ptr<ExplainPlanInfo> &info );
+ BSONObj bson() const;
+
+ long long n() const { return _n; }
+ long long nscannedObjects() const { return _nscannedObjects; }
+ long long nscanned() const;
+ long long nChunkSkips() const { return _nChunkSkips; }
+ int millis() const { return _timer.duration(); }
+
+ private:
+ const ExplainPlanInfo &virtualPickedPlan() const;
+ list<shared_ptr<const ExplainPlanInfo> > _plans;
+ long long _n;
+ long long _nscannedObjects;
+ long long _nChunkSkips;
+ DurationTimer _timer;
+ };
+
+ class ExplainQueryInfo {
+ public:
+ static shared_ptr<ExplainQueryInfo>
+ fromSinglePlan( const shared_ptr<ExplainPlanInfo> &info );
+
+ void noteIterate( bool match, bool loadedObject, bool chunkSkip );
+ void reviseN( long long n );
+
+ struct AncillaryInfo {
+ BSONObj _oldPlan;
+ };
+ void setAncillaryInfo( const AncillaryInfo &ancillaryInfo );
+
+ void addClauseInfo( const shared_ptr<ExplainClauseInfo> &info );
+ BSONObj bson() const;
+
+ private:
+ list<shared_ptr<ExplainClauseInfo> > _clauses;
+ AncillaryInfo _ancillaryInfo;
+ DurationTimer _timer;
+ };
+
+} // namespace mongo
214 src/mongo/db/ops/query.cpp
View
@@ -39,6 +39,7 @@
#include "../../server.h"
#include "../d_concurrency.h"
#include "../queryoptimizercursorimpl.h"
+#include "../explain.h"
namespace mongo {
@@ -652,57 +653,47 @@ namespace mongo {
bool _yieldRecoveryFailed;
};
-
+
+ // separate explain strategies
+
class QueryResponseBuilder {
public:
- QueryResponseBuilder( const ParsedQuery &parsedQuery, const shared_ptr<Cursor> &cursor ) :
+ QueryResponseBuilder( const ParsedQuery &parsedQuery, const shared_ptr<Cursor> &cursor,
+ const BSONObj &oldPlan ) :
_parsedQuery( parsedQuery ),
_cursor( cursor ),
_queryOptimizerCursor( dynamic_cast<QueryOptimizerCursor*>( cursor.get() ) ),
_buf( 32768 ),
_scanAndOrder( newScanAndOrder() ),
_skip( _parsedQuery.getSkip() ),
- _n() {
+ _n(),
+ _chunkManager( newChunkManager() ),
+ _explainInfo( newExplainInfo() ),
+ _oldPlan( oldPlan ) {
+ if ( _explainInfo ) _explainInfo->notePlan( *_cursor, false );
_buf.skip( sizeof( QueryResult ) );
}
- void mayAddMatch() {
- DiskLoc loc = _cursor->currLoc();
+ bool mayAddMatch() {
+ if ( !currentMatches() ) {
+ return false;
+ }
+ if ( !chunkMatches() ) {
+ return false;
+ }
if ( _scanAndOrder ) {
- if ( !_scanAndOrderDups.getsetdup( loc ) ) {
- try {
- _scanAndOrder->add( _cursor->current(), _parsedQuery.showDiskLoc() ? &loc : 0 );
- } catch ( const UserException &e ) {
- bool rethrow = true;
- if ( e.getCode() == ScanAndOrderMemoryLimitExceededAssertionCode ) {
- if ( _queryOptimizerCursor->multiPlanScanner()->haveOrderedPlan() ) {
- _scanAndOrder.reset();
- _queryOptimizerCursor->abortUnorderedPlans();
- rethrow = false;
- }
- else if ( _queryOptimizerCursor->multiPlanScanner()->usingCachedPlan() ) {
- _queryOptimizerCursor->multiPlanScanner()->clearIndexesForPatterns();
- }
- }
- if ( rethrow ) {
- throw;
- }
- }
- }
+ handleScanAndOrderMatch();
}
if ( !iterateNeedsSort() ) {
- if ( !_cursor->getsetdup( loc ) ) {
- if ( _skip > 0 ) {
- --_skip;
- }
- else {
- ++_n;
- fillQueryResultFromObj( _buf, _parsedQuery.getFields(), loc.obj(), ( _parsedQuery.showDiskLoc() ? &loc : 0 ) );
- }
- }
+ handleOrderedMatch();
}
+ return true;
+ }
+ void noteYield() {
+ if ( _explainInfo ) _explainInfo->noteYield();
+ // _queryOptimizerCursor counts yields internally.
}
bool enoughForFirstBatch() const {
- return _parsedQuery.enoughForFirstBatch( _n, _buf.len() );
+ return !_parsedQuery.isExplain() && _parsedQuery.enoughForFirstBatch( _n, _buf.len() );
}
bool enoughTotalResults() const {
return ( _parsedQuery.enough( _n ) || _buf.len() >= MaxBytesToReturnToClientAtOnce );
@@ -713,6 +704,29 @@ namespace mongo {
}
}
long long handoff( Message &result ) {
+ if ( _parsedQuery.isExplain() ) {
+ shared_ptr<ExplainQueryInfo> explainQueryInfo;
+ if ( _explainInfo ) {
+ _explainInfo->noteDone( *_cursor );
+ explainQueryInfo = ExplainQueryInfo::fromSinglePlan( _explainInfo );
+ }
+ else {
+ verify( 16067, _queryOptimizerCursor );
+ explainQueryInfo = _queryOptimizerCursor->explainQueryInfo();
+ }
+ if ( resultsNeedSort() ) {
+ explainQueryInfo->reviseN( _scanAndOrder->nout() );
+ }
+ ExplainQueryInfo::AncillaryInfo ancillaryInfo;
+ ancillaryInfo._oldPlan = _oldPlan;
+ explainQueryInfo->setAncillaryInfo( ancillaryInfo );
+ _buf.reset();
+ _buf.skip( sizeof( QueryResult ) );
+ fillQueryResultFromObj( _buf, 0, explainQueryInfo->bson() );
+ result.appendData( _buf.buf(), _buf.len() );
+ _buf.decouple();
+ return 1;
+ }
int ret = _n;
if ( resultsNeedSort() ) {
_buf.reset();
@@ -721,7 +735,7 @@ namespace mongo {
}
if ( _buf.len() > 0 ) {
result.appendData( _buf.buf(), _buf.len() );
- _buf.decouple();
+ _buf.decouple(); // only decouple here ok?
}
return ret;
}
@@ -730,7 +744,7 @@ namespace mongo {
if ( _parsedQuery.getOrder().isEmpty() ) {
return 0;
}
- if ( !_queryOptimizerCursor ) {
+ if ( !_queryOptimizerCursor || !_queryOptimizerCursor->ok() ) {
return 0;
}
return new ScanAndOrder( _parsedQuery.getSkip(),
@@ -738,6 +752,73 @@ namespace mongo {
_parsedQuery.getOrder(),
_queryOptimizerCursor->queryPlan()->multikeyFrs() );
}
+ ShardChunkManagerPtr newChunkManager() const {
+ if ( !shardingState.needShardChunkManager( _parsedQuery.ns() ) ) {
+ return ShardChunkManagerPtr();
+ }
+ return shardingState.getShardChunkManager( _parsedQuery.ns() );
+ }
+ ExplainPlanInfo *newExplainInfo() const {
+ if ( !_parsedQuery.isExplain() ) {
+ return 0;
+ }
+ if ( _queryOptimizerCursor ) {
+ return 0;
+ }
+ return new ExplainPlanInfo();
+ }
+ void handleScanAndOrderMatch() {
+ DiskLoc loc = _cursor->currLoc();
+ if ( _scanAndOrderDups.getsetdup( loc ) ) {
+ return;
+ }
+ try {
+ _scanAndOrder->add( _cursor->current(), _parsedQuery.showDiskLoc() ? &loc : 0 );
+ } catch ( const UserException &e ) {
+ bool rethrow = true;
+ if ( e.getCode() == ScanAndOrderMemoryLimitExceededAssertionCode ) {
+ if ( _queryOptimizerCursor->multiPlanScanner()->haveOrderedPlan() ) {
+ _scanAndOrder.reset();
+ _queryOptimizerCursor->abortUnorderedPlans();
+ rethrow = false;
+ }
+ else if ( _queryOptimizerCursor->multiPlanScanner()->usingCachedPlan() ) {
+ _queryOptimizerCursor->multiPlanScanner()->clearIndexesForPatterns();
+ }
+ }
+ if ( rethrow ) {
+ throw;
+ }
+ }
+ }
+ void handleOrderedMatch() {
+ DiskLoc loc = _cursor->currLoc();
+ if ( _cursor->getsetdup( loc ) ) {
+ return;
+ }
+ if ( _skip > 0 ) {
+ --_skip;
+ return;
+ }
+ ++_n;
+ if ( _parsedQuery.isExplain() ) {
+ noteIterate( true, true, false );
+ }
+ else {
+ fillQueryResultFromObj( _buf, _parsedQuery.getFields(), loc.obj(), ( _parsedQuery.showDiskLoc() ? &loc : 0 ) );
+ }
+ }
+ void noteIterate( bool match, bool loadedDocument, bool chunkSkip ) {
+ if ( !_parsedQuery.isExplain() ) {
+ return;
+ }
+ if ( _explainInfo ) {
+ _explainInfo->noteIterate( match, loadedDocument, chunkSkip, *_cursor );
+ }
+ else if ( _queryOptimizerCursor ) {
+ _queryOptimizerCursor->noteIterate( match, loadedDocument, chunkSkip );
+ }
+ }
bool iterateNeedsSort() const {
if ( !_scanAndOrder ) {
return false;
@@ -764,6 +845,25 @@ namespace mongo {
}
return true;
}
+ bool currentMatches() {
+ MatchDetails details;
+ if ( _cursor->currentMatches( &details ) ) {
+ return true;
+ }
+ noteIterate( false, details._loadedObject, false );
+ return false;
+ }
+ bool chunkMatches() {
+ if ( !_chunkManager ) {
+ return true;
+ }
+ // TODO: should make this covered at some point
+ if ( _chunkManager->belongsToMe( _cursor->current() ) ) {
+ return true;
+ }
+ noteIterate( false, true, true );
+ return false;
+ }
const ParsedQuery &_parsedQuery;
shared_ptr<Cursor> _cursor;
QueryOptimizerCursor *_queryOptimizerCursor;
@@ -772,6 +872,9 @@ namespace mongo {
SmallDupSet _scanAndOrderDups;
long long _skip;
long long _n;
+ ShardChunkManagerPtr _chunkManager;
+ shared_ptr<ExplainPlanInfo> _explainInfo;
+ BSONObj _oldPlan;
};
/* run a query -- includes checking for and running a Command \
@@ -911,43 +1014,58 @@ namespace mongo {
}
}
+ // TODO clean this up a bit.
+ BSONObj oldPlan;
+ if ( explain && ! pq.hasIndexSpecifier() ) {
+ MultiPlanScanner mps( ns, query, order );
+ if ( mps.usingCachedPlan() ) {
+ oldPlan = mps.oldExplain().firstElement().embeddedObject().firstElement().embeddedObject().getOwned();
+ }
+ }
+
for( int retries = 0; retries < 2; ++retries ) {
try {
shared_ptr<Cursor> cursor;
if ( pq.hasOption( QueryOption_OplogReplay ) ) {
cursor = FindingStartCursor::getCursor( ns, query, order );
}
- else if ( !pq.getFields() && !pq.isExplain() && !pq.returnKey() ) {
+ else if ( !pq.getFields() && !pq.returnKey() ) {
cursor = NamespaceDetailsTransient::getCursor( ns, query, order, false, 0, &pq );
}
if ( !cursor ) {
break;
}
{
- QueryResponseBuilder queryResponseBuilder( pq, cursor );
+ QueryResponseBuilder queryResponseBuilder( pq, cursor, oldPlan );
long long cursorid = 0;
OpTime slaveReadTill;
ClientCursor::CleanupPointer ccPointer;
ccPointer.reset( new ClientCursor( QueryOption_NoCursorTimeout, cursor, ns ) );
for( ; cursor->ok(); cursor->advance() ) {
- if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered ) || !cursor->ok() ) {
+ bool yielded = false;
+ if ( !ccPointer->yieldSometimes( ClientCursor::MaybeCovered, &yielded ) || !cursor->ok() ) {
+ queryResponseBuilder.noteYield();
break;
}
+ if ( yielded ) {
+ queryResponseBuilder.noteYield();
+ }
if ( pq.getMaxScan() && cursor->nscanned() > pq.getMaxScan() ) {
break;
}
- if ( !cursor->currentMatches() ) {
+ // right place for this?
+ BSONObj js = cursor->current();
+ assert( js.isValid() );
+
+ if ( !queryResponseBuilder.mayAddMatch() ) {
continue;
}
- DiskLoc currLoc = cursor->currLoc();
// log() << "idx: " << cursor->indexKeyPattern() << " obj: " << cursor->current() << endl;
- BSONObj js = cursor->current();
- assert( js.isValid() );
// This should happen after matching?
// if ( pq.hasOption( QueryOption_OplogReplay ) ) {
@@ -957,8 +1075,7 @@ namespace mongo {
// }
// }
- queryResponseBuilder.mayAddMatch();
- if ( !cursor->supportGetMore() ) {
+ if ( !cursor->supportGetMore() || pq.isExplain() ) {
if ( queryResponseBuilder.enoughTotalResults() ) {
break;
}
@@ -987,6 +1104,7 @@ namespace mongo {
}
ccPointer->originalMessage = m;
ccPointer.release();
+ // undo unlimited timeout
}
long long nReturned = queryResponseBuilder.handoff( result );
@@ -1009,6 +1127,7 @@ namespace mongo {
// regular, not QO bypass query
+ {
BSONObj oldPlan;
if ( explain && ! pq.hasIndexSpecifier() ) {
MultiPlanScanner mps( ns, query, order );
@@ -1055,7 +1174,7 @@ namespace mongo {
bool moreClauses = mps->mayRunMore();
if ( moreClauses ) {
// this MultiCursor will use a dumb NoOp to advance(), so no need to specify mayYield
- shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher( cursor ), dqo ) );
+ shared_ptr< Cursor > multi( new MultiCursor( mps, cursor, dqo.matcher( cursor ), shared_ptr<ExplainPlanInfo>(), dqo ) );
cc = new ClientCursor(queryOptions, multi, ns, jsobj.getOwned());
}
else {
@@ -1099,6 +1218,7 @@ namespace mongo {
}
curop.debug().nreturned = n;
return exhaust;
+ }
}
} // namespace mongo
47 src/mongo/db/queryoptimizer.cpp
View
@@ -673,6 +673,9 @@ namespace mongo {
shared_ptr<QueryOp> MultiPlanScanner::iterateRunner( QueryOp &originalOp, bool retried ) {
if ( !_runner ) {
_runner.reset( new QueryPlanSet::Runner( *_currentQps, originalOp ) );
+ if ( _explainQueryInfo ) {
+ _explainQueryInfo->addClauseInfo( _runner->generateExplainInfo() );
+ }
shared_ptr<QueryOp> op = _runner->init();
if ( op->complete() ) {
return op;
@@ -680,7 +683,7 @@ namespace mongo {
}
shared_ptr<QueryOp> op = _runner->nextNonError();
if ( !op->error() ) {
- return op;
+ return op;
}
if ( !_currentQps->prepareToRetryQuery() ) {
return op;
@@ -696,7 +699,7 @@ namespace mongo {
void MultiPlanScanner::updateCurrentQps( QueryPlanSet *qps ) {
_currentQps.reset( qps );
_runner.reset();
- }
+ }
QueryPlanSet::Runner::Runner( QueryPlanSet &plans, QueryOp &op ) :
_op( op ),
@@ -750,8 +753,16 @@ namespace mongo {
// Initialize ops.
for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
initOp( **i );
- if ( (*i)->complete() )
+ if ( _explainClauseInfo ) {
+ _explainClauseInfo->addPlanInfo( (*i)->generateExplainInfo() );
+ }
+ }
+
+ // See if an op has completed.
+ for( vector<shared_ptr<QueryOp> >::iterator i = _ops.begin(); i != _ops.end(); ++i ) {
+ if ( (*i)->complete() ) {
return *i;
+ }
}
// Put runnable ops in the priority queue.
@@ -1071,6 +1082,7 @@ namespace mongo {
}
const QueryPlan *MultiPlanScanner::singlePlan() const {
+// log() << "_or: " << _or << endl;
if ( _or ||
_currentQps->nPlans() != 1 ||
_currentQps->usingCachedPlan() ) {
@@ -1112,12 +1124,14 @@ namespace mongo {
MultiCursor::MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c,
const shared_ptr<CoveredIndexMatcher> &matcher,
+ const shared_ptr<ExplainPlanInfo> &explainPlanInfo,
const QueryOp &op, long long nscanned ) :
_mps( mps ),
_c( c ),
_matcher( matcher ),
_queryPlan( &op.qp() ),
- _nscanned( nscanned ) {
+ _nscanned( nscanned ),
+ _explainPlanInfo( explainPlanInfo ) {
_mps->clearRunner();
_mps->setRecordedPlanPolicy( QueryPlanSet::UseIfInOrder );
if ( !ok() ) {
@@ -1134,10 +1148,16 @@ namespace mongo {
return ok();
}
+ void MultiCursor::recoverFromYield() {
+ noteYield();
+ Cursor::recoverFromYield();
+ }
+
void MultiCursor::nextClause() {
- if ( _nscanned >= 0 && _c.get() ) {
+ if ( _nscanned >= 0 ) {
_nscanned += _c->nscanned();
}
+ if ( _explainPlanInfo ) _explainPlanInfo->noteDone( *_c );
_matcher->advanceOrClause( _queryPlan->originalFrv() );
shared_ptr<CoveredIndexMatcher> newMatcher
( _matcher->nextClauseMatcher( _queryPlan->indexKey() ) );
@@ -1146,9 +1166,24 @@ namespace mongo {
_queryPlan = nextPlan;
_matcher = newMatcher;
_c = nextPlan->newCursor();
+ if ( _explainPlanInfo ) {
+ _explainPlanInfo.reset( new ExplainPlanInfo() );
+ _explainPlanInfo->notePlan( *_c, _queryPlan->scanAndOrderRequired() );
+ shared_ptr<ExplainClauseInfo> clauseInfo( new ExplainClauseInfo() );
+ clauseInfo->addPlanInfo( _explainPlanInfo );
+ _mps->addClauseInfo( clauseInfo );
+ }
}
- }
+ }
+
+ void MultiCursor::noteIterate( bool match, bool loadedObject ) {
+ if ( _explainPlanInfo ) _explainPlanInfo->noteIterate( match, loadedObject, false, *_c );
+ }
+ void MultiCursor::noteYield() {
+ if ( _explainPlanInfo ) _explainPlanInfo->noteYield();
+ }
+
bool indexWorks( const BSONObj &idxPattern, const BSONObj &sampleKey, int direction, int firstSignificantField ) {
BSONObjIterator p( idxPattern );
BSONObjIterator k( sampleKey );
39 src/mongo/db/queryoptimizer.h
View
@@ -21,6 +21,7 @@
#include "cursor.h"
#include "queryutil.h"
#include "matcher.h"
+#include "explain.h"
#include "../util/net/listen.h"
namespace mongo {
@@ -197,6 +198,11 @@ namespace mongo {
if( ! c ) return _matcher;
return c->matcher() ? c->matcherPtr() : _matcher;
}
+
+ /** @return an ExplainPlanInfo object that will be updated as the query runs. */
+ virtual shared_ptr<ExplainPlanInfo> generateExplainInfo() {
+ return shared_ptr<ExplainPlanInfo>( new ExplainPlanInfo() );
+ }
protected:
/** Call if all results have been found. */
@@ -352,6 +358,12 @@ namespace mongo {
static void nextOp( QueryOp &op );
static bool prepareToYieldOp( QueryOp &op );
static void recoverFromYieldOp( QueryOp &op );
+
+ /** @return an ExplainClauseInfo object that will be updated as the query runs. */
+ shared_ptr<ExplainClauseInfo> generateExplainInfo() {
+ _explainClauseInfo.reset( new ExplainClauseInfo() );
+ return _explainClauseInfo;
+ }
private:
vector<shared_ptr<QueryOp> > _ops;
struct OpHolder {
@@ -363,6 +375,7 @@ namespace mongo {
}
};
our_priority_queue<OpHolder> _queue;
+ shared_ptr<ExplainClauseInfo> _explainClauseInfo;
};
private:
@@ -392,6 +405,7 @@ namespace mongo {
bool _mayYield;
ElapsedTracker _yieldSometimesTracker;
bool _mustAssertOnYieldFailure;
+ shared_ptr<ExplainClauseInfo> _explainClauseInfo;
};
/** Handles $or type queries by generating a QueryPlanSet for each $or clause. */
@@ -440,6 +454,17 @@ namespace mongo {
*/
const QueryPlan *nextClauseBestGuessPlan( const QueryPlan &currentPlan );
+ void addClauseInfo( const shared_ptr<ExplainClauseInfo> &clauseInfo ) {
+ verify( 16068, _explainQueryInfo );
+ _explainQueryInfo->addClauseInfo( clauseInfo );
+ }
+
+ /** @return an ExplainQueryInfo object that will be updated as the query runs. */
+ shared_ptr<ExplainQueryInfo> generateExplainInfo() {
+ _explainQueryInfo.reset( new ExplainQueryInfo() );
+ return _explainQueryInfo;
+ }
+
/** Yield the runner member. */
bool prepareToYield();
@@ -513,6 +538,7 @@ namespace mongo {
bool _tableScanned;
shared_ptr<QueryOp> _baseOp;
shared_ptr<QueryPlanSet::Runner> _runner;
+ shared_ptr<ExplainQueryInfo> _explainQueryInfo;
};
/** Provides a cursor interface for certain limited uses of a MultiPlanScanner. */
@@ -525,7 +551,10 @@ namespace mongo {
* @param nscanned is an optional initial value, if not supplied nscanned()
* will always return -1
*/
- MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c, const shared_ptr<CoveredIndexMatcher> &matcher, const QueryOp &op, long long nscanned = -1 );
+ MultiCursor( auto_ptr<MultiPlanScanner> mps, const shared_ptr<Cursor> &c,
+ const shared_ptr<CoveredIndexMatcher> &matcher,
+ const shared_ptr<ExplainPlanInfo> &explainPlanInfo,
+ const QueryOp &op, long long nscanned = -1 );
virtual bool ok() { return _c->ok(); }
virtual Record* _current() { return _c->_current(); }
@@ -536,6 +565,7 @@ namespace mongo {
virtual DiskLoc refLoc() { return _c->refLoc(); }
virtual void noteLocation() { _c->noteLocation(); }
virtual void checkLocation() { _c->checkLocation(); }
+ virtual void recoverFromYield();
virtual bool supportGetMore() { return true; }
virtual bool supportYields() { return _c->supportYields(); }
virtual BSONObj indexKeyPattern() { return _c->indexKeyPattern(); }
@@ -553,9 +583,13 @@ namespace mongo {
virtual CoveredIndexMatcher* matcher() const { return _matcher.get(); }
virtual bool capped() const { return _c->capped(); }
-
+
/** return -1 if we're a getmore handoff */
virtual long long nscanned() { return _nscanned >= 0 ? _nscanned + _c->nscanned() : _nscanned; }
+
+ void noteIterate( bool match, bool loadedObject );
+
+ void noteYield();
private:
void nextClause();
auto_ptr<MultiPlanScanner> _mps;
@@ -563,6 +597,7 @@ namespace mongo {
shared_ptr<CoveredIndexMatcher> _matcher;
const QueryPlan *_queryPlan;
long long _nscanned;
+ shared_ptr<ExplainPlanInfo> _explainPlanInfo;
};
/** NOTE min, max, and keyPattern will be updated to be consistent with the selected index. */
4 src/mongo/db/queryoptimizercursor.h
View
@@ -80,9 +80,13 @@ namespace mongo {
class QueryOptimizerCursor : public Cursor {
public:
virtual const QueryPlan *queryPlan() const = 0;
+ virtual const Cursor *queryCursor() const = 0;
virtual const QueryPlan *completeQueryPlan() const = 0;
+ virtual const Cursor *completeQueryCursor() const = 0;
virtual const MultiPlanScanner *multiPlanScanner() const = 0;
virtual void abortUnorderedPlans() = 0;
+ virtual void noteIterate( bool match, bool loadedDocument, bool chunkSkip ) = 0;
+ virtual shared_ptr<ExplainQueryInfo> explainQueryInfo() const = 0;
};
} // namespace mongo
79 src/mongo/db/queryoptimizercursorimpl.cpp
View
@@ -22,6 +22,7 @@
#include "pdfile.h"
#include "clientcursor.h"
#include "btree.h"
+#include "explain.h"
namespace mongo {
@@ -93,6 +94,7 @@ namespace mongo {
// dups rather than avoid poisoning the cursor's dup set with unreturned documents. Deduping documents
// matched in this QueryOptimizerCursorOp will run against the takeover cursor.
_matchCounter.setCheckDups( _c->isMultiKey() );
+ // TODO ok if cursor becomes multikey later?
_matchCounter.updateNscanned( _c->nscanned() );
}
@@ -114,6 +116,7 @@ namespace mongo {
}
virtual void recoverFromYield() {
+ if ( _explainPlanInfo ) _explainPlanInfo->noteYield();
if ( _cc && !ClientCursor::recoverFromYield( _yieldData ) ) {
_yieldRecoveryFailed = true;
_c.reset();
@@ -156,9 +159,11 @@ namespace mongo {
if ( !qp().scanAndOrderRequired() && _matchCounter.enoughCumulativeMatchesToChooseAPlan() ) {
setStop();
+ if ( _explainPlanInfo ) _explainPlanInfo->notePicked();
return;
}
if ( !_c || !_c->ok() ) {
+ if ( _explainPlanInfo && _c ) _explainPlanInfo->noteDone( *_c );
setComplete();
return;
}
@@ -171,15 +176,42 @@ namespace mongo {
DiskLoc currLoc() const { return _c ? _c->currLoc() : DiskLoc(); }
BSONObj currKey() const { return _c ? _c->currKey() : BSONObj(); }
bool currentMatches( MatchDetails *details ) {
- bool ret = ( _c && _c->ok() ) ? matcher( _c.get() )->matchesCurrent( _c.get(), details ) : false;
+ if ( !_c || !_c->ok() ) {
+ _matchCounter.setMatch( false );
+ return false;
+ }
+
+ MatchDetails myDetails;
+ bool wantDetails = details || _explainPlanInfo;
+
+ bool match = matcher( _c.get() )->matchesCurrent( _c.get(),
+ wantDetails ? &myDetails : 0 );
// Cache the match, so we can count it in mayAdvance().
- _matchCounter.setMatch( ret );
- return ret;
+ bool newMatch = _matchCounter.setMatch( match );
+
+ if ( _explainPlanInfo ) {
+ bool countableMatch = newMatch && _matchCounter.wouldCountMatch( _c->currLoc() );
+ _explainPlanInfo->noteIterate( countableMatch,
+ countableMatch || myDetails._loadedObject,
+ false, *_c );
+ }
+ if ( details ) *details = myDetails;
+
+ return match;
}
virtual bool mayRecordPlan() const {
return !_yieldRecoveryFailed && complete() && ( !stopRequested() || _matchCounter.enoughMatchesToRecordPlan() );
}
shared_ptr<Cursor> cursor() const { return _c; }
+ virtual shared_ptr<ExplainPlanInfo> generateExplainInfo() {
+ if ( !_c ) {
+ return QueryOp::generateExplainInfo();
+ }
+ _explainPlanInfo.reset( new ExplainPlanInfo() );
+ _explainPlanInfo->notePlan( *_c, qp().scanAndOrderRequired() );
+ return _explainPlanInfo;
+ }
+ shared_ptr<ExplainPlanInfo> explainInfo() const { return _explainPlanInfo; }
private:
void mayAdvance() {
if ( !_c ) {
@@ -239,7 +271,8 @@ namespace mongo {
ClientCursor::YieldData _yieldData;
bool _yieldRecoveryFailed;
const QueryPlanSelectionPolicy &_selectionPolicy;
- const bool &_requireOrder;
+ const bool &_requireOrder; // TODO don't use a ref for this, but signal change explicitly
+ shared_ptr<ExplainPlanInfo> _explainPlanInfo;
};
/**
@@ -258,6 +291,7 @@ namespace mongo {
_completeOp(),
_nscanned() {
_mps->initialOp( _originalOp );
+ _explainQueryInfo = _mps->generateExplainInfo();
shared_ptr<QueryOp> op = _mps->nextOp();
rethrowOnError( op );
if ( !op->complete() ) {
@@ -436,11 +470,20 @@ namespace mongo {
assertOk();
return _currOp ? &_currOp->qp() : 0;
}
-
+
+ virtual const Cursor *queryCursor() const {
+ assertOk();
+ return _currOp ? _currOp->cursor().get() : 0;
+ }
+
virtual const QueryPlan *completeQueryPlan() const {
return _completeOp ? &_completeOp->qp() : 0;
}
+ virtual const Cursor *completeQueryCursor() const {
+ return _completeOp ? _completeOp->cursor().get() : 0;
+ }
+
virtual const MultiPlanScanner *multiPlanScanner() const {
return _mps.get();
}
@@ -449,6 +492,19 @@ namespace mongo {
_requireOrder = true;
}
+ virtual void noteIterate( bool match, bool loadedDocument, bool chunkSkip ) {
+ if ( _explainQueryInfo ) {
+ _explainQueryInfo->noteIterate( match, loadedDocument, chunkSkip );
+ }
+ if ( _takeover ) {
+ _takeover->noteIterate( match, loadedDocument );
+ }
+ }
+
+ virtual shared_ptr<ExplainQueryInfo> explainQueryInfo() const {
+ return _explainQueryInfo;
+ }
+
private:
/**
* Advances the QueryPlanSet::Runner.
@@ -479,21 +535,22 @@ namespace mongo {
_currOp = qocop;
}
else if ( op->stopRequested() ) {
- log() << "stop requested" << endl;
+// log() << "stop requested" << endl;
if ( qocop->cursor() ) {
// Ensure that prepareToTouchEarlierIterate() may be called safely when a BasicCursor takes over.
if ( !prevLoc.isNull() && prevLoc == qocop->currLoc() ) {
qocop->cursor()->advance();
}
- // Clear the Runner and any unnecessary QueryOps and their ClientCursors.
_takeover.reset( new MultiCursor( _mps,
qocop->cursor(),
op->matcher( qocop->cursor() ),
+ qocop->explainInfo(),
*op,
_nscanned - qocop->cursor()->nscanned() ) );
}
}
else {
+ // TODO not set if takeover
_completeOp = qocop;
}
@@ -526,11 +583,12 @@ namespace mongo {
shared_ptr<QueryOptimizerCursorOp> _originalOp;
QueryOptimizerCursorOp *_currOp;
QueryOptimizerCursorOp *_completeOp;
- shared_ptr<Cursor> _takeover;
+ shared_ptr<MultiCursor> _takeover;
long long _nscanned;
// Using a SmallDupSet seems a bit hokey, but I've measured a 5% performance improvement
// with ~100 document non multi key scans.
SmallDupSet _dups;
+ shared_ptr<ExplainQueryInfo> _explainQueryInfo;
};
shared_ptr<Cursor> newQueryOptimizerCursor( auto_ptr<MultiPlanScanner> mps, const QueryPlanSelectionPolicy &planPolicy, bool requireOrder ) {
@@ -606,7 +664,7 @@ namespace mongo {
}
}
}
- auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order, &hint, QueryPlanSet::Use, parsedQuery ? parsedQuery->getMin() : BSONObj(), parsedQuery ? parsedQuery->getMax() : BSONObj() ) ); // mayYield == false
+ auto_ptr<MultiPlanScanner> mps( new MultiPlanScanner( ns, query, order, &hint, ( parsedQuery && parsedQuery->isExplain() ) ? QueryPlanSet::Ignore : QueryPlanSet::Use, parsedQuery ? parsedQuery->getMin() : BSONObj(), parsedQuery ? parsedQuery->getMax() : BSONObj() ) ); // mayYield == false
const QueryPlan *singlePlan = mps->singlePlan();
bool requireOrder = ( parsedQuery == 0 );
if ( singlePlan && !singlePlan->scanAndOrderRequired() ) {
@@ -624,6 +682,9 @@ namespace mongo {
return single;
}
}
+ if ( parsedQuery && parsedQuery->isExplain() ) {
+ mps->generateExplainInfo();
+ }
bool requireOrder = ( parsedQuery == 0 );
return newQueryOptimizerCursor( mps, planPolicy, requireOrder );
}
16 src/mongo/db/queryoptimizercursorimpl.h
View
@@ -46,7 +46,12 @@ namespace mongo {
_match = Unknown;
_counted = false;
}
- void setMatch( bool match ) { _match = match ? True : False; }
+ /** @return true if the match was not previously recorded. */
+ bool setMatch( bool match ) {
+ MatchState oldMatch = _match;
+ _match = match ? True : False;
+ return _match == True && oldMatch != True;
+ }
bool knowMatch() const { return _match != Unknown; }
void countMatch( const DiskLoc &loc ) {
if ( !_counted && _match == True && !getsetdup( loc ) ) {
@@ -55,6 +60,9 @@ namespace mongo {
_counted = true;
}
}
+ bool wouldCountMatch( const DiskLoc &loc ) const {
+ return !_counted && _match == True && !getdup( loc );
+ }
bool enoughCumulativeMatchesToChooseAPlan() const {
// This is equivalent to the default condition for switching from
@@ -85,6 +93,12 @@ namespace mongo {
pair<set<DiskLoc>::iterator, bool> p = _dups.insert( loc );
return !p.second;
}
+ bool getdup( const DiskLoc &loc ) const {
+ if ( !_checkDups ) {
+ return false;
+ }
+ return _dups.find( loc ) != _dups.end();
+ }
long long &_aggregateNscanned;
long long _nscanned;
int _cumulativeCount;
2  src/mongo/db/queryutil.h
View
@@ -324,7 +324,7 @@ namespace mongo {
string _special;
bool _singleKey;
};
-
+
/**
* A BoundList contains intervals specified by inclusive start
* and end bounds. The intervals should be nonoverlapping and occur in
1  src/mongo/db/scanandorder.h
View
@@ -85,6 +85,7 @@ namespace mongo {
}
int size() const { return _best.size(); }
+ int nout() const { return (int)_best.size() > _startFrom ? _best.size() - _startFrom : 0; }
/**
* @throw ScanAndOrderMemoryLimitExceededAssertionCode if adding would grow memory usage
724 src/mongo/dbtests/queryoptimizercursortests.cpp
View
@@ -170,6 +170,18 @@ namespace QueryOptimizerCursorTests {
}
}
};
+
+ class DurationTimerStop {
+ public:
+ void run() {
+ DurationTimer t;
+ while( t.duration() == 0 );
+ ASSERT( t.duration() > 0 );
+ t.stop();
+ ASSERT( t.duration() > 0 );
+ ASSERT( t.duration() > 0 );
+ }
+ };
class Base {
public:
@@ -2634,122 +2646,614 @@ namespace QueryOptimizerCursorTests {
} // namespace GetCursor
+ namespace Explain {
+
+ class ClearRecordedIndex : public QueryOptimizerCursorTests::Base {
+ public:
+ void run() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ BSONObj query = BSON( "a" << 1 << "b" << 1 );
+ shared_ptr<Cursor> c =
+ NamespaceDetailsTransient::getCursor( ns(), query );
+ while( c->advance() );
+ ParsedQuery parsedQuery( ns(), 0, 0, 0,
+ BSON( "$query" << query << "$explain" << true ),
+ BSONObj() );
+ c = NamespaceDetailsTransient::getCursor( ns(), query, BSONObj(), false, 0,
+ &parsedQuery );
+ set<BSONObj> indexKeys;
+ while( c->ok() ) {
+ indexKeys.insert( c->indexKeyPattern() );
+ c->advance();
+ }
+ ASSERT( indexKeys.size() > 1 );
+ }
+ };
+
+ class Base : public QueryOptimizerCursorTests::Base {
+ public:
+ virtual ~Base() {}
+ void run() {
+ setupCollection();
+
+ dblock lk;
+ Client::Context ctx( ns() );
+ ParsedQuery parsedQuery( ns(), 0, 0, 0,
+ BSON( "$query" << query() << "$explain" << true ),
+ BSONObj() );
+ _cursor =
+ dynamic_pointer_cast<QueryOptimizerCursor>
+ ( NamespaceDetailsTransient::getCursor( ns(), query(), BSONObj(), false, 0,
+ &parsedQuery ) );
+ ASSERT( _cursor );
+
+ handleCursor();
+
+ _explainInfo = _cursor->explainQueryInfo();
+ _explain = _explainInfo->bson();
+
+ checkExplain();
+ }
+ protected:
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 2 << "b" << 1 ) );
+ }
+ virtual BSONObj query() const { return BSON( "a" << 1 << "b" << 1 ); }
+ virtual void handleCursor() {
+ }
+ virtual void checkExplain() {
+ }
+ shared_ptr<QueryOptimizerCursor> _cursor;
+ shared_ptr<ExplainQueryInfo> _explainInfo;
+ BSONObj _explain;
+ };
+
+ class Initial : public Base {
+ virtual void checkExplain() {
+ ASSERT( !_explain[ "cursor" ].eoo() );
+ ASSERT( !_explain[ "isMultiKey" ].Bool() );
+ ASSERT_EQUALS( 0, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 0, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, _explain[ "nscanned" ].Long() );
+ ASSERT( !_explain[ "scanAndOrder" ].Bool() );
+ ASSERT( !_explain[ "indexOnly" ].Bool() );
+ ASSERT_EQUALS( 0, _explain[ "nYields" ].Int() );
+ ASSERT_EQUALS( 0, _explain[ "nChunkSkips" ].Long() );
+ ASSERT( !_explain[ "millis" ].eoo() );
+ ASSERT( !_explain[ "indexBounds" ].eoo() );
+ ASSERT_EQUALS( 2U, _explain[ "allPlans" ].Array().size() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor a_1", plan1[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 0, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan1[ "nscanned" ].Long() );
+ ASSERT_EQUALS( fromjson( "{a:[[1,1]]}" ), plan1[ "indexBounds" ].Obj() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( "BasicCursor", plan2[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 0, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscanned" ].Long() );
+ ASSERT_EQUALS( BSONObj(), plan2[ "indexBounds" ].Obj() );
+ }
+ };
+
+ class Empty : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ }
+ virtual void handleCursor() {
+ ASSERT( !_cursor->ok() );
+ }
+ virtual void checkExplain() {
+ ASSERT( !_explain[ "cursor" ].eoo() );
+ ASSERT( !_explain[ "isMultiKey" ].Bool() );
+ ASSERT_EQUALS( 0, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 0, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 0, _explain[ "nscanned" ].Long() );
+ ASSERT( !_explain[ "scanAndOrder" ].Bool() );
+ ASSERT( !_explain[ "indexOnly" ].Bool() );
+ ASSERT_EQUALS( 0, _explain[ "nYields" ].Int() );
+ ASSERT_EQUALS( 0, _explain[ "nChunkSkips" ].Long() );
+ ASSERT( !_explain[ "millis" ].eoo() );
+ ASSERT( !_explain[ "indexBounds" ].eoo() );
+ ASSERT_EQUALS( 2U, _explain[ "allPlans" ].Array().size() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor a_1", plan1[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 0, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 0, plan1[ "nscanned" ].Long() );
+ ASSERT_EQUALS( fromjson( "{a:[[1,1]]}" ), plan1[ "indexBounds" ].Obj() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( "BasicCursor", plan2[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 0, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 0, plan2[ "nscanned" ].Long() );
+ ASSERT_EQUALS( BSONObj(), plan2[ "indexBounds" ].Obj() );
+ }
+ };
+
+ class SimpleCount : public Base {
+ virtual void handleCursor() {
+ while( _cursor->ok() ) {
+ MatchDetails matchDetails;
+ if ( _cursor->currentMatches( &matchDetails ) &&
+ !_cursor->getsetdup( _cursor->currLoc() ) ) {
+ _cursor->noteIterate( true, true, false );
+ }
+ else {
+ _cursor->noteIterate( false, matchDetails._loadedObject, false );
+ }
+ _cursor->advance();
+ }
+ }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( "BtreeCursor a_1", _explain[ "cursor" ].String() );
+ ASSERT_EQUALS( 1, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 2, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, _explain[ "nscanned" ].Long() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 1, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 1, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan1[ "nscanned" ].Long() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 1, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscanned" ].Long() );
+ }
+ };
+
+ class IterateOnly : public Base {
+ virtual BSONObj query() const { return BSON( "a" << GT << 0 << "b" << 1 ); }
+ virtual void handleCursor() {
+ while( _cursor->ok() ) {
+ _cursor->advance();
+ }
+ }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( "BtreeCursor a_1", _explain[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, _explain[ "n" ].Long() ); // needs to be set with noteIterate()
+ ASSERT_EQUALS( 0, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 3, _explain[ "nscanned" ].Long() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 2, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 2, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, plan1[ "nscanned" ].Long() );
+
+ // Not fully incremented without checking for matches.
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 1, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscanned" ].Long() );
+ }
+ };
+
+ class ExtraMatchChecks : public Base {
+ virtual BSONObj query() const { return BSON( "a" << GT << 0 << "b" << 1 ); }
+ virtual void handleCursor() {
+ while( _cursor->ok() ) {
+ _cursor->currentMatches();
+ _cursor->currentMatches();
+ _cursor->currentMatches();
+ _cursor->advance();
+ }
+ }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( "BtreeCursor a_1", _explain[ "cursor" ].String() );
+ ASSERT_EQUALS( 0, _explain[ "n" ].Long() ); // needs to be set with noteIterate()
+ ASSERT_EQUALS( 0, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 4, _explain[ "nscanned" ].Long() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 2, plan1[ "n" ].Long() );
+ // nscannedObjects are not deduped.
+ ASSERT_EQUALS( 6, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, plan1[ "nscanned" ].Long() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 2, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 6, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, plan2[ "nscanned" ].Long() );
+ }
+ };
+
+ class PartialIteration : public Base {
+ virtual void handleCursor() {
+ _cursor->currentMatches();
+ _cursor->advance();
+ _cursor->noteIterate( true, true, false );
+ }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( "BtreeCursor a_1", _explain[ "cursor" ].String() );
+ ASSERT_EQUALS( 1, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 1, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 2, _explain[ "nscanned" ].Long() );
+
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 1, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 1, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan1[ "nscanned" ].Long() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 0, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 0, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 1, plan2[ "nscanned" ].Long() );
+ }
+ };
+
+ class Multikey : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) ) );
+ }
+ virtual void handleCursor() {
+ while( _cursor->advance() );
+ }
+ virtual void checkExplain() {
+ ASSERT( _explain[ "isMultiKey" ].Bool() );
+ }
+ };
+
+ // Same multikey setup as Multikey, but with no handleCursor() override:
+ // isMultiKey should be reported from the initial explain state alone
+ // (Base's default cursor handling — defined above this hunk — applies).
+ class MultikeyInitial : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) ) );
+ }
+ virtual void checkExplain() {
+ ASSERT( _explain[ "isMultiKey" ].Bool() );
+ }
+ };
+
+ // The index starts single-key; an array document inserted while the
+ // cursor is yielded turns it multikey, and a fresh explain snapshot
+ // taken after recovery must reflect that.
+ class BecomesMultikey : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.insert( ns(), BSON( "a" << 1 ) );
+ }
+ virtual void checkExplain() {
+ // Scalar-only index so far: not multikey.
+ ASSERT( !_explain[ "isMultiKey" ].Bool() );
+
+ // Insert an array value for "a" while yielded, releasing the db
+ // lock via dbtemprelease for the duration of the insert.
+ _cursor->prepareToYield();
+ {
+ dbtemprelease t;
+ _cli.insert( ns(), BSON( "a" << BSON_ARRAY( 1 << 2 ) ) );
+ }
+ _cursor->recoverFromYield();
+ _cursor->currentMatches();
+ // NOTE(review): _explainInfo is a Base member (declared above this
+ // hunk); its bson() presumably regenerates the explain output, now
+ // showing the index as multikey — confirm against Base.
+ ASSERT( _explainInfo->bson()[ "isMultiKey" ].Bool() );
+ }
+ };
+
+ // Yield once per cursor position and verify nYields is counted in the
+ // explain output; also checks that dup checking keeps the summary "n" at
+ // the number of distinct documents while nscanned counts every position
+ // visited across both candidate plans.
+ class CountAndYield : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ for( int i = 0; i < 5; ++i ) {
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ }
+ }
+ virtual void handleCursor() {
+ _nYields = 0;
+ while( _cursor->ok() ) {
+ // Yield/recover at every position, counting each yield so the
+ // expected nYields can be asserted later.
+ _cursor->prepareToYield();
+ ++_nYields;
+ _cursor->recoverFromYield();
+ MatchDetails matchDetails;
+ if ( _cursor->currentMatches( &matchDetails ) &&
+ !_cursor->getsetdup( _cursor->currLoc() ) ) {
+ // First time this document matched: note a counted iterate.
+ _cursor->noteIterate( true, true, false );
+ }
+ else {
+ // Non-match or duplicate: still record whether the object
+ // was loaded, without incrementing n.
+ _cursor->noteIterate( false, matchDetails._loadedObject, false );
+ }
+ _cursor->advance();
+ }
+ }
+ virtual void checkExplain() {
+ ASSERT( _nYields > 0 );
+ ASSERT_EQUALS( _nYields, _explain[ "nYields" ].Int() );
+
+ // 5 distinct docs, each scanned once per plan -> 10 nscanned.
+ ASSERT_EQUALS( "BtreeCursor a_1", _explain[ "cursor" ].String() );
+ ASSERT_EQUALS( 5, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 10, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 10, _explain[ "nscanned" ].Long() );
+
+ // Each candidate plan accounts for its own 5 scans.
+ BSONObj plan1 = _explain[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 5, plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 5, plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 5, plan1[ "nscanned" ].Long() );
+
+ BSONObj plan2 = _explain[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 5, plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 5, plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 5, plan2[ "nscanned" ].Long() );
+ }
+ protected:
+ // Number of prepareToYield()/recoverFromYield() cycles performed.
+ int _nYields;
+ };
+
+ class MultipleClauses : public CountAndYield {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ _cli.ensureIndex( ns(), BSON( "b" << 1 ) );
+ for( int i = 0; i < 4; ++i ) {
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ }
+ _cli.insert( ns(), BSON( "a" << 0 << "b" << 1 ) );
+ }
+ virtual BSONObj query() const { return fromjson( "{$or:[{a:1,c:null},{b:1,c:null}]}"); }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( 18, _nYields );
+
+ ASSERT_EQUALS( 5, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 18, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 18, _explain[ "nscanned" ].Long() );
+
+ BSONObj clause1 = _explain[ "clauses" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor a_1", clause1[ "cursor" ].String() );
+ ASSERT_EQUALS( 4, clause1[ "n" ].Long() );
+ ASSERT_EQUALS( 8, clause1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 8, clause1[ "nscanned" ].Long() );
+ ASSERT_EQUALS( 8, clause1[ "nYields" ].Int() );
+
+ BSONObj c1plan1 = clause1[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor a_1", c1plan1[ "cursor" ].String() );
+ ASSERT_EQUALS( 4, c1plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 4, c1plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 4, c1plan1[ "nscanned" ].Long() );
+
+ BSONObj c1plan2 = clause1[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( "BasicCursor", c1plan2[ "cursor" ].String() );
+ ASSERT_EQUALS( 4, c1plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 4, c1plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 4, c1plan2[ "nscanned" ].Long() );
+
+ BSONObj clause2 = _explain[ "clauses" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor b_1", clause2[ "cursor" ].String() );
+ ASSERT_EQUALS( 1, clause2[ "n" ].Long() );
+ ASSERT_EQUALS( 10, clause2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 10, clause2[ "nscanned" ].Long() );
+ ASSERT_EQUALS( 10, clause2[ "nYields" ].Int() );
+
+ BSONObj c2plan1 = clause2[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( "BtreeCursor b_1", c2plan1[ "cursor" ].String() );
+ ASSERT_EQUALS( 1, c2plan1[ "n" ].Long() );
+ ASSERT_EQUALS( 5, c2plan1[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 5, c2plan1[ "nscanned" ].Long() );
+
+ BSONObj c2plan2 = clause2[ "allPlans" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( "BasicCursor", c2plan2[ "cursor" ].String() );
+ ASSERT_EQUALS( 1, c2plan2[ "n" ].Long() );
+ ASSERT_EQUALS( 5, c2plan2[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 5, c2plan2[ "nscanned" ].Long() );
+ }
+ };
+
+ // 20-clause $or where clause i matches i documents: every clause gets
+ // its own explain entry with exact indexBounds and counts, and the top
+ // level sums to 1+2+...+20 = 210.  Reuses CountAndYield's yielding
+ // handleCursor().
+ class MultiCursorTakeover : public CountAndYield {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ // i copies of {a:i} for i = 20..1.
+ for( int i = 20; i >= 1; --i ) {
+ for( int j = 0; j < i; ++j ) {
+ _cli.insert( ns(), BSON( "a" << i ) );
+ }
+ }
+ }
+ virtual BSONObj query() const {
+ // {$or:[{a:20},{a:19},...,{a:1}]} — clause order matches insert order.
+ BSONArrayBuilder bab;
+ for( int i = 20; i >= 1; --i ) {
+ bab << BSON( "a" << i );
+ }
+ return BSON( "$or" << bab.arr() );
+ }
+ virtual void checkExplain() {
+ ASSERT_EQUALS( 20U, _explain[ "clauses" ].Array().size() );
+ for( int i = 20; i >= 1; --i ) {
+ // Clause for {a:i} sits at index 20-i and touches exactly the
+ // i documents in its point bounds [[i,i]].
+ BSONObj clause = _explain[ "clauses" ].Array()[ 20-i ].Obj();
+ ASSERT_EQUALS( "BtreeCursor a_1", clause[ "cursor" ].String() );
+ ASSERT_EQUALS( BSON( "a" << BSON_ARRAY( BSON_ARRAY( i << i ) ) ),
+ clause[ "indexBounds" ].Obj() );
+ ASSERT_EQUALS( i, clause[ "n" ].Long() );
+ ASSERT_EQUALS( i, clause[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( i, clause[ "nscanned" ].Long() );
+ ASSERT_EQUALS( i, clause[ "nYields" ].Int() );
+
+ // Each clause ran a single plan.
+ ASSERT_EQUALS( 1U, clause[ "allPlans" ].Array().size() );
+ BSONObj plan = clause[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( i, plan[ "n" ].Long() );
+ ASSERT_EQUALS( i, plan[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( i, plan[ "nscanned" ].Long() );
+ }
+
+ // Top level: sum over all 20 clauses = 210.
+ ASSERT_EQUALS( 210, _explain[ "n" ].Long() );
+ ASSERT_EQUALS( 210, _explain[ "nscannedObjects" ].Long() );
+ ASSERT_EQUALS( 210, _explain[ "nscanned" ].Long() );
+ }
+ };
+
+ // nChunkSkips reporting across a two-clause $or: chunk skips noted on
+ // every other iterate show up per clause, but are absent from the top
+ // level summary and from per-plan entries.
+ class NChunkSkipsTakeover : public Base {
+ virtual void setupCollection() {
+ _cli.ensureIndex( ns(), BSON( "a" << 1 ) );
+ // 200 docs per clause's match set.
+ for( int i = 0; i < 200; ++i ) {
+ _cli.insert( ns(), BSON( "a" << 1 << "b" << 1 ) );
+ }
+ for( int i = 0; i < 200; ++i ) {
+ _cli.insert( ns(), BSON( "a" << 2 << "b" << 2 ) );
+ }
+ }
+ virtual BSONObj query() const { return fromjson( "{$or:[{a:1,b:1},{a:2,b:2}]}" ); }
+ virtual void handleCursor() {
+ ASSERT_EQUALS( "QueryOptimizerCursor", _cursor->toString() );
+ int i = 0;
+ while( _cursor->ok() ) {
+ if ( _cursor->currentMatches() && !_cursor->getsetdup( _cursor->currLoc() ) ) {
+ // Flag a chunk skip on every other noted iterate, yielding
+ // 100 skips per 200-document clause.
+ _cursor->noteIterate( true, true, i++ %2 == 0 );
+ }
+ _cursor->advance();
+ }
+ }
+ virtual void checkExplain() {
+ // Historically, nChunkSkips has been excluded from the query summary.
+ ASSERT( _explain[ "nChunkSkips" ].eoo() );
+
+ // Per clause: 100 skips reported; per plan: field absent.
+ BSONObj clause0 = _explain[ "clauses" ].Array()[ 0 ].Obj();
+ ASSERT_EQUALS( 100, clause0[ "nChunkSkips" ].Long() );
+ BSONObj plan0 = clause0[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT( plan0[ "nChunkSkips" ].eoo() );
+
+ BSONObj clause1 = _explain[ "clauses" ].Array()[ 1 ].Obj();
+ ASSERT_EQUALS( 100, clause1[ "nChunkSkips" ].Long() );
+ BSONObj plan1 = clause1[ "allPlans" ].Array()[ 0 ].Obj();
+ ASSERT( plan1[ "nChunkSkips" ].eoo() );
+ }
+ };
+
+ // TODO: determine whether a takeover test with a mixed-plan clause is necessary, and add one if so.
+
+ } // namespace Explain
+
class All : public Suite {
public:
All() : Suite( "queryoptimizercursor" ) {}
void setupTests() {
__forceLinkGeoPlugin();
- add<QueryOptimizerCursorTests::CachedMatchCounterCount>();
- add<QueryOptimizerCursorTests::CachedMatchCounterAccumulate>();
- add<QueryOptimizerCursorTests::CachedMatchCounterDedup>();
- add<QueryOptimizerCursorTests::CachedMatchCounterNscanned>();
- add<QueryOptimizerCursorTests::SmallDupSetUpgrade>();
- add<QueryOptimizerCursorTests::CachedMatchCounterCount>();
- add<QueryOptimizerCursorTests::SmallDupSetUpgradeRead>();
- add<QueryOptimizerCursorTests::SmallDupSetUpgradeWrite>();
- add<QueryOptimizerCursorTests::Empty>();
- add<QueryOptimizerCursorTests::Unindexed>();
- add<QueryOptimizerCursorTests::Basic>();
- add<QueryOptimizerCursorTests::NoMatch>();
- add<QueryOptimizerCursorTests::Interleaved>();
- add<QueryOptimizerCursorTests::NotMatch>();
- add<QueryOptimizerCursorTests::StopInterleaving>();
- add<QueryOptimizerCursorTests::TakeoverWithDup>();
- add<QueryOptimizerCursorTests::TakeoverWithNonMatches>();
- add<QueryOptimizerCursorTests::TakeoverWithTakeoverDup>();
- add<QueryOptimizerCursorTests::BasicOr>();
- add<QueryOptimizerCursorTests::OrFirstClauseEmpty>();
- add<QueryOptimizerCursorTests::OrSecondClauseEmpty>();
- add<QueryOptimizerCursorTests::OrMultipleClausesEmpty>();
- add<QueryOptimizerCursorTests::TakeoverCountOr>();
- add<QueryOptimizerCursorTests::TakeoverEndOfOrClause>();
- add<QueryOptimizerCursorTests::TakeoverBeforeEndOfOrClause>();
- add<QueryOptimizerCursorTests::TakeoverAfterEndOfOrClause>();
- add<QueryOptimizerCursorTests::ManualMatchingDeduping>();
- add<QueryOptimizerCursorTests::ManualMatchingUsingCurrKey>();
- add<QueryOptimizerCursorTests::ManualMatchingDedupingTakeover>();
- add<QueryOptimizerCursorTests::Singlekey>();
- add<QueryOptimizerCursorTests::Multikey>();
- add<QueryOptimizerCursorTests::AddOtherPlans>();
- add<QueryOptimizerCursorTests::AddOtherPlansDelete>();
- add<QueryOptimizerCursorTests::AddOtherPlansContinuousDelete>();
- add<QueryOptimizerCursorTests::OrRangeElimination>();
- add<QueryOptimizerCursorTests::OrDedup>();
- add<QueryOptimizerCursorTests::EarlyDups>();
- add<QueryOptimizerCursorTests::OrPopInTakeover>();
- add<QueryOptimizerCursorTests::OrCollectionScanAbort>();
- add<QueryOptimizerCursorTests::YieldNoOp>();
- add<QueryOptimizerCursorTests::YieldDelete>();
- add<QueryOptimizerCursorTests::YieldDeleteContinue>();
- add<QueryOptimizerCursorTests::YieldDeleteContinueFurther>();
- add<QueryOptimizerCursorTests::YieldUpdate>();
- add<QueryOptimizerCursorTests::YieldDrop>();
- add<QueryOptimizerCursorTests::YieldDropOr>();
- add<QueryOptimizerCursorTests::YieldRemoveOr>();
- add<QueryOptimizerCursorTests::YieldCappedOverwrite>();
- add<QueryOptimizerCursorTests::YieldDropIndex>();
- add<QueryOptimizerCursorTests::YieldMultiplePlansNoOp>();
- add<QueryOptimizerCursorTests::YieldMultiplePlansAdvanceNoOp>();
- add<QueryOptimizerCursorTests::YieldMultiplePlansDelete>();
- add<QueryOptimizerCursorTests::YieldMultiplePlansDeleteOr>();