Skip to content

Commit

Permalink
SERVER-16580 Remove deprecated system collection references in JS tests
Browse files Browse the repository at this point in the history
  • Loading branch information
visualzhou committed Jan 9, 2015
1 parent 81109be commit f18c1a1
Show file tree
Hide file tree
Showing 12 changed files with 59 additions and 42 deletions.
20 changes: 16 additions & 4 deletions jstests/aggregation/bugs/server3253.js
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,10 @@ function collectionExists(coll) {
}

function getOutputIndexes() {
return db.system.indexes.find({ns: output.getFullName()}).sort({"key":1}).toArray();
return output.getIndexes().sort(function(a, b) {
if (a.name < b.name) { return -1; }
else { return 1; }
});
}

function test(input, pipeline, expected) {
Expand All @@ -26,10 +29,19 @@ function test(input, pipeline, expected) {

assert.eq(cursor.itcount(), 0); // empty cursor returned
assert.eq(output.find().toArray(), expected); // correct results
assert.eq(getOutputIndexes(), indexes); // number of indexes maintained
var outputIndexes = getOutputIndexes();
assert.eq(outputIndexes.length, indexes.length); // number of indexes maintained
for (var i = 0; i < outputIndexes.length; i++) {
assert.docEq(outputIndexes[i], indexes[i]);
}

assert(collectionExists(output));
}

function listCollections(name) {
var collectionInfosCursor = db.runCommand("listCollections", {filter: { name: name}});
return new DBCommandCursor(db.getMongo(), collectionInfosCursor).toArray();
}

input.insert({_id:1});
input.insert({_id:2});
Expand All @@ -39,7 +51,7 @@ input.insert({_id:3});
output.insert({_id:1});

// ensure there are no tmp agg_out collections before we begin
assert.eq([], db.system.namespaces.find({name: /tmp\.agg_out/}).toArray());
assert.eq([], listCollections(/tmp\.agg_out/));

// basic test
test(input,
Expand Down Expand Up @@ -90,4 +102,4 @@ assertErrorCode(input, {$out: outputInSystem.getName()}, 17385);
assert(!collectionExists(outputInSystem));

// shouldn't leave temp collections lying around
assert.eq([], db.system.namespaces.find({name: /tmp\.agg_out/}).toArray());
assert.eq([], listCollections(/tmp\.agg_out/));
7 changes: 4 additions & 3 deletions jstests/auth/indexSystemUsers.js
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,12 @@ assert.writeError(adminDB.exploit.system.indexes.insert({ ns: "admin.system.user
unique: true,
dropDups: true }));
// Make sure that no indexes were built.
assert.eq(null,
adminDB.system.namespaces.findOne(
var collectionInfosCursor = adminDB.runCommand("listCollections", { filter:
{$and : [{name : /^admin\.system\.users\.\$/},
{name : {$ne : "admin.system.users.$_id_"}},
{name : {$ne : "admin.system.users.$user_1_db_1"}} ]}));
{name : {$ne : "admin.system.users.$user_1_db_1"}} ]}});

assert.eq([], new DBCommandCursor(adminDB.getMongo(), collectionInfosCursor).toArray());
adminDB.logout();

adminDB.auth('admin','x');
Expand Down
6 changes: 3 additions & 3 deletions jstests/core/apitest_db.js
Original file line number Diff line number Diff line change
Expand Up @@ -27,16 +27,16 @@ dd( "c" );
/*
* test createCollection
*/

db.getCollection( "test" ).drop();
db.getCollectionNames().forEach( function(x) { assert(x != "test"); });

dd( "d" );

db.createCollection("test");
var found = false;
db.getCollectionNames().forEach( function(x) { if (x == "test") found = true; });
assert(found, "found test.test in system.namespaces");
assert(found, "found test.test in collection infos");

// storageEngine in collection options must:
// - be a document
Expand Down
2 changes: 1 addition & 1 deletion jstests/core/index4.js
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,7 @@ t.save( { name : "clusterstock" ,

// this should fail, not allowed -- we confirm that.
t.ensureIndex( { instances : { pool : 1 } } );
assert.eq( 0, db.system.indexes.find( {ns:"test.index4",name:{$ne:"_id_"}} ).count(), "no indexes should be here yet");
assert.eq( 1, t.getIndexes().length, "no indexes other than _id should be here yet");

t.ensureIndex( { "instances.pool" : 1 } );

Expand Down
19 changes: 12 additions & 7 deletions jstests/multiVersion/libs/verify_collection_data.js
Original file line number Diff line number Diff line change
Expand Up @@ -44,9 +44,8 @@ createCollectionWithData = function (db, collectionName, dataGenerator) {
numIndexes++;
}

// Make sure we actually added all the indexes we thing we added. +1 for the _id index.
assert.eq(db.system.indexes.find({"ns" : db.toString() + "." + collection.getName()}).count(),
numIndexes + 1);
// Make sure we actually added all the indexes we think we added. +1 for the _id index.
assert.eq(collection.getIndexes().length, numIndexes + 1);

var numInserted = 0;
while (dataGenerator.data.hasNext()) {
Expand Down Expand Up @@ -78,7 +77,10 @@ function CollectionDataValidator() {
this.recordCollectionData = function (collection) {

// Save the indexes for this collection for later comparison
indexData = collection.getDB().system.indexes.find({"ns" : collection.getFullName()}).sort({"name":1}).toArray();
indexData = collection.getIndexes().sort(function(a,b) {
if (a.name > b.name) return 1;
else return -1;
});

// Save the data for this collection for later comparison
collectionData = collection.find().sort({"_id":1}).toArray();
Expand Down Expand Up @@ -137,7 +139,10 @@ function CollectionDataValidator() {
assert.docEq(collectionStats, newCollectionStats, "collection metadata not equal");

// Get the indexes for this collection
var newIndexData = collection.getDB().system.indexes.find({"ns" : collection.getFullName()}).sort({"name":1}).toArray();
var newIndexData = collection.getIndexes().sort(function(a,b) {
if (a.name > b.name) return 1;
else return -1;
});
for (var i = 0; i < newIndexData.length; i++) {
assert.docEq(indexData[i], newIndexData[i], "indexes not equal");
}
Expand All @@ -164,7 +169,7 @@ function collectionDataValidatorTests() {
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
db.test.dropIndex(db.system.indexes.findOne({"key.a": { "$exists" : true } }).key);
db.test.dropIndex(db.test.getIndexKeys().filter(function(key) { return key.a != null })[0]);
assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");


Expand All @@ -186,7 +191,7 @@ function collectionDataValidatorTests() {
collection = createCollectionWithData(db, "test", myGenerator);
myValidator = new CollectionDataValidator();
myValidator.recordCollectionData(collection);
db.test.dropIndex(db.system.indexes.findOne({"key.a": { "$exists" : true } }).key);
db.test.dropIndex(db.test.getIndexKeys().filter(function(key) { return key.a != null })[0]);
assert.throws(myValidator.validateCollectionData, [collection], "Validation function should have thrown since we modified the collection");


Expand Down
16 changes: 8 additions & 8 deletions jstests/noPassthroughWithMongod/indexbg_drop.js
Original file line number Diff line number Diff line change
Expand Up @@ -56,14 +56,14 @@ jsTest.log("Starting background indexing for test of: " + tojson(dc));
masterDB.getCollection(collection).ensureIndex({b:1});

masterDB.getCollection(collection).ensureIndex( {i:1}, {background:true} );
assert.eq(3, masterDB.system.indexes.count( {ns:dbname + "." + collection}, {background:true} ) );
assert.eq(3, masterDB.getCollection(collection).getIndexes().length );

// Wait for the secondary to get the index entry
assert.soon(
function() { return 3 == secondDB.system.indexes.count( {ns:dbname + "." + collection} ); },
assert.soon(
function() { return 3 == secondDB.getCollection(collection).getIndexes().length; },
"index not created on secondary (prior to drop)", 240000 );

jsTest.log("Index created and system.indexes entry exists on secondary");
jsTest.log("Index created and index entry exists on secondary");


// make sure the index build has started on secondary
Expand All @@ -90,17 +90,17 @@ jsTest.log("Waiting on replication");
replTest.awaitReplication();

print("index list on master:");
masterDB.system.indexes.find().forEach(printjson);
masterDB.getCollection(collection).getIndexes().forEach(printjson);

// we need to assert.soon because the drop only marks the index for removal
// the removal itself is asynchronous and may take another moment before it happens
var i = 0;
assert.soon( function() {
print("index list on secondary (run " + i + "):");
secondDB.system.indexes.find().forEach(printjson);
secondDB.getCollection(collection).getIndexes().forEach(printjson);

i++;
return 1 === secondDB.system.indexes.count();
return 1 === secondDB.getCollection(collection).getIndexes().length;
}, "secondary did not drop index"
);

Expand Down
2 changes: 1 addition & 1 deletion jstests/noPassthroughWithMongod/indexbg_interrupts.js
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ for (var idx = 0; idx < dropAction.length; idx++) {
return 2 == secondDB.getCollection(collection).getIndexes().length;
}, "index not created on secondary", 240000 );

jsTest.log("Index created and system.indexes entry exists on secondary");
jsTest.log("Index created and index info exists on secondary");

jsTest.log("running command " + JSON.stringify(dc));
assert.commandWorked(masterDB.runCommand( dc ));
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ if (0) {

jsTest.log("Starting background indexing");
masterDB.jstests_bgsec.ensureIndex( {i:1}, {background:true} );
assert.eq(2, masterDB.system.indexes.count( {ns:"bgIndexNoRetrySec.jstests_bgsec"} ) );
assert.eq(2, masterDB.jstests_bgsec.getIndexes().length);

// Do one more write, so that later on, the secondary doesn't restart with the index build
// as the last op in the oplog -- it will redo this op otherwise.
Expand All @@ -96,21 +96,21 @@ if (0) {
// Make sure secondary comes back
assert.soon( function() {
try {
secondDB.system.namespaces.count(); // trigger a reconnect if needed
return true;
secondDB.isMaster(); // trigger a reconnect if needed
return true;
} catch (e) {
return false;
}
} , "secondary didn't restart", 60000, 1000);

assert_trueTimeout(
function() {
return 2 == secondDB.system.indexes.count( {ns:"bgIndexNoRetrySec.jstests_bgsec"} );
assert_trueTimeout(
function() {
return 2 == secondDB.jstests_bgsec.getIndexes().length;
},
"index created on secondary after restart with --noIndexBuildRetry",
30000, 200);

assert.neq(2, secondDB.system.indexes.count( {ns:"bgIndexNoRetrySec.jstests_bgsec"} ));
assert.neq(2, secondDB.jstests_bgsec.getIndexes().length );
replTest.stopSet();
}());

Expand Down
4 changes: 2 additions & 2 deletions jstests/replsets/index_restart_secondary.js
Original file line number Diff line number Diff line change
Expand Up @@ -59,8 +59,8 @@ if (conns[0].getDB('test').serverBuildInfo().bits !== 32) {
// Make sure secondary comes back
assert.soon( function() {
try {
secondDB.system.namespaces.count(); // trigger a reconnect if needed
return true;
secondDB.isMaster(); // trigger a reconnect if needed
return true;
} catch (e) {
return false;
}
Expand Down
8 changes: 4 additions & 4 deletions jstests/sharding/features1.js
Original file line number Diff line number Diff line change
Expand Up @@ -60,17 +60,17 @@ assert.eq( 5 , b.foo.getIndexKeys().length , "c index 3" );

db.foo2.ensureIndex( { a : 1 } );
s.sync();
printjson( db.system.indexes.find( { ns : "test.foo2" } ).toArray() );
printjson( db.foo2.getIndexes() );
assert( s.admin.runCommand( { shardcollection : "test.foo2" , key : { num : 1 } } ).ok , "shard with index" );

db.foo3.ensureIndex( { a : 1 } , true );
s.sync();
printjson( db.system.indexes.find( { ns : "test.foo3" } ).toArray() );
printjson( db.foo3.getIndexes() );
assert( ! s.admin.runCommand( { shardcollection : "test.foo3" , key : { num : 1 } } ).ok , "shard with unique index" );

db.foo7.ensureIndex( { num : 1 , a : 1 } , true );
s.sync();
printjson( db.system.indexes.find( { ns : "test.foo7" } ).toArray() );
printjson( db.foo7.getIndexes() );
assert( s.admin.runCommand( { shardcollection : "test.foo7" , key : { num : 1 } } ).ok , "shard with ok unique index" );


Expand Down Expand Up @@ -153,7 +153,7 @@ db.foo6.save( { a : 3 } );
db.foo6.save( { a : 3 } );
db.foo6.ensureIndex( { a : 1 } );
s.sync();
printjson( db.system.indexes.find( { ns : "test.foo6" } ).toArray() );
printjson( db.foo6.getIndexes() );

assert.eq( 2 , db.foo6.group( { key : { a : 1 } , initial : { count : 0 } ,
reduce : function(z,prev){ prev.count++; } } ).length );
Expand Down
2 changes: 1 addition & 1 deletion jstests/sharding/features2.js
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ db.foo2.save( { _id : new ObjectId() } );

assert.eq( 1 , s.onNumShards( "foo2" ) , "F1" );

printjson( db.system.indexes.find( { ns : "test.foo2" } ).toArray() );
printjson( db.foo2.getIndexes() );
s.adminCommand( { shardcollection : "test.foo2" , key : { _id : 1 } } );

assert.eq( 3 , db.foo2.count() , "F2" )
Expand Down
1 change: 0 additions & 1 deletion src/mongo/shell/replsettest.js
Original file line number Diff line number Diff line change
Expand Up @@ -1145,7 +1145,6 @@ ReplSetTest.prototype.bridge = function( opts ) {

if (!config) {
print("ReplSetTest bridge couldn't find config for "+this.nodes[i]);
printjson(this.nodes[i].getDB("local").system.namespaces.find().toArray());
assert(false);
}

Expand Down

0 comments on commit f18c1a1

Please sign in to comment.