SERVER-18498 New replica set configurations have protocolVersion=1 by default

This re-adds Siyuan's work from commits 19d2885 and 362aac3.
matt dannenberg committed Oct 1, 2015
1 parent 1cd101f · commit d789bca
Showing 39 changed files with 275 additions and 210 deletions.
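
The pattern repeated across these test changes, sketched below for orientation (this snippet is not part of the diff; the set name and layout are illustrative): now that a bare initiate() produces a protocolVersion=1 configuration, a test that still depends on protocol-version-0 behavior fetches the config, pins the version, and passes it to initiate() explicitly.

var rst = new ReplSetTest({name: 'pv_default_example', nodes: 2});  // hypothetical test set
rst.startSet();

var config = rst.getReplSetConfig();
// Omit the next line to accept the new PV1 default; the tests below pin it to 0
// wherever the old election protocol is still required.
config.protocolVersion = 0;
rst.initiate(config);

// Under PV1, member optimes reported by replSetGetStatus become documents of the
// form {ts: Timestamp, t: term} rather than bare Timestamps, which is why several
// assertions below now read optime.ts instead of optime.
printjson(rst.getPrimary().adminCommand({replSetGetStatus: 1}).members[0].optime);

rst.stopSet();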
12 changes: 6 additions & 6 deletions jstests/aggregation/testSlave.js
@@ -4,15 +4,15 @@ var nodes = replTest.startSet();
replTest.initiate();
replTest.awaitReplication();

var mast = nodes[0].getDB('test');
var slav = nodes[1].getDB('test');
var primary = replTest.getPrimary().getDB('test');
var secondary = replTest.getSecondary().getDB('test');

var options = { writeConcern: { w: 2 }};
mast.agg.insert({}, options);
mast.agg.insert({}, options);
mast.agg.insert({}, options);
primary.agg.insert({}, options);
primary.agg.insert({}, options);
primary.agg.insert({}, options);

var res = slav.agg.aggregate({$group: {_id: null, count: {$sum: 1}}});
var res = secondary.agg.aggregate({$group: {_id: null, count: {$sum: 1}}});
assert.eq(res.toArray(), [{_id:null, count: 3}]);

replTest.stopSet();
7 changes: 2 additions & 5 deletions jstests/gle/create_index_gle.js
@@ -1,3 +1,4 @@
load('jstests/replsets/rslib.js');
var st = new ShardingTest({ shards: { rs0: { nodes: 2, oplogSize: 10, verbose: 1 }}});
var replTest = st.rs0;

@@ -9,11 +10,7 @@ config.version = 2;

var priConn = replTest.getPrimary();

try {
priConn.getDB('admin').runCommand({ replSetReconfig: config });
} catch (x) {
print('reconfig closed conn');
}
reconfig(replTest, config, true);

assert.soon(function() {
var secConn = replTest.getSecondary();
5 changes: 2 additions & 3 deletions jstests/multiVersion/2_test_launching_cluster.js
@@ -53,7 +53,7 @@ st = new ShardingTest({ shards : 2,
mongosOptions : { binVersion : versionsToCheck },
configOptions : { binVersion : versionsToCheck },
shardOptions : { binVersion : versionsToCheck }

} });

shards = [ st.shard0, st.shard1 ];
@@ -98,8 +98,7 @@ st = new ShardingTest({ shards : 2,

mongosOptions : { binVersion : versionsToCheck },
configOptions : { binVersion : versionsToCheck },
rsOptions : { binVersion : versionsToCheck }

rsOptions : { binVersion : versionsToCheck, protocolVersion: 0 }
} });

var nodesA = st.rs0.nodes;
4 changes: 3 additions & 1 deletion jstests/multiVersion/downgrade_replset.js
@@ -14,7 +14,9 @@ var nodes = {n1: {binVersion: newVersion},

var rst = new ReplSetTest({name: name, nodes: nodes, nodeOptions: {storageEngine: 'mmapv1'}});
rst.startSet();
rst.initiate();
var replSetConfig = rst.getReplSetConfig();
replSetConfig.protocolVersion = 0;
rst.initiate(replSetConfig);

var primary = rst.getPrimary();
var coll = "test.foo";
7 changes: 6 additions & 1 deletion jstests/multiVersion/initialsync.js
@@ -14,7 +14,12 @@ var multitest = function(replSetVersion, newNodeVersion) {
print("Start up a two-node " + replSetVersion + " replica set.");
var rst = new ReplSetTest({name: name, nodes: nodes});
rst.startSet();
rst.initiate();
var config = rst.getReplSetConfig();
// Set protocol version to 0 for 3.2 replset.
if (replSetVersion == newVersion) {
config.protocolVersion = 0;
}
rst.initiate(config);

// Wait for a primary node.
var primary = rst.getPrimary();
3 changes: 2 additions & 1 deletion jstests/multiVersion/mixed_storage_version_replication.js
@@ -612,7 +612,8 @@ function doMultiThreadedWork(primary, numThreads) {
// Make sure everyone is syncing from the primary, to ensure we have all combinations of
// primary/secondary syncing.
config.settings = {chainingAllowed: false};
replTest.initiate();
config.protocolVersion = 0;
replTest.initiate(config);
// Ensure all are synced.
replTest.awaitSecondaryNodes(120000);
var primary = replTest.getPrimary();
4 changes: 3 additions & 1 deletion jstests/multiVersion/upgrade_downgrade_mongod.js
@@ -258,7 +258,9 @@
function init_replication(conn){
var testDB = conn.getDB('test');
var testName = this.name;
var rsconf = {_id: 'oplog', members: [ {_id: 0, host: 'localhost:' + conn.port}]};
var rsconf = {_id: 'oplog',
members: [ {_id: 0, host: 'localhost:' + conn.port}],
protocolVersion: 0};

assert.commandWorked(testDB.adminCommand({replSetInitiate : rsconf}),
testName + ' replSetInitiate');
1 change: 1 addition & 0 deletions jstests/multiVersion/wt_index_option_defaults_replset.js
@@ -24,6 +24,7 @@
// Rig the election so that the 3.2 node becomes the primary.
var replSetConfig = rst.getReplSetConfig();
replSetConfig.members[1].priority = 0;
replSetConfig.protocolVersion = 0;

rst.initiate(replSetConfig);

4 changes: 4 additions & 0 deletions jstests/noPassthrough/initial_sync_cloner_dups.js
@@ -75,6 +75,10 @@ jsTestLog("add a new secondary");
var secondary = replTest.add({});
replTest.reInitiate(4*60*1000);
secondary.setSlaveOk();
// Wait for the secondary to get ReplSetInitiate command.
replTest.waitForState(secondary,
[replTest.STARTUP2, replTest.RECOVERING, replTest.SECONDARY],
60 * 1000);

// This fail point will cause the first intial sync to fail, and leave an op in the buffer to
// verify the fix from SERVER-17807
8 changes: 4 additions & 4 deletions jstests/noPassthroughWithMongod/balance_repl.js
@@ -1,7 +1,7 @@
var otherOptions = { rs: true , numReplicas: 2 , chunksize: 1 , nopreallocj: true };
var s = new ShardingTest({ shards: 2, verbose: 1, other: otherOptions });
s.config.settings.update({ _id: "balancer" },
{ $set: { stopped: true }}, true );
assert.writeOK(s.config.settings.update({ _id: "balancer" },
{ $set: { stopped: true }}, true ));

db = s.getDB( "test" );
var bulk = db.foo.initializeUnorderedBulkOp();
@@ -30,12 +30,12 @@ for ( i=0; i<20; i++ ) {
// Needs to waitForDelete because we'll be performing a slaveOk query,
// and secondaries don't have a chunk manager so it doesn't know how to
// filter out docs it doesn't own.
s.adminCommand({ moveChunk: "test.foo",
assert(s.adminCommand({ moveChunk: "test.foo",
find: { _id: i * 100 },
to : other._id,
_secondaryThrottle: true,
writeConcern: { w: 2 },
_waitForDelete: true });
_waitForDelete: true }));
assert.eq( 2100, coll.find().itcount() );
}

7 changes: 5 additions & 2 deletions jstests/noPassthroughWithMongod/ttl_repl.js
@@ -5,6 +5,8 @@
* Part 3: Change the TTL expireAfterSeconds field and check successful propogation to secondary.
*/

load("jstests/replsets/rslib.js");

var rt = new ReplSetTest( { name : "ttl_repl" , nodes: 2 } );

/******** Part 1 ***************/
@@ -64,8 +66,9 @@ assert.eq( 6 , slave1col.count() , "docs not deleted on secondary" );

// add a new secondary, wait for it to fully join
var slave = rt.add();
rt.reInitiate();
rt.awaitSecondaryNodes();
var config = rt.getReplSetConfig();
config.version = 2;
reconfig(rt, config);

var slave2col = slave.getDB( 'd' )[ 'c' ];

2 changes: 1 addition & 1 deletion jstests/replsets/bulk_api_wc.js
@@ -4,7 +4,7 @@

jsTest.log("Starting bulk api write concern tests...");

// Start a single-node replica set with no journal
// Start a 2-node replica set with no journal
//Allows testing immediate write concern failures and wc application failures
var rst = new ReplSetTest({ nodes : 2 });
rst.startSet({ nojournal : "" });
3 changes: 2 additions & 1 deletion jstests/replsets/election_not_blocked.js
@@ -20,7 +20,8 @@
{_id: 1, host: host+":"+port[1]},
{_id: 2, host: host+":"+port[2], hidden: true, priority: 0},
],
// vetos only exist in protocol version 0, so we use PV0 explicitly here.
// In PV1, a voter writes the last vote to disk before granting the vote,
// so it cannot vote while fsync locked in PV1. Use PV0 explicitly here.
protocolVersion: 0});
replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60 * 1000);
var master = replTest.getMaster();
37 changes: 10 additions & 27 deletions jstests/replsets/initial_sync1.js
@@ -20,7 +20,7 @@ print("1. Bring up set");
// SERVER-7455, this test is called from ssl/auth_x509.js
var x509_options1;
var x509_options2;
var replTest = new ReplSetTest({name: basename,
var replTest = new ReplSetTest({name: basename,
nodes : {node0 : x509_options1, node1 : x509_options2}});

var conns = replTest.startSet();
@@ -61,7 +61,7 @@ var admin_s2 = slave2.getDB("admin");

var config = replTest.getReplSetConfig();
config.version = 2;
config.members.push({_id:2, host:hostname+":"+slave2.port});
config.members.push({_id:2, host: slave2.host});
try {
admin.runCommand({replSetReconfig:config});
}
@@ -82,37 +82,20 @@ wait(function() {
(config3 && config3.version == config.version);
});

wait(function() {
var status = admin_s2.runCommand({replSetGetStatus:1});
printjson(status);
return status.members &&
(status.members[2].state == 3 || status.members[2].state == 2);
});
replTest.waitForState(slave2, [replTest.SECONDARY, replTest.RECOVERING], 60 * 1000);

print("7. Kill the secondary in the middle of syncing");
replTest.stop(slave1);

print("7. Kill #2 in the middle of syncing");
replTest.stop(1);


print("8. Eventually it should become a secondary");
print("8. Eventually the new node should become a secondary");
print("if initial sync has started, this will cause it to fail and sleep for 5 minutes");
wait(function() {
var status = admin_s2.runCommand({replSetGetStatus:1});
occasionally(function() { printjson(status); });
return status.members[2].state == 2;
}, 350);
replTest.waitForState(slave2, replTest.SECONDARY, 60 * 1000);


print("9. Bring #2 back up");
replTest.start(1, {}, true);
print("9. Bring the secondary back up");
replTest.start(slave1, {}, true);
reconnect(slave1);
wait(function() {
var status = admin_s1.runCommand({replSetGetStatus:1});
printjson(status);
return status.ok === 1 && status.members && status.members.length >= 2 &&
(status.members[1].state === 2 || status.members[1].state === 1);
});

replTest.waitForState(slave1, [replTest.PRIMARY, replTest.SECONDARY], 60 * 1000);

print("10. Insert some stuff");
master = replTest.getMaster();
22 changes: 1 addition & 21 deletions jstests/replsets/initial_sync2.js
@@ -147,27 +147,7 @@ for (var i=0; i<10000; i++) {


print("12. Everyone happy eventually");
// if 3 is master...
if (master+"" != origMaster+"") {
print("3 is master");
slave2 = origMaster;
}

wait(function() {
var op1 = getLatestOp(master);
var op2 = getLatestOp(slave1);
var op3 = getLatestOp(slave2);

occasionally(function() {
print("latest ops:");
printjson(op1);
printjson(op2);
printjson(op3);
});

return friendlyEqual(getLatestOp(master), getLatestOp(slave1)) &&
friendlyEqual(getLatestOp(master), getLatestOp(slave2));
});
replTest.awaitReplication(2 * 60 * 1000);

replTest.stopSet();
};
5 changes: 4 additions & 1 deletion jstests/replsets/maintenance.js
@@ -2,7 +2,10 @@

var replTest = new ReplSetTest( {name: 'unicomplex', nodes: 2} );
var conns = replTest.startSet({ verbose: 1 });
replTest.initiate();
var config = replTest.getReplSetConfig();
config.members[0].priority = 2;
replTest.initiate(config);
replTest.waitForState(replTest.nodes[0], replTest.PRIMARY, 60000);

// Make sure we have a master
var master = replTest.getMaster();
2 changes: 1 addition & 1 deletion jstests/replsets/oplog_note_cmd.js
@@ -12,7 +12,7 @@ db.foo.insert({a:1});
var statusBefore = db.runCommand({replSetGetStatus: 1});
assert.commandWorked(db.runCommand({appendOplogNote: 1, data: {a: 1}}));
var statusAfter = db.runCommand({replSetGetStatus: 1});
assert.lt(statusBefore.members[0].optime, statusAfter.members[0].optime);
assert.lt(statusBefore.members[0].optime.ts, statusAfter.members[0].optime.ts);

// Make sure note written successfully
var op = db.getSiblingDB('local').oplog.rs.find().sort({$natural: -1}).limit(1).next();
1 change: 1 addition & 0 deletions jstests/replsets/protocol_version_upgrade_downgrade.js
@@ -37,6 +37,7 @@ assert.writeOK(primaryColl.bar.insert({x: 1}, {writeConcern: {w: 3}}));
// Check optime format in protocol version 0, which is a Timestamp.
var res = primary.adminCommand({replSetGetStatus: 1});
assert.commandWorked(res);
// Check the optime is a Timestamp, not an OpTime { ts: Timestamp, t: int }
assert.eq(res.members[0].optime.ts, null);

//
8 changes: 4 additions & 4 deletions jstests/replsets/read_after_optime.js
@@ -14,15 +14,15 @@ var runTest = function(testDB, primaryConn) {

var localDB = primaryConn.getDB('local');

var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next().ts;
var twoSecTS = new Timestamp(oplogTS.getTime() + 2, 0);
var oplogTS = localDB.oplog.rs.find().sort({ $natural: -1 }).limit(1).next();
var twoSecTS = new Timestamp(oplogTS.ts.getTime() + 2, 0);

// Test timeout with maxTimeMS
var res = assert.commandFailed(testDB.runCommand({
find: 'user',
filter: { x: 1 },
readConcern: {
afterOpTime: { ts: twoSecTS, t: 0 }
afterOpTime: { ts: twoSecTS, t: oplogTS.t }
},
maxTimeMS: 1000
}));
@@ -40,7 +40,7 @@ var runTest = function(testDB, primaryConn) {
find: 'user',
filter: { x: 1 },
readConcern: {
afterOpTime: { ts: twoSecTS, t: 0 },
afterOpTime: { ts: twoSecTS, t: oplogTS.t },
maxTimeMS: 10 * 1000
}
}));
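
A closing note on read_after_optime.js, again as an illustration only and not part of the commit: with PV1 optimes, readConcern.afterOpTime carries the full {ts, t} pair, so the term now comes from the latest oplog entry instead of the hard-coded t: 0 the old code used. Reusing testDB and primaryConn from the test's runTest function (the maxTimeMS value here is just illustrative), the new request shape is roughly:

var lastOp = primaryConn.getDB('local').oplog.rs.find().sort({ $natural: -1 }).limit(1).next();

// Wait (up to an illustrative 5 seconds) for an optime two seconds past the
// newest oplog entry, in the primary's current term.
var res = testDB.runCommand({
    find: 'user',
    filter: { x: 1 },
    readConcern: { afterOpTime: { ts: new Timestamp(lastOp.ts.getTime() + 2, 0), t: lastOp.t } },
    maxTimeMS: 5000
});
printjson(res);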
