diff --git a/package.json b/package.json
index 451ff0891c2..72f57589f22 100644
--- a/package.json
+++ b/package.json
@@ -113,7 +113,7 @@
"check:kerberos": "mocha --config \"test/manual/mocharc.json\" test/manual/kerberos.test.js",
"check:tls": "mocha --config \"test/manual/mocharc.json\" test/manual/tls_support.test.js",
"check:ldap": "mocha --config \"test/manual/mocharc.json\" test/manual/ldap.test.js",
- "check:csfle": "mocha --file test/tools/runner test/functional/client_side_encryption",
+ "check:csfle": "mocha --file test/tools/runner test/integration/client-side-encryption",
"check:snappy": "mocha --file test/tools/runner test/functional/unit_snappy.test.js",
"prepare": "node etc/prepare.js",
"release": "standard-version -i HISTORY.md",
diff --git a/test/functional/client_side_encryption/corpus.test.js b/test/integration/client-side-encryption/client_side_encryption.corpus.spec.test.js
similarity index 100%
rename from test/functional/client_side_encryption/corpus.test.js
rename to test/integration/client-side-encryption/client_side_encryption.corpus.spec.test.js
diff --git a/test/functional/client_side_encryption/deadlock_tests.js b/test/integration/client-side-encryption/client_side_encryption.prose.deadlock.js
similarity index 100%
rename from test/functional/client_side_encryption/deadlock_tests.js
rename to test/integration/client-side-encryption/client_side_encryption.prose.deadlock.js
diff --git a/test/functional/client_side_encryption/prose.test.js b/test/integration/client-side-encryption/client_side_encryption.prose.test.js
similarity index 99%
rename from test/functional/client_side_encryption/prose.test.js
rename to test/integration/client-side-encryption/client_side_encryption.prose.test.js
index 66e66f5af7f..7ee9232a681 100644
--- a/test/functional/client_side_encryption/prose.test.js
+++ b/test/integration/client-side-encryption/client_side_encryption.prose.test.js
@@ -1,7 +1,7 @@
'use strict';
const BSON = require('bson');
const chai = require('chai');
-const { deadlockTests } = require('./deadlock_tests.js');
+const { deadlockTests } = require('./client_side_encryption.prose.deadlock');
const expect = chai.expect;
chai.use(require('chai-subset'));
diff --git a/test/functional/client_side_encryption/spec.test.js b/test/integration/client-side-encryption/client_side_encryption.spec.test.js
similarity index 84%
rename from test/functional/client_side_encryption/spec.test.js
rename to test/integration/client-side-encryption/client_side_encryption.spec.test.js
index 3522d922a3b..cef2a884534 100644
--- a/test/functional/client_side_encryption/spec.test.js
+++ b/test/integration/client-side-encryption/client_side_encryption.spec.test.js
@@ -1,9 +1,11 @@
'use strict';
const path = require('path');
-const TestRunnerContext = require('../spec-runner').TestRunnerContext;
-const gatherTestSuites = require('../spec-runner').gatherTestSuites;
-const generateTopologyTests = require('../spec-runner').generateTopologyTests;
+const {
+ TestRunnerContext,
+ gatherTestSuites,
+ generateTopologyTests
+} = require('../../tools/spec-runner');
describe('Client Side Encryption', function () {
// TODO: Replace this with using the filter once the filter works on describe blocks
diff --git a/test/functional/client_side_encryption/driver.test.js b/test/integration/client-side-encryption/driver.test.js
similarity index 100%
rename from test/functional/client_side_encryption/driver.test.js
rename to test/integration/client-side-encryption/driver.test.js
diff --git a/test/integration/gridfs/gridfs.spec.test.js b/test/integration/gridfs/gridfs.spec.test.js
new file mode 100644
index 00000000000..fdc2a02597f
--- /dev/null
+++ b/test/integration/gridfs/gridfs.spec.test.js
@@ -0,0 +1,252 @@
+'use strict';
+
+const { EJSON } = require('bson');
+const { setupDatabase } = require('./../shared');
+const { expect } = require('chai');
+const { GridFSBucket } = require('../../../src');
+
+describe('GridFS', function () {
+ before(function () {
+ return setupDatabase(this.configuration);
+ });
+
+ const UPLOAD_SPEC = require('../../spec/gridfs/gridfs-upload.json');
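+  // Each upload case streams the spec's hex-encoded payload through a GridFSBucket
+  // upload stream, then compares the documents written to the expected files/chunks
+  // collections against the documents listed in the spec's assert.data section.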
+ UPLOAD_SPEC.tests.forEach(function (specTest) {
+ (function (testSpec) {
+ it(testSpec.description, {
+ metadata: { requires: { topology: ['single'] } },
+
+ test(done) {
+ const configuration = this.configuration;
+ const client = configuration.newClient(configuration.writeConcernMax(), {
+ maxPoolSize: 1
+ });
+ client.connect(function (err, client) {
+ const db = client.db(configuration.db);
+ db.dropDatabase(function (error) {
+ expect(error).to.not.exist;
+
+ const bucket = new GridFSBucket(db, { bucketName: 'expected' });
+ const res = bucket.openUploadStream(
+ testSpec.act.arguments.filename,
+ testSpec.act.arguments.options
+ );
+ const buf = Buffer.from(testSpec.act.arguments.source.$hex, 'hex');
+
+ res.on('error', function (err) {
+ expect(err).to.not.exist;
+ });
+
+ res.on('finish', function () {
+ const data = testSpec.assert.data;
+ let num = data.length;
+ data.forEach(function (data) {
+ const collection = data.insert;
+ db.collection(collection)
+ .find({})
+ .toArray(function (error, docs) {
+ expect(data.documents.length).to.equal(docs.length);
+
+ for (let i = 0; i < docs.length; ++i) {
+ testResultDoc(data.documents[i], docs[i], res.id);
+ }
+
+ if (--num === 0) {
+ client.close(done);
+ }
+ });
+ });
+ });
+
+ res.write(buf);
+ res.end();
+ });
+ });
+ }
+ });
+ })(specTest);
+ });
+
+ const DOWNLOAD_SPEC = require('../../spec/gridfs/gridfs-download.json');
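+  // Each download case seeds the fs.files/fs.chunks collections from the spec's data
+  // section (optionally applying a single "arrange" command first), streams the file
+  // back through openDownloadStream, and asserts either the expected hex contents or
+  // the expected error.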
+ DOWNLOAD_SPEC.tests.forEach(function (specTest) {
+ (function (testSpec) {
+ it(testSpec.description, {
+ metadata: { requires: { topology: ['single'] } },
+
+ test(done) {
+ const configuration = this.configuration;
+ const client = configuration.newClient(configuration.writeConcernMax(), {
+ maxPoolSize: 1
+ });
+ client.connect(function (err, client) {
+ const db = client.db(configuration.db);
+ db.dropDatabase(function (err) {
+ expect(err).to.not.exist;
+ const BUCKET_NAME = 'fs';
+
+ const _runTest = function () {
+ const bucket = new GridFSBucket(db, { bucketName: BUCKET_NAME });
+ let res = Buffer.alloc(0);
+
+ const download = bucket.openDownloadStream(
+ EJSON.parse(JSON.stringify(testSpec.act.arguments.id), { relaxed: true })
+ );
+
+ download.on('data', function (chunk) {
+ res = Buffer.concat([res, chunk]);
+ });
+
+ let errorReported = false;
+ download.on('error', function (error) {
+ errorReported = true;
+ if (!testSpec.assert.error) {
+ expect.fail('Should be unreached');
+
+ // We need to abort in order to close the underlying cursor,
+ // and by extension the implicit session used for the cursor.
+ // This is only necessary if the cursor is not exhausted
+ download.abort();
+ client.close(done);
+ }
+ expect(error.toString().indexOf(testSpec.assert.error) !== -1).to.equal(true);
+
+ // We need to abort in order to close the underlying cursor,
+ // and by extension the implicit session used for the cursor.
+ // This is only necessary if the cursor is not exhausted
+ download.abort();
+ client.close(done);
+ });
+
+ download.on('end', function () {
+ const result = testSpec.assert.result;
+ if (!result) {
+ if (errorReported) {
+ return;
+ }
+
+ // We need to abort in order to close the underlying cursor,
+ // and by extension the implicit session used for the cursor.
+ // This is only necessary if the cursor is not exhausted
+ download.abort();
+ client.close(done);
+ expect.fail('errorReported should be set');
+ }
+
+ expect(res.toString('hex')).to.equal(result.$hex);
+
+ // We need to abort in order to close the underlying cursor,
+ // and by extension the implicit session used for the cursor.
+ // This is only necessary if the cursor is not exhausted
+ download.abort();
+ client.close(done);
+ });
+ };
+
+ const keys = Object.keys(DOWNLOAD_SPEC.data);
+ let numCollections = Object.keys(DOWNLOAD_SPEC.data).length;
+ keys.forEach(function (collection) {
+ const data = DOWNLOAD_SPEC.data[collection].map(function (v) {
+ return deflateTestDoc(v);
+ });
+
+ db.collection(BUCKET_NAME + '.' + collection).insertMany(data, function (error) {
+ expect(error).to.not.exist;
+
+ if (--numCollections === 0) {
+ if (testSpec.arrange) {
+ // only support 1 arrange op for now
+ expect(testSpec.arrange.data.length).to.equal(1);
+ applyArrange(db, deflateTestDoc(testSpec.arrange.data[0]), function (error) {
+ expect(error).to.not.exist;
+ _runTest();
+ });
+ } else {
+ _runTest();
+ }
+ }
+ });
+ });
+ });
+ });
+ }
+ });
+ })(specTest);
+ });
+
+ function testResultDoc(specDoc, resDoc, result) {
+ const specKeys = Object.keys(specDoc)
+ .filter(key => key !== 'md5')
+ .sort();
+ const resKeys = Object.keys(resDoc).sort();
+
+ expect(specKeys.length === resKeys.length).to.equal(true);
+
+ for (let i = 0; i < specKeys.length; ++i) {
+ const key = specKeys[i];
+ expect(specKeys[i]).to.equal(resKeys[i]);
+ if (specDoc[key] === '*actual') {
+ expect(resDoc[key]).to.exist;
+ } else if (specDoc[key] === '*result') {
+ expect(resDoc[key].toString()).to.equal(result.toString());
+ } else if (specDoc[key].$hex) {
+ expect(resDoc[key]._bsontype === 'Binary').to.equal(true);
+ expect(resDoc[key].toString('hex')).to.equal(specDoc[key].$hex);
+ } else {
+ if (typeof specDoc[key] === 'object') {
+ expect(specDoc[key]).to.deep.equal(resDoc[key]);
+ } else {
+ expect(specDoc[key]).to.equal(resDoc[key]);
+ }
+ }
+ }
+ }
+
+ function deflateTestDoc(doc) {
+ const ret = EJSON.parse(JSON.stringify(doc), { relaxed: true });
+ convert$hexToBuffer(ret);
+ return ret;
+ }
+
+ function convert$hexToBuffer(doc) {
+ const keys = Object.keys(doc);
+ keys.forEach(function (key) {
+ if (doc[key] && typeof doc[key] === 'object') {
+ if (doc[key].$hex != null) {
+ doc[key] = Buffer.from(doc[key].$hex, 'hex');
+ } else {
+ convert$hexToBuffer(doc[key]);
+ }
+ }
+ });
+ }
+
+ function applyArrange(db, command, callback) {
+ // Don't count on commands being there since we need to test on 2.2 and 2.4
+ if (command.delete) {
+ if (command.deletes.length !== 1) {
+ return callback(new Error('can only arrange with 1 delete'));
+ }
+ if (command.deletes[0].limit !== 1) {
+ return callback(new Error('can only arrange with delete limit 1'));
+ }
+ db.collection(command.delete).deleteOne(command.deletes[0].q, callback);
+ } else if (command.insert) {
+ db.collection(command.insert).insertMany(command.documents, callback);
+ } else if (command.update) {
+ const bulk = [];
+ for (let i = 0; i < command.updates.length; ++i) {
+ bulk.push({
+ updateOne: {
+ filter: command.updates[i].q,
+ update: command.updates[i].u
+ }
+ });
+ }
+
+ db.collection(command.update).bulkWrite(bulk, callback);
+ } else {
+ const msg = 'Command not recognized: ' + require('util').inspect(command);
+ callback(new Error(msg));
+ }
+ }
+});
diff --git a/test/functional/gridfs_stream.test.js b/test/integration/gridfs/gridfs_stream.test.js
similarity index 80%
rename from test/functional/gridfs_stream.test.js
rename to test/integration/gridfs/gridfs_stream.test.js
index e23970e3c22..5ce0239cef3 100644
--- a/test/functional/gridfs_stream.test.js
+++ b/test/integration/gridfs/gridfs_stream.test.js
@@ -2,11 +2,10 @@
const { Double } = require('bson');
const stream = require('stream');
-const { EJSON } = require('bson');
const fs = require('fs');
-const { setupDatabase, withClient } = require('./shared');
+const { setupDatabase, withClient } = require('./../shared');
const { expect } = require('chai');
-const { GridFSBucket, ObjectId } = require('../../src');
+const { GridFSBucket, ObjectId } = require('../../../src');
describe('GridFS Stream', function () {
before(function () {
@@ -894,246 +893,6 @@ describe('GridFS Stream', function () {
}
});
- const UPLOAD_SPEC = require('../spec/gridfs/gridfs-upload.json');
- UPLOAD_SPEC.tests.forEach(function (specTest) {
- (function (testSpec) {
- it(testSpec.description, {
- metadata: { requires: { topology: ['single'] } },
-
- test(done) {
- const configuration = this.configuration;
- const client = configuration.newClient(configuration.writeConcernMax(), {
- maxPoolSize: 1
- });
- client.connect(function (err, client) {
- const db = client.db(configuration.db);
- db.dropDatabase(function (error) {
- expect(error).to.not.exist;
-
- const bucket = new GridFSBucket(db, { bucketName: 'expected' });
- const res = bucket.openUploadStream(
- testSpec.act.arguments.filename,
- testSpec.act.arguments.options
- );
- const buf = Buffer.from(testSpec.act.arguments.source.$hex, 'hex');
-
- res.on('error', function (err) {
- expect(err).to.not.exist;
- });
-
- res.on('finish', function () {
- const data = testSpec.assert.data;
- let num = data.length;
- data.forEach(function (data) {
- const collection = data.insert;
- db.collection(collection)
- .find({})
- .toArray(function (error, docs) {
- expect(data.documents.length).to.equal(docs.length);
-
- for (let i = 0; i < docs.length; ++i) {
- testResultDoc(data.documents[i], docs[i], res.id);
- }
-
- if (--num === 0) {
- client.close(done);
- }
- });
- });
- });
-
- res.write(buf);
- res.end();
- });
- });
- }
- });
- })(specTest);
- });
-
- const DOWNLOAD_SPEC = require('../spec/gridfs/gridfs-download.json');
- DOWNLOAD_SPEC.tests.forEach(function (specTest) {
- (function (testSpec) {
- it(testSpec.description, {
- metadata: { requires: { topology: ['single'] } },
-
- test(done) {
- const configuration = this.configuration;
- const client = configuration.newClient(configuration.writeConcernMax(), {
- maxPoolSize: 1
- });
- client.connect(function (err, client) {
- const db = client.db(configuration.db);
- db.dropDatabase(function (err) {
- expect(err).to.not.exist;
- const BUCKET_NAME = 'fs';
-
- const _runTest = function () {
- const bucket = new GridFSBucket(db, { bucketName: BUCKET_NAME });
- let res = Buffer.alloc(0);
-
- const download = bucket.openDownloadStream(
- EJSON.parse(JSON.stringify(testSpec.act.arguments.id), { relaxed: true })
- );
-
- download.on('data', function (chunk) {
- res = Buffer.concat([res, chunk]);
- });
-
- let errorReported = false;
- download.on('error', function (error) {
- errorReported = true;
- if (!testSpec.assert.error) {
- expect.fail('Should be unreached');
-
- // We need to abort in order to close the underlying cursor,
- // and by extension the implicit session used for the cursor.
- // This is only necessary if the cursor is not exhausted
- download.abort();
- client.close(done);
- }
- expect(error.toString().indexOf(testSpec.assert.error) !== -1).to.equal(true);
-
- // We need to abort in order to close the underlying cursor,
- // and by extension the implicit session used for the cursor.
- // This is only necessary if the cursor is not exhausted
- download.abort();
- client.close(done);
- });
-
- download.on('end', function () {
- const result = testSpec.assert.result;
- if (!result) {
- if (errorReported) {
- return;
- }
-
- // We need to abort in order to close the underlying cursor,
- // and by extension the implicit session used for the cursor.
- // This is only necessary if the cursor is not exhausted
- download.abort();
- client.close(done);
- expect.fail('errorReported should be set');
- }
-
- expect(res.toString('hex')).to.equal(result.$hex);
-
- // We need to abort in order to close the underlying cursor,
- // and by extension the implicit session used for the cursor.
- // This is only necessary if the cursor is not exhausted
- download.abort();
- client.close(done);
- });
- };
-
- const keys = Object.keys(DOWNLOAD_SPEC.data);
- let numCollections = Object.keys(DOWNLOAD_SPEC.data).length;
- keys.forEach(function (collection) {
- const data = DOWNLOAD_SPEC.data[collection].map(function (v) {
- return deflateTestDoc(v);
- });
-
- db.collection(BUCKET_NAME + '.' + collection).insertMany(data, function (error) {
- expect(error).to.not.exist;
-
- if (--numCollections === 0) {
- if (testSpec.arrange) {
- // only support 1 arrange op for now
- expect(testSpec.arrange.data.length).to.equal(1);
- applyArrange(db, deflateTestDoc(testSpec.arrange.data[0]), function (error) {
- expect(error).to.not.exist;
- _runTest();
- });
- } else {
- _runTest();
- }
- }
- });
- });
- });
- });
- }
- });
- })(specTest);
- });
-
- function testResultDoc(specDoc, resDoc, result) {
- const specKeys = Object.keys(specDoc)
- .filter(key => key !== 'md5')
- .sort();
- const resKeys = Object.keys(resDoc).sort();
-
- expect(specKeys.length === resKeys.length).to.equal(true);
-
- for (let i = 0; i < specKeys.length; ++i) {
- const key = specKeys[i];
- expect(specKeys[i]).to.equal(resKeys[i]);
- if (specDoc[key] === '*actual') {
- expect(resDoc[key]).to.exist;
- } else if (specDoc[key] === '*result') {
- expect(resDoc[key].toString()).to.equal(result.toString());
- } else if (specDoc[key].$hex) {
- expect(resDoc[key]._bsontype === 'Binary').to.equal(true);
- expect(resDoc[key].toString('hex')).to.equal(specDoc[key].$hex);
- } else {
- if (typeof specDoc[key] === 'object') {
- expect(specDoc[key]).to.deep.equal(resDoc[key]);
- } else {
- expect(specDoc[key]).to.equal(resDoc[key]);
- }
- }
- }
- }
-
- function deflateTestDoc(doc) {
- const ret = EJSON.parse(JSON.stringify(doc), { relaxed: true });
- convert$hexToBuffer(ret);
- return ret;
- }
-
- function convert$hexToBuffer(doc) {
- const keys = Object.keys(doc);
- keys.forEach(function (key) {
- if (doc[key] && typeof doc[key] === 'object') {
- if (doc[key].$hex != null) {
- doc[key] = Buffer.from(doc[key].$hex, 'hex');
- } else {
- convert$hexToBuffer(doc[key]);
- }
- }
- });
- }
-
- function applyArrange(db, command, callback) {
- // Don't count on commands being there since we need to test on 2.2 and 2.4
- if (command.delete) {
- if (command.deletes.length !== 1) {
- return callback(new Error('can only arrange with 1 delete'));
- }
- if (command.deletes[0].limit !== 1) {
- return callback(new Error('can only arrange with delete limit 1'));
- }
- db.collection(command.delete).deleteOne(command.deletes[0].q, callback);
- } else if (command.insert) {
- db.collection(command.insert).insertMany(command.documents, callback);
- } else if (command.update) {
- const bulk = [];
- for (let i = 0; i < command.updates.length; ++i) {
- bulk.push({
- updateOne: {
- filter: command.updates[i].q,
- update: command.updates[i].u
- }
- });
- }
-
- db.collection(command.update).bulkWrite(bulk, callback);
- } else {
- const msg = 'Command not recognized: ' + require('util').inspect(command);
- callback(new Error(msg));
- }
- }
-
/**
* NODE-822 GridFSBucketWriteStream end method does not handle optional parameters
*/
diff --git a/test/integration/shared.js b/test/integration/shared.js
new file mode 100644
index 00000000000..2e2ab37419d
--- /dev/null
+++ b/test/integration/shared.js
@@ -0,0 +1,309 @@
+'use strict';
+
+const expect = require('chai').expect;
+
+// helpers for using chai.expect in the assert style
+const assert = {
+ equal: function (a, b) {
+ expect(a).to.equal(b);
+ },
+
+ deepEqual: function (a, b) {
+ expect(a).to.eql(b);
+ },
+
+ strictEqual: function (a, b) {
+ expect(a).to.eql(b);
+ },
+
+ notEqual: function (a, b) {
+ expect(a).to.not.equal(b);
+ },
+
+ ok: function (a) {
+ expect(a).to.be.ok;
+ },
+
+ throws: function (func) {
+    expect(func).to.throw();
+ }
+};
+
+function delay(timeout) {
+ return new Promise(function (resolve) {
+ setTimeout(function () {
+ resolve();
+ }, timeout);
+ });
+}
+
+function dropCollection(dbObj, collectionName) {
+ return dbObj.dropCollection(collectionName).catch(ignoreNsNotFound);
+}
+
+function filterForCommands(commands, bag) {
+ if (typeof commands === 'function') {
+ return function (event) {
+ if (commands(event.commandName)) bag.push(event);
+ };
+ }
+ commands = Array.isArray(commands) ? commands : [commands];
+ return function (event) {
+ if (commands.indexOf(event.commandName) !== -1) bag.push(event);
+ };
+}
+
+function filterOutCommands(commands, bag) {
+ if (typeof commands === 'function') {
+ return function (event) {
+ if (!commands(event.commandName)) bag.push(event);
+ };
+ }
+ commands = Array.isArray(commands) ? commands : [commands];
+ return function (event) {
+ if (commands.indexOf(event.commandName) === -1) bag.push(event);
+ };
+}
+
+function ignoreNsNotFound(err) {
+ if (!err.message.match(/ns not found/)) throw err;
+}
+
+function setupDatabase(configuration, dbsToClean) {
+ dbsToClean = Array.isArray(dbsToClean) ? dbsToClean : [];
+ var configDbName = configuration.db;
+ var client = configuration.newClient(configuration.writeConcernMax(), {
+ maxPoolSize: 1
+ });
+
+ dbsToClean.push(configDbName);
+
+ return client
+ .connect()
+ .then(() =>
+ dbsToClean.reduce(
+ (result, dbName) =>
+ result
+ .then(() =>
+ client.db(dbName).command({ dropAllUsersFromDatabase: 1, writeConcern: { w: 1 } })
+ )
+ .then(() => client.db(dbName).dropDatabase({ writeConcern: { w: 1 } })),
+ Promise.resolve()
+ )
+ )
+ .then(
+ () => client.close(),
+ err => client.close(() => Promise.reject(err))
+ );
+}
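+// Example usage (illustrative sketch): drop the configured test database (plus any extra
+// databases) before a suite runs, as the GridFS suites in this directory do:
+//
+//   before(function () {
+//     return setupDatabase(this.configuration);
+//   });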
+
+/** @typedef {(client: MongoClient) => Promise | (client: MongoClient, done: Function) => void} withClientCallback */
+/**
+ * Safely perform a test with provided MongoClient, ensuring client won't leak.
+ *
+ * @param {string|MongoClient} [client] if not provided, `withClient` must be bound to test function `this`
+ * @param {withClientCallback} callback the test function
+ */
+function withClient(client, callback) {
+ let connectionString;
+ if (arguments.length === 1) {
+ callback = client;
+ client = undefined;
+ } else {
+ if (typeof client === 'string') {
+ connectionString = client;
+ client = undefined;
+ }
+ }
+
+ if (callback.length === 2) {
+ const cb = callback;
+ callback = client => new Promise(resolve => cb(client, resolve));
+ }
+
+ function cleanup(err) {
+ return new Promise((resolve, reject) => {
+ try {
+ client.close(closeErr => {
+ const finalErr = err || closeErr;
+ if (finalErr) {
+ return reject(finalErr);
+ }
+ return resolve();
+ });
+ } catch (e) {
+ return reject(err || e);
+ }
+ });
+ }
+
+ function lambda() {
+ if (!client) {
+ client = this.configuration.newClient(connectionString);
+ }
+ return client
+ .connect()
+ .then(callback)
+ .then(err => {
+ cleanup();
+ if (err) {
+ throw err;
+ }
+ }, cleanup);
+ }
+
+ if (this && this.configuration) {
+ return lambda.call(this);
+ }
+ return lambda;
+}
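+// Example usage (illustrative sketch): wrap a test body so the client is always closed,
+// whether the callback is promise-based or callback-based. The command used here is
+// arbitrary.
+//
+//   it('runs a ping command', withClient((client, done) => {
+//     client.db('admin').command({ ping: 1 }, done);
+//   }));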
+
+/** @typedef {(client: MongoClient, events: Array, done: Function) => void} withMonitoredClientCallback */
+/**
+ * Perform a test with a monitored MongoClient that will filter for certain commands.
+ *
+ * @param {string|Array|Function} commands commands to filter for
+ * @param {object} [options] options to pass on to configuration.newClient
+ * @param {object} [options.queryOptions] connection string options
+ * @param {object} [options.clientOptions] MongoClient options
+ * @param {withMonitoredClientCallback} callback the test function
+ */
+function withMonitoredClient(commands, options, callback) {
+ if (arguments.length === 2) {
+ callback = options;
+ options = {};
+ }
+ if (!Object.prototype.hasOwnProperty.call(callback, 'prototype')) {
+    throw new Error('withMonitoredClient callback cannot be an arrow function');
+ }
+ return function () {
+ const monitoredClient = this.configuration.newClient(
+ Object.assign({}, options.queryOptions),
+ Object.assign({ monitorCommands: true }, options.clientOptions)
+ );
+ const events = [];
+ monitoredClient.on('commandStarted', filterForCommands(commands, events));
+ return withClient(monitoredClient, (client, done) =>
+ callback.bind(this)(client, events, done)
+ )();
+ };
+}
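+// Example usage (illustrative sketch): collect the `find` commands issued by the test body.
+// Note that the callback must be a regular function (not an arrow function) so it can be
+// bound to the test context; the database/collection names here are arbitrary.
+//
+//   it('monitors find commands', withMonitoredClient('find', function (client, events, done) {
+//     client.db('test').collection('docs').findOne({}, err => {
+//       expect(events).to.have.lengthOf(1);
+//       done(err);
+//     });
+//   }));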
+
+/**
+ * Safely perform a test with an arbitrary cursor.
+ *
+ * @param {Function} cursor any cursor that needs to be closed
+ * @param {(cursor: object, done: Function) => void} body test body
+ * @param {Function} done called after cleanup
+ */
+function withCursor(cursor, body, done) {
+ let clean = false;
+ function cleanup(testErr) {
+ if (clean) return;
+ clean = true;
+ return cursor.close(closeErr => done(testErr || closeErr));
+ }
+ try {
+ body(cursor, cleanup);
+ } catch (err) {
+ cleanup(err);
+ }
+}
+
+/**
+ * A class for listening on specific events
+ *
+ * @example
+ * beforeEach(function() {
+ * // capture all commandStarted events from client. Get by doing this.commandStarted.events;
+ *   this.commandStarted = new APMEventCollector(this.client, 'commandStarted');
+ * });
+ * @example
+ * beforeEach(function() {
+ * // same as above, but only allows 'insert' and 'find' events
+ *   this.commandStarted = new APMEventCollector(this.client, 'commandStarted', {
+ * include: ['insert', 'find']
+ * });
+ * });
+ * @example
+ * beforeEach(function() {
+ * // same as above, but excludes 'ismaster' events
+ *   this.commandStarted = new APMEventCollector(this.client, 'commandStarted', {
+ * exclude: ['ismaster']
+ * });
+ * });
+ */
+class APMEventCollector {
+ constructor(client, eventName, options) {
+ this._client = client;
+ this._eventName = eventName;
+
+ this._events = [];
+ this._listener = e => this._events.push(e);
+ this._client.on(this._eventName, this._listener);
+
+ options = options || {};
+ const include = this._buildSet(options.include);
+ if (include.size > 0) {
+ this._include = include;
+ }
+ this._exclude = this._buildSet(options.exclude);
+ }
+
+ _buildSet(input) {
+ if (Array.isArray(input)) {
+ return new Set(input.map(x => x.toLowerCase()));
+ } else if (typeof input === 'string') {
+ return new Set([input.toLowerCase()]);
+ }
+ return new Set();
+ }
+
+ get events() {
+ let events = this._events;
+ if (this._include) {
+ events = events.filter(e => this._include.has(e.commandName.toLowerCase()));
+ }
+ return events.filter(e => !this._exclude.has(e.commandName.toLowerCase()));
+ }
+
+ clear() {
+ this._events = [];
+ }
+
+ teardown() {
+ this._client.removeListener(this._eventName, this._listener);
+ }
+}
+
+// simplified `withClient` helper that only uses callbacks
+function withClientV2(callback) {
+ return function testFunction(done) {
+ const client = this.configuration.newClient({ monitorCommands: true });
+ client.connect(err => {
+ if (err) return done(err);
+ this.defer(() => client.close());
+
+ try {
+ callback.call(this, client, done);
+ } catch (err) {
+ done(err);
+ }
+ });
+ };
+}
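+// Example usage (illustrative sketch): callback-only variant of `withClient`; the test
+// runner's `this.defer` closes the client after the test completes.
+//
+//   it('inserts a document', withClientV2(function (client, done) {
+//     client.db('test').collection('docs').insertOne({ a: 1 }, done);
+//   }));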
+
+module.exports = {
+ assert,
+ delay,
+ dropCollection,
+ filterForCommands,
+ filterOutCommands,
+ ignoreNsNotFound,
+ setupDatabase,
+ withClient,
+ withClientV2,
+ withMonitoredClient,
+ withCursor,
+ APMEventCollector
+};
diff --git a/test/readme.md b/test/readme.md
index 24ef1e09835..f3ff916eba9 100644
--- a/test/readme.md
+++ b/test/readme.md
@@ -22,16 +22,21 @@ tests will be skipped.
Below is a summary of the types of test automation in this repo.
-| Type of Test | Test Location | About the Tests | How to Run Tests |
-| ------------ | ------------- | --------------- | ---------------- |
-| Unit | `/test/unit` | The unit tests test individual pieces of code, typically functions. These tests do **not** interact with a real database, so mocks are used instead. | `npm run check:unit` |
-| Functional | `/test/functional` | The functional tests test that a given feature or piece of a feature is working as expected. These tests do **not** use mocks; instead, they interact with a real database. | `npm run check:test` |
-| Benchmark | `/test/benchmarks` | The benchmark tests report how long a designated set of tests take to run. They are used to measure performance. | `npm run check:bench` |
-| Integration | `/test/integration` | *Coming Soon* The integration tests test that pieces of the driver work together as expected. | `npm run check:test` |
-| Specialized Environment | `/test/manual` | The specalized environment tests are functional tests that require specialized environment setups in Evergreen.
**Note**: "manual" in the directory path does not refer to tests that should be run manually. These tests are automated. These tests have a special Evergreen configuration and run in isolation from the other tests. | There is no single script for running all of the specialized environment tests. Instead, you can run the appropriate script based on the specialized environment you want to use:
- `npm run check:atlas` to test Atlas
- `npm run check:adl` to test Atlas Data Lake
- `npm run check:ocsp` to test OSCP
- `npm run check:kerberos` to test Kerberos
- `npm run check:tls` to test TLS
- `npm run check:ldap` to test LDAP authorization
-| Spec | Test input and expected results are in `/test/spec`.
Test runners are in `/test/functional` with the `_spec` suffix in the test file's name.
Some spec tests are also in `/test/unit`. | All of the MongoDB drivers follow the same [specifications (specs)][driver-specs]. Each spec has tests associated with it. Some of the tests are prose (written, descriptive) tests. Other tests are written in YAML and converted to JSON. The developers on the driver teams automate these tests. For prose tests, the tests are converted to automation and stored in the `test/unit` or `test/functional` directory as appropriate. Both the JSON and YAML files are stored in `test/spec`. The test runners for the JSON and YAML files are located in `test/functional` and `/test/unit`. | `npm run check:test` to run all of the functional and integration tests (including the spec tests stored with those). `npm run check:unit` to run all of the unit tests (including the spec tests stored with those).
-| TypeScript Definition | `/test/types` | The TypeScript definition tests verify the type definitions are correct. | `npm run check:tsd` |
+| Type of Test | Test Location | About the Tests | How to Run Tests |
+| ----------------------- | ------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| Unit | `/test/unit` | The unit tests test individual pieces of code, typically functions. These tests do **not** interact with a real database, so mocks are used instead.<br><br>The unit test directory mirrors the `/src` directory structure with test file names matching the source file names of the code they test. | `npm run check:unit` |
+| Integration | `/test/functional` and `/test/integration` | The integration tests test that a given feature or piece of a feature is working as expected. These tests do **not** use mocks; instead, they interact with a real database.<br><br>The integration test directory follows the `test/spec` directory structure, representing the different functional areas of the driver.<br><br>**Note:** There are two directories because the tests are currently being migrated from `/functional` to `/integration`. | `npm run check:test` |
+| Benchmark | `/test/benchmarks` | The benchmark tests report how long a designated set of tests take to run. They are used to measure performance. | `npm run check:bench` |
+| Specialized Environment | `/test/manual` | The specialized environment tests are functional tests that require specialized environment setups in Evergreen.<br><br>**Note**: "manual" in the directory path does not refer to tests that should be run manually. These tests are automated. These tests have a special Evergreen configuration and run in isolation from the other tests. | There is no single script for running all of the specialized environment tests. Instead, you can run the appropriate script based on the specialized environment you want to use:<br>- `npm run check:atlas` to test Atlas<br>- `npm run check:adl` to test Atlas Data Lake<br>- `npm run check:ocsp` to test OCSP<br>- `npm run check:kerberos` to test Kerberos<br>- `npm run check:tls` to test TLS<br>- `npm run check:ldap` to test LDAP authorization |
+| TypeScript Definition | `/test/types` | The TypeScript definition tests verify the type definitions are correct. | `npm run check:tsd` |
+### Spec Tests
+
+All of the MongoDB drivers follow the same [specifications (specs)][driver-specs]. Each spec has tests associated with it. Some of the tests are prose (written, descriptive) tests, which must be implemented on a case-by-case basis by the developers on the driver teams. Other tests are written in a standardized form as YAML and converted to JSON, which can be read by the specialized spec test runners that are implemented in each driver.
+
+The input test specifications are stored in `test/spec`.
+
+The actual implementations of the spec tests can be unit tests or integration tests depending on the requirements, and they can be found in the corresponding test directory according to their type. Regardless of whether they are located in the `/unit` or `/integration` test directory, test files named `spec_name.spec.test` contain spec test implementations that use a standardized runner, and `spec_name.prose.test` files contain prose test implementations.
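+
+For example, the client-side encryption tests in this repository follow this naming pattern:
+
+```
+test/integration/client-side-encryption/client_side_encryption.spec.test.js   # standardized spec runner
+test/integration/client-side-encryption/client_side_encryption.prose.test.js  # prose test implementations
+```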
## Running the Tests Locally
@@ -41,20 +46,20 @@ Start a mongod standalone with our [cluster_setup.sh](tools/cluster_setup.sh) sc
Then run the tests: `npm test`.
->**Note:** the command above will run a subset of the tests that work with the standalone server topology since the tests are being run against a standalone server.
+> **Note:** the command above will run a subset of the tests that work with the standalone server topology since the tests are being run against a standalone server.
-The output will show how many tests passed, failed, and are pending. Tests that we have indicated should be skipped using `.skip()` will appear as pending in the test results. See [Mocha's documentation][mocha-skip] for more information.
+The output will show how many tests passed, failed, and are pending. Tests that we have indicated should be skipped using `.skip()` will appear as pending in the test results. See [Mocha's documentation][mocha-skip] for more information.
In the following subsections, we'll dig into the details of running the tests.
### Testing Different MongoDB Topologies
-As we mentioned earlier, the tests check the topology of the MongoDB server being used and run the tests associated with that topology. Tests that don't have a matching topology will be skipped.
+As we mentioned earlier, the tests check the topology of the MongoDB server being used and run the tests associated with that topology. Tests that don't have a matching topology will be skipped.
In the steps above, we started a standalone server: `./test/tools/cluster_setup.sh server`.
You can use the same [cluster_setup.sh](tools/cluster_setup.sh) script to start a replica set or sharded cluster by passing the appropriate option: `./test/tools/cluster_setup.sh replica_set` or
-`./test/tools/cluster_setup.sh sharded_cluster`. If you are running more than a standalone server, make sure your `ulimit` settings are in accordance with [MongoDB's recommendations][mongodb-ulimit]. Changing the settings on the latest versions of macOS can be tricky. See [this article][macos-ulimt] for tips. (You likely don't need to do the complicated maxproc steps.)
+`./test/tools/cluster_setup.sh sharded_cluster`. If you are running more than a standalone server, make sure your `ulimit` settings are in accordance with [MongoDB's recommendations][mongodb-ulimit]. Changing the settings on the latest versions of macOS can be tricky. See [this article][macos-ulimt] for tips. (You likely don't need to do the complicated maxproc steps.)
The [cluster_setup.sh](tools/cluster_setup.sh) script automatically stores the files associated with the MongoDB server in the `data` directory, which is stored at the top level of this repository.
You can delete this directory if you want to ensure you're running a clean configuration. If you delete the directory, the associated database server will be stopped, and you will need to run [cluster_setup.sh](tools/cluster_setup.sh) again.
@@ -63,14 +68,14 @@ You can prefix `npm test` with a `MONGODB_URI` environment variable to point the
### Running Individual Tests
-The easiest way to run a single test is by appending `.only()` to the test context you want to run. For example, you could update a test function to be `it.only(‘cool test’, function() {})`. Then
-run the test using `npm run check:test` for a functional or integration test or `npm run check:unit` for a unit test. See [Mocha's documentation][mocha-only] for more detailed information on `.only()`.
+The easiest way to run a single test is by appending `.only()` to the test context you want to run. For example, you could update a test function to be `it.only('cool test', function() {})`. Then
+run the test using `npm run check:test` for a functional or integration test or `npm run check:unit` for a unit test. See [Mocha's documentation][mocha-only] for more detailed information on `.only()`.
-Another way to run a single test is to use Mocha's `grep` flag. For functional or integration tests, run `npm run check:test -- -g 'test name'`. For unit tests, run `npm run check:unit -- -g 'test name'`. See the [Mocha documentation][mocha-grep] for information on the `grep` flag.
+Another way to run a single test is to use Mocha's `grep` flag. For functional or integration tests, run `npm run check:test -- -g 'test name'`. For unit tests, run `npm run check:unit -- -g 'test name'`. See the [Mocha documentation][mocha-grep] for information on the `grep` flag.
## Running the Tests in Evergreen
-[Evergreen][evergreen-wiki] is the continuous integration (CI) system we use. Evergreen builds are automatically run whenever a pull request is created or when commits are pushed to particular branches (e.g., main, 4.0, and 3.6).
+[Evergreen][evergreen-wiki] is the continuous integration (CI) system we use. Evergreen builds are automatically run whenever a pull request is created or when commits are pushed to particular branches (e.g., main, 4.0, and 3.6).
Each Evergreen build runs the test suite against a variety of build variants that include a combination of topologies, special environments, and operating systems. By default, commits in pull requests only run a subset of the build variants in order to save time and resources. To configure a build, update `.evergreen/config.yml.in` and then generate a new Evergreen config via `node .evergreen/generate_evergreen_tasks.js`.
@@ -79,9 +84,11 @@ Each Evergreen build runs the test suite against a variety of build variants tha
Occasionally, you will want to manually kick off an Evergreen build in order to debug a test failure or to run tests against uncommitted changes.
#### Evergreen UI
+
You can use the Evergreen UI to choose to rerun a task (an entire set of test automation for a given topology and environment). Evergreen does not allow you to rerun an individual test.
#### Evergreen CLI
+
You can also choose to run a build against code on your local machine that you have not yet committed by running a pre-commit patch build.
##### Setup
@@ -94,20 +101,23 @@ Begin by setting up the Evergreen CLI.
##### Running the Build
-Once you have the Evergreen CLI setup, you are ready to run a build. Keep in mind that if you want to run only a few tests, you can append `.only()` as described in the [section above on running individual tests](#running-individual-tests).
+Once you have the Evergreen CLI set up, you are ready to run a build. Keep in mind that if you want to run only a few tests, you can append `.only()` as described in the [section above on running individual tests](#running-individual-tests).
1. In a terminal, navigate to your node driver directory:
`cd node-mongodb-native`
+
1. Use the Evergreen `patch` command. `-y` skips the confirmation dialog. `-u` includes uncommitted changes. `-p [project name]` specifies the Evergreen project. --browse opens the patch URL in your browser.
`evergreen patch -y -u -p mongo-node-driver --browse`
+
1. In your browser, select the build variants and tasks to run.
## Using a Pre-Release Version of a Dependent Library
You may want to test the driver with a pre-release version of a dependent library (e.g., [bson][js-bson]).
Follow the steps below to do so.
+
1. Open [package.json](../package.json)
1. Identify the line that specifies the dependency
1. Replace the version number with the commit hash of the dependent library. For example, you could use a particular commit for the [js-bson][js-bson] project on GitHub: `"bson": "mongodb/js-bson#e29156f7438fa77c1672fd70789d7ade9ca65061"`
@@ -118,10 +128,10 @@ repository.
## Manually Testing the Driver
-You may want to manually test changes you have made to the driver. The steps below will walk you through how to create a new Node project that uses your local copy of the Node driver. You can
+You may want to manually test changes you have made to the driver. The steps below will walk you through how to create a new Node project that uses your local copy of the Node driver. You can
modify the steps to work with existing Node projects.
-1. Navigate to a new directory and create a new Node project by running `npm init` in a terminal and working through the interactive prompts. A new file named `package.json` will be created for you.
+1. Navigate to a new directory and create a new Node project by running `npm init` in a terminal and working through the interactive prompts. A new file named `package.json` will be created for you.
1. In `package.json`, create a new dependency for `mongodb` that points to your local copy of the driver. For example:
```
"dependencies": {
@@ -131,12 +141,11 @@ modify the steps to work with existing Node projects.
1. Run `npm install` to install the dependency.
1. Create a new file that uses the driver to test your changes. See the [MongoDB Node.js Quick Start Repo][node-quick-start] for example scripts you can use.
-> **Note:** When making driver changes, you will need to run `npm run build:ts` with each change in order for it to take effect.
-
+> **Note:** When making driver changes, you will need to run `npm run build:ts` with each change in order for it to take effect.
## Testing with Special Environments
-In order to test some features, you will need to generate and set a specialized group of environment variables. The subsections below will walk you through how to generate and set the environment variables for these features.
+In order to test some features, you will need to generate and set a specialized group of environment variables. The subsections below will walk you through how to generate and set the environment variables for these features.
We recommend using a different terminal for each specialized environment to avoid the environment variables from one specialized environment impacting the test runs for another specialized environment.
@@ -152,22 +161,24 @@ The following steps will walk you through how to create and test a MongoDB Serve
> Note: MongoDB employees can pull these values from the Evergreen project's configuration.
- | Variable Name | Description |
- | ------------- | ----------- |
- | `Project` | The name of the Evergreen project where the tests will be run (e.g., `mongo-node-driver-next`) |
- | `SERVERLESS_DRIVERS_GROUP` | The Atlas organization where you will be creating the serverless instance |
- | `SERVERLESS_API_PUBLIC_KEY` | The [Atlas API Public Key][atlas-api-key] for the organization where you will be creating a serverless instance |
+ | Variable Name | Description |
+ | ---------------------------- | ---------------------------------------------------------------------------------------------------------------- |
+ | `Project` | The name of the Evergreen project where the tests will be run (e.g., `mongo-node-driver-next`) |
+ | `SERVERLESS_DRIVERS_GROUP` | The Atlas organization where you will be creating the serverless instance |
+ | `SERVERLESS_API_PUBLIC_KEY` | The [Atlas API Public Key][atlas-api-key] for the organization where you will be creating a serverless instance |
| `SERVERLESS_API_PRIVATE_KEY` | The [Atlas API Private Key][atlas-api-key] for the organization where you will be creating a serverless instance |
- | `SERVERLESS_ATLAS_USER` | The [SCRAM username][scram-auth] for the Atlas user who has permission to create a serverless instance |
- | `SERVERLESS_ATLAS_PASSWORD` | The [SCRAM password][scram-auth] for the Atlas user who has permission to create a serverless instance |
+ | `SERVERLESS_ATLAS_USER` | The [SCRAM username][scram-auth] for the Atlas user who has permission to create a serverless instance |
+ | `SERVERLESS_ATLAS_PASSWORD` | The [SCRAM password][scram-auth] for the Atlas user who has permission to create a serverless instance |
_**Remember**_ some of these are sensitive credentials, so keep them safe and only put them in your environment when you need them.
1. Run the [create-instance][create-instance-script] script:
+
```sh
$DRIVERS_TOOLS/.evergreen/serverless/create-instance.sh
```
- The script will take a few minutes to run. When it is finished, a new file named `serverless-expansion.yml` will be created in the current working directory. The file will contain information about an Evergreen expansion:
+
+ The script will take a few minutes to run. When it is finished, a new file named `serverless-expansion.yml` will be created in the current working directory. The file will contain information about an Evergreen expansion:
```yml
MONGODB_URI: xxx
@@ -186,9 +197,11 @@ The following steps will walk you through how to create and test a MongoDB Serve
```sh
cat serverless-expansion.yml | sed 's/: /=/g' > serverless.env
```
+
A new file named `serverless.env` is automatically created.
1. Update the following variables in `serverless.env`, so that they are equivalent to what our Evergreen builds do:
+
- Change `MONGODB_URI` to have the same value as `SINGLE_ATLASPROXY_SERVERLESS_URI`.
- Add `SINGLE_MONGOS_LB_URI` and set it to the value of `SINGLE_ATLASPROXY_SERVERLESS_URI`.
- Add `MULTI_MONGOS_LB_URI` and set it to the value of `SINGLE_ATLASPROXY_SERVERLESS_URI`.
@@ -216,8 +229,8 @@ The following steps will walk you through how to start and test a load balancer.
```
A new file name `lb-expansion.yml` will be automatically created. The contents of the file will be similar in structure to the code below.
```yaml
- SINGLE_MONGOS_LB_URI: "mongodb://127.0.0.1:8000/?loadBalanced=true"
- MULTI_MONGOS_LB_URI: "mongodb://127.0.0.1:8001/?loadBalanced=true"
+ SINGLE_MONGOS_LB_URI: 'mongodb://127.0.0.1:8000/?loadBalanced=true'
+ MULTI_MONGOS_LB_URI: 'mongodb://127.0.0.1:8001/?loadBalanced=true'
```
1. Generate a sourceable environment file from `lb-expansion.yml` by running the following command:
```sh
@@ -252,13 +265,13 @@ The following steps will walk you through how to run the tests for CSFLE.
> Note: MongoDB employees can pull these values from the Evergreen project's configuration.
- | Variable Name | Description |
- | ------------- | ----------- |
- | `AWS_ACCESS_KEY_ID` | The AWS access key ID used to generate KMS messages |
- | `AWS_SECRET_ACCESS_KEY` | The AWS secret access key used to generate KMS messages |
- | `AWS_REGION` | The AWS region where the KMS resides (e.g., `us-east-1`) |
- | `AWS_CMK_ID` | The Customer Master Key for the KMS |
- | `CSFLE_KMS_PROVIDERS` | The raw EJSON description of the KMS providers. An example of the format is provided below. |
+ | Variable Name | Description |
+ | ----------------------- | ------------------------------------------------------------------------------------------- |
+ | `AWS_ACCESS_KEY_ID` | The AWS access key ID used to generate KMS messages |
+ | `AWS_SECRET_ACCESS_KEY` | The AWS secret access key used to generate KMS messages |
+ | `AWS_REGION` | The AWS region where the KMS resides (e.g., `us-east-1`) |
+ | `AWS_CMK_ID` | The Customer Master Key for the KMS |
+ | `CSFLE_KMS_PROVIDERS` | The raw EJSON description of the KMS providers. An example of the format is provided below. |
The value of the `CSFLE_KMS_PROVIDERS` variable will have the following format:
@@ -285,14 +298,12 @@ The following steps will walk you through how to run the tests for CSFLE.
}
```
-
1. Run the functional tests:
`npm run check:test`
The output of the tests will include sections like "Client Side Encryption Corpus," "Client Side Encryption Functional," "Client Side Encryption Prose Tests," and "Client Side Encryption."
-
### TODO Special Env Sections
- Kerberos
diff --git a/test/tools/spec-runner/context.js b/test/tools/spec-runner/context.js
new file mode 100644
index 00000000000..1f3ae91354f
--- /dev/null
+++ b/test/tools/spec-runner/context.js
@@ -0,0 +1,246 @@
+'use strict';
+const { expect } = require('chai');
+const { resolveConnectionString } = require('./utils');
+const { ns } = require('../../../src/utils');
+class Thread {
+ constructor() {
+ this._killed = false;
+ this._error = undefined;
+ this._promise = new Promise(resolve => {
+ this.start = () => setTimeout(resolve);
+ });
+ }
+
+ run(opPromise) {
+ if (this._killed || this._error) {
+ throw new Error('Attempted to run operation on killed thread');
+ }
+
+ this._promise = this._promise.then(() => opPromise).catch(e => (this._error = e));
+ }
+
+ finish() {
+ this._killed = true;
+ return this._promise.then(() => {
+ if (this._error) {
+ throw this._error;
+ }
+ });
+ }
+}
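+// Illustrative sketch of how the runner drives a Thread: operations are queued with run(),
+// and any error recorded on the thread is rethrown when it is joined via finish().
+//
+//   const thread = new Thread();
+//   thread.start();
+//   thread.run(someOperationPromise);
+//   thread.finish(); // resolves, or rejects with the first error the thread recorded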
+
+class TestRunnerContext {
+ constructor(opts) {
+ const defaults = {
+ password: undefined,
+ user: undefined,
+ authSource: undefined,
+ useSessions: true,
+ skipPrepareDatabase: false
+ };
+ opts = Object.assign({}, defaults, opts || {});
+ this.skipPrepareDatabase = opts.skipPrepareDatabase;
+ this.useSessions = opts.useSessions;
+ this.user = opts.user;
+ this.password = opts.password;
+ this.authSource = opts.authSource;
+ if (process.env.SERVERLESS) {
+ this.user = process.env.SERVERLESS_ATLAS_USER;
+ this.password = process.env.SERVERLESS_ATLAS_PASSWORD;
+ this.authSource = 'admin';
+ this.serverless = true;
+ }
+ this.sharedClient = null;
+ this.failPointClients = [];
+ this.appliedFailPoints = [];
+
+ // event tracking
+ this.commandEvents = [];
+ this.sdamEvents = [];
+ this.cmapEvents = [];
+
+ this.threads = new Map();
+ }
+
+ runForAllClients(fn) {
+ const allClients = [this.sharedClient].concat(this.failPointClients);
+ return Promise.all(allClients.map(fn));
+ }
+
+ runFailPointCmd(fn) {
+ return this.failPointClients.length
+ ? Promise.all(this.failPointClients.map(fn))
+ : fn(this.sharedClient);
+ }
+
+ setup(config) {
+ this.sharedClient = config.newClient(
+ resolveConnectionString(config, { useMultipleMongoses: true }, this)
+ );
+ if (config.topologyType === 'Sharded') {
+ this.failPointClients = config.options.hostAddresses.map(proxy =>
+ config.newClient(`mongodb://${proxy.host}:${proxy.port}/`)
+ );
+ }
+
+ return this.runForAllClients(client => client.connect());
+ }
+
+ teardown() {
+ return Promise.all([
+ this.runForAllClients(client => client.close()),
+ this.sharedClient.close()
+ ]);
+ }
+
+ cleanupAfterSuite() {
+ const context = this;
+
+ // clean up applied failpoints
+ const cleanupPromises = this.appliedFailPoints.map(failPoint => {
+ return context.disableFailPoint(failPoint);
+ });
+
+ this.appliedFailPoints = [];
+
+ const cleanup = err => {
+ if (Array.isArray(err)) {
+ err = undefined;
+ }
+
+ if (!context.testClient) {
+ if (err) throw err;
+ return;
+ }
+
+ // clean up state
+ context.commandEvents = [];
+ context.sdamEvents = [];
+ context.cmapEvents = [];
+ context.threads.clear();
+
+ const client = context.testClient;
+ context.testClient = undefined;
+ return err ? client.close().then(() => Promise.reject(err)) : client.close();
+ };
+
+ return Promise.all(cleanupPromises).then(cleanup, cleanup);
+ }
+
+ targetedFailPoint(options) {
+ const session = options.session;
+ const failPoint = options.failPoint;
+ expect(session.isPinned).to.be.true;
+
+ return new Promise((resolve, reject) => {
+ const serverOrConnection = session.loadBalanced
+ ? session.pinnedConnection
+ : session.transaction.server;
+
+ serverOrConnection.command(ns('admin.$cmd'), failPoint, undefined, err => {
+ if (err) return reject(err);
+
+ this.appliedFailPoints.push(failPoint);
+ resolve();
+ });
+ });
+ }
+
+ enableFailPoint(failPoint) {
+ return this.runFailPointCmd(client => {
+ return client.db('admin').command(failPoint);
+ });
+ }
+
+ disableFailPoint(failPoint) {
+ return this.runFailPointCmd(client => {
+ return client.db('admin').command({
+ configureFailPoint: failPoint.configureFailPoint,
+ mode: 'off'
+ });
+ });
+ }
+
+ // event helpers
+ waitForEvent(options) {
+ const eventName = options.event;
+ const count = options.count;
+
+ return new Promise(resolve => {
+ const checkForEvent = () => {
+ const matchingEvents = findMatchingEvents(this, eventName);
+ if (matchingEvents.length >= count) {
+ resolve();
+ return;
+ }
+
+ setTimeout(() => checkForEvent(), 1000);
+ };
+
+ checkForEvent();
+ });
+ }
+
+ assertEventCount(options) {
+ const eventName = options.event;
+ const count = options.count;
+ const matchingEvents = findMatchingEvents(this, eventName);
+ expect(matchingEvents).to.have.lengthOf.at.least(count);
+ }
+
+ runAdminCommand(command, options) {
+ return this.sharedClient.db('admin').command(command, options);
+ }
+
+ // simulated thread helpers
+ wait(options) {
+ const ms = options.ms;
+ return new Promise(r => setTimeout(r, ms));
+ }
+
+ startThread(options) {
+ const name = options.name;
+ const threads = this.threads;
+ if (threads.has(name)) {
+ throw new Error(`Thread "${name}" already exists`);
+ }
+
+ const thread = new Thread();
+ threads.set(name, thread);
+ thread.start();
+ }
+
+ runOnThread(threadName, operation) {
+ const threads = this.threads;
+ if (!threads.has(threadName)) {
+ throw new Error(`Attempted to run operation on non-existent thread "${threadName}"`);
+ }
+
+ const thread = threads.get(threadName);
+ thread.run(operation);
+ }
+
+ waitForThread(options) {
+ const name = options.name;
+ const threads = this.threads;
+ if (!threads.has(name)) {
+ throw new Error(`Attempted to wait for non-existent thread "${name}"`);
+ }
+
+ const thread = threads.get(name);
+ return thread.finish().catch(e => {
+ throw e;
+ });
+ }
+}
+
+function findMatchingEvents(context, eventName) {
+ const allEvents = context.sdamEvents.concat(context.cmapEvents);
+ return eventName === 'ServerMarkedUnknownEvent'
+ ? context.sdamEvents
+ .filter(event => event.constructor.name === 'ServerDescriptionChangedEvent')
+ .filter(event => event.newDescription.type === 'Unknown')
+ : allEvents.filter(event => event.constructor.name.match(new RegExp(eventName)));
+}
+
+module.exports = { TestRunnerContext };
diff --git a/test/tools/spec-runner/index.js b/test/tools/spec-runner/index.js
new file mode 100644
index 00000000000..c80d971931a
--- /dev/null
+++ b/test/tools/spec-runner/index.js
@@ -0,0 +1,882 @@
+'use strict';
+const path = require('path');
+const fs = require('fs');
+const chai = require('chai');
+const expect = chai.expect;
+const { EJSON } = require('bson');
+const { isRecord } = require('../../../src/utils');
+const TestRunnerContext = require('./context').TestRunnerContext;
+const resolveConnectionString = require('./utils').resolveConnectionString;
+const { shouldRunServerlessTest } = require('../../tools/utils');
+
+// Promise.try alternative https://stackoverflow.com/questions/60624081/promise-try-without-bluebird/60624164?noredirect=1#comment107255389_60624164
+function promiseTry(callback) {
+ return new Promise((resolve, reject) => {
+ try {
+ resolve(callback());
+ } catch (e) {
+ reject(e);
+ }
+ });
+}
+
+chai.use(require('chai-subset'));
+chai.use(require('./matcher').default);
+
+function escape(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
+}
+
+function translateClientOptions(options) {
+ Object.keys(options).forEach(key => {
+ if (['j', 'journal', 'fsync', 'wtimeout', 'wtimeoutms'].indexOf(key) >= 0) {
+ throw new Error(
+ `Unhandled write concern key needs to be added to options.writeConcern: ${key}`
+ );
+ }
+
+ if (key === 'w') {
+ options.writeConcern = { w: options.w };
+ delete options[key];
+ } else if (key === 'readConcernLevel') {
+ options.readConcern = { level: options.readConcernLevel };
+ delete options[key];
+ } else if (key === 'autoEncryptOpts') {
+ options.autoEncryption = Object.assign({}, options.autoEncryptOpts);
+
+ if (options.autoEncryptOpts.keyVaultNamespace == null) {
+ options.autoEncryption.keyVaultNamespace = 'keyvault.datakeys';
+ }
+
+ if (options.autoEncryptOpts.kmsProviders) {
+ const kmsProviders = EJSON.parse(process.env.CSFLE_KMS_PROVIDERS || 'NOT_PROVIDED');
+ if (options.autoEncryptOpts.kmsProviders.local) {
+ kmsProviders.local = options.autoEncryptOpts.kmsProviders.local;
+ }
+
+ if (options.autoEncryptOpts.kmsProviders.awsTemporary) {
+ kmsProviders.aws = {
+ accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID,
+ secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY,
+ sessionToken: process.env.CSFLE_AWS_TEMP_SESSION_TOKEN
+ };
+ }
+
+ if (options.autoEncryptOpts.kmsProviders.awsTemporaryNoSessionToken) {
+ kmsProviders.aws = {
+ accessKeyId: process.env.CSFLE_AWS_TEMP_ACCESS_KEY_ID,
+ secretAccessKey: process.env.CSFLE_AWS_TEMP_SECRET_ACCESS_KEY
+ };
+ }
+
+ options.autoEncryption.kmsProviders = kmsProviders;
+ }
+
+ delete options.autoEncryptOpts;
+ }
+ });
+
+ return options;
+}
+
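+// Reads every .json file in the given spec directory, parses it as relaxed EJSON, and
+// attaches the file basename as the suite name.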
+function gatherTestSuites(specPath) {
+ return fs
+ .readdirSync(specPath)
+ .filter(x => x.indexOf('.json') !== -1)
+ .map(x =>
+ Object.assign(EJSON.parse(fs.readFileSync(path.join(specPath, x)), { relaxed: true }), {
+ name: path.basename(x, '.json')
+ })
+ );
+}
+
+function parseTopologies(topologies) {
+ if (topologies == null) {
+ return ['replicaset', 'sharded', 'single'];
+ }
+
+ return topologies;
+}
+
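+// Normalizes a runOn array into environment requirements: a topology list (defaulting to
+// all topologies), a server version range string, and auth/serverless flags.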
+function parseRunOn(runOn) {
+ return runOn.map(config => {
+ const topology = parseTopologies(config.topology);
+ const version = [];
+ if (config.minServerVersion) {
+ version.push(`>= ${config.minServerVersion}`);
+ }
+
+ if (config.maxServerVersion) {
+ version.push(`<= ${config.maxServerVersion}`);
+ }
+
+ const mongodb = version.join(' ');
+ return {
+ topology,
+ mongodb,
+ authEnabled: !!config.authEnabled,
+ serverless: config.serverless
+ };
+ });
+}
+
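+// For each suite and each environment requirement, generates a mocha describe block whose
+// tests prepare the database, optionally enable a fail point, run the spec operations via
+// runTestSuiteTest, and then validate the outcome collection.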
+function generateTopologyTests(testSuites, testContext, filter) {
+ testSuites.forEach(testSuite => {
+ // TODO: remove this when SPEC-1255 is completed
+ let runOn = testSuite.runOn;
+ if (!testSuite.runOn) {
+ runOn = [{ minServerVersion: testSuite.minServerVersion }];
+ if (testSuite.maxServerVersion) {
+ runOn.push({ maxServerVersion: testSuite.maxServerVersion });
+ }
+ }
+ const environmentRequirementList = parseRunOn(runOn);
+
+ environmentRequirementList.forEach(requires => {
+ const suiteName = `${testSuite.name} - ${requires.topology.join()}`;
+ describe(suiteName, {
+ metadata: { requires },
+ test: function () {
+ beforeEach(() => prepareDatabaseForSuite(testSuite, testContext));
+ afterEach(() => testContext.cleanupAfterSuite());
+ testSuite.tests.forEach(spec => {
+ const maybeIt = shouldRunSpecTest(this.configuration, requires, spec, filter)
+ ? it
+ : it.skip;
+ maybeIt(spec.description, function () {
+ let testPromise = Promise.resolve();
+ if (spec.failPoint) {
+ testPromise = testPromise.then(() => testContext.enableFailPoint(spec.failPoint));
+ }
+
+ // run the actual test
+ testPromise = testPromise.then(() =>
+ runTestSuiteTest(this.configuration, spec, testContext)
+ );
+
+ if (spec.failPoint) {
+ testPromise = testPromise.then(() => testContext.disableFailPoint(spec.failPoint));
+ }
+ return testPromise.then(() => validateOutcome(spec, testContext));
+ });
+ });
+ }
+ });
+ });
+ });
+}
+
+function shouldRunSpecTest(configuration, requires, spec, filter) {
+ if (requires.authEnabled && process.env.AUTH !== 'auth') {
+    // TODO(NODE-3488): We do not have a way to determine if auth is enabled in our mocha metadata.
+    // We would need to run admin.command({getCmdLineOpts: 1}); if it errors with code 13, auth is on.
+ return false;
+ }
+
+ if (
+ requires.serverless &&
+ !shouldRunServerlessTest(requires.serverless, !!process.env.SERVERLESS)
+ ) {
+ return false;
+ }
+
+ if (
+ spec.operations.some(
+ op => op.name === 'waitForEvent' && op.arguments.event === 'PoolReadyEvent'
+ )
+ ) {
+    // TODO(NODE-2994): The connection storms work will add new events to the connection pool
+ return false;
+ }
+
+ if (spec.skipReason || (filter && typeof filter === 'function' && !filter(spec, configuration))) {
+ return false;
+ }
+ return true;
+}
+
+// Test runner helpers
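+// Kills any open sessions (where supported), drops and recreates the suite's collection and
+// key vault data, seeds the initial documents, and primes every client with a throwaway
+// distinct call so later command expectations start from a clean slate.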
+function prepareDatabaseForSuite(suite, context) {
+ context.dbName = suite.database_name || 'spec_db';
+ context.collectionName = suite.collection_name || 'spec_collection';
+
+ const db = context.sharedClient.db(context.dbName);
+
+ if (context.skipPrepareDatabase) return Promise.resolve();
+
+ // Note: killAllSession is not supported on serverless, see CLOUDP-84298
+ const setupPromise = context.serverless
+ ? Promise.resolve()
+ : db
+ .admin()
+ .command({ killAllSessions: [] })
+ .catch(err => {
+ if (
+ err.message.match(/no such (cmd|command)/) ||
+ err.message.match(/Failed to kill on some hosts/) ||
+ err.code === 11601
+ ) {
+ return;
+ }
+
+ throw err;
+ });
+
+ if (context.collectionName == null || context.dbName === 'admin') {
+ return setupPromise;
+ }
+
+ const coll = db.collection(context.collectionName);
+ return setupPromise
+ .then(() => coll.drop({ writeConcern: { w: 'majority' } }))
+ .catch(err => {
+ if (!err.message.match(/ns not found/)) throw err;
+ })
+ .then(() => {
+ if (suite.key_vault_data) {
+ const dataKeysCollection = context.sharedClient.db('keyvault').collection('datakeys');
+ return dataKeysCollection
+ .drop({ writeConcern: { w: 'majority' } })
+ .catch(err => {
+ if (!err.message.match(/ns not found/)) {
+ throw err;
+ }
+ })
+ .then(() => {
+ if (suite.key_vault_data.length) {
+ return dataKeysCollection.insertMany(suite.key_vault_data, {
+ writeConcern: { w: 'majority' }
+ });
+ }
+ });
+ }
+ })
+ .then(() => {
+ const options = { writeConcern: { w: 'majority' } };
+ if (suite.json_schema) {
+ options.validator = { $jsonSchema: suite.json_schema };
+ }
+
+ return db.createCollection(context.collectionName, options);
+ })
+ .then(() => {
+ if (suite.data && Array.isArray(suite.data) && suite.data.length > 0) {
+ return coll.insertMany(suite.data, { writeConcern: { w: 'majority' } });
+ }
+ })
+ .then(() => {
+ return context.runForAllClients(client => {
+ return client
+ .db(context.dbName)
+ .collection(context.collectionName)
+ .distinct('x')
+ .catch(() => {});
+ });
+ });
+}
+
+function parseSessionOptions(options) {
+ const result = Object.assign({}, options);
+ if (result.defaultTransactionOptions && result.defaultTransactionOptions.readPreference) {
+ result.defaultTransactionOptions.readPreference = normalizeReadPreference(
+ result.defaultTransactionOptions.readPreference.mode
+ );
+ }
+
+ return result;
+}
+
+const IGNORED_COMMANDS = new Set(['ismaster', 'configureFailPoint', 'endSessions']);
+const SDAM_EVENTS = new Set([
+ 'serverOpening',
+ 'serverClosed',
+ 'serverDescriptionChanged',
+ 'topologyOpening',
+ 'topologyClosed',
+ 'topologyDescriptionChanged',
+ 'serverHeartbeatStarted',
+ 'serverHeartbeatSucceeded',
+ 'serverHeartbeatFailed'
+]);
+
+const CMAP_EVENTS = new Set([
+ 'connectionPoolCreated',
+ 'connectionPoolClosed',
+ 'connectionCreated',
+ 'connectionReady',
+ 'connectionClosed',
+ 'connectionCheckOutStarted',
+ 'connectionCheckOutFailed',
+ 'connectionCheckedOut',
+ 'connectionCheckedIn',
+ 'connectionPoolCleared'
+]);
+
+let displayCommands = false;
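+// Creates a fresh client per test, records SDAM/CMAP/command-started events, starts the
+// spec's two sessions (when sessions are in use), runs the spec operations, and finally
+// validates the recorded command events against the spec expectations.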
+function runTestSuiteTest(configuration, spec, context) {
+ context.commandEvents = [];
+ const clientOptions = translateClientOptions(
+ Object.assign(
+ {
+ heartbeatFrequencyMS: 100,
+ minHeartbeatFrequencyMS: 100,
+ monitorCommands: true
+ },
+ spec.clientOptions
+ )
+ );
+
+ const url = resolveConnectionString(configuration, spec, context);
+ const client = configuration.newClient(url, clientOptions);
+ CMAP_EVENTS.forEach(eventName => client.on(eventName, event => context.cmapEvents.push(event)));
+ SDAM_EVENTS.forEach(eventName => client.on(eventName, event => context.sdamEvents.push(event)));
+
+ let skippedInitialPing = false;
+ client.on('commandStarted', event => {
+ if (IGNORED_COMMANDS.has(event.commandName)) {
+ return;
+ }
+
+ // If credentials were provided, then the Topology sends an initial `ping` command
+ // that we want to skip
+ if (event.commandName === 'ping' && client.topology.s.credentials && !skippedInitialPing) {
+ skippedInitialPing = true;
+ return;
+ }
+
+ context.commandEvents.push(event);
+
+    // very useful for debugging; gated by the displayCommands flag above
+    if (displayCommands) {
+      console.dir(event, { depth: 5 });
+    }
+ });
+
+ return client.connect().then(client => {
+ context.testClient = client;
+ const sessionOptions = Object.assign({}, spec.transactionOptions);
+
+ spec.sessionOptions = spec.sessionOptions || {};
+ const database = client.db(context.dbName);
+
+ let session0, session1;
+ let savedSessionData;
+
+ if (context.useSessions) {
+ try {
+ session0 = client.startSession(
+ Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session0))
+ );
+ session1 = client.startSession(
+ Object.assign({}, sessionOptions, parseSessionOptions(spec.sessionOptions.session1))
+ );
+
+ savedSessionData = {
+ session0: JSON.parse(EJSON.stringify(session0.id)),
+ session1: JSON.parse(EJSON.stringify(session1.id))
+ };
+ } catch (err) {
+ // ignore
+ }
+ }
+ // enable to see useful APM debug information at the time of actual test run
+ // displayCommands = true;
+
+ const operationContext = {
+ client,
+ database,
+ collectionName: context.collectionName,
+ session0,
+ session1,
+ testRunner: context
+ };
+
+ let testPromise = Promise.resolve();
+ return testPromise
+ .then(() => testOperations(spec, operationContext))
+ .catch(err => {
+ // If the driver throws an exception / returns an error while executing this series
+ // of operations, store the error message.
+ throw err;
+ })
+ .then(() => {
+ const promises = [];
+ if (session0) promises.push(session0.endSession());
+ if (session1) promises.push(session1.endSession());
+ return Promise.all(promises);
+ })
+ .then(() => validateExpectations(context.commandEvents, spec, savedSessionData));
+ });
+}
+
+function validateOutcome(testData, testContext) {
+ if (testData.outcome && testData.outcome.collection) {
+ const outcomeCollection = testData.outcome.collection.name || testContext.collectionName;
+
+ // use the client without transactions to verify
+ return testContext.sharedClient
+ .db(testContext.dbName)
+ .collection(outcomeCollection)
+ .find({}, { readPreference: 'primary', readConcern: { level: 'local' } })
+ .sort({ _id: 1 })
+ .toArray()
+ .then(docs => {
+ expect(docs).to.matchMongoSpec(testData.outcome.collection.data);
+ });
+ }
+
+ return Promise.resolve();
+}
+
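+// Compares the recorded command-started events against the spec's expectations, normalizing
+// command shapes and special-casing sort, which is preserved as a Map until NODE-3235.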
+function validateExpectations(commandEvents, spec, savedSessionData) {
+ if (!spec.expectations || !Array.isArray(spec.expectations) || spec.expectations.length === 0) {
+ return;
+ }
+
+ const actualEvents = normalizeCommandShapes(commandEvents);
+ const rawExpectedEvents = spec.expectations.map(x => x.command_started_event);
+ const expectedEvents = normalizeCommandShapes(rawExpectedEvents);
+ expect(actualEvents).to.have.length(expectedEvents.length);
+
+ expectedEvents.forEach((expected, idx) => {
+ const actual = actualEvents[idx];
+
+ if (expected.commandName != null) {
+ expect(actual.commandName).to.equal(expected.commandName);
+ }
+
+ if (expected.databaseName != null) {
+ expect(actual.databaseName).to.equal(expected.databaseName);
+ }
+
+ const actualCommand = actual.command;
+ const expectedCommand = expected.command;
+ if (expectedCommand.sort) {
+ // TODO: This is a workaround that works because all sorts in the specs
+ // are objects with one key; ideally we'd want to adjust the spec definitions
+ // to indicate whether order matters for any given key and set general
+ // expectations accordingly (see NODE-3235)
+ expect(Object.keys(expectedCommand.sort)).to.have.lengthOf(1);
+ expect(actualCommand.sort).to.be.instanceOf(Map);
+ expect(actualCommand.sort.size).to.equal(1);
+ const expectedKey = Object.keys(expectedCommand.sort)[0];
+ expect(actualCommand.sort).to.have.all.keys(expectedKey);
+ actualCommand.sort = { [expectedKey]: actualCommand.sort.get(expectedKey) };
+ }
+
+ expect(actualCommand).withSessionData(savedSessionData).to.matchMongoSpec(expectedCommand);
+ });
+}
+
+function normalizeCommandShapes(commands) {
+ return commands.map(def => {
+ const output = JSON.parse(
+ EJSON.stringify(
+ {
+ command: def.command,
+ commandName: def.command_name || def.commandName || Object.keys(def.command)[0],
+ databaseName: def.database_name ? def.database_name : def.databaseName
+ },
+ { relaxed: true }
+ )
+ );
+ // TODO: this is a workaround to preserve sort Map type until NODE-3235 is completed
+ if (def.command.sort) {
+ output.command.sort = def.command.sort;
+ }
+ return output;
+ });
+}
+
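+// Reduces a driver CRUD/findAndModify result to the shape the spec expects to assert on.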
+function extractCrudResult(result, operation) {
+ if (Array.isArray(result) || !isRecord(result)) {
+ return result;
+ }
+
+ if (result.value) {
+ // some of our findAndModify results return more than just an id, so we need to pluck
+ const resultKeys = Object.keys(operation.result);
+ if (resultKeys.length === 1 && resultKeys[0] === '_id') {
+ return { _id: result.value._id };
+ }
+
+ return result.value;
+ }
+
+ return operation.result;
+}
+
+function isTransactionCommand(command) {
+ return ['startTransaction', 'commitTransaction', 'abortTransaction'].indexOf(command) !== -1;
+}
+
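+// Walks the test runner context's prototype chain to decide whether an operation name refers
+// to a runner helper (e.g. wait, startThread) rather than a driver method.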
+function isTestRunnerCommand(context, commandName) {
+ const testRunnerContext = context.testRunner;
+
+ let methods = new Set();
+ let object = testRunnerContext;
+ while (object !== Object.prototype) {
+ Object.getOwnPropertyNames(object)
+ .filter(prop => typeof object[prop] === 'function' && prop !== 'constructor')
+ .map(prop => methods.add(prop));
+
+ object = Object.getPrototypeOf(object);
+ }
+
+ return methods.has(commandName);
+}
+
+function extractBulkRequests(requests) {
+ return requests.map(request => ({ [request.name]: request.arguments }));
+}
+
+function translateOperationName(operationName) {
+ if (operationName === 'runCommand') return 'command';
+ if (operationName === 'listDatabaseNames') return 'listDatabases';
+ if (operationName === 'listCollectionNames') return 'listCollections';
+ return operationName;
+}
+
+function normalizeReadPreference(mode) {
+ return mode.charAt(0).toLowerCase() + mode.substr(1);
+}
+
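+// Builds a positional argument array for operations that have been migrated to explicit
+// handling (currently only distinct); returns undefined for everything else so the generic
+// argument translation in testOperation applies.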
+function resolveOperationArgs(operationName, operationArgs, context) {
+ const result = [];
+ function pluck(fromObject, toArray, fields) {
+ for (const field of fields) {
+ if (fromObject[field]) toArray.push(fromObject[field]);
+ }
+ }
+
+ // TODO: migrate all operations here
+ if (operationName === 'distinct') {
+ pluck(operationArgs, result, ['fieldName', 'filter']);
+ if (result.length === 1) result.push({});
+ } else {
+ return;
+ }
+
+ // compile the options
+ const options = {};
+ if (operationArgs.options) {
+ Object.assign(options, operationArgs.options);
+ if (options.readPreference) {
+ options.readPreference = normalizeReadPreference(options.readPreference.mode);
+ }
+ }
+
+ if (operationArgs.session) {
+ if (isTransactionCommand(operationName)) return;
+ options.session = context[operationArgs.session];
+ }
+
+ result.push(options);
+
+ // determine if there is a callback to add
+ if (operationArgs.callback) {
+ result.push(() =>
+ testOperations(operationArgs.callback, context, { swallowOperationErrors: false })
+ );
+ }
+
+ return result;
+}
+
+const CURSOR_COMMANDS = new Set(['find', 'aggregate', 'listIndexes', 'listCollections']);
+const ADMIN_COMMANDS = new Set(['listDatabases']);
+
+function maybeSession(operation, context) {
+ return (
+ operation &&
+ operation.arguments &&
+ operation.arguments.session &&
+ context[operation.arguments.session]
+ );
+}
+
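+// Operations that need bespoke handling instead of the generic argument translation in
+// testOperation below.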
+const kOperations = new Map([
+ [
+ 'recordPrimary',
+ (operation, testRunner, context /*, options */) => {
+ testRunner.recordPrimary(context.client);
+ }
+ ],
+ [
+ 'waitForPrimaryChange',
+ (operation, testRunner, context /*, options */) => {
+ return testRunner.waitForPrimaryChange(context.client);
+ }
+ ],
+ [
+ 'runOnThread',
+ (operation, testRunner, context, options) => {
+ const args = operation.arguments;
+ const threadName = args.name;
+ const subOperation = args.operation;
+
+ return testRunner.runOnThread(
+ threadName,
+ testOperation(subOperation, context[subOperation.object], context, options)
+ );
+ }
+ ],
+ [
+ 'createIndex',
+ (operation, collection, context /*, options */) => {
+ const fieldOrSpec = operation.arguments.keys;
+ const options = { session: maybeSession(operation, context) };
+ if (operation.arguments.name) options.name = operation.arguments.name;
+ return collection.createIndex(fieldOrSpec, options);
+ }
+ ],
+ [
+ 'createCollection',
+ (operation, db, context /*, options */) => {
+ const collectionName = operation.arguments.collection;
+ const session = maybeSession(operation, context);
+ return db.createCollection(collectionName, { session });
+ }
+ ],
+ [
+ 'dropCollection',
+ (operation, db, context /*, options */) => {
+ const collectionName = operation.arguments.collection;
+ const session = maybeSession(operation, context);
+ return db.dropCollection(collectionName, { session });
+ }
+ ],
+ [
+ 'dropIndex',
+    (operation, collection, context /*, options */) => {
+ const indexName = operation.arguments.name;
+ const session = maybeSession(operation, context);
+ return collection.dropIndex(indexName, { session });
+ }
+ ],
+ [
+ 'mapReduce',
+ (operation, collection, context /*, options */) => {
+ const args = operation.arguments;
+ const map = args.map;
+ const reduce = args.reduce;
+ const options = { session: maybeSession(operation, context) };
+ if (args.out) options.out = args.out;
+ return collection.mapReduce(map, reduce, options);
+ }
+ ]
+]);
+
+/**
+ * @param {object} operation the operation definition from the spec test
+ * @param {object} obj the object to call the operation on
+ * @param {object} context a context object containing sessions used for the test
+ * @param {object} [options] Optional settings
+ * @param {boolean} [options.swallowOperationErrors] Generally we want to observe operation errors,
+ *   validate them against our expectations, and then swallow them. In cases like `withTransaction`
+ *   we want to use the same `testOperations` to build the lambda, and in those cases it is not
+ *   desirable to swallow the errors, since we need to test this behavior.
+ */
+function testOperation(operation, obj, context, options) {
+ options = options || { swallowOperationErrors: true };
+ const opOptions = {};
+ let args = [];
+ const operationName = translateOperationName(operation.name);
+
+ let opPromise;
+ if (kOperations.has(operationName)) {
+ opPromise = kOperations.get(operationName)(operation, obj, context, options);
+ } else {
+ if (operation.arguments) {
+ args = resolveOperationArgs(operationName, operation.arguments, context);
+
+ if (args == null) {
+ args = [];
+ Object.keys(operation.arguments).forEach(key => {
+ if (key === 'callback') {
+ args.push(() =>
+ testOperations(operation.arguments.callback, context, {
+ swallowOperationErrors: false
+ })
+ );
+ return;
+ }
+
+ if (['filter', 'fieldName', 'document', 'documents', 'pipeline'].indexOf(key) !== -1) {
+ return args.unshift(operation.arguments[key]);
+ }
+
+ if ((key === 'map' || key === 'reduce') && operationName === 'mapReduce') {
+ return args.unshift(operation.arguments[key]);
+ }
+
+ if (key === 'command') return args.unshift(operation.arguments[key]);
+ if (key === 'requests')
+ return args.unshift(extractBulkRequests(operation.arguments[key]));
+ if (key === 'update' || key === 'replacement') return args.push(operation.arguments[key]);
+ if (key === 'session') {
+ if (isTransactionCommand(operationName)) return;
+ opOptions.session = context[operation.arguments.session];
+ return;
+ }
+
+ if (key === 'returnDocument') {
+ opOptions.returnDocument = operation.arguments[key].toLowerCase();
+ return;
+ }
+
+ if (key === 'options') {
+ Object.assign(opOptions, operation.arguments[key]);
+ if (opOptions.readPreference) {
+ opOptions.readPreference = normalizeReadPreference(opOptions.readPreference.mode);
+ }
+
+ return;
+ }
+
+ if (key === 'readPreference') {
+ opOptions[key] = normalizeReadPreference(operation.arguments[key].mode);
+ return;
+ }
+
+ opOptions[key] = operation.arguments[key];
+ });
+ }
+ }
+
+ if (
+ args.length === 0 &&
+ !isTransactionCommand(operationName) &&
+ !isTestRunnerCommand(context, operationName)
+ ) {
+ args.push({});
+ }
+
+ if (Object.keys(opOptions).length > 0) {
+ // NOTE: this is awful, but in order to provide options for some methods we need to add empty
+ // query objects.
+ if (operationName === 'distinct') {
+ args.push({});
+ }
+
+ args.push(opOptions);
+ }
+
+ if (ADMIN_COMMANDS.has(operationName)) {
+ obj = obj.db().admin();
+ }
+
+ if (operation.name === 'listDatabaseNames' || operation.name === 'listCollectionNames') {
+ opOptions.nameOnly = true;
+ }
+
+ if (CURSOR_COMMANDS.has(operationName)) {
+ // `find` creates a cursor, so we need to call `toArray` on it
+ const cursor = obj[operationName].apply(obj, args);
+ opPromise = cursor.toArray();
+ } else {
+ // wrap this in a `promiseTry` because some operations might throw
+ opPromise = promiseTry(() => obj[operationName].apply(obj, args));
+ }
+ }
+
+ if (operation.error) {
+ opPromise = opPromise.then(
+ () => {
+ throw new Error('expected an error!');
+ },
+ () => {}
+ );
+ }
+
+ if (operation.result) {
+ const result = operation.result;
+
+ if (
+ result.errorContains != null ||
+ result.errorCodeName ||
+ result.errorLabelsContain ||
+ result.errorLabelsOmit
+ ) {
+ return opPromise.then(
+ () => {
+ throw new Error('expected an error!');
+ },
+ err => {
+ const errorContains = result.errorContains;
+ const errorCodeName = result.errorCodeName;
+ const errorLabelsContain = result.errorLabelsContain;
+ const errorLabelsOmit = result.errorLabelsOmit;
+
+ if (errorLabelsContain) {
+ expect(err).to.have.property('errorLabels');
+ expect(err.errorLabels).to.include.members(errorLabelsContain);
+ }
+
+ if (errorLabelsOmit) {
+ if (err.errorLabels && Array.isArray(err.errorLabels) && err.errorLabels.length !== 0) {
+ expect(errorLabelsOmit).to.not.include.members(err.errorLabels);
+ }
+ }
+
+            if (errorContains) {
+ expect(err.message).to.match(new RegExp(escape(errorContains), 'i'));
+ }
+
+ if (errorCodeName) {
+ expect(err.codeName).to.equal(errorCodeName);
+ }
+
+ if (!options.swallowOperationErrors) {
+ throw err;
+ }
+ }
+ );
+ }
+
+ return opPromise.then(opResult => {
+ const actual = extractCrudResult(opResult, operation);
+ expect(actual).to.matchMongoSpec(operation.result);
+ });
+ }
+
+ return opPromise;
+}
+
+function convertCollectionOptions(options) {
+ const result = {};
+ Object.keys(options).forEach(key => {
+ if (key === 'readPreference') {
+ result[key] = normalizeReadPreference(options[key].mode);
+ } else {
+ result[key] = options[key];
+ }
+ });
+
+ return result;
+}
+
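+// Runs the spec's operations in sequence, rebinding operationContext.collection (with any
+// per-operation collectionOptions) before each collection-level operation.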
+function testOperations(testData, operationContext, options) {
+ options = options || { swallowOperationErrors: true };
+ return testData.operations.reduce((combined, operation) => {
+ return combined.then(() => {
+ const object = operation.object || 'collection';
+ if (object === 'collection') {
+ const db = operationContext.database;
+ const collectionName = operationContext.collectionName;
+ const collectionOptions = operation.collectionOptions || {};
+
+ operationContext[object] = db.collection(
+ collectionName,
+ convertCollectionOptions(collectionOptions)
+ );
+ }
+
+ return testOperation(operation, operationContext[object], operationContext, options);
+ });
+ }, Promise.resolve());
+}
+
+module.exports = {
+ TestRunnerContext,
+ gatherTestSuites,
+ generateTopologyTests,
+ parseRunOn
+};
diff --git a/test/tools/spec-runner/matcher.js b/test/tools/spec-runner/matcher.js
new file mode 100644
index 00000000000..757b049d4a0
--- /dev/null
+++ b/test/tools/spec-runner/matcher.js
@@ -0,0 +1,199 @@
+'use strict';
+
+const SYMBOL_DOES_NOT_EXIST = Symbol('[[assert does not exist]]');
+const SYMBOL_DOES_EXIST = Symbol('[[assert does exist]]');
+const SYMBOL_IGNORE = Symbol('[[ignore]]');
+
+const MONGOCRYPT_TO_EJSON_TYPE_MAP = new Map([
+ ['binData', '$binary'],
+ ['long', ['$numberLong', 'number']]
+]);
+
+const BSON_TO_EJSON_TYPE_MAP = new Map([['Binary', '$binary']]);
+
+function valIs42(input) {
+ return input === 42 || input === '42';
+}
+
+function is42(input) {
+ if (!input) return false;
+ return valIs42(input) || valIs42(input.$numberInt) || valIs42(input.$numberLong);
+}
+
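+// Matches a single expected key against the actual command, handling the spec's sentinel
+// values: null (assert absence), $$type (assert BSON type), the literal 42 (ignore or assert
+// existence depending on the key), and string lsid placeholders resolved from session data.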
+function generateMatchAndDiffSpecialCase(key, expectedObj, actualObj, metadata) {
+ const expected = expectedObj[key];
+ const actual = actualObj[key];
+
+ if (expected === null) {
+ if (key === 'readConcern') {
+ // HACK: get around NODE-1889
+ return {
+ match: true,
+ expected: SYMBOL_DOES_NOT_EXIST,
+ actual: SYMBOL_DOES_NOT_EXIST
+ };
+ }
+
+ const match = !Object.prototype.hasOwnProperty.call(actualObj, key);
+ return {
+ match,
+ expected: SYMBOL_DOES_NOT_EXIST,
+ actual: match ? SYMBOL_DOES_NOT_EXIST : actual
+ };
+ }
+
+ if (typeof expected === 'object' && Object.keys(expected)[0] === '$$type') {
+ const expectedType = MONGOCRYPT_TO_EJSON_TYPE_MAP.get(expected.$$type);
+
+ let actualType;
+ if (actual._bsontype) {
+ actualType = BSON_TO_EJSON_TYPE_MAP.get(actual._bsontype);
+ } else {
+ if (typeof actual === 'object' && Object.keys(actual).length) {
+ actualType = Object.keys(actual)[0];
+ } else {
+ actualType = typeof actual;
+ }
+ }
+
+ let match;
+ if (Array.isArray(expectedType)) {
+ // we accept a direct type match, or a typeof match
+ match = expectedType.some(type => {
+ if (type[0] === '$') {
+ return type === actualType;
+ }
+
+ return typeof actual === type;
+ });
+ } else {
+ match = expectedType === actualType;
+ }
+
+ return { match, expected, actual };
+ }
+
+ const expectedIs42 = is42(expected);
+ if (key === 'lsid' && typeof expected === 'string') {
+ // Case lsid - assert that session matches session in session data
+ const sessionData = metadata.sessionData;
+ const lsid = sessionData[expected];
+ return generateMatchAndDiff(lsid, actual, metadata);
+ } else if (key === 'getMore' && expectedIs42) {
+ // cursorid - explicitly ignore 42 values
+ return {
+ match: true,
+ expected: SYMBOL_IGNORE,
+ actual: SYMBOL_IGNORE
+ };
+ } else if (key === 'afterClusterTime' && expectedIs42) {
+ // afterClusterTime - assert that value exists
+ const match = actual != null;
+ return {
+ match,
+ expected: match ? actual : SYMBOL_DOES_EXIST,
+ actual
+ };
+ } else if (key === 'recoveryToken' && expectedIs42) {
+ // recoveryToken - assert that value exists
+ // TODO: assert that value is BSON
+ const match = actual != null;
+ return {
+ match,
+ expected: match ? actual : SYMBOL_DOES_EXIST,
+ actual
+ };
+ } else if (expectedIs42) {
+ return {
+ match: true,
+ expected: SYMBOL_IGNORE,
+ actual: SYMBOL_IGNORE
+ };
+ } else {
+ // default
+ return generateMatchAndDiff(expected, actual, metadata);
+ }
+}
+
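+// Recursively diffs expected vs. actual values, returning { match, expected, actual } trees
+// that chai can display when the assertion fails.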
+function generateMatchAndDiff(expected, actual, metadata) {
+ const typeOfExpected = typeof expected;
+
+ if (typeOfExpected !== typeof actual) {
+ return { match: false, expected, actual };
+ }
+
+ if (typeOfExpected !== 'object' || expected == null || actual == null) {
+ return { match: expected === actual, expected, actual };
+ }
+
+ if (expected instanceof Date) {
+ return {
+ match: actual instanceof Date ? expected.getTime() === actual.getTime() : false,
+ expected,
+ actual
+ };
+ }
+
+ if (Array.isArray(expected)) {
+ if (!Array.isArray(actual)) {
+ return { match: false, expected, actual };
+ }
+
+ return expected
+ .map((val, idx) => generateMatchAndDiff(val, actual[idx], metadata))
+ .reduce(
+ (ret, value) => {
+ ret.match = ret.match && value.match;
+ ret.expected.push(value.expected);
+ ret.actual.push(value.actual);
+ return ret;
+ },
+ { match: true, expected: [], actual: [] }
+ );
+ }
+
+ return Object.keys(expected).reduce(
+ (ret, key) => {
+ const check = generateMatchAndDiffSpecialCase(key, expected, actual, metadata);
+ ret.match = ret.match && check.match;
+ ret.expected[key] = check.expected;
+ ret.actual[key] = check.actual;
+ return ret;
+ },
+ {
+ match: true,
+ expected: {},
+ actual: {}
+ }
+ );
+}
+
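+// Chai plugin registering the matchMongoSpec assertion and the withSessionData flag used to
+// resolve lsid placeholders.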
+function matchMongoSpec(chai, utils) {
+ chai.Assertion.addMethod('withSessionData', function (sessionData) {
+ utils.flag(this, 'testRunnerSessionData', sessionData);
+ });
+
+ chai.Assertion.addMethod('matchMongoSpec', function (expected) {
+ const actual = utils.flag(this, 'object');
+
+ const sessionData = utils.flag(this, 'testRunnerSessionData');
+
+ const result = generateMatchAndDiff(expected, actual, { sessionData });
+
+ chai.Assertion.prototype.assert.call(
+ this,
+ result.match,
+ 'expected #{act} to match spec #{exp}',
+ 'expected #{act} to not match spec #{exp}',
+ result.expected,
+ result.actual,
+ chai.config.showDiff || true
+ );
+ });
+
+ chai.assert.matchMongoSpec = function (val, exp, msg) {
+ new chai.Assertion(val, msg).to.matchMongoSpec(exp);
+ };
+}
+
+module.exports.default = matchMongoSpec;
diff --git a/test/tools/spec-runner/utils.js b/test/tools/spec-runner/utils.js
new file mode 100644
index 00000000000..c4664ac1564
--- /dev/null
+++ b/test/tools/spec-runner/utils.js
@@ -0,0 +1,18 @@
+'use strict';
+
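+// On sharded clusters, spec tests that do not opt into multiple mongoses get a single-host
+// connection string with directConnection=false; otherwise the configuration's URL builder is
+// used with any per-context credentials.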
+function resolveConnectionString(configuration, spec, context) {
+ const isShardedEnvironment = configuration.topologyType === 'Sharded';
+ const useMultipleMongoses = spec && !!spec.useMultipleMongoses;
+ const username = context && context.user;
+ const password = context && context.password;
+ const authSource = context && context.authSource;
+ const connectionString =
+ isShardedEnvironment && !useMultipleMongoses
+ ? `mongodb://${configuration.host}:${configuration.port}/${
+ configuration.db
+ }?directConnection=false${authSource ? `&authSource=${authSource}` : ''}`
+ : configuration.url({ username, password, authSource });
+ return connectionString;
+}
+
+module.exports = { resolveConnectionString };