From 403a8686643cf7397afd8327e56a283566c07c1f Mon Sep 17 00:00:00 2001
From: Sergey Zelenov
Date: Tue, 30 Sep 2025 13:19:16 +0200
Subject: [PATCH] test(NODE-7193): migrate `integration/crud/bulk` tests

---
 test/integration/crud/bulk.test.ts | 1985 ++++++++++++----------------
 1 file changed, 844 insertions(+), 1141 deletions(-)

diff --git a/test/integration/crud/bulk.test.ts b/test/integration/crud/bulk.test.ts
index bdae64ca5f5..6dc1b1beb26 100644
--- a/test/integration/crud/bulk.test.ts
+++ b/test/integration/crud/bulk.test.ts
@@ -1,5 +1,6 @@
+import * as crypto from 'node:crypto';
+
 import { expect } from 'chai';
-import * as crypto from 'crypto';
 
 import {
   type Collection,
@@ -10,7 +11,7 @@ import {
   type MongoClient,
   MongoDriverError,
   MongoInvalidArgumentError
-} from '../../mongodb';
+} from '../../../src';
 
 import { assert as test, ignoreNsNotFound } from '../shared';
 
 const MAX_BSON_SIZE = 16777216;
@@ -102,7 +103,9 @@ describe('Bulk', function () {
     it('should throw a MongoInvalidArgument error ', async function () {
       const bulkOp = client.db('test').collection('test').initializeUnorderedBulkOp();
       expect(() => bulkOp.raw(undefined)).to.throw(MongoInvalidArgumentError);
+      // @ts-expect-error Not allowed in TS, but can be used in JS
       expect(() => bulkOp.raw(true)).to.throw(MongoInvalidArgumentError);
+      // @ts-expect-error Not allowed in TS, but can be used in JS
      expect(() => bulkOp.raw(3)).to.throw(MongoInvalidArgumentError);
     });
 
@@ -115,16 +118,12 @@ describe('Bulk', function () {
     });
 
     context('when called with a valid operation', function () {
-      it('should not throw a MongoInvalidArgument error', async function () {
-        try {
-          client
-            .db('test')
-            .collection('test')
-            .initializeUnorderedBulkOp()
-            .raw({ insertOne: { document: {} } });
-        } catch (error) {
-          expect(error).not.to.exist;
-        }
+      it('should not throw a MongoInvalidArgument error', function () {
+        client
+          .db('test')
+          .collection('test')
+          .initializeUnorderedBulkOp()
+          .raw({ insertOne: { document: {} } });
       });
     });
 
@@ -199,36 +198,35 @@ describe('Bulk', function () {
   describe('#insertMany()', function () {
     context('when passed an invalid docs argument', function () {
       it('should throw a MongoInvalidArgument error', async function () {
-        try {
-          const docs = [];
-          docs[1] = { color: 'red' };
-          await client.db('test').collection('test').insertMany(docs);
-          expect.fail('Expected insertMany to throw error, failed to throw error');
-        } catch (error) {
-          expect(error).to.be.instanceOf(MongoInvalidArgumentError);
-          expect(error.message).to.equal(
-            'Collection.insertMany() cannot be called with an array that has null/undefined values'
-          );
-        }
+        const docs = [];
+        docs[1] = { color: 'red' };
+
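+        // Capture the expected rejection as a value so it can be asserted on
+        // directly instead of wrapping the call in try/catch (the same pattern
+        // is used throughout the migrated tests below).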
+        const error = await client
+          .db('test')
+          .collection('test')
+          .insertMany(docs)
+          .catch(err => err);
+
+        expect(error).to.be.instanceOf(MongoInvalidArgumentError);
+        expect(error.message).to.equal(
+          'Collection.insertMany() cannot be called with an array that has null/undefined values'
+        );
       });
     });
 
     context('when passed a valid document list', function () {
       it('insertMany should not throw a MongoInvalidArgument error when called with a valid operation', async function () {
-        try {
-          const result = await client
-            .db('test')
-            .collection('test')
-            .insertMany([{ color: 'blue' }]);
-          expect(result).to.exist;
-        } catch (error) {
-          expect(error).not.to.exist;
-        }
+        const result = await client
+          .db('test')
+          .collection('test')
+          .insertMany([{ color: 'blue' }]);
+
+        expect(result).to.exist;
       });
     });
 
     context('when inserting duplicate values', function () {
-      let col;
+      let col: Collection;
 
       beforeEach(async function () {
         const db = client.db();
@@ -288,7 +286,7 @@ describe('Bulk', function () {
 
   describe('#bulkWrite()', function () {
     context('when inserting duplicate values', function () {
-      let col;
+      let col: Collection;
 
       beforeEach(async function () {
         const db = client.db();
@@ -375,70 +373,57 @@ describe('Bulk', function () {
       expect(result).to.exist;
     });
 
-    it('should correctly handle ordered single batch api write command error', function (done) {
+    it('should correctly handle ordered single batch api write command error', async function () {
       const db = client.db();
       const col = db.collection('batch_write_ordered_ops_10');
 
       // Add unique index on b field causing all updates to fail
-      col.createIndex({ a: 1 }, { unique: true, sparse: false }, err => {
-        expect(err).to.not.exist;
+      await col.createIndex({ a: 1 }, { unique: true, sparse: false });
 
-        const batch = col.initializeOrderedBulkOp();
-        batch.insert({ b: 1, a: 1 });
-        batch
-          .find({ b: 2 })
-          .upsert()
-          .updateOne({ $set: { a: 1 } });
-        batch.insert({ b: 3, a: 2 });
+      const batch = col.initializeOrderedBulkOp();
+      batch.insert({ b: 1, a: 1 });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch.insert({ b: 3, a: 2 });
 
-        batch.execute((err, result) => {
-          expect(err).to.exist;
-          expect(result).to.not.exist;
+      const error = await batch.execute().catch(err => err);
 
-          result = err.result;
+      const result = error.result;
 
-          // Basic properties check
-          test.equal(1, result.insertedCount);
-          test.equal(true, result.hasWriteErrors());
-          test.equal(1, result.getWriteErrorCount());
-
-          // Get the write error
-          let error = result.getWriteErrorAt(0);
-          test.equal(11000, error.code);
-          test.ok(error.errmsg != null);
-
-          // Get the operation that caused the error
-          const op = error.getOperation();
-          test.equal(2, op.q.b);
-          test.equal(1, op.u['$set'].a);
-          expect(op.multi).to.not.be.true;
-          test.equal(true, op.upsert);
-
-          // Get the first error
-          error = result.getWriteErrorAt(1);
-          expect(error).to.not.exist;
-
-          // Finish up test
-          client.close(done);
-        });
-      });
+      // Basic properties check
+      test.equal(1, result.insertedCount);
+      test.equal(true, result.hasWriteErrors());
+      test.equal(1, result.getWriteErrorCount());
+
+      // Get the write error
+      const writeError = result.getWriteErrorAt(0);
+      test.equal(11000, writeError.code);
+      test.ok(writeError.errmsg != null);
+
+      // Get the operation that caused the error
+      const op = writeError.getOperation();
+      test.equal(2, op.q.b);
+      test.equal(1, op.u['$set'].a);
+      expect(op.multi).to.not.be.true;
+      test.equal(true, op.upsert);
+
+      // Get the second error
+      expect(result.getWriteErrorAt(1)).to.not.exist;
     });
 
-    it('should use arrayFilters for updateMany', {
-      metadata: { requires: { mongodb: '>=3.6.x' } },
-      async test() {
-        const db = client.db();
-        const collection = db.collection<{ a: { x: number }[] }>('arrayfilterstest');
-        const docs = [{ a: [{ x: 1 }, { x: 2 }] }, { a: [{ x: 3 }, { x: 4 }] }];
-        await collection
-          .insertMany(docs)
-          .then(() =>
-            collection.updateMany({}, { $set: { 'a.$[i].x': 5 } }, { arrayFilters: [{ 'i.x': 5 }] })
-          )
-          .then(data => {
-            expect(data.matchedCount).to.equal(2);
-          });
-      }
+    it('should use arrayFilters for updateMany', async function () {
+      const db = client.db();
+      const collection = db.collection<{ a: { x: number }[] }>('arrayfilterstest');
+      const docs = [{ a: [{ x: 1 }, { x: 2 }] }, { a: [{ x: 3 }, { x: 4 }] }];
+      await collection.insertMany(docs);
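+      // The empty filter matches both of the inserted documents, so the
+      // assertion below expects matchedCount to be 2.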
+      const data = await collection.updateMany(
+        {},
+        { $set: { 'a.$[i].x': 5 } },
+        { arrayFilters: [{ 'i.x': 5 }] }
+      );
+      expect(data.matchedCount).to.equal(2);
     });
 
     it('should ignore undefined values in unordered bulk operation if `ignoreUndefined` specified', async function () {
@@ -527,56 +512,48 @@ describe('Bulk', function () {
       expect(item.a).to.have.property('_bsontype', 'Long');
     });
 
-    it('should correctly handle ordered multiple batch api write command errors', function (done) {
+    it('should correctly handle ordered multiple batch api write command errors', async function () {
       const db = client.db();
       const col = db.collection('batch_write_ordered_ops_2');
 
       // Add unique index on field `a` causing all updates to fail
-      col.createIndex({ a: 1 }, { unique: true, sparse: false }, function (err) {
-        expect(err).to.not.exist;
+      await col.createIndex({ a: 1 }, { unique: true, sparse: false });
 
-        const batch = col.initializeOrderedBulkOp();
-        batch.insert({ b: 1, a: 1 });
-        batch
-          .find({ b: 2 })
-          .upsert()
-          .updateOne({ $set: { a: 1 } });
-        batch
-          .find({ b: 3 })
-          .upsert()
-          .updateOne({ $set: { a: 2 } });
-        batch
-          .find({ b: 2 })
-          .upsert()
-          .updateOne({ $set: { a: 1 } });
-        batch.insert({ b: 4, a: 3 });
-        batch.insert({ b: 5, a: 1 });
-
-        batch.execute(function (err, result) {
-          expect(err).to.exist;
-          expect(result).to.not.exist;
-
-          // Basic properties check
-          result = err.result;
-          test.equal(err instanceof Error, true);
-          test.equal(1, result.insertedCount);
-          test.equal(true, result.hasWriteErrors());
-          test.ok(1, result.getWriteErrorCount());
+      const batch = col.initializeOrderedBulkOp();
+      batch.insert({ b: 1, a: 1 });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch
+        .find({ b: 3 })
+        .upsert()
+        .updateOne({ $set: { a: 2 } });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch.insert({ b: 4, a: 3 });
+      batch.insert({ b: 5, a: 1 });
 
-          // Individual error checking
-          const error = result.getWriteErrorAt(0);
-          test.equal(1, error.index);
-          test.equal(11000, error.code);
-          test.ok(error.errmsg != null);
-          test.equal(2, error.getOperation().q.b);
-          test.equal(1, error.getOperation().u['$set'].a);
-          expect(error.getOperation().multi).to.not.be.true;
-          test.equal(true, error.getOperation().upsert);
-
-          // Finish up test
-          client.close(done);
-        });
-      });
+      const error = await batch.execute().catch(err => err);
+      test.equal(error instanceof Error, true);
+
+      // Basic properties check
+      const result = error.result;
+      test.equal(1, result.insertedCount);
+      test.equal(true, result.hasWriteErrors());
+      test.equal(1, result.getWriteErrorCount());
+
+      // Individual error checking
+      const writeError = result.getWriteErrorAt(0);
+      test.equal(1, writeError.index);
+      test.equal(11000, writeError.code);
+      test.ok(writeError.errmsg != null);
+      test.equal(2, writeError.getOperation().q.b);
+      test.equal(1, writeError.getOperation().u['$set'].a);
+      expect(writeError.getOperation().multi).to.not.be.true;
+      test.equal(true, writeError.getOperation().upsert);
     });
 
     it('should fail due to ordered document being to big', () => {
@@ -601,7 +578,7 @@ describe('Bulk', function () {
       }
     });
 
-    it('should correctly split up ordered messages into more batches', function (done) {
+    it('should correctly split up ordered messages into more batches', async function () {
       const db = client.db();
       const coll = db.collection('batch_write_ordered_ops_4');
 
@@ -622,14 +599,10 @@ describe('Bulk', function () {
       // Set up a giant string to blow through the max message size
       let hugeString = '';
       // Create it bigger than 16MB
       for (let i = 0; i < 1024 * 256; i++) {
         hugeString = hugeString + '1234567890123456';
       }
 
       // Insert the string a couple of times, should force split into multiple batches
       const batch = coll.initializeOrderedBulkOp();
       batch.insert({ a: 1, b: hugeString });
       batch.insert({ a: 2, b: hugeString });
       batch.insert({ a: 3, b: hugeString });
       batch.insert({ a: 4, b: hugeString });
       batch.insert({ a: 5, b: hugeString });
       batch.insert({ a: 6, b: hugeString });
 
       // Execute the operations
-      batch.execute(function (err, result) {
-        // Basic properties check
-        test.equal(6, result.insertedCount);
-        test.equal(false, result.hasWriteErrors());
-
-        // Finish up test
-        client.close(done);
-      });
+      const result = await batch.execute();
+      // Basic properties check
+      test.equal(6, result.insertedCount);
+      test.equal(false, result.hasWriteErrors());
     });
 
     it('should Correctly Execute Ordered Batch of Write Operations with duplicate key errors on updates', async function () {
@@ -717,59 +690,51 @@ describe('Bulk', function () {
 
     it('should correctly perform ordered upsert with custom _id', {
       metadata: {
-        requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
+        requires: { topology: ['single', 'replicaset', 'sharded'] }
       },
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_ordered_ops_8');
-          const batch = col.initializeOrderedBulkOp();
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_ordered_ops_8');
+        const batch = col.initializeOrderedBulkOp();
 
-          // Add some operations to be executed in order
-          batch
-            .find({ _id: 2 })
-            .upsert()
-            .updateOne({ $set: { b: 2 } });
+        // Add some operations to be executed in order
+        batch
+          .find({ _id: 2 })
+          .upsert()
+          .updateOne({ $set: { b: 2 } });
 
-          // Execute the operations
-          batch.execute(function (err, result) {
-            // Check state of result
-            test.equal(1, result.upsertedCount);
-            test.equal(0, result.insertedCount);
-            test.equal(0, result.matchedCount);
-            test.ok(0 === result.modifiedCount || result.modifiedCount == null);
-            test.equal(0, result.deletedCount);
-
-            const upserts = result.result.upserted;
-            test.equal(1, upserts.length);
-            test.equal(0, upserts[0].index);
-            test.equal(2, upserts[0]._id);
-
-            // Finish up test
-            client.close(done);
-          });
-        });
+        // Execute the operations
+        const result = await batch.execute();
+        // Check state of result
+        test.equal(1, result.upsertedCount);
+        test.equal(0, result.insertedCount);
+        test.equal(0, result.matchedCount);
+        test.ok(0 === result.modifiedCount || result.modifiedCount == null);
+        test.equal(0, result.deletedCount);
+
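+        // Note: the raw 'result' document is read via bracket access here since
+        // it is not exposed for dot access on the BulkWriteResult typings.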
+        const upserts = result['result'].upserted;
+        test.equal(1, upserts.length);
+        test.equal(0, upserts[0].index);
+        test.equal(2, upserts[0]._id);
       }
     });
 
     it('should return an error when no operations in ordered batch', {
       metadata: {
-        requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
+        requires: { topology: ['single', 'replicaset', 'sharded'] }
       },
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_ordered_ops_8');
-
-          col.initializeOrderedBulkOp().execute(function (err) {
-            expect(err).to.be.instanceOf(MongoDriverError);
-            expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
-
-            client.close(done);
-          });
-        });
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_ordered_ops_8');
+
+        const err = await col
+          .initializeOrderedBulkOp()
+          .execute()
+          .catch(err => err);
+        expect(err).to.be.instanceOf(MongoDriverError);
+        expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
       }
     });
 
     it(
       'should correctly execute ordered batch using w:0',
       // TODO(NODE-6060): set `moreToCome` op_msg bit when `w: 0` is specified
       { requires: { mongodb: '<8.0.0' } },
-      function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_ordered_ops_9');
-
-          const bulk = col.initializeOrderedBulkOp();
-          for (let i = 0; i < 100; i++) {
-            bulk.insert({ a: 1 });
-          }
+      async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_ordered_ops_9');
 
-          bulk.find({ b: 1 }).upsert().update({ b: 1 });
-          bulk.find({ c: 1 }).delete();
+        const bulk = col.initializeOrderedBulkOp();
+        for (let i = 0; i < 100; i++) {
+          bulk.insert({ a: 1 });
+        }
 
-          bulk.execute({ writeConcern: { w: 0 } }, function (err, result) {
-            expect(err).to.not.exist;
-            test.equal(0, result.upsertedCount);
-            test.equal(0, result.insertedCount);
-            test.equal(0, result.matchedCount);
-            test.ok(0 === result.modifiedCount || result.modifiedCount == null);
-            test.equal(0, result.deletedCount);
-            test.equal(false, result.hasWriteErrors());
+        bulk.find({ b: 1 }).upsert().update({ b: 1 });
+        bulk.find({ c: 1 }).delete();
 
-            client.close(done);
-          });
-        });
+        const result = await bulk.execute({ writeConcern: { w: 0 } });
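+        // With w:0 the server does not acknowledge individual writes, so all of
+        // the counts reported below stay at zero.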
+        test.equal(0, result.upsertedCount);
+        test.equal(0, result.insertedCount);
+        test.equal(0, result.matchedCount);
+        test.ok(0 === result.modifiedCount || result.modifiedCount == null);
+        test.equal(0, result.deletedCount);
+        test.equal(false, result.hasWriteErrors());
       }
     );
 
-    it('should correctly handle single unordered batch API', function (done) {
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('batch_write_unordered_ops_legacy_1');
+    it('should correctly handle single unordered batch API', async function () {
+      const db = client.db();
+      const col = db.collection('batch_write_unordered_ops_legacy_1');
 
-        // Add unique index on b field causing all updates to fail
-        col.createIndex({ a: 1 }, { unique: true, sparse: false }, function (err) {
-          expect(err).to.not.exist;
+      // Add unique index on the a field causing all updates to fail
+      await col.createIndex({ a: 1 }, { unique: true, sparse: false });
 
-          // Initialize the unordered Batch
-          const batch = col.initializeUnorderedBulkOp();
+      // Initialize the unordered Batch
+      const batch = col.initializeUnorderedBulkOp();
 
-          // Add some operations to be executed in order
-          batch.insert({ b: 1, a: 1 });
-          batch
-            .find({ b: 2 })
-            .upsert()
-            .updateOne({ $set: { a: 1 } });
-          batch.insert({ b: 3, a: 2 });
+      // Add some operations to be executed in order
+      batch.insert({ b: 1, a: 1 });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch.insert({ b: 3, a: 2 });
 
-          // Execute the operations
-          batch.execute(function (err, result) {
-            expect(err).to.exist;
-            expect(result).to.not.exist;
-
-            // Basic properties check
-            result = err.result;
-            test.equal(err instanceof Error, true);
-            test.equal(2, result.insertedCount);
-            test.equal(0, result.upsertedCount);
-            test.equal(0, result.matchedCount);
-            test.ok(0 === result.modifiedCount || result.modifiedCount == null);
-            test.equal(true, result.hasWriteErrors());
-            test.equal(1, result.getWriteErrorCount());
-
-            // Get the first error
-            let error = result.getWriteErrorAt(0);
-            test.equal(11000, error.code);
-            test.ok(error.errmsg != null);
-
-            // Get the operation that caused the error
-            const op = error.getOperation();
-            test.equal(2, op.q.b);
-            test.equal(1, op.u['$set'].a);
-            expect(op.multi).to.not.be.true;
-            test.equal(true, op.upsert);
-
-            // Get the first error
-            error = result.getWriteErrorAt(1);
-            expect(error).to.not.exist;
-
-            // Finish up test
-            client.close(done);
-          });
-        });
-      });
+      // Execute the operations
+      const err = await batch.execute().catch(err => err);
+      // Basic properties check
+      const result = err.result;
+      test.equal(err instanceof Error, true);
+      test.equal(2, result.insertedCount);
+      test.equal(0, result.upsertedCount);
+      test.equal(0, result.matchedCount);
+      test.ok(0 === result.modifiedCount || result.modifiedCount == null);
+      test.equal(true, result.hasWriteErrors());
+      test.equal(1, result.getWriteErrorCount());
+
+      // Get the first error
+      let error = result.getWriteErrorAt(0);
+      test.equal(11000, error.code);
+      test.ok(error.errmsg != null);
+
+      // Get the operation that caused the error
+      const op = error.getOperation();
+      test.equal(2, op.q.b);
+      test.equal(1, op.u['$set'].a);
+      expect(op.multi).to.not.be.true;
+      test.equal(true, op.upsert);
+
+      // Check that there is no second error
+      error = result.getWriteErrorAt(1);
+      expect(error).to.not.exist;
    });
 
-    it('should correctly handle multiple unordered batch API', function (done) {
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('batch_write_unordered_ops_legacy_2');
+    it('should correctly handle multiple unordered batch API', async function () {
+      const db = client.db();
+      const col = db.collection('batch_write_unordered_ops_legacy_2');
 
-        // Add unique index on b field causing all updates to fail
-        col.createIndex({ a: 1 }, { unique: true, sparse: false }, err => {
-          expect(err).to.not.exist;
+      // Add unique index on the a field causing all updates to fail
+      await col.createIndex({ a: 1 }, { unique: true, sparse: false });
 
-          // Initialize the unordered Batch
-          const batch = col.initializeUnorderedBulkOp({ useLegacyOps: true });
+      // Initialize the unordered Batch
+      const batch = col.initializeUnorderedBulkOp();
 
-          // Add some operations to be executed in order
-          batch.insert({ b: 1, a: 1 });
-          batch.insert({ b: 5, a: 1 });
+      // Add some operations to be executed in order
+      batch.insert({ b: 1, a: 1 });
+      batch.insert({ b: 5, a: 1 });
 
-          // Execute the operations
-          batch.execute((err, result) => {
-            expect(err).to.exist;
-            expect(result).to.not.exist;
-
-            // Basic properties check
-            result = err.result;
-            expect(result.insertedCount).to.equal(1);
-            expect(result.hasWriteErrors()).to.equal(true);
-            expect(result.getWriteErrorCount()).to.equal(1);
-
-            // Go over the error
-            const error = result.getWriteErrorAt(0);
-            expect(error.code).to.equal(11000);
-            expect(error.errmsg).to.exist;
-            expect(error.getOperation().b).to.equal(5);
-            expect(error.getOperation().a).to.equal(1);
-
-            // Finish up test
-            client.close(done);
-          });
-        });
-      });
+      // Execute the operations
+      const err = await batch.execute().catch(err => err);
+      expect(err).to.exist;
+
+      // Basic properties check
+      const result = err.result;
+      expect(result.insertedCount).to.equal(1);
+      expect(result.hasWriteErrors()).to.equal(true);
+      expect(result.getWriteErrorCount()).to.equal(1);
+
+      // Go over the error
+      const error = result.getWriteErrorAt(0);
+      expect(error.code).to.equal(11000);
+      expect(error.errmsg).to.exist;
+      expect(error.getOperation().b).to.equal(5);
+      expect(error.getOperation().a).to.equal(1);
    });
 
     it('should fail due to document being to big for unordered batch', {
-      metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
+      metadata: { requires: { topology: ['single', 'replicaset'] } },
 
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const coll = db.collection('batch_write_unordered_ops_legacy_3');
-          // Set up a giant string to blow through the max message size
-          let hugeString = '';
-          // Create it bigger than 16MB
-          for (let i = 0; i < 1024 * 1100; i++) {
-            hugeString = hugeString + '1234567890123456';
-          }
+      test: async function () {
+        const db = client.db();
+        const coll = db.collection('batch_write_unordered_ops_legacy_3');
+        // Set up a giant string to blow through the max message size
+        let hugeString = '';
+        // Create it bigger than 16MB
+        for (let i = 0; i < 1024 * 1100; i++) {
+          hugeString = hugeString + '1234567890123456';
+        }
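+        // (1024 * 1100 iterations of a 16-character chunk is roughly 18 MB,
+        // safely past the 16 MiB BSON document limit)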
 
-          // Set up the batch
-          const batch = coll.initializeUnorderedBulkOp();
-          batch.insert({ b: 1, a: 1 });
-          // should fail on insert due to string being to big
-          try {
-            batch.insert({ string: hugeString });
-            test.ok(false);
-          } catch (err) { } // eslint-disable-line
-
-          // Finish up test
-          client.close(done);
-        });
+        // Set up the batch
+        const batch = coll.initializeUnorderedBulkOp();
+        batch.insert({ b: 1, a: 1 });
+        // should fail on insert due to string being too big
+        try {
+          batch.insert({ string: hugeString });
+          test.ok(false);
+        } catch (err) {} // eslint-disable-line
       }
     });
 
     it('should correctly split up messages into more batches for unordered batches', {
-      metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const coll = db.collection('batch_write_unordered_ops_legacy_4');
+      metadata: { requires: { topology: ['single', 'replicaset'] } },
 
-          // Set up a giant string to blow through the max message size
-          let hugeString = '';
-          // Create it bigger than 16MB
-          for (let i = 0; i < 1024 * 256; i++) {
-            hugeString = hugeString + '1234567890123456';
-          }
+      test: async function () {
+        const db = client.db();
+        const coll = db.collection('batch_write_unordered_ops_legacy_4');
 
-          // Insert the string a couple of times, should force split into multiple batches
-          const batch = coll.initializeUnorderedBulkOp();
-          batch.insert({ a: 1, b: hugeString });
-          batch.insert({ a: 2, b: hugeString });
-          batch.insert({ a: 3, b: hugeString });
-          batch.insert({ a: 4, b: hugeString });
-          batch.insert({ a: 5, b: hugeString });
-          batch.insert({ a: 6, b: hugeString });
+        // Set up a giant string to blow through the max message size
+        let hugeString = '';
+        // Create it bigger than 16MB
+        for (let i = 0; i < 1024 * 256; i++) {
+          hugeString = hugeString + '1234567890123456';
+        }
 
+        // Insert the string a couple of times, should force split into multiple batches
+        const batch = coll.initializeUnorderedBulkOp();
+        batch.insert({ a: 1, b: hugeString });
+        batch.insert({ a: 2, b: hugeString });
+        batch.insert({ a: 3, b: hugeString });
+        batch.insert({ a: 4, b: hugeString });
+        batch.insert({ a: 5, b: hugeString });
+        batch.insert({ a: 6, b: hugeString });
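+        // (six ~4 MiB documents cannot travel in one 16 MiB write message, so
+        // the driver is forced to split the bulk into several batches)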
 
-          // Execute the operations
-          batch.execute(function (err, result) {
-            // Basic properties check
-            test.equal(6, result.insertedCount);
-            test.equal(false, result.hasWriteErrors());
-
-            // Finish up test
-            client.close(done);
-          });
-        });
+        // Execute the operations
+        const result = await batch.execute();
+        // Basic properties check
+        test.equal(6, result.insertedCount);
+        test.equal(false, result.hasWriteErrors());
       }
     });
 
-    it('should Correctly Execute Unordered Batch with duplicate key errors on updates', function (done) {
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('batch_write_unordered_ops_legacy_6');
+    it('should Correctly Execute Unordered Batch with duplicate key errors on updates', async function () {
+      const db = client.db();
+      const col = db.collection(
+        'batch_write_unordered_ops_legacy_6',
+        this.configuration.writeConcernMax()
+      );
 
-        // Write concern
-        const writeConcern = this.configuration.writeConcernMax();
-        writeConcern.unique = true;
-        writeConcern.sparse = false;
+      // Add unique index on b field causing all updates to fail
+      await col.createIndex({ b: 1 }, { unique: true, sparse: false });
 
-        // Add unique index on b field causing all updates to fail
-        col.createIndex({ b: 1 }, writeConcern, function (err) {
-          expect(err).to.not.exist;
+      // Initialize the unordered Batch
+      const batch = col.initializeUnorderedBulkOp();
 
-          // Initialize the unordered Batch
-          const batch = col.initializeUnorderedBulkOp();
+      // Add some operations to be executed in order
+      batch.insert({ a: 1 });
+      batch.find({ a: 1 }).update({ $set: { b: 1 } });
+      batch.insert({ b: 1 });
+      batch.insert({ b: 1 });
+      batch.insert({ b: 1 });
+      batch.insert({ b: 1 });
 
-          // Add some operations to be executed in order
-          batch.insert({ a: 1 });
-          batch.find({ a: 1 }).update({ $set: { b: 1 } });
-          batch.insert({ b: 1 });
-          batch.insert({ b: 1 });
-          batch.insert({ b: 1 });
-          batch.insert({ b: 1 });
+      // Execute the operations
+      const error = await batch.execute({}).catch(err => err);
 
-          // Execute the operations
-          batch.execute({}, function (err, result) {
-            expect(err).to.exist;
-            expect(result).to.not.exist;
-
-            // Test basic settings
-            result = err.result;
-            test.equal(2, result.insertedCount);
-            test.equal(true, result.hasWriteErrors());
-            test.ok(result.getWriteErrorCount() === 4 || result.getWriteErrorCount() === 3);
-
-            // Individual error checking
-            const error = result.getWriteErrorAt(0);
-            test.ok(error.code === 11000 || error.code === 11001);
-            test.ok(error.errmsg != null);
-
-            client.close(done);
-          });
-        });
-      });
+      // Test basic settings
+      const result = error.result;
+      test.equal(2, result.insertedCount);
+      test.equal(true, result.hasWriteErrors());
+      test.ok(result.getWriteErrorCount() === 4 || result.getWriteErrorCount() === 3);
+
+      // Individual error checking
+      const writeError = result.getWriteErrorAt(0);
+      test.ok(writeError.code === 11000 || writeError.code === 11001);
+      test.ok(writeError.errmsg != null);
    });
 
-    it('should provide descriptive error message for unordered batch with duplicate key errors on inserts', function (done) {
+    it('should provide descriptive error message for unordered batch with duplicate key errors on inserts', async function () {
       const configuration = this.configuration;
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('err_batch_write_unordered_ops_legacy_6');
+      const db = client.db();
+      const col = db.collection('err_batch_write_unordered_ops_legacy_6');
+
+      // Add unique index on a field causing all inserts to fail
+      await col.createIndexes([
+        {
+          name: 'err_batch_write_unordered_ops_legacy_6',
+          key: { a: 1 },
+          unique: true
+        }
+      ]);
 
-        // Add unique index on a field causing all inserts to fail
-        col.createIndexes(
-          [
-            {
-              name: 'err_batch_write_unordered_ops_legacy_6',
-              key: { a: 1 },
-              unique: true
-            }
-          ],
-          err => {
-            expect(err).to.not.exist;
-
-            // Initialize the unordered Batch
-            const batch = col.initializeUnorderedBulkOp();
-
-            // Add some operations to be executed in order
-            batch.insert({ a: 1 });
-            batch.insert({ a: 1 });
-
-            // Execute the operations
-            batch.execute(configuration.writeConcernMax(), (err, result) => {
-              expect(err).to.exist;
-              expect(result).to.not.exist;
-
-              // Test basic settings
-              result = err.result;
-              expect(result.insertedCount).to.equal(1);
-              expect(result.hasWriteErrors()).to.equal(true);
-              expect(result.getWriteErrorCount() === 1).to.equal(true);
-
-              // Individual error checking
-              const error = result.getWriteErrorAt(0);
-              expect(error.code === 11000).to.equal(true);
-              expect(error.errmsg).to.exist;
-              expect(err.message).to.equal(error.errmsg);
-
-              client.close(done);
-            });
-          }
-        );
-      });
+      // Initialize the unordered Batch
+      const batch = col.initializeUnorderedBulkOp();
+
+      // Add some operations to be executed in order
+      batch.insert({ a: 1 });
+      batch.insert({ a: 1 });
+
+      // Execute the operations
+      const error = await batch.execute(configuration.writeConcernMax()).catch(err => err);
+      // Test basic settings
+      const result = error.result;
+      expect(result.insertedCount).to.equal(1);
+      expect(result.hasWriteErrors()).to.equal(true);
+      expect(result.getWriteErrorCount() === 1).to.equal(true);
+
+      // Individual error checking
+      const writeError = result.getWriteErrorAt(0);
+      expect(writeError.code === 11000).to.equal(true);
+      expect(writeError.errmsg).to.exist;
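+      // A lone write error is promoted to the top-level error message, so the
+      // two strings are expected to match: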
+      expect(error.message).to.equal(writeError.errmsg);
     });
 
     it(
       'should Correctly Execute Unordered Batch of with upserts causing duplicate key errors on updates',
       {
-        metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
+        metadata: { requires: { topology: ['single', 'replicaset'] } },
 
-        test: function (done) {
-          client.connect((err, client) => {
-            const db = client.db();
-            const col = db.collection('batch_write_unordered_ops_legacy_7');
-
-            // Add unique index on b field causing all updates to fail
-            col.createIndex({ b: 1 }, { unique: true, sparse: false }, err => {
-              expect(err).to.not.exist;
-
-              // Initialize the unordered Batch
-              const batch = col.initializeUnorderedBulkOp();
-
-              // Add some operations to be executed in order
-              batch.insert({ a: 1 });
-              batch.find({ a: 1 }).update({ $set: { b: 1 } });
-              batch
-                .find({ a: 2 })
-                .upsert()
-                .update({ $set: { b: 2 } });
-              batch
-                .find({ a: 3 })
-                .upsert()
-                .update({ $set: { b: 3 } });
-              batch.find({ a: 1 }).update({ $set: { b: 1 } });
-              batch.insert({ b: 1 });
-
-              // Execute the operations
-              batch.execute({}, function (err, result) {
-                expect(err).to.exist;
-                expect(result).to.not.exist;
-
-                // Test basic settings
-                result = err.result;
-                test.equal(2, result.insertedCount);
-                test.equal(2, result.upsertedCount);
-                test.ok(0 === result.modifiedCount || result.modifiedCount == null);
-                test.equal(0, result.deletedCount);
-                test.equal(true, result.hasWriteErrors());
-                test.ok(1, result.getWriteErrorCount());
-
-                // Individual error checking
-                const error = result.getWriteErrorAt(0);
-                test.ok(error.code === 11000 || error.code === 11001);
-                test.ok(error.errmsg != null);
-                test.equal(1, error.getOperation().u['$set'].b);
-
-                // Check for upserted values
-                const ids = result.result.upserted;
-                test.equal(2, ids.length);
-                test.equal(2, ids[0].index);
-                test.ok(ids[0]._id != null);
-                test.equal(3, ids[1].index);
-                test.ok(ids[1]._id != null);
-
-                client.close(done);
-              });
-            });
-          });
-        }
-      }
-    );
+        test: async function () {
+          const db = client.db();
+          const col = db.collection('batch_write_unordered_ops_legacy_7');
 
-  it('should correctly perform unordered upsert with custom _id', function (done) {
-    client.connect((err, client) => {
-      const db = client.db();
-      const col = db.collection('batch_write_unordered_ops_legacy_8');
-      const batch = col.initializeUnorderedBulkOp();
+          // Add unique index on b field causing all updates to fail
+          await col.createIndex({ b: 1 }, { unique: true, sparse: false });
 
-      // Add some operations to be executed in order
-      batch
-        .find({ _id: 2 })
-        .upsert()
-        .updateOne({ $set: { b: 2 } });
+          // Initialize the unordered Batch
+          const batch = col.initializeUnorderedBulkOp();
 
-      // Execute the operations
-      batch.execute({}, function (err, result) {
-        // Check state of result
-        test.equal(1, result.upsertedCount);
-        test.equal(0, result.insertedCount);
-        test.equal(0, result.matchedCount);
-        test.ok(0 === result.modifiedCount || result.modifiedCount == null);
-        test.equal(0, result.deletedCount);
+          // Add some operations to be executed in order
+          batch.insert({ a: 1 });
+          batch.find({ a: 1 }).update({ $set: { b: 1 } });
+          batch
+            .find({ a: 2 })
+            .upsert()
+            .update({ $set: { b: 2 } });
+          batch
+            .find({ a: 3 })
+            .upsert()
+            .update({ $set: { b: 3 } });
+          batch.find({ a: 1 }).update({ $set: { b: 1 } });
+          batch.insert({ b: 1 });
+
+          // Execute the operations
+          const error = await batch.execute({}).catch(err => err);
+          // Test basic settings
+          const result = error.result;
+          test.equal(2, result.insertedCount);
+          test.equal(2, result.upsertedCount);
+          test.ok(0 === result.modifiedCount || result.modifiedCount == null);
+          test.equal(0, result.deletedCount);
+          test.equal(true, result.hasWriteErrors());
+          test.equal(2, result.getWriteErrorCount());
 
-        const upserts = result.result.upserted;
-        test.equal(1, upserts.length);
-        test.equal(0, upserts[0].index);
-        test.equal(2, upserts[0]._id);
+          // Individual error checking
+          const writeError = result.getWriteErrorAt(0);
+          test.ok(writeError.code === 11000 || writeError.code === 11001);
+          test.ok(writeError.errmsg != null);
+          test.equal(1, writeError.getOperation().u['$set'].b);
+
+          // Check for upserted values
+          const ids = result.result.upserted;
+          test.equal(2, ids.length);
+          test.equal(2, ids[0].index);
+          test.ok(ids[0]._id != null);
+          test.equal(3, ids[1].index);
+          test.ok(ids[1]._id != null);
+        }
+      }
+    );
 
-        // Finish up test
-        client.close(done);
-      });
-    });
-  });
+    it('should correctly perform unordered upsert with custom _id', async function () {
+      const db = client.db();
+      const col = db.collection('batch_write_unordered_ops_legacy_8');
+      const batch = col.initializeUnorderedBulkOp();
 
-  it('should prohibit batch finds with no selector', {
-    metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
+      // Add some operations to be executed in order
+      batch
+        .find({ _id: 2 })
+        .upsert()
+        .updateOne({ $set: { b: 2 } });
 
-    test: function (done) {
-      client.connect((err, client) => {
-        expect(err).to.not.exist;
+      // Execute the operations
+      const result = await batch.execute({});
+      // Check state of result
+      test.equal(1, result.upsertedCount);
+      test.equal(0, result.insertedCount);
+      test.equal(0, result.matchedCount);
+      test.ok(0 === result.modifiedCount || result.modifiedCount == null);
+      test.equal(0, result.deletedCount);
 
-      const db = client.db();
-      const col = db.collection('batch_write_unordered_ops_legacy_9');
+      const upserts = result['result'].upserted;
+      test.equal(1, upserts.length);
+      test.equal(0, upserts[0].index);
+      test.equal(2, upserts[0]._id);
+    });
 
-      const unorderedBatch = col.initializeUnorderedBulkOp();
-      const orderedBatch = col.initializeOrderedBulkOp();
+    it('should prohibit batch finds with no selector', {
+      metadata: { requires: { topology: ['single', 'replicaset'] } },
 
-      try {
-        unorderedBatch.find();
-        test.ok(false);
-      } catch (e) {
-        expect(e).to.match(/Bulk find operation must specify a selector/);
-      }
 
-      try {
-        orderedBatch.find();
-        test.ok(false);
-      } catch (e) {
-        expect(e).to.match(/Bulk find operation must specify a selector/);
-      }
 
-      client.close(done);
-      });
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_unordered_ops_legacy_9');
+
+        const unorderedBatch = col.initializeUnorderedBulkOp();
+        const orderedBatch = col.initializeOrderedBulkOp();
+
+        try {
+          // @ts-expect-error Not allowed in TS, but can be used in JS
+          unorderedBatch.find();
+          test.ok(false);
+        } catch (e) {
+          expect(e).to.match(/Bulk find operation must specify a selector/);
+        }
+
+        try {
+          // @ts-expect-error Not allowed in TS, but can be used in JS
+          orderedBatch.find();
+          test.ok(false);
+        } catch (e) {
+          expect(e).to.match(/Bulk find operation must specify a selector/);
+        }
       }
     });
 
     it('should return an error when no operations in unordered batch', {
-      metadata: { requires: { topology: ['single', 'replicaset', 'ssl', 'heap', 'wiredtiger'] } },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_ordered_ops_8');
-
-          col.initializeUnorderedBulkOp().execute({}, function (err) {
-            expect(err).to.be.instanceOf(MongoDriverError);
-            expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
-
-            client.close(done);
-          });
-        });
+      metadata: { requires: { topology: ['single', 'replicaset'] } },
+
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_ordered_ops_8');
+
+        const err = await col
+          .initializeUnorderedBulkOp()
+          .execute()
+          .catch(err => err);
+        expect(err).to.be.instanceOf(MongoDriverError);
+        expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
       }
     });
 
     it('should correctly execute unordered batch using w:0', async function () {
-      await client.connect();
       const db = client.db();
       const col = db.collection('batch_write_ordered_ops_9');
       const bulk = col.initializeUnorderedBulkOp();
@@ -1235,308 +1116,243 @@ describe('Bulk', function () {
       test.ok(0 === result.modifiedCount || result.modifiedCount == null);
       test.equal(0, result.deletedCount);
       test.equal(false, result.hasWriteErrors());
-
-      await client.close();
     });
 
-    it('should provide an accessor for operations on ordered bulk ops', function (done) {
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('bulk_get_operations_test');
+    it('should provide an accessor for operations on ordered bulk ops', async function () {
+      const db = client.db();
+      const col = db.collection('bulk_get_operations_test');
 
-        const batch = col.initializeOrderedBulkOp();
-        batch.insert({ b: 1, a: 1 });
-        batch
-          .find({ b: 2 })
-          .upsert()
-          .updateOne({ $set: { a: 1 } });
-        batch.insert({ b: 3, a: 2 });
-        const batches = batch.batches;
-        expect(batches).to.have.lengthOf(3);
-        expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
-        expect(batches[1].operations[0]).to.containSubset({
-          q: { b: 2 },
-          u: { $set: { a: 1 } },
-          upsert: true
-        });
-        expect(batches[2].operations[0]).to.containSubset({ b: 3, a: 2 });
-        client.close(done);
-      });
+      const batch = col.initializeOrderedBulkOp();
+      batch.insert({ b: 1, a: 1 });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch.insert({ b: 3, a: 2 });
+      const batches = batch.batches;
+      expect(batches).to.have.lengthOf(3);
+      expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
+      expect(batches[1].operations[0]).to.containSubset({
+        q: { b: 2 },
+        u: { $set: { a: 1 } },
+        upsert: true
+      });
+      expect(batches[2].operations[0]).to.containSubset({ b: 3, a: 2 });
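+      // An ordered bulk op opens a new batch every time the operation type
+      // changes, hence three batches for insert/update/insert; the unordered
+      // accessor test below coalesces both inserts into a single batch.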
     });
 
     it('should fail with w:2 and wtimeout write concern due single mongod instance ordered', {
-      metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_concerns_ops_1');
-          const batch = col.initializeOrderedBulkOp();
-          batch.insert({ a: 1 });
-          batch.insert({ a: 2 });
+      metadata: { requires: { topology: 'single' } },
 
-          batch.execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } }, function (err) {
-            test.ok(err != null);
-            test.ok(err.code != null);
-            test.ok(err.errmsg != null);
-
-            client.close(done);
-          });
-        });
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_concerns_ops_1');
+        const batch = col.initializeOrderedBulkOp();
+        batch.insert({ a: 1 });
+        batch.insert({ a: 2 });
+
+        const err = await batch
+          .execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } })
+          .catch(err => err);
+        test.ok(err != null);
+        test.ok(err.code != null);
+        test.ok(err.errmsg != null);
       }
     });
 
     it('should correctly handle bulk operation split for ordered bulk operation', {
-      // Add a tag that our runner can trigger on
-      // in this case we are setting that node needs to be higher than 0.10.X to run
-      metadata: {
-        requires: {
-          mongodb: '>=2.6.0',
-          topology: 'single'
-        }
-      },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const docs = [];
-          for (let i = 0; i < 5; i++) {
-            docs.push({
-              s: new Array(6000000).join('x')
-            });
-          }
+      metadata: { requires: { topology: 'single' } },
 
-          db.collection('bigdocs_ordered').insertMany(docs, function (err) {
-            expect(err).to.not.exist;
-
-            db.collection('bigdocs_ordered').count(function (err, c) {
-              expect(err).to.not.exist;
-              test.equal(5, c);
-
-              client.close(done);
-            });
-          });
-        });
+      test: async function () {
+        const db = client.db();
+        const docs = [];
+        for (let i = 0; i < 5; i++) {
+          docs.push({
+            s: new Array(6000000).join('x')
+          });
+        }
+
+        await db.collection('bigdocs_ordered').insertMany(docs);
+        const c = await db.collection('bigdocs_ordered').countDocuments();
+        test.equal(5, c);
       }
     });
 
-    it('should provide an accessor for operations on unordered bulk ops', function (done) {
-      client.connect((err, client) => {
-        const db = client.db();
-        const col = db.collection('bulk_get_operations_test');
+    it('should provide an accessor for operations on unordered bulk ops', async function () {
+      const db = client.db();
+      const col = db.collection('bulk_get_operations_test');
 
-        const batch = col.initializeUnorderedBulkOp();
-        batch.insert({ b: 1, a: 1 });
-        batch
-          .find({ b: 2 })
-          .upsert()
-          .updateOne({ $set: { a: 1 } });
-        batch.insert({ b: 3, a: 2 });
-        const batches = batch.batches;
-        expect(batches).to.have.lengthOf(2);
-        expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
-        expect(batches[0].operations[1]).to.containSubset({ b: 3, a: 2 });
-        expect(batches[1].operations[0]).to.containSubset({
-          q: { b: 2 },
-          u: { $set: { a: 1 } },
-          upsert: true
-        });
-        client.close(done);
-      });
+      const batch = col.initializeUnorderedBulkOp();
+      batch.insert({ b: 1, a: 1 });
+      batch
+        .find({ b: 2 })
+        .upsert()
+        .updateOne({ $set: { a: 1 } });
+      batch.insert({ b: 3, a: 2 });
+      const batches = batch.batches;
+      expect(batches).to.have.lengthOf(2);
+      expect(batches[0].operations[0]).to.containSubset({ b: 1, a: 1 });
+      expect(batches[0].operations[1]).to.containSubset({ b: 3, a: 2 });
+      expect(batches[1].operations[0]).to.containSubset({
+        q: { b: 2 },
+        u: { $set: { a: 1 } },
+        upsert: true
+      });
    });
 
     it('should fail with w:2 and wtimeout write concern due single mongod instance unordered', {
-      metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_concerns_ops_1');
-          const batch = col.initializeUnorderedBulkOp();
-          batch.insert({ a: 1 });
-          batch.insert({ a: 2 });
-
-          batch.execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } }, function (err) {
-            test.ok(err != null);
-            test.ok(err.code != null);
-            test.ok(err.errmsg != null);
-
-            client.close(done);
-          });
-        });
+      metadata: { requires: { topology: 'single' } },
+
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_concerns_ops_1');
+        const batch = col.initializeUnorderedBulkOp();
+        batch.insert({ a: 1 });
+        batch.insert({ a: 2 });
+
+        const err = await batch
+          .execute({ writeConcern: { w: 2, wtimeoutMS: 1000 } })
+          .catch(err => err);
+        test.ok(err != null);
+        test.ok(err.code != null);
+        test.ok(err.errmsg != null);
       }
     });
 
     it('should correctly return the number of operations in the bulk', {
-      metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const col = db.collection('batch_write_concerns_ops_1');
-          let batch = col.initializeOrderedBulkOp();
-          batch.insert({ a: 1 });
-          batch
-            .find({})
-            .upsert()
-            .update({ $set: { b: 1 } });
-          test.equal(2, batch.length);
+      metadata: { requires: { topology: 'single' } },
 
-          batch = col.initializeUnorderedBulkOp();
-          batch.insert({ a: 1 });
-          batch
-            .find({})
-            .upsert()
-            .update({ $set: { b: 1 } });
-          test.equal(2, batch.length);
-
-          client.close(done);
-        });
+      test: async function () {
+        const db = client.db();
+        const col = db.collection('batch_write_concerns_ops_1');
+        let batch = col.initializeOrderedBulkOp();
+        batch.insert({ a: 1 });
+        batch
+          .find({})
+          .upsert()
+          .update({ $set: { b: 1 } });
+        test.equal(2, batch.length);
+
+        batch = col.initializeUnorderedBulkOp();
+        batch.insert({ a: 1 });
+        batch
+          .find({})
+          .upsert()
+          .update({ $set: { b: 1 } });
+        test.equal(2, batch.length);
      }
    });
 
     it('should correctly split unordered bulk batch', {
-      metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
+      metadata: { requires: { topology: 'single' } },
 
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const insertFirst = false;
-          const batchSize = 1000;
-          const collection = db.collection('batch_write_unordered_split_test');
-          let operation = collection.initializeUnorderedBulkOp();
-          const documents = [];
-
-          let i = 0;
-          for (; i < 10000; i++) {
-            const document = { name: 'bob' + i };
-            documents.push(document);
-            operation.insert(document);
-          }
+      test: async function () {
+        const db = client.db();
+        const insertFirst = false;
+        const batchSize = 1000;
+        const collection = db.collection('batch_write_unordered_split_test');
+        let operation = collection.initializeUnorderedBulkOp();
+        const documents = [];
+
+        let i = 0;
+        for (; i < 10000; i++) {
+          const document = { name: 'bob' + i };
+          documents.push(document);
+          operation.insert(document);
+        }
 
-          operation.execute(function (err) {
-            expect(err).to.not.exist;
+        await operation.execute();
 
-            operation = collection.initializeUnorderedBulkOp();
+        operation = collection.initializeUnorderedBulkOp();
 
-            if (insertFirst) {
-              // if you add the inserts to the batch first, it works fine.
-              insertDocuments();
-              replaceDocuments();
-            } else {
-              // if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
-              replaceDocuments();
-              insertDocuments();
-            }
+        if (insertFirst) {
+          // if you add the inserts to the batch first, it works fine.
+          insertDocuments();
+          replaceDocuments();
+        } else {
+          // if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
+          replaceDocuments();
+          insertDocuments();
+        }
 
-            operation.execute(function (err) {
-              expect(err).to.not.exist;
-
-              client.close(done);
-            });
-          });
+        await operation.execute();
 
-          function insertDocuments() {
-            for (i = 10000; i < 10200; i++) {
-              operation.insert({ name: 'bob' + i });
-            }
-          }
+        function insertDocuments() {
+          for (i = 10000; i < 10200; i++) {
+            operation.insert({ name: 'bob' + i });
+          }
+        }
 
-          function replaceDocuments() {
-            for (let i = 0; i < batchSize; i++) {
-              operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
-            }
-          }
-        });
+        function replaceDocuments() {
+          for (let i = 0; i < batchSize; i++) {
+            operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
+          }
+        }
      }
    });
 
     it('should correctly split ordered bulk batch', {
-      metadata: { requires: { topology: 'single', mongodb: '>2.5.4' } },
+      metadata: { requires: { topology: 'single' } },
 
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const insertFirst = false;
-          const batchSize = 1000;
-          const collection = db.collection('batch_write_ordered_split_test');
-          let operation = collection.initializeOrderedBulkOp();
-          const documents = [];
-
-          for (let i = 0; i < 10000; i++) {
-            const document = { name: 'bob' + i };
-            documents.push(document);
-            operation.insert(document);
-          }
+      test: async function () {
+        const db = client.db();
+        const insertFirst = false;
+        const batchSize = 1000;
+        const collection = db.collection('batch_write_ordered_split_test');
+        let operation = collection.initializeOrderedBulkOp();
+        const documents = [];
+
+        for (let i = 0; i < 10000; i++) {
+          const document = { name: 'bob' + i };
+          documents.push(document);
+          operation.insert(document);
+        }
 
-          operation.execute(function (err) {
-            expect(err).to.not.exist;
+        await operation.execute();
 
-            operation = collection.initializeOrderedBulkOp();
+        operation = collection.initializeOrderedBulkOp();
 
-            if (insertFirst) {
-              // if you add the inserts to the batch first, it works fine.
-              insertDocuments();
-              replaceDocuments();
-            } else {
-              // if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
-              replaceDocuments();
-              insertDocuments();
-            }
+        if (insertFirst) {
+          // if you add the inserts to the batch first, it works fine.
+          insertDocuments();
+          replaceDocuments();
+        } else {
+          // if you add the updates to the batch first, it fails with the error "insert must contain at least one document"
+          replaceDocuments();
+          insertDocuments();
+        }
 
-            operation.execute(function (err) {
-              expect(err).to.not.exist;
-
-              client.close(done);
-            });
-          });
+        await operation.execute();
 
-          function insertDocuments() {
-            for (let i = 10000; i < 10200; i++) {
-              operation.insert({ name: 'bob' + i });
-            }
-          }
+        function insertDocuments() {
+          for (let i = 10000; i < 10200; i++) {
+            operation.insert({ name: 'bob' + i });
          }
+        }
 
-          function replaceDocuments() {
-            for (let i = 0; i < batchSize; i++) {
-              operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
-            }
-          }
-        });
+        function replaceDocuments() {
+          for (let i = 0; i < batchSize; i++) {
+            operation.find({ _id: documents[i]._id }).replaceOne({ name: 'joe' + i });
          }
+        }
      }
    });
 
     it('should correctly handle bulk operation split for unordered bulk operation', {
-      // Add a tag that our runner can trigger on
-      // in this case we are setting that node needs to be higher than 0.10.X to run
-      metadata: {
-        requires: {
-          mongodb: '>=2.6.0',
-          topology: 'single'
-        }
-      },
-
-      test: function (done) {
-        client.connect((err, client) => {
-          const db = client.db();
-          const docs = [];
-          for (let i = 0; i < 5; i++) {
-            docs.push({
-              s: new Array(6000000).join('x')
-            });
-          }
+      metadata: { requires: { topology: 'single' } },
 
-          db.collection('bigdocs_unordered').insertMany(docs, { ordered: false }, function (err) {
-            expect(err).to.not.exist;
-
-            db.collection('bigdocs_unordered').count(function (err, c) {
-              expect(err).to.not.exist;
-              test.equal(5, c);
-
-              client.close(done);
-            });
-          });
-        });
+      test: async function () {
+        const db = client.db();
+        const docs = [];
+        for (let i = 0; i < 5; i++) {
+          docs.push({
+            s: new Array(6000000).join('x')
+          });
+        }
+
+        await db.collection('bigdocs_unordered').insertMany(docs, { ordered: false });
+
+        const c = await db.collection('bigdocs_unordered').countDocuments();
+        test.equal(5, c);
      }
    });
 
@@ -1560,365 +1376,259 @@ describe('Bulk', function () {
       expect(error).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
     });
 
-    it('should return an error instead of throwing when an empty bulk operation is submitted (with promise)', function () {
-      return client
-        .db()
-        .collection('doesnt_matter')
-        .insertMany([])
-
-        .then(function () {
-          test.equal(false, true); // this should not happen!
-        })
-        .catch(function (err) {
-          expect(err).to.be.instanceOf(MongoDriverError);
-          expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
-        });
-    });
-
-    it('should properly account for array key size in bulk unordered inserts', function () {
+    // TODO(NODE-7219): remove outdated test
+    // it('should return an error instead of throwing when an empty bulk operation is submitted (with promise)', function () {
+    //   return client
+    //     .db()
+    //     .collection('doesnt_matter')
+    //     .insertMany([])
+    //
+    //     .then(function () {
+    //       test.equal(false, true); // this should not happen!
+    //     })
+    //     .catch(function (err) {
+    //       expect(err).to.be.instanceOf(MongoDriverError);
+    //       expect(err).to.have.property('message', 'Invalid BulkOperation, Batch cannot be empty');
+    //     });
+    // });
+
+    it('should properly account for array key size in bulk unordered inserts', async function () {
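+      // BSON serializes array indexes as string keys ('0', '1', ...), so those
+      // keys count toward the document size that batch splitting accounts for.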
       const documents = new Array(20000).fill('').map(() => ({
         arr: new Array(19).fill('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
       }));
 
       // NOTE: Hack to get around unrelated strange error in bulkWrites for right now.
-      return client
+      await client
         .db()
         .dropCollection('doesnt_matter')
         .catch(() => {
           // ignore
-        })
-        .then(() => {
-          return client.db().createCollection('doesnt_matter');
-        })
-        .then(() => {
-          const coll = client.db().collection('doesnt_matter');
-          return coll.insertMany(documents, { ordered: false });
         });
+
+      await client.db().createCollection('doesnt_matter');
+      const coll = client.db().collection('doesnt_matter');
+      await coll.insertMany(documents, { ordered: false });
     });
 
-    it('should properly account for array key size in bulk ordered inserts', function () {
+    it('should properly account for array key size in bulk ordered inserts', async function () {
       const documents = new Array(20000).fill('').map(() => ({
         arr: new Array(19).fill('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')
       }));
 
-      return client
+      await client
         .db()
         .dropCollection('doesnt_matter')
         .catch(() => {
           // ignore
-        })
-        .then(() => {
-          return client.db().createCollection('doesnt_matter');
-        })
-        .then(() => {
-          const coll = client.db().collection('doesnt_matter');
-          return coll.insertMany(documents, { ordered: true });
         });
+      await client.db().createCollection('doesnt_matter');
+      const coll = client.db().collection('doesnt_matter');
+      await coll.insertMany(documents, { ordered: true });
     });
 
-    it('properly accounts for bson size in bytes in bulk ordered inserts', function () {
+    it('properly accounts for bson size in bytes in bulk ordered inserts', async function () {
       const size = MAX_BSON_SIZE / 2;
       const largeString = crypto.randomBytes(size - 100).toString('hex');
       const documents = [{ s: largeString }, { s: largeString }];
 
-      let db;
-
-      return client
-        .connect()
-        .then(() => {
-          db = client.db();
-          return db.dropCollection('doesnt_matter').catch(() => {
-            // ignore
-          });
-        })
-        .then(() => {
-          return db.createCollection('doesnt_matter');
-        })
-        .then(() => {
-          const coll = db.collection('doesnt_matter');
-          return coll.insertMany(documents, { ordered: true });
-        })
-        .finally(() => client.close());
+      const db = client.db();
+      await db.dropCollection('doesnt_matter').catch(() => {
+        // ignore
+      });
+      await db.createCollection('doesnt_matter');
+      const coll = db.collection('doesnt_matter');
+      await coll.insertMany(documents, { ordered: true });
     });
 
-    it('properly accounts for bson size in bytes in bulk unordered inserts', function () {
+    it('properly accounts for bson size in bytes in bulk unordered inserts', async function () {
       const size = MAX_BSON_SIZE / 2;
       const largeString = crypto.randomBytes(size - 100).toString('hex');
       const documents = [{ s: largeString }, { s: largeString }];
 
-      let db;
-
-      return client
-        .connect()
-        .then(() => {
-          db = client.db();
-          return db.dropCollection('doesnt_matter').catch(() => {
-            // ignore
-          });
-        })
-        .then(() => {
-          return db.createCollection('doesnt_matter');
-        })
-        .then(() => {
-          const coll = db.collection('doesnt_matter');
-          return coll.insertMany(documents, { ordered: false });
-        })
-        .finally(() => client.close());
+      const db = client.db();
+      await db.dropCollection('doesnt_matter').catch(() => {
+        // ignore
+      });
+      await db.createCollection('doesnt_matter');
+      const coll = db.collection('doesnt_matter');
+      await coll.insertMany(documents, { ordered: false });
     });
 
-    function testPropagationOfBulkWriteError(bulk) {
-      return bulk.execute().then(
-        () => {
-          throw new Error('Expected execute to error but it passed');
-        },
-        err => {
-          expect(err).to.be.an.instanceOf(MongoDriverError);
-        }
-      );
-    }
-
     it('should propagate the proper error from executing an empty ordered batch', async function () {
-      await client.connect();
       const collection = client.db().collection('doesnt_matter');
 
-      await testPropagationOfBulkWriteError(collection.initializeOrderedBulkOp());
-      await client.close();
+      const err = await collection
+        .initializeOrderedBulkOp()
+        .execute()
+        .catch(err => err);
+      expect(err).to.be.an.instanceOf(MongoDriverError);
     });
 
-    it('should propagate the proper error from executing an empty unordered batch', function () {
-      return client
-        .connect()
-        .then(() => {
-          const collection = client.db().collection('doesnt_matter');
-
-          return testPropagationOfBulkWriteError(collection.initializeUnorderedBulkOp());
-        })
-        .then(() => client.close());
+    it('should propagate the proper error from executing an empty unordered batch', async function () {
+      const collection = client.db().collection('doesnt_matter');
+      const err = await collection
+        .initializeUnorderedBulkOp()
+        .execute()
+        .catch(err => err);
+      expect(err).to.be.an.instanceOf(MongoDriverError);
    });
 
-    it('should promote a single error to the top-level message, and preserve writeErrors', function () {
-      return client.connect().then(() => {
-        this.defer(() => client.close());
-
-        const coll = client.db().collection<{ _id: number; a: number }>('single_bulk_write_error');
-        return coll
-          .drop()
-          .catch(ignoreNsNotFound)
-          .then(() => coll.insertMany(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i }))))
-          .then(() =>
-            coll.bulkWrite([{ insertOne: { _id: 5, a: 0 } }, { insertOne: { _id: 5, a: 0 } }])
-          )
-          .then(
-            () => {
-              throw new Error('expected a bulk error');
-            },
-            err => {
-              expect(err)
-                .property('message')
-                .to.match(/E11000/);
-              expect(err).to.have.property('writeErrors').with.length(1);
-            }
-          );
-      });
+    it('should promote a single error to the top-level message, and preserve writeErrors', async function () {
+      const coll = client.db().collection<{ _id: number; a: number }>('single_bulk_write_error');
+      await coll.drop().catch(ignoreNsNotFound);
+      await coll.insertMany(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i })));
+      const err = await coll
+        .bulkWrite([
+          { insertOne: { document: { _id: 5, a: 0 } } },
+          { insertOne: { document: { _id: 5, a: 0 } } }
+        ])
+        .catch(err => err);
+      expect(err)
+        .property('message')
+        .to.match(/E11000/);
+      expect(err).to.have.property('writeErrors').with.length(1);
    });
 
-    it('should preserve order of operation index in unordered bulkWrite', function () {
-      return client.connect().then(() => {
-        this.defer(() => client.close());
-
-        const coll = client.db().collection<{ _id: number; a: number }>('bulk_write_ordering_test');
-        return coll
-          .drop()
-          .catch(ignoreNsNotFound)
-          .then(() => coll.insertMany(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i }))))
-          .then(() =>
-            coll
-              .createIndex({ a: 1 }, { unique: true })
-              .then(() =>
-                coll.bulkWrite(
-                  [
-                    { insertOne: { _id: 5, a: 0 } },
-                    { updateOne: { filter: { _id: 1 }, update: { $set: { a: 15 } } } },
-                    { insertOne: { _id: 6, a: 0 } },
-                    { updateOne: { filter: { _id: 2 }, update: { $set: { a: 42 } } } }
-                  ],
-                  { ordered: false }
-                )
-              )
-          )
-          .then(
-            () => {
-              throw new Error('expected a bulk error');
-            },
-            err => {
-              expect(err).to.have.property('writeErrors').with.length(2);
-
-              expect(err).to.have.nested.property('writeErrors[0].err.index', 0);
-              expect(err).to.have.nested.property('writeErrors[1].err.index', 2);
-            }
-          );
-      });
-    });
+    it('should preserve order of operation index in unordered bulkWrite', async function () {
+      const coll = client.db().collection<{ _id: number; a: number }>('bulk_write_ordering_test');
+      await coll.drop().catch(ignoreNsNotFound);
+      await coll.insertMany(Array.from({ length: 4 }, (_, i) => ({ _id: i, a: i })));
+      await coll.createIndex({ a: 1 }, { unique: true });
+      const err = await coll
+        .bulkWrite(
+          [
+            { insertOne: { document: { _id: 5, a: 0 } } },
+            { updateOne: { filter: { _id: 1 }, update: { $set: { a: 15 } } } },
+            { insertOne: { document: { _id: 6, a: 0 } } },
+            { updateOne: { filter: { _id: 2 }, update: { $set: { a: 42 } } } }
+          ],
+          { ordered: false }
+        )
+        .catch(err => err);
 
-    it('should preserve order of operation index in unordered bulk operation', function () {
-      return client.connect().then(() => {
-        this.defer(() => client.close());
-
-        const coll = client.db().collection('unordered_preserve_order');
-        return coll
-          .drop()
-          .catch(ignoreNsNotFound)
-          .then(() => {
-            const batch = coll.initializeUnorderedBulkOp();
-            batch.insert({ _id: 1, a: 0 });
-            batch.insert({ _id: 1, a: 0 });
-            batch.insert({ _id: 2, a: 0 });
-            batch.insert({ _id: 2, a: 0 });
-            return batch.execute();
-          })
-          .then(
-            () => {
-              throw new Error('expected a bulk error');
-            },
-            err => {
-              expect(err).to.have.property('writeErrors').with.length(2);
-
-              expect(err).to.have.nested.property('writeErrors[0].err.index', 1);
-              expect(err).to.have.nested.property('writeErrors[1].err.index', 3);
-            }
-          );
-      });
+      expect(err).to.have.property('writeErrors').with.length(2);
+
+      expect(err).to.have.nested.property('writeErrors[0].err.index', 0);
+      expect(err).to.have.nested.property('writeErrors[1].err.index', 2);
+    });
+
+    it('should preserve order of operation index in unordered bulk operation', async function () {
+      const coll = client.db().collection('unordered_preserve_order');
+      await coll.drop().catch(ignoreNsNotFound);
+      const batch = coll.initializeUnorderedBulkOp();
+      batch.insert({ _id: 1, a: 0 });
+      batch.insert({ _id: 1, a: 0 });
+      batch.insert({ _id: 2, a: 0 });
+      batch.insert({ _id: 2, a: 0 });
+      const err = await batch.execute().catch(err => err);
+      expect(err).to.have.property('writeErrors').with.length(2);
+
+      expect(err).to.have.nested.property('writeErrors[0].err.index', 1);
+      expect(err).to.have.nested.property('writeErrors[1].err.index', 3);
    });
 
-    it('should not fail on the first error in an unorderd bulkWrite', function () {
-      return client.connect().then(() => {
-        this.defer(() => client.close());
-
-        const coll = client.db().collection('bulk_op_ordering_test');
-        return coll
-          .drop()
-          .catch(ignoreNsNotFound)
-          .then(() => coll.createIndex({ email: 1 }, { unique: 1, background: false }))
-          .then(() =>
-            Promise.all([
-              coll.updateOne(
-                { email: 'adam@gmail.com' },
-                { $set: { name: 'Adam Smith', age: 29 } },
-                { upsert: true }
-              ),
-              coll.updateOne(
-                { email: 'john@gmail.com' },
-                { $set: { name: 'John Doe', age: 32 } },
-                { upsert: true }
-              )
-            ])
-          )
-          .then(() =>
-            coll.bulkWrite(
-              [
-                {
-                  updateOne: {
-                    filter: { email: 'adam@gmail.com' },
-                    update: { $set: { age: 39 } }
-                  }
-                },
-                {
-                  insertOne: {
-                    document:
{
- email: 'john@gmail.com'
- }
- }
- }
- ],
- { ordered: false }
- )
- )
- .then(
- () => {
- throw new Error('expected a bulk error');
+ it('should not fail on the first error in an unordered bulkWrite', async function () {
+ const coll = client.db().collection('bulk_op_ordering_test');
+ await coll.drop().catch(ignoreNsNotFound);
+ await coll.createIndex({ email: 1 }, { unique: true, background: false });
+ await Promise.all([
+ coll.updateOne(
+ { email: 'adam@gmail.com' },
+ { $set: { name: 'Adam Smith', age: 29 } },
+ { upsert: true }
+ ),
+ coll.updateOne(
+ { email: 'john@gmail.com' },
+ { $set: { name: 'John Doe', age: 32 } },
+ { upsert: true }
+ )
+ ]);
+ // With { ordered: false }, the duplicate-key failure from the insert must not
+ // prevent the update in the same batch from being applied.
+ const err = await coll
+ .bulkWrite(
+ [
+ {
+ updateOne: {
+ filter: { email: 'adam@gmail.com' },
+ update: { $set: { age: 39 } }
+ }
 },
- err => expect(err).property('code').to.equal(11000)
- )
- .then(() => coll.findOne({ email: 'adam@gmail.com' }))
- .then(updatedAdam => expect(updatedAdam).property('age').to.equal(39));
- });
+ {
+ insertOne: {
+ document: {
+ email: 'john@gmail.com'
+ }
+ }
+ }
+ ],
+ { ordered: false }
+ )
+ .catch(err => err);
+ expect(err).property('code').to.equal(11000);
+
+ const updatedAdam = await coll.findOne({ email: 'adam@gmail.com' });
+ expect(updatedAdam).property('age').to.equal(39);
 });

- it('should return correct ids for documents with generated ids', function (done) {
+ it('should return correct ids for documents with generated ids', async function () {
 const bulk = client.db().collection('coll').initializeUnorderedBulkOp();
 for (let i = 0; i < 2; i++) bulk.insert({ x: 1 });

- bulk.execute((err, result) => {
- expect(err).to.not.exist;
- expect(result).property('insertedIds').to.exist;
- expect(Object.keys(result.insertedIds)).to.have.length(2);
- expect(result.insertedIds[0]).to.exist;
- expect(result.insertedIds[1]).to.exist;
- done();
- });
+ const result = await bulk.execute();
+ expect(result).property('insertedIds').to.exist;
+ expect(Object.keys(result.insertedIds)).to.have.length(2);
+ expect(result.insertedIds[0]).to.exist;
+ expect(result.insertedIds[1]).to.exist;
 });

- it('should throw an error if bulk execute is called more than once', function (done) {
+ it('should throw an error if bulk execute is called more than once', async function () {
 const bulk = client.db().collection('coll').initializeUnorderedBulkOp();
 bulk.insert({});

- bulk.execute((err, result) => {
- expect(err).to.not.exist;
- expect(result).to.exist;
+ await bulk.execute();

- bulk.execute(err => {
- expect(err).to.be.instanceof(MongoBatchReExecutionError);
- done();
- });
- });
+ // A bulk operation is single-use: a second execute() must reject rather than
+ // re-send the already-executed batch.
+ const err = await bulk.execute().catch(err => err);
+ expect(err).to.be.instanceof(MongoBatchReExecutionError);
 });

- it('should apply collation via FindOperators', {
- metadata: { requires: { mongodb: '>= 3.4' } },
- async test() {
- const locales = ['fr', 'de', 'es'];
- const bulk = client.db().collection('coll').initializeOrderedBulkOp();
+ it('should apply collation via FindOperators', async function () {
+ const locales = ['fr', 'de', 'es'];
+ const bulk = client.db().collection('coll').initializeOrderedBulkOp();

- const events = [];
- client.on('commandStarted', event => {
- if (['update', 'delete'].includes(event.commandName)) {
- events.push(event);
- }
- });
+ const events = [];
+ client.on('commandStarted', event => {
+ if (['update', 'delete'].includes(event.commandName)) {
+ events.push(event);
+ }
+ });

- // updates
- bulk
- .find({ b: 1 })
- .collation({ locale: locales[0] })
- .updateOne({ $set: { b: 2 } });
- bulk
- 
.find({ b: 2 }) - .collation({ locale: locales[1] }) - .update({ $set: { b: 3 } }); - bulk.find({ b: 3 }).collation({ locale: locales[2] }).replaceOne({ b: 2 }); + // updates + bulk + .find({ b: 1 }) + .collation({ locale: locales[0] }) + .updateOne({ $set: { b: 2 } }); + bulk + .find({ b: 2 }) + .collation({ locale: locales[1] }) + .update({ $set: { b: 3 } }); + bulk.find({ b: 3 }).collation({ locale: locales[2] }).replaceOne({ b: 2 }); - // deletes - bulk.find({ b: 2 }).collation({ locale: locales[0] }).deleteOne(); - bulk.find({ b: 1 }).collation({ locale: locales[1] }).delete(); + // deletes + bulk.find({ b: 2 }).collation({ locale: locales[0] }).deleteOne(); + bulk.find({ b: 1 }).collation({ locale: locales[1] }).delete(); - await bulk.execute(); + await bulk.execute(); - try { - expect(events).to.be.an('array').with.length.at.least(1); - expect(events[0]).property('commandName').to.equal('update'); - const updateCommand = events[0].command; - expect(updateCommand).property('updates').to.be.an('array').with.length(3); - updateCommand.updates.forEach((statement, idx) => { - expect(statement).property('collation').to.eql({ locale: locales[idx] }); - }); - expect(events[1]).property('commandName').to.equal('delete'); - const deleteCommand = events[1].command; - expect(deleteCommand).property('deletes').to.be.an('array').with.length(2); - deleteCommand.deletes.forEach((statement, idx) => { - expect(statement).property('collation').to.eql({ locale: locales[idx] }); - }); - } finally { - await client.close(); - } - } + expect(events).to.be.an('array').with.length.at.least(1); + expect(events[0]).property('commandName').to.equal('update'); + const updateCommand = events[0].command; + expect(updateCommand).property('updates').to.be.an('array').with.length(3); + updateCommand.updates.forEach((statement, idx) => { + expect(statement).property('collation').to.eql({ locale: locales[idx] }); + }); + expect(events[1]).property('commandName').to.equal('delete'); + const deleteCommand = events[1].command; + expect(deleteCommand).property('deletes').to.be.an('array').with.length(2); + deleteCommand.deletes.forEach((statement, idx) => { + expect(statement).property('collation').to.eql({ locale: locales[idx] }); + }); }); it('should apply hint via FindOperators', { @@ -1966,53 +1676,49 @@ describe('Bulk', function () { } }); - it('should apply arrayFilters to bulk updates via FindOperators', { - metadata: { requires: { mongodb: '>= 3.6' } }, - test: function (done) { - const events = []; - client.on('commandStarted', event => { - if (['update', 'delete'].includes(event.commandName)) { - events.push(event); - } - }); + it('should apply arrayFilters to bulk updates via FindOperators', async function () { + const events = []; + client.on('commandStarted', event => { + if (['update', 'delete'].includes(event.commandName)) { + events.push(event); + } + }); - client.db().dropCollection('bulkArrayFilters', () => { - const coll = client.db().collection('bulkArrayFilters'); - const bulk = coll.initializeOrderedBulkOp(); - - bulk.insert({ person: 'Foo', scores: [4, 9, 12] }); - bulk.insert({ person: 'Bar', scores: [13, 0, 52] }); - bulk - .find({ scores: { $lt: 1 } }) - .arrayFilters([{ e: { $lt: 1 } }]) - .updateOne({ $set: { 'scores.$[e]': 1 } }); - bulk - .find({ scores: { $gte: 10 } }) - .arrayFilters([{ e: { $gte: 10 } }]) - .update({ $set: { 'scores.$[e]': 10 } }); - - bulk.execute(err => { - expect(err).to.not.exist; - expect(events).to.be.an('array').with.lengthOf(1); - 
expect(events[0]).to.have.property('commandName', 'update'); - const updateCommand = events[0].command; - expect(updateCommand).property('updates').to.be.an('array').with.lengthOf(2); - updateCommand.updates.forEach(update => expect(update).to.have.property('arrayFilters')); - coll.find({}).toArray((err, result) => { - expect(err).to.not.exist; - expect(result[0]).to.containSubset({ - person: 'Foo', - scores: [4, 9, 10] - }); - expect(result[1]).to.containSubset({ - person: 'Bar', - scores: [10, 1, 10] - }); - client.close(done); - }); - }); + await client + .db() + .dropCollection('bulkArrayFilters') + .catch(() => { + // ignore }); - } + const coll = await client.db().createCollection('bulkArrayFilters'); + const bulk = coll.initializeOrderedBulkOp(); + + bulk.insert({ person: 'Foo', scores: [4, 9, 12] }); + bulk.insert({ person: 'Bar', scores: [13, 0, 52] }); + bulk + .find({ scores: { $lt: 1 } }) + .arrayFilters([{ e: { $lt: 1 } }]) + .updateOne({ $set: { 'scores.$[e]': 1 } }); + bulk + .find({ scores: { $gte: 10 } }) + .arrayFilters([{ e: { $gte: 10 } }]) + .update({ $set: { 'scores.$[e]': 10 } }); + + await bulk.execute(); + expect(events).to.be.an('array').with.lengthOf(1); + expect(events[0]).to.have.property('commandName', 'update'); + const updateCommand = events[0].command; + expect(updateCommand).property('updates').to.be.an('array').with.lengthOf(2); + updateCommand.updates.forEach(update => expect(update).to.have.property('arrayFilters')); + const result = await coll.find({}).toArray(); + expect(result[0]).to.containSubset({ + person: 'Foo', + scores: [4, 9, 10] + }); + expect(result[1]).to.containSubset({ + person: 'Bar', + scores: [10, 1, 10] + }); }); it('should accept pipeline-style updates', async function () { @@ -2030,21 +1736,18 @@ describe('Bulk', function () { expect(contents).to.deep.equal([{ a: 11 }, { a: 102 }]); }); - it('should throw an error if raw operations are passed to bulkWrite', function () { + it('should throw an error if raw operations are passed to bulkWrite', async function () { const coll = client.db().collection('single_bulk_write_error'); - return coll + const err = await coll .bulkWrite([ + // @ts-expect-error Not allowed in TS, but can be used in JS { updateOne: { q: { a: 2 }, u: { $set: { a: 2 } }, upsert: true } }, + // @ts-expect-error Not allowed in TS, but can be used in JS { deleteOne: { q: { c: 1 } } } ]) - .then( - () => { - throw new Error('expected a bulk error'); - }, - err => { - expect(err).to.match(/Raw operations are not allowed/); - } - ); + .catch(err => err); + + expect(err).to.match(/Raw operations are not allowed/); }); describe('Bulk operation transaction rollback', () => {
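+ // Asserts that bulk writes applied within a transaction are rolled back when
+ // the transaction is aborted.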