From 6aa2434628e85ead8e5be620c27ebe8ab08a1c05 Mon Sep 17 00:00:00 2001
From: Matt Broadstone
Date: Mon, 2 Mar 2020 18:16:21 -0500
Subject: [PATCH] feat: remove legacy topology types

NODE-2318
---
 index.js | 4 -
 .../uri_parser.js => connection_string.js} | 8 +-
 lib/core/cursor.js | 3 +-
 lib/core/index.js | 7 +-
 lib/core/topologies/mongos.js | 1396 ---------------
 lib/core/topologies/replset.js | 1559 -----------------
 lib/core/topologies/replset_state.js | 1121 ------------
 lib/core/topologies/server.js | 990 -----------
 lib/core/utils.js | 5 -
 lib/operations/connect.js | 103 +-
 lib/operations/execute_operation.js | 3 +-
 lib/topologies/mongos.js | 445 -----
 lib/topologies/replset.js | 489 ------
 lib/topologies/server.js | 448 -----
 lib/url_parser.js | 623 -------
 .../disconnect_handler.test.js | 0
 .../core => disabled}/mongos/events.test.js | 0
 .../mongos/reconnect.test.js | 0
 .../mongos/retryable_writes.test.js | 0
 .../core => disabled}/mongos/sessions.test.js | 0
 .../mongos_mocks/mixed_seed_list.test.js | 0
 .../mongos_mocks/multiple_proxies.test.js | 0
 .../mongos_mocks/proxy_failover.test.js | 0
 .../proxy_read_preference.test.js | 0
 .../single_proxy_connection.test.js | 0
 test/{unit/core => disabled}/pool.test.js | 0
 .../reconnect.test.js | 0
 .../core => disabled}/replset.test.js | 0
 .../core => disabled}/replset/auth.test.js | 0
 .../replset/compression.test.js | 0
 .../replset/read_preference.test.js | 0
 .../replset/retryable_writes.test.js | 0
 .../replset/sessions.test.js | 0
 .../replset/step_down.test.js | 0
 .../transactions_feature_decoration.test.js | 0
 .../core => disabled}/replset/utils.test.js | 0
 .../replset_connection.test.js | 0
 .../replset_failover.test.js | 0
 .../replset_operations.test.js | 0
 .../replset_read_preference.test.js | 0
 .../rs_mocks/add_remove.test.js | 0
 .../rs_mocks/all_servers_close.test.js | 0
 .../rs_mocks/connection.test.js | 0
 .../rs_mocks/failover.test.js | 0
 .../rs_mocks/maintanance_mode.test.js | 0
 .../rs_mocks/monitoring.test.js | 0
 .../rs_mocks/no_primary_found.test.js | 0
 .../rs_mocks/operation.test.js | 0
 .../rs_mocks/primary_loses_network.test.js | 0
 .../rs_mocks/read_preferences.test.js | 0
 .../rs_mocks/step_down.test.js | 0
 test/{functional => disabled}/sdam.test.js | 0
 .../core => disabled}/server.test.js | 0
 .../sharding_failover.test.js | 0
 .../sharding_read_preference.test.js | 0
 .../core => disabled}/single/sessions.test.js | 0
 .../single_mocks/compression.test.js | 0
 test/functional/apm.test.js | 24 +-
 test/functional/connection.test.js | 26 -
 .../functional/connection_string_spec.test.js | 32 -
 .../core/basic_replset_server_auth.test.js | 491 ------
 test/functional/core/client_metadata.test.js | 97 -
 test/functional/core/max_staleness.test.js | 145 --
 .../core/mongos_server_selection.test.js | 85 -
 .../functional/core/operation_example.test.js | 65 +-
 test/functional/core/replset_state.test.js | 131 --
 test/functional/core/topology.test.js | 2 +-
 test/functional/cursor.test.js | 10 +-
 test/functional/mongo_client_options.test.js | 23 -
 test/functional/mongodb_srv.test.js | 61 -
 test/functional/operation_example.test.js | 17 +-
 .../operation_promises_example.test.js | 44 -
 test/functional/sessions.test.js | 5 -
 test/functional/spec-runner/index.js | 1 +
 test/functional/transactions.test.js | 4 +-
 test/functional/uri.test.js | 6 +-
 test/functional/uri_options_spec.test.js | 5 +-
 test/functional/url_parser.test.js | 1030 -----------
 test/spec/dns-txt-records/README.rst | 92 -
 .../longer-parent-in-return.json | 16 -
 .../longer-parent-in-return.yml | 11 -
 .../dns-txt-records/misformatted-option.json | 7 -
 .../dns-txt-records/misformatted-option.yml | 5 -
 test/spec/dns-txt-records/no-results.json | 7 -
 test/spec/dns-txt-records/no-results.yml | 5 -
 .../dns-txt-records/not-enough-parts.json | 7 -
 .../spec/dns-txt-records/not-enough-parts.yml | 5 -
 .../one-result-default-port.json | 15 -
 .../one-result-default-port.yml | 10 -
 .../one-txt-record-multiple-strings.json | 15 -
 .../one-txt-record-multiple-strings.yml | 10 -
 test/spec/dns-txt-records/one-txt-record.json | 16 -
 test/spec/dns-txt-records/one-txt-record.yml | 11 -
 .../parent-part-mismatch1.json | 7 -
 .../dns-txt-records/parent-part-mismatch1.yml | 5 -
 .../parent-part-mismatch2.json | 7 -
 .../dns-txt-records/parent-part-mismatch2.yml | 5 -
 .../parent-part-mismatch3.json | 7 -
 .../dns-txt-records/parent-part-mismatch3.yml | 5 -
 .../parent-part-mismatch4.json | 7 -
 .../dns-txt-records/parent-part-mismatch4.yml | 5 -
 .../parent-part-mismatch5.json | 7 -
 .../dns-txt-records/parent-part-mismatch5.yml | 5 -
 .../returned-parent-too-short.json | 7 -
 .../returned-parent-too-short.yml | 5 -
 .../returned-parent-wrong.json | 7 -
 .../dns-txt-records/returned-parent-wrong.yml | 5 -
 .../two-results-default-port.json | 16 -
 .../two-results-default-port.yml | 11 -
 .../two-results-nonstandard-port.json | 16 -
 .../two-results-nonstandard-port.yml | 11 -
 .../spec/dns-txt-records/two-txt-records.json | 7 -
 test/spec/dns-txt-records/two-txt-records.yml | 5 -
 .../txt-record-not-allowed-option.json | 7 -
 .../txt-record-not-allowed-option.yml | 5 -
 ...txt-record-with-overridden-ssl-option.json | 16 -
 .../txt-record-with-overridden-ssl-option.yml | 11 -
 ...txt-record-with-overridden-uri-option.json | 16 -
 .../txt-record-with-overridden-uri-option.yml | 11 -
 .../txt-record-with-unallowed-option.json | 7 -
 .../txt-record-with-unallowed-option.yml | 5 -
 test/spec/dns-txt-records/uri-with-auth.json | 17 -
 test/spec/dns-txt-records/uri-with-auth.yml | 12 -
 test/spec/dns-txt-records/uri-with-port.json | 7 -
 test/spec/dns-txt-records/uri-with-port.yml | 5 -
 .../dns-txt-records/uri-with-two-hosts.json | 7 -
 .../dns-txt-records/uri-with-two-hosts.yml | 5 -
 test/tools/runner/config.js | 24 +-
 test/tools/runner/filters/unified_filter.js | 25 -
 test/tools/runner/index.js | 2 +-
 .../runner/plugins/client_leak_checker.js | 7 +-
 .../runner/plugins/session_leak_checker.js | 2 +-
 test/unit/client_metadata.test.js | 51 -
 test/unit/core/connection_string.test.js | 2 +-
 test/unit/core/mongodb_srv.test.js | 2 +-
 test/unit/core/response_test.js.test.js | 4 +-
 test/unit/core/scram_iterations.test.js | 10 +-
 test/unit/core/sessions.test.js | 12 +-
 test/unit/core/write_concern_error.test.js | 34 +-
 test/unit/db.test.js | 4 +
 .../server_selection/select_servers.test.js | 1 -
 test/unit/sdam/spec.test.js | 2 +-
 test/unit/sdam/topology.test.js | 62 +-
 143 files changed, 171 insertions(+), 10094 deletions(-)
 rename lib/{core/uri_parser.js => connection_string.js} (99%)
 delete mode 100644 lib/core/topologies/mongos.js
 delete mode 100644 lib/core/topologies/replset.js
 delete mode 100644 lib/core/topologies/replset_state.js
 delete mode 100644 lib/core/topologies/server.js
 delete mode 100644 lib/topologies/mongos.js
 delete mode 100644 lib/topologies/replset.js
 delete mode 100644 lib/topologies/server.js
 delete mode 100644 lib/url_parser.js
 rename test/{functional => disabled}/disconnect_handler.test.js (100%)
 rename test/{unit/core => disabled}/mongos/events.test.js (100%)
 rename test/{unit/core => disabled}/mongos/reconnect.test.js (100%)
 rename test/{unit/core => disabled}/mongos/retryable_writes.test.js (100%)
 rename test/{unit/core => disabled}/mongos/sessions.test.js (100%)
 rename test/{functional/core => disabled}/mongos_mocks/mixed_seed_list.test.js (100%)
 rename test/{functional/core => disabled}/mongos_mocks/multiple_proxies.test.js (100%)
 rename test/{functional/core => disabled}/mongos_mocks/proxy_failover.test.js (100%)
 rename test/{functional/core => disabled}/mongos_mocks/proxy_read_preference.test.js (100%)
 rename test/{functional/core => disabled}/mongos_mocks/single_proxy_connection.test.js (100%)
 rename test/{unit/core => disabled}/pool.test.js (100%)
 rename test/{functional => disabled}/reconnect.test.js (100%)
 rename test/{functional/core => disabled}/replset.test.js (100%)
 rename test/{unit/core => disabled}/replset/auth.test.js (100%)
 rename test/{unit/core => disabled}/replset/compression.test.js (100%)
 rename test/{unit/core => disabled}/replset/read_preference.test.js (100%)
 rename test/{unit/core => disabled}/replset/retryable_writes.test.js (100%)
 rename test/{unit/core => disabled}/replset/sessions.test.js (100%)
 rename test/{unit/core => disabled}/replset/step_down.test.js (100%)
 rename test/{unit/core => disabled}/replset/transactions_feature_decoration.test.js (100%)
 rename test/{unit/core => disabled}/replset/utils.test.js (100%)
 rename test/{functional => disabled}/replset_connection.test.js (100%)
 rename test/{functional => disabled}/replset_failover.test.js (100%)
 rename test/{functional => disabled}/replset_operations.test.js (100%)
 rename test/{functional => disabled}/replset_read_preference.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/add_remove.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/all_servers_close.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/connection.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/failover.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/maintanance_mode.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/monitoring.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/no_primary_found.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/operation.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/primary_loses_network.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/read_preferences.test.js (100%)
 rename test/{functional/core => disabled}/rs_mocks/step_down.test.js (100%)
 rename test/{functional => disabled}/sdam.test.js (100%)
 rename test/{functional/core => disabled}/server.test.js (100%)
 rename test/{functional => disabled}/sharding_failover.test.js (100%)
 rename test/{functional => disabled}/sharding_read_preference.test.js (100%)
 rename test/{unit/core => disabled}/single/sessions.test.js (100%)
 rename test/{functional/core => disabled}/single_mocks/compression.test.js (100%)
 delete mode 100644 test/functional/connection_string_spec.test.js
 delete mode 100644 test/functional/core/basic_replset_server_auth.test.js
 delete mode 100644 test/functional/core/client_metadata.test.js
 delete mode 100644 test/functional/core/max_staleness.test.js
 delete mode 100644 test/functional/core/mongos_server_selection.test.js
 delete mode 100644 test/functional/core/replset_state.test.js
 delete mode 100644 test/functional/mongodb_srv.test.js
 delete mode 100644 test/functional/url_parser.test.js
 delete mode 100644 test/spec/dns-txt-records/README.rst
 delete mode 100644 test/spec/dns-txt-records/longer-parent-in-return.json
 delete mode 100644 test/spec/dns-txt-records/longer-parent-in-return.yml
 delete mode 100644 test/spec/dns-txt-records/misformatted-option.json
 delete mode 100644 test/spec/dns-txt-records/misformatted-option.yml
 delete mode 100644 test/spec/dns-txt-records/no-results.json
 delete mode 100644 test/spec/dns-txt-records/no-results.yml
 delete mode 100644 test/spec/dns-txt-records/not-enough-parts.json
 delete mode 100644 test/spec/dns-txt-records/not-enough-parts.yml
 delete mode 100644 test/spec/dns-txt-records/one-result-default-port.json
 delete mode 100644 test/spec/dns-txt-records/one-result-default-port.yml
 delete mode 100644 test/spec/dns-txt-records/one-txt-record-multiple-strings.json
 delete mode 100644 test/spec/dns-txt-records/one-txt-record-multiple-strings.yml
 delete mode 100644 test/spec/dns-txt-records/one-txt-record.json
 delete mode 100644 test/spec/dns-txt-records/one-txt-record.yml
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch1.json
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch1.yml
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch2.json
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch2.yml
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch3.json
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch3.yml
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch4.json
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch4.yml
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch5.json
 delete mode 100644 test/spec/dns-txt-records/parent-part-mismatch5.yml
 delete mode 100644 test/spec/dns-txt-records/returned-parent-too-short.json
 delete mode 100644 test/spec/dns-txt-records/returned-parent-too-short.yml
 delete mode 100644 test/spec/dns-txt-records/returned-parent-wrong.json
 delete mode 100644 test/spec/dns-txt-records/returned-parent-wrong.yml
 delete mode 100644 test/spec/dns-txt-records/two-results-default-port.json
 delete mode 100644 test/spec/dns-txt-records/two-results-default-port.yml
 delete mode 100644 test/spec/dns-txt-records/two-results-nonstandard-port.json
 delete mode 100644 test/spec/dns-txt-records/two-results-nonstandard-port.yml
 delete mode 100644 test/spec/dns-txt-records/two-txt-records.json
 delete mode 100644 test/spec/dns-txt-records/two-txt-records.yml
 delete mode 100644 test/spec/dns-txt-records/txt-record-not-allowed-option.json
 delete mode 100644 test/spec/dns-txt-records/txt-record-not-allowed-option.yml
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-unallowed-option.json
 delete mode 100644 test/spec/dns-txt-records/txt-record-with-unallowed-option.yml
 delete mode 100644 test/spec/dns-txt-records/uri-with-auth.json
 delete mode 100644 test/spec/dns-txt-records/uri-with-auth.yml
 delete mode 100644 test/spec/dns-txt-records/uri-with-port.json
 delete mode 100644 test/spec/dns-txt-records/uri-with-port.yml
 delete mode 100644 test/spec/dns-txt-records/uri-with-two-hosts.json
 delete mode 100644 test/spec/dns-txt-records/uri-with-two-hosts.yml
 delete mode 100644 test/tools/runner/filters/unified_filter.js
 delete mode
100644 test/unit/client_metadata.test.js diff --git a/index.js b/index.js index 4e9e6359e8..38c83bfc77 100644 --- a/index.js +++ b/index.js @@ -22,9 +22,6 @@ connect.Admin = require('./lib/admin'); connect.MongoClient = require('./lib/mongo_client'); connect.Db = require('./lib/db'); connect.Collection = require('./lib/collection'); -connect.Server = require('./lib/topologies/server'); -connect.ReplSet = require('./lib/topologies/replset'); -connect.Mongos = require('./lib/topologies/mongos'); connect.ReadPreference = core.ReadPreference; connect.GridStore = require('./lib/gridfs/grid_store'); connect.Chunk = require('./lib/gridfs/chunk'); @@ -34,7 +31,6 @@ connect.CommandCursor = require('./lib/command_cursor'); connect.Cursor = require('./lib/cursor'); connect.GridFSBucket = require('./lib/gridfs-stream'); // Exported to be used in tests not to be used anywhere else -connect.CoreServer = core.Server; connect.CoreConnection = core.Connection; // BSON types exported diff --git a/lib/core/uri_parser.js b/lib/connection_string.js similarity index 99% rename from lib/core/uri_parser.js rename to lib/connection_string.js index 71755266e6..5be77ed44a 100644 --- a/lib/core/uri_parser.js +++ b/lib/connection_string.js @@ -2,8 +2,8 @@ const URL = require('url'); const qs = require('querystring'); const dns = require('dns'); -const MongoParseError = require('./error').MongoParseError; -const ReadPreference = require('./topologies/read_preference'); +const MongoParseError = require('./core/error').MongoParseError; +const ReadPreference = require('./core/topologies/read_preference'); /** * The following regular expression validates a connection string and breaks the @@ -698,4 +698,6 @@ function parseConnectionString(uri, options, callback) { callback(null, result); } -module.exports = parseConnectionString; +module.exports = { + parseConnectionString +}; diff --git a/lib/core/cursor.js b/lib/core/cursor.js index 8b6aa3dfc7..33bc5cfdf2 100644 --- a/lib/core/cursor.js +++ b/lib/core/cursor.js @@ -7,7 +7,6 @@ const MongoNetworkError = require('./error').MongoNetworkError; const mongoErrorContextSymbol = require('./error').mongoErrorContextSymbol; const collationNotSupported = require('./utils').collationNotSupported; const ReadPreference = require('./topologies/read_preference'); -const isUnifiedTopology = require('./utils').isUnifiedTopology; const executeOperation = require('../operations/execute_operation'); const Readable = require('stream').Readable; const SUPPORTS = require('../utils').SUPPORTS; @@ -428,7 +427,7 @@ class CoreCursor extends Readable { const cursor = this; // NOTE: this goes away once cursors use `executeOperation` - if (isUnifiedTopology(cursor.topology) && cursor.topology.shouldCheckForSessionSupport()) { + if (cursor.topology.shouldCheckForSessionSupport()) { cursor.topology.selectServer(ReadPreference.primaryPreferred, err => { if (err) { callback(err); diff --git a/lib/core/index.js b/lib/core/index.js index 2da5573a47..fcded516b7 100644 --- a/lib/core/index.js +++ b/lib/core/index.js @@ -25,9 +25,6 @@ module.exports = { mongoErrorContextSymbol: require('./error').mongoErrorContextSymbol, // Core Connection: require('./connection/connection'), - Server: require('./topologies/server'), - ReplSet: require('./topologies/replset'), - Mongos: require('./topologies/mongos'), Logger: require('./connection/logger'), Cursor: require('./cursor').CoreCursor, ReadPreference: require('./topologies/read_preference'), @@ -45,7 +42,5 @@ module.exports = { Plain: require('./auth/plain'), 
GSSAPI: require('./auth/gssapi'), ScramSHA1: require('./auth/scram').ScramSHA1, - ScramSHA256: require('./auth/scram').ScramSHA256, - // Utilities - parseConnectionString: require('./uri_parser') + ScramSHA256: require('./auth/scram').ScramSHA256 }; diff --git a/lib/core/topologies/mongos.js b/lib/core/topologies/mongos.js deleted file mode 100644 index 7a1ff59f7c..0000000000 --- a/lib/core/topologies/mongos.js +++ /dev/null @@ -1,1396 +0,0 @@ -'use strict'; - -const inherits = require('util').inherits; -const f = require('util').format; -const EventEmitter = require('events').EventEmitter; -const CoreCursor = require('../cursor').CoreCursor; -const Logger = require('../connection/logger'); -const retrieveBSON = require('../connection/utils').retrieveBSON; -const MongoError = require('../error').MongoError; -const Server = require('./server'); -const diff = require('./shared').diff; -const cloneOptions = require('./shared').cloneOptions; -const SessionMixins = require('./shared').SessionMixins; -const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; -const relayEvents = require('../utils').relayEvents; -const BSON = retrieveBSON(); -const getMMAPError = require('./shared').getMMAPError; -const makeClientMetadata = require('../utils').makeClientMetadata; -const legacyIsRetryableWriteError = require('./shared').legacyIsRetryableWriteError; - -/** - * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is - * used to construct connections. - */ - -// -// States -var DISCONNECTED = 'disconnected'; -var CONNECTING = 'connecting'; -var CONNECTED = 'connected'; -var UNREFERENCED = 'unreferenced'; -var DESTROYING = 'destroying'; -var DESTROYED = 'destroyed'; - -function stateTransition(self, newState) { - var legalTransitions = { - disconnected: [CONNECTING, DESTROYING, DESTROYED, DISCONNECTED], - connecting: [CONNECTING, DESTROYING, DESTROYED, CONNECTED, DISCONNECTED], - connected: [CONNECTED, DISCONNECTED, DESTROYING, DESTROYED, UNREFERENCED], - unreferenced: [UNREFERENCED, DESTROYING, DESTROYED], - destroyed: [DESTROYED] - }; - - // Get current state - var legalStates = legalTransitions[self.state]; - if (legalStates && legalStates.indexOf(newState) !== -1) { - self.state = newState; - } else { - self.s.logger.error( - f( - 'Mongos with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', - self.id, - self.state, - newState, - legalStates - ) - ); - } -} - -// -// ReplSet instance id -var id = 1; -var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; - -/** - * Creates a new Mongos instance - * @class - * @param {array} seedlist A list of seeds for the replicaset - * @param {number} [options.haInterval=5000] The High availability period for replicaset inquiry - * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled - * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for MongoS proxy selection - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=1000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=0] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] 
Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {Mongos} A cursor instance - * @fires Mongos#connect - * @fires Mongos#reconnect - * @fires Mongos#joined - * @fires Mongos#left - * @fires Mongos#failed - * @fires Mongos#fullsetup - * @fires Mongos#all - * @fires Mongos#serverHeartbeatStarted - * @fires Mongos#serverHeartbeatSucceeded - * @fires Mongos#serverHeartbeatFailed - * @fires Mongos#topologyOpening - * @fires Mongos#topologyClosed - * @fires Mongos#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). - */ -var Mongos = function(seedlist, options) { - options = options || {}; - - // Get replSet Id - this.id = id++; - - // deduplicate seedlist - if (Array.isArray(seedlist)) { - seedlist = seedlist.reduce((seeds, seed) => { - if (seeds.find(s => s.host === seed.host && s.port === seed.port)) { - return seeds; - } - - seeds.push(seed); - return seeds; - }, []); - } - - // Internal state - this.s = { - options: Object.assign({ metadata: makeClientMetadata(options) }, options), - // BSON instance - bson: - options.bson || - new BSON([ - BSON.Binary, - BSON.Code, - BSON.DBRef, - BSON.Decimal128, - BSON.Double, - BSON.Int32, - BSON.Long, - BSON.Map, - BSON.MaxKey, - BSON.MinKey, - BSON.ObjectId, - BSON.BSONRegExp, - BSON.Symbol, - BSON.Timestamp - ]), - // Factory overrides - Cursor: options.cursorFactory || CoreCursor, - // Logger instance - logger: Logger('Mongos', options), - // Seedlist - seedlist: seedlist, - // Ha interval - haInterval: options.haInterval ? options.haInterval : 10000, - // Disconnect handler - disconnectHandler: options.disconnectHandler, - // Server selection index - index: 0, - // Connect function options passed in - connectOptions: {}, - // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false, - // localThresholdMS - localThresholdMS: options.localThresholdMS || 15 - }; - - // Log info warning if the socketTimeout < haInterval as it will cause - // a lot of recycled connections to happen. 
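// The constructor above wires the seed list, haInterval and localThresholdMS into the
// legacy Mongos topology that this patch deletes. A minimal sketch of expressing the
// same settings against the unified topology that supersedes it; the connection string,
// host names and database name below are illustrative, not values taken from this patch.
const { MongoClient } = require('mongodb');

async function connectToShardedCluster() {
  const client = new MongoClient(
    // The seed list of mongos proxies goes straight into the URI.
    'mongodb://mongos1.example.com:27017,mongos2.example.com:27017/test',
    {
      useUnifiedTopology: true, // SDAM-based topology that replaces this class
      localThresholdMS: 15, // latency window for proxy selection, same default as above
      heartbeatFrequencyMS: 10000 // plays the role of the legacy haInterval
    }
  );

  await client.connect();
  return client;
}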
- if ( - this.s.logger.isWarn() && - this.s.options.socketTimeout !== 0 && - this.s.options.socketTimeout < this.s.haInterval - ) { - this.s.logger.warn( - f( - 'warning socketTimeout %s is less than haInterval %s. This might cause unnecessary server reconnections due to socket timeouts', - this.s.options.socketTimeout, - this.s.haInterval - ) - ); - } - - // Disconnected state - this.state = DISCONNECTED; - - // Current proxies we are connecting to - this.connectingProxies = []; - // Currently connected proxies - this.connectedProxies = []; - // Disconnected proxies - this.disconnectedProxies = []; - // Index of proxy to run operations against - this.index = 0; - // High availability timeout id - this.haTimeoutId = null; - // Last ismaster - this.ismaster = null; - - // Description of the Replicaset - this.topologyDescription = { - topologyType: 'Unknown', - servers: [] - }; - - // Highest clusterTime seen in responses from the current deployment - this.clusterTime = null; - - // Add event listener - EventEmitter.call(this); -}; - -inherits(Mongos, EventEmitter); -Object.assign(Mongos.prototype, SessionMixins); - -Object.defineProperty(Mongos.prototype, 'type', { - enumerable: true, - get: function() { - return 'mongos'; - } -}); - -Object.defineProperty(Mongos.prototype, 'parserType', { - enumerable: true, - get: function() { - return BSON.native ? 'c++' : 'js'; - } -}); - -Object.defineProperty(Mongos.prototype, 'logicalSessionTimeoutMinutes', { - enumerable: true, - get: function() { - if (!this.ismaster) return null; - return this.ismaster.logicalSessionTimeoutMinutes || null; - } -}); - -/** - * Emit event if it exists - * @method - */ -function emitSDAMEvent(self, event, description) { - if (self.listeners(event).length > 0) { - self.emit(event, description); - } -} - -const SERVER_EVENTS = ['serverDescriptionChanged', 'error', 'close', 'timeout', 'parseError']; -function destroyServer(server, options, callback) { - options = options || {}; - SERVER_EVENTS.forEach(event => server.removeAllListeners(event)); - server.destroy(options, callback); -} - -/** - * Initiate server connect - */ -Mongos.prototype.connect = function(options) { - var self = this; - // Add any connect level options to the internal state - this.s.connectOptions = options || {}; - - // Set connecting state - stateTransition(this, CONNECTING); - - // Create server instances - var servers = this.s.seedlist.map(function(x) { - const server = new Server( - Object.assign({}, self.s.options, x, options, { - reconnect: false, - monitoring: false, - parent: self - }) - ); - - relayEvents(server, self, ['serverDescriptionChanged']); - return server; - }); - - // Emit the topology opening event - emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id }); - - // Start all server connections - connectProxies(self, servers); -}; - -/** - * Authenticate the topology. 
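// emitSDAMEvent above only forwards monitoring events when at least one listener is
// registered, so idle topologies pay nothing for SDAM instrumentation. The same guard
// in isolation; the event name and payload here are illustrative.
const { EventEmitter } = require('events');

function emitIfListening(emitter, event, description) {
  if (emitter.listeners(event).length > 0) {
    emitter.emit(event, description);
  }
}

const exampleTopology = new EventEmitter();
exampleTopology.on('topologyOpening', desc => console.log('opening', desc));
emitIfListening(exampleTopology, 'topologyOpening', { topologyId: 1 });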
- * @method - * @param {MongoCredentials} credentials The credentials for authentication we are using - * @param {authResultCallback} callback A callback function - */ -Mongos.prototype.auth = function(credentials, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -function handleEvent(self) { - return function() { - if (self.state === DESTROYED || self.state === DESTROYING) { - return; - } - - // Move to list of disconnectedProxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, this); - // Emit the initial topology - emitTopologyDescriptionChanged(self); - // Emit the left signal - self.emit('left', 'mongos', this); - // Emit the sdam event - self.emit('serverClosed', { - topologyId: self.id, - address: this.name - }); - }; -} - -function handleInitialConnectEvent(self, event) { - return function() { - var _this = this; - - // Destroy the instance - if (self.state === DESTROYED) { - // Emit the initial topology - emitTopologyDescriptionChanged(self); - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, this); - return this.destroy(); - } - - // Check the type of server - if (event === 'connect') { - // Get last known ismaster - self.ismaster = _this.lastIsMaster(); - - // Is this not a proxy, remove t - if (self.ismaster.msg === 'isdbgrid') { - // Add to the connectd list - for (let i = 0; i < self.connectedProxies.length; i++) { - if (self.connectedProxies[i].name === _this.name) { - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _this); - // Emit the initial topology - emitTopologyDescriptionChanged(self); - _this.destroy(); - return self.emit('failed', _this); - } - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _this.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _this.on('error', handleEvent(self, 'error')); - _this.on('close', handleEvent(self, 'close')); - _this.on('timeout', handleEvent(self, 'timeout')); - _this.on('parseError', handleEvent(self, 'parseError')); - - // Move from connecting proxies connected - moveServerFrom(self.connectingProxies, self.connectedProxies, _this); - // Emit the joined event - self.emit('joined', 'mongos', _this); - } else { - // Print warning if we did not find a mongos proxy - if (self.s.logger.isWarn()) { - var message = 'expected mongos proxy, but found replicaset member mongod for server %s'; - // We have a standalone server - if (!self.ismaster.hosts) { - message = 'expected mongos proxy, but found standalone mongod for server %s'; - } - - self.s.logger.warn(f(message, _this.name)); - } - - // This is not a mongos proxy, destroy and remove it completely - _this.destroy(true); - removeProxyFrom(self.connectingProxies, _this); - // Emit the left event - self.emit('left', 'server', _this); - // Emit failed event - self.emit('failed', _this); - } - } else { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, this); - // Emit the left event - self.emit('left', 'mongos', this); - // Emit failed event - self.emit('failed', this); - } - - // Emit the initial topology - emitTopologyDescriptionChanged(self); - - // Trigger topologyMonitor - if (self.connectingProxies.length === 0) { - // Emit connected if we are connected - if (self.connectedProxies.length > 0 && self.state === CONNECTING) { - // Set the state to connected - stateTransition(self, CONNECTED); - // Emit the connect event - self.emit('connect', self); - self.emit('fullsetup', 
self); - self.emit('all', self); - } else if (self.disconnectedProxies.length === 0) { - // Print warning if we did not find a mongos proxy - if (self.s.logger.isWarn()) { - self.s.logger.warn( - f('no mongos proxies found in seed list, did you mean to connect to a replicaset') - ); - } - - // Emit the error that no proxies were found - return self.emit('error', new MongoError('no mongos proxies found in seed list')); - } - - // Topology monitor - topologyMonitor(self, { firstConnect: true }); - } - }; -} - -function connectProxies(self, servers) { - // Update connectingProxies - self.connectingProxies = self.connectingProxies.concat(servers); - - // Index used to interleaf the server connects, avoiding - // runtime issues on io constrained vm's - var timeoutInterval = 0; - - function connect(server, timeoutInterval) { - setTimeout(function() { - // Emit opening server event - self.emit('serverOpening', { - topologyId: self.id, - address: server.name - }); - - // Emit the initial topology - emitTopologyDescriptionChanged(self); - - // Add event handlers - server.once('close', handleInitialConnectEvent(self, 'close')); - server.once('timeout', handleInitialConnectEvent(self, 'timeout')); - server.once('parseError', handleInitialConnectEvent(self, 'parseError')); - server.once('error', handleInitialConnectEvent(self, 'error')); - server.once('connect', handleInitialConnectEvent(self, 'connect')); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Start connection - server.connect(self.s.connectOptions); - }, timeoutInterval); - } - - // Start all the servers - servers.forEach(server => connect(server, timeoutInterval++)); -} - -function pickProxy(self, session) { - // TODO: Destructure :) - const transaction = session && session.transaction; - - if (transaction && transaction.server) { - if (transaction.server.isConnected()) { - return transaction.server; - } else { - transaction.unpinServer(); - } - } - - // Get the currently connected Proxies - var connectedProxies = self.connectedProxies.slice(0); - - // Set lower bound - var lowerBoundLatency = Number.MAX_VALUE; - - // Determine the lower bound for the Proxies - for (var i = 0; i < connectedProxies.length; i++) { - if (connectedProxies[i].lastIsMasterMS < lowerBoundLatency) { - lowerBoundLatency = connectedProxies[i].lastIsMasterMS; - } - } - - // Filter out the possible servers - connectedProxies = connectedProxies.filter(function(server) { - if ( - server.lastIsMasterMS <= lowerBoundLatency + self.s.localThresholdMS && - server.isConnected() - ) { - return true; - } - }); - - let proxy; - - // We have no connectedProxies pick first of the connected ones - if (connectedProxies.length === 0) { - proxy = self.connectedProxies[0]; - } else { - // Get proxy - proxy = connectedProxies[self.index % connectedProxies.length]; - // Update the index - self.index = (self.index + 1) % connectedProxies.length; - } - - if (transaction && transaction.isActive && proxy && proxy.isConnected()) { - transaction.pinServer(proxy); - } - - // Return the proxy - return proxy; -} - -function moveServerFrom(from, to, proxy) { - for (var i = 0; i < from.length; i++) { - if (from[i].name === proxy.name) { - from.splice(i, 1); - } - } - - for (i = 0; i < to.length; i++) { - if (to[i].name === proxy.name) { - to.splice(i, 1); - } - } - - to.push(proxy); -} - -function removeProxyFrom(from, proxy) { - for (var i = 0; i < from.length; i++) { - if (from[i].name === proxy.name) { - 
from.splice(i, 1); - } - } -} - -function reconnectProxies(self, proxies, callback) { - // Count lefts - var count = proxies.length; - - // Handle events - var _handleEvent = function(self, event) { - return function() { - var _self = this; - count = count - 1; - - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - return this.destroy(); - } - - if (event === 'connect') { - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - return _self.destroy(); - } - - // Remove the handlers - for (var i = 0; i < handlers.length; i++) { - _self.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _self.on('error', handleEvent(self, 'error')); - _self.on('close', handleEvent(self, 'close')); - _self.on('timeout', handleEvent(self, 'timeout')); - _self.on('parseError', handleEvent(self, 'parseError')); - - // Move to the connected servers - moveServerFrom(self.connectingProxies, self.connectedProxies, _self); - // Emit topology Change - emitTopologyDescriptionChanged(self); - // Emit joined event - self.emit('joined', 'mongos', _self); - } else { - // Move from connectingProxies - moveServerFrom(self.connectingProxies, self.disconnectedProxies, _self); - this.destroy(); - } - - // Are we done finish up callback - if (count === 0) { - callback(); - } - }; - }; - - // No new servers - if (count === 0) { - return callback(); - } - - // Execute method - function execute(_server, i) { - setTimeout(function() { - // Destroyed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Create a new server instance - var server = new Server( - Object.assign({}, self.s.options, { - host: _server.name.split(':')[0], - port: parseInt(_server.name.split(':')[1], 10), - reconnect: false, - monitoring: false, - parent: self - }) - ); - - destroyServer(_server, { force: true }); - removeProxyFrom(self.disconnectedProxies, _server); - - // Relay the server description change - relayEvents(server, self, ['serverDescriptionChanged']); - - // Emit opening server event - self.emit('serverOpening', { - topologyId: server.s.topologyId !== -1 ? 
server.s.topologyId : self.id, - address: server.name - }); - - // Add temp handlers - server.once('connect', _handleEvent(self, 'connect')); - server.once('close', _handleEvent(self, 'close')); - server.once('timeout', _handleEvent(self, 'timeout')); - server.once('error', _handleEvent(self, 'error')); - server.once('parseError', _handleEvent(self, 'parseError')); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Connect to proxy - self.connectingProxies.push(server); - server.connect(self.s.connectOptions); - }, i); - } - - // Create new instances - for (var i = 0; i < proxies.length; i++) { - execute(proxies[i], i); - } -} - -function topologyMonitor(self, options) { - options = options || {}; - - // no need to set up the monitor if we're already closed - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Set momitoring timeout - self.haTimeoutId = setTimeout(function() { - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // If we have a primary and a disconnect handler, execute - // buffered operations - if (self.isConnected() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute(); - } - - // Get the connectingServers - var proxies = self.connectedProxies.slice(0); - // Get the count - var count = proxies.length; - - // If the count is zero schedule a new fast - function pingServer(_self, _server, cb) { - // Measure running time - var start = new Date().getTime(); - - // Emit the server heartbeat start - emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: _server.name }); - - // Execute ismaster - _server.command( - 'admin.$cmd', - { - ismaster: true - }, - { - monitoring: true, - socketTimeout: self.s.options.connectionTimeout || 2000 - }, - function(err, r) { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - // Move from connectingProxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); - _server.destroy(); - return cb(err, r); - } - - // Calculate latency - var latencyMS = new Date().getTime() - start; - - // We had an error, remove it from the state - if (err) { - // Emit the server heartbeat failure - emitSDAMEvent(self, 'serverHeartbeatFailed', { - durationMS: latencyMS, - failure: err, - connectionId: _server.name - }); - // Move from connected proxies to disconnected proxies - moveServerFrom(self.connectedProxies, self.disconnectedProxies, _server); - } else { - // Update the server ismaster - _server.ismaster = r.result; - _server.lastIsMasterMS = latencyMS; - - // Server heart beat event - emitSDAMEvent(self, 'serverHeartbeatSucceeded', { - durationMS: latencyMS, - reply: r.result, - connectionId: _server.name - }); - } - - cb(err, r); - } - ); - } - - // No proxies initiate monitor again - if (proxies.length === 0) { - // Emit close event if any listeners registered - if (self.listeners('close').length > 0 && self.state === CONNECTING) { - self.emit('error', new MongoError('no mongos proxy available')); - } else { - self.emit('close', self); - } - - // Attempt to connect to any unknown servers - return reconnectProxies(self, self.disconnectedProxies, function() { - if (self.state === DESTROYED || self.state === DESTROYING || self.state === UNREFERENCED) { - return; - } - - // Are we connected ? 
emit connect event - if (self.state === CONNECTING && options.firstConnect) { - self.emit('connect', self); - self.emit('fullsetup', self); - self.emit('all', self); - } else if (self.isConnected()) { - self.emit('reconnect', self); - } else if (!self.isConnected() && self.listeners('close').length > 0) { - self.emit('close', self); - } - - // Perform topology monitor - topologyMonitor(self); - }); - } - - // Ping all servers - for (var i = 0; i < proxies.length; i++) { - pingServer(self, proxies[i], function() { - count = count - 1; - - if (count === 0) { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - return; - } - - // Attempt to connect to any unknown servers - reconnectProxies(self, self.disconnectedProxies, function() { - if ( - self.state === DESTROYED || - self.state === DESTROYING || - self.state === UNREFERENCED - ) { - return; - } - - // Perform topology monitor - topologyMonitor(self); - }); - } - }); - } - }, self.s.haInterval); -} - -/** - * Returns the last known ismaster document for this server - * @method - * @return {object} - */ -Mongos.prototype.lastIsMaster = function() { - return this.ismaster; -}; - -/** - * Unref all connections belong to this server - * @method - */ -Mongos.prototype.unref = function() { - // Transition state - stateTransition(this, UNREFERENCED); - // Get all proxies - var proxies = this.connectedProxies.concat(this.connectingProxies); - proxies.forEach(function(x) { - x.unref(); - }); - - clearTimeout(this.haTimeoutId); -}; - -/** - * Destroy the server connection - * @param {boolean} [options.force=false] Force destroy the pool - * @method - */ -Mongos.prototype.destroy = function(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = options || {}; - - stateTransition(this, DESTROYING); - if (this.haTimeoutId) { - clearTimeout(this.haTimeoutId); - } - - const proxies = this.connectedProxies.concat(this.connectingProxies); - let serverCount = proxies.length; - const serverDestroyed = () => { - serverCount--; - if (serverCount > 0) { - return; - } - - emitTopologyDescriptionChanged(this); - emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id }); - stateTransition(this, DESTROYED); - if (typeof callback === 'function') { - callback(null, null); - } - }; - - if (serverCount === 0) { - serverDestroyed(); - return; - } - - // Destroy all connecting servers - proxies.forEach(server => { - // Emit the sdam event - this.emit('serverClosed', { - topologyId: this.id, - address: server.name - }); - - destroyServer(server, options, serverDestroyed); - moveServerFrom(this.connectedProxies, this.disconnectedProxies, server); - }); -}; - -/** - * Figure out if the server is connected - * @method - * @return {boolean} - */ -Mongos.prototype.isConnected = function() { - return this.connectedProxies.length > 0; -}; - -/** - * Figure out if the server instance was destroyed by calling destroy - * @method - * @return {boolean} - */ -Mongos.prototype.isDestroyed = function() { - return this.state === DESTROYED; -}; - -// -// Operations -// - -function executeWriteOperation(args, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options = options || {}; - - // TODO: once we drop Node 4, use destructuring either here or in arguments. 
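// pingServer above measures heartbeat latency by timing an `ismaster` round trip and
// reporting it through the serverHeartbeat* events. A condensed sketch of that flow;
// `server.command` stands in for the legacy core Server API, and the destroyed-state
// checks are omitted for brevity.
function measureHeartbeat(topology, server, callback) {
  const start = Date.now();
  topology.emit('serverHeartbeatStarted', { connectionId: server.name });

  server.command(
    'admin.$cmd',
    { ismaster: true },
    { monitoring: true, socketTimeout: 2000 },
    (err, result) => {
      const durationMS = Date.now() - start;
      if (err) {
        topology.emit('serverHeartbeatFailed', {
          durationMS,
          failure: err,
          connectionId: server.name
        });
        return callback(err);
      }

      topology.emit('serverHeartbeatSucceeded', {
        durationMS,
        reply: result.result,
        connectionId: server.name
      });
      callback(null, durationMS);
    }
  );
}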
- const self = args.self; - const op = args.op; - const ns = args.ns; - const ops = args.ops; - - // Pick a server - let server = pickProxy(self, options.session); - // No server found error out - if (!server) return callback(new MongoError('no mongos proxy available')); - - const willRetryWrite = - !args.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction(); - - const handler = (err, result) => { - if (!err) return callback(null, result); - if (!legacyIsRetryableWriteError(err, self) || !willRetryWrite) { - err = getMMAPError(err); - return callback(err); - } - - // Pick another server - server = pickProxy(self, options.session); - - // No server found error out with original error - if (!server) { - return callback(err); - } - - const newArgs = Object.assign({}, args, { retrying: true }); - return executeWriteOperation(newArgs, options, callback); - }; - - if (callback.operationId) { - handler.operationId = callback.operationId; - } - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - // rerun the operation - server[op](ns, ops, options, handler); -} - -/** - * Insert one or more documents - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of documents to insert - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.insert = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('insert', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); -}; - -/** - * Perform one or more update operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of updates - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
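// executeWriteOperation above implements retry-once semantics for retryable writes: the
// transaction number is assigned up front, the write is attempted, and a retryable
// failure triggers exactly one more attempt against a freshly selected server that
// reuses the same transaction number. A simplified sketch; the retryWrites option and
// wire-version checks are dropped, and selectServer, isRetryableError and runWrite are
// injected stand-ins rather than real driver APIs.
function writeWithOneRetry({ selectServer, isRetryableError }, session, runWrite, callback) {
  const willRetryWrite = session && !session.inTransaction();

  // Assigned once; the retry reuses it so the server can deduplicate the replayed write.
  if (willRetryWrite) {
    session.incrementTransactionNumber();
  }

  const attempt = isRetry => {
    runWrite(selectServer(), (err, result) => {
      if (!err) return callback(null, result);
      if (isRetry || !willRetryWrite || !isRetryableError(err)) return callback(err);

      // Retry exactly once, against a freshly selected server.
      attempt(true);
    });
  };

  attempt(false);
}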
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.update = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('update', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); -}; - -/** - * Perform one or more remove operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of removes - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.remove = function(ns, ops, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - // Not connected but we have a disconnecthandler - if (!this.isConnected() && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('remove', ns, ops, options, callback); - } - - // No mongos proxy available - if (!this.isConnected()) { - return callback(new MongoError('no mongos proxy available')); - } - - // Execute write operation - executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); -}; - -const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; - -function isWriteCommand(command) { - return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); -} - -/** - * Execute a command - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command hash - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Connection} [options.connection] Specify connection object to execute command against - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
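// insert, update and remove above all share the same buffering scheme: when the topology
// is not connected but a disconnect handler was configured, the operation is parked in
// that store and replayed once a proxy comes back (topologyMonitor calls
// disconnectHandler.execute()). A minimal stand-alone store with the same shape might
// look like this; it is a sketch, not the driver's actual disconnect handler.
class OperationBuffer {
  constructor() {
    this.queued = [];
  }

  // Mirrors disconnectHandler.add(op, ns, ops, options, callback) in shape.
  add(op, ns, ops, options, callback) {
    this.queued.push({ op, ns, ops, options, callback });
  }

  // Mirrors disconnectHandler.execute(): drain the queue and replay against a live topology.
  execute(topology) {
    const pending = this.queued.splice(0);
    pending.forEach(item => topology[item.op](item.ns, item.ops, item.options, item.callback));
  }
}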
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Mongos.prototype.command = function(ns, cmd, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - var self = this; - - // Pick a proxy - var server = pickProxy(self, options.session); - - // Topology is not connected, save the call in the provided store to be - // Executed at some point when the handler deems it's reconnected - if ((server == null || !server.isConnected()) && this.s.disconnectHandler != null) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } - - // No server returned we had an error - if (server == null) { - return callback(new MongoError('no mongos proxy available')); - } - - // Cloned options - var clonedOptions = cloneOptions(options); - clonedOptions.topology = self; - - const willRetryWrite = - !options.retrying && - options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction() && - isWriteCommand(cmd); - - const cb = (err, result) => { - if (!err) return callback(null, result); - if (!legacyIsRetryableWriteError(err, self)) { - return callback(err); - } - - if (willRetryWrite) { - const newOptions = Object.assign({}, clonedOptions, { retrying: true }); - return this.command(ns, cmd, newOptions, callback); - } - - return callback(err); - }; - - // increment and assign txnNumber - if (willRetryWrite) { - clonedOptions.session.incrementTransactionNumber(); - clonedOptions.willRetryWrite = willRetryWrite; - } - - // Execute the command - server.command(ns, cmd, clonedOptions, cb); -}; - -/** - * Get a new cursor - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId - * @param {object} [options] Options for the cursor - * @param {object} [options.batchSize=0] Batchsize for the operation - * @param {array} [options.documents=[]] Initial documents list for cursor - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
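// The command path above only arms the retry machinery for write commands, using the
// RETRYABLE_WRITE_OPERATIONS lookup defined earlier. Restated with a small usage example:
const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete'];

function isWriteCommand(command) {
  return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]);
}

console.log(isWriteCommand({ insert: 'users', documents: [] })); // true
console.log(isWriteCommand({ find: 'users', filter: {} })); // false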
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {object} [options.topology] The internal topology of the created cursor - * @returns {Cursor} - */ -Mongos.prototype.cursor = function(ns, cmd, options) { - options = options || {}; - const topology = options.topology || this; - - // Set up final cursor type - var FinalCursor = options.cursorFactory || this.s.Cursor; - - // Return the cursor - return new FinalCursor(topology, ns, cmd, options); -}; - -/** - * Selects a server - * - * @method - * @param {function} selector Unused - * @param {ReadPreference} [options.readPreference] Unused - * @param {ClientSession} [options.session] Specify a session if it is being used - * @param {function} callback - */ -Mongos.prototype.selectServer = function(selector, options, callback) { - if (typeof selector === 'function' && typeof callback === 'undefined') - (callback = selector), (selector = undefined), (options = {}); - if (typeof options === 'function') - (callback = options), (options = selector), (selector = undefined); - options = options || {}; - - const server = pickProxy(this, options.session); - if (server == null) { - callback(new MongoError('server selection failed')); - return; - } - - if (this.s.debug) this.emit('pickedServer', null, server); - callback(null, server); -}; - -/** - * All raw connections - * @method - * @return {Connection[]} - */ -Mongos.prototype.connections = function() { - var connections = []; - - for (var i = 0; i < this.connectedProxies.length; i++) { - connections = connections.concat(this.connectedProxies[i].connections()); - } - - return connections; -}; - -function emitTopologyDescriptionChanged(self) { - if (self.listeners('topologyDescriptionChanged').length > 0) { - var topology = 'Unknown'; - if (self.connectedProxies.length > 0) { - topology = 'Sharded'; - } - - // Generate description - var description = { - topologyType: topology, - servers: [] - }; - - // All proxies - var proxies = self.disconnectedProxies.concat(self.connectingProxies); - - // Add all the disconnected proxies - description.servers = description.servers.concat( - proxies.map(function(x) { - var description = x.getDescription(); - description.type = 'Unknown'; - return description; - }) - ); - - // Add all the connected proxies - description.servers = description.servers.concat( - self.connectedProxies.map(function(x) { - var description = x.getDescription(); - description.type = 'Mongos'; - return description; - }) - ); - - // Get the diff - var diffResult = diff(self.topologyDescription, description); - - // Create the result - var result = { - topologyId: self.id, - previousDescription: self.topologyDescription, - newDescription: description, - diff: diffResult - }; - - // Emit the topologyDescription change - if (diffResult.servers.length > 0) { - self.emit('topologyDescriptionChanged', result); - } - - // Set the new description - self.topologyDescription = description; - } -} - -/** - * A mongos connect event, used to verify that the connection is up and running - * - * @event Mongos#connect - * @type {Mongos} - */ - -/** - * A mongos reconnect event, used to verify that the mongos topology has reconnected - * - * @event Mongos#reconnect - * @type {Mongos} - */ - -/** - * A mongos fullsetup event, used to signal that all topology members have been contacted. - * - * @event Mongos#fullsetup - * @type {Mongos} - */ - -/** - * A mongos all event, used to signal that all topology members have been contacted. 
- * - * @event Mongos#all - * @type {Mongos} - */ - -/** - * A server member left the mongos list - * - * @event Mongos#left - * @type {Mongos} - * @param {string} type The type of member that left (mongos) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the mongos list - * - * @event Mongos#joined - * @type {Mongos} - * @param {string} type The type of member that left (mongos) - * @param {Server} server The server object that joined - */ - -/** - * A server opening SDAM monitoring event - * - * @event Mongos#serverOpening - * @type {object} - */ - -/** - * A server closed SDAM monitoring event - * - * @event Mongos#serverClosed - * @type {object} - */ - -/** - * A server description SDAM change monitoring event - * - * @event Mongos#serverDescriptionChanged - * @type {object} - */ - -/** - * A topology open SDAM event - * - * @event Mongos#topologyOpening - * @type {object} - */ - -/** - * A topology closed SDAM event - * - * @event Mongos#topologyClosed - * @type {object} - */ - -/** - * A topology structure SDAM change event - * - * @event Mongos#topologyDescriptionChanged - * @type {object} - */ - -/** - * A topology serverHeartbeatStarted SDAM event - * - * @event Mongos#serverHeartbeatStarted - * @type {object} - */ - -/** - * A topology serverHeartbeatFailed SDAM event - * - * @event Mongos#serverHeartbeatFailed - * @type {object} - */ - -/** - * A topology serverHeartbeatSucceeded SDAM change event - * - * @event Mongos#serverHeartbeatSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Mongos#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Mongos#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Mongos#commandFailed - * @type {object} - */ - -module.exports = Mongos; diff --git a/lib/core/topologies/replset.js b/lib/core/topologies/replset.js deleted file mode 100644 index 586e45de53..0000000000 --- a/lib/core/topologies/replset.js +++ /dev/null @@ -1,1559 +0,0 @@ -'use strict'; - -const inherits = require('util').inherits; -const f = require('util').format; -const EventEmitter = require('events').EventEmitter; -const ReadPreference = require('./read_preference'); -const CoreCursor = require('../cursor').CoreCursor; -const retrieveBSON = require('../connection/utils').retrieveBSON; -const Logger = require('../connection/logger'); -const MongoError = require('../error').MongoError; -const Server = require('./server'); -const ReplSetState = require('./replset_state'); -const Timeout = require('./shared').Timeout; -const Interval = require('./shared').Interval; -const SessionMixins = require('./shared').SessionMixins; -const isRetryableWritesSupported = require('./shared').isRetryableWritesSupported; -const relayEvents = require('../utils').relayEvents; -const BSON = retrieveBSON(); -const calculateDurationInMs = require('../utils').calculateDurationInMs; -const getMMAPError = require('./shared').getMMAPError; -const makeClientMetadata = require('../utils').makeClientMetadata; -const legacyIsRetryableWriteError = require('./shared').legacyIsRetryableWriteError; - -// -// States -var DISCONNECTED = 'disconnected'; -var CONNECTING = 'connecting'; -var CONNECTED = 'connected'; -var UNREFERENCED = 'unreferenced'; -var DESTROYED = 'destroyed'; - -function 
stateTransition(self, newState) { - var legalTransitions = { - disconnected: [CONNECTING, DESTROYED, DISCONNECTED], - connecting: [CONNECTING, DESTROYED, CONNECTED, DISCONNECTED], - connected: [CONNECTED, DISCONNECTED, DESTROYED, UNREFERENCED], - unreferenced: [UNREFERENCED, DESTROYED], - destroyed: [DESTROYED] - }; - - // Get current state - var legalStates = legalTransitions[self.state]; - if (legalStates && legalStates.indexOf(newState) !== -1) { - self.state = newState; - } else { - self.s.logger.error( - f( - 'Pool with id [%s] failed attempted illegal state transition from [%s] to [%s] only following state allowed [%s]', - self.id, - self.state, - newState, - legalStates - ) - ); - } -} - -// -// ReplSet instance id -var id = 1; -var handlers = ['connect', 'close', 'error', 'timeout', 'parseError']; - -/** - * Creates a new Replset instance - * @class - * @param {array} seedlist A list of seeds for the replicaset - * @param {boolean} options.setName The Replicaset set name - * @param {boolean} [options.secondaryOnlyConnectionAllowed=false] Allow connection to a secondary only replicaset - * @param {number} [options.haInterval=10000] The High availability period for replicaset inquiry - * @param {boolean} [options.emitError=false] Server will emit errors events - * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=0] Initial delay before TCP keep alive enabled - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=10000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=0] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {number} [options.pingInterval=5000] Ping interval to check the response time to the different servers - * @param {number} [options.localThresholdMS=15] Cutoff latency point in MS for Replicaset member selection - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. 
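// Illustrative sketch (not part of the patch) of the legal-state-transition check that the
// stateTransition helper above performs before changing topology state. The function name
// isLegalTransition is hypothetical; the transition table mirrors the one in the code
// being removed.
const LEGAL_TRANSITIONS = {
  disconnected: ['connecting', 'destroyed', 'disconnected'],
  connecting: ['connecting', 'destroyed', 'connected', 'disconnected'],
  connected: ['connected', 'disconnected', 'destroyed', 'unreferenced'],
  unreferenced: ['unreferenced', 'destroyed'],
  destroyed: ['destroyed']
};

function isLegalTransition(currentState, newState) {
  const allowed = LEGAL_TRANSITIONS[currentState];
  return Array.isArray(allowed) && allowed.indexOf(newState) !== -1;
}

console.log(isLegalTransition('connecting', 'connected')); // true
console.log(isLegalTransition('destroyed', 'connecting')); // false (destroyed is terminal)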
- * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {ReplSet} A cursor instance - * @fires ReplSet#connect - * @fires ReplSet#ha - * @fires ReplSet#joined - * @fires ReplSet#left - * @fires ReplSet#failed - * @fires ReplSet#fullsetup - * @fires ReplSet#all - * @fires ReplSet#error - * @fires ReplSet#serverHeartbeatStarted - * @fires ReplSet#serverHeartbeatSucceeded - * @fires ReplSet#serverHeartbeatFailed - * @fires ReplSet#topologyOpening - * @fires ReplSet#topologyClosed - * @fires ReplSet#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). - */ -var ReplSet = function(seedlist, options) { - var self = this; - options = options || {}; - - // Validate seedlist - if (!Array.isArray(seedlist)) throw new MongoError('seedlist must be an array'); - // Validate list - if (seedlist.length === 0) throw new MongoError('seedlist must contain at least one entry'); - // Validate entries - seedlist.forEach(function(e) { - if (typeof e.host !== 'string' || typeof e.port !== 'number') - throw new MongoError('seedlist entry must contain a host and port'); - }); - - // Add event listener - EventEmitter.call(this); - - // Get replSet Id - this.id = id++; - - // Get the localThresholdMS - var localThresholdMS = options.localThresholdMS || 15; - // Backward compatibility - if (options.acceptableLatency) localThresholdMS = options.acceptableLatency; - - // Create a logger - var logger = Logger('ReplSet', options); - - // Internal state - this.s = { - options: Object.assign({ metadata: makeClientMetadata(options) }, options), - // BSON instance - bson: - options.bson || - new BSON([ - BSON.Binary, - BSON.Code, - BSON.DBRef, - BSON.Decimal128, - BSON.Double, - BSON.Int32, - BSON.Long, - BSON.Map, - BSON.MaxKey, - BSON.MinKey, - BSON.ObjectId, - BSON.BSONRegExp, - BSON.Symbol, - BSON.Timestamp - ]), - // Factory overrides - Cursor: options.cursorFactory || CoreCursor, - // Logger instance - logger: logger, - // Seedlist - seedlist: seedlist, - // Replicaset state - replicaSetState: new ReplSetState({ - id: this.id, - setName: options.setName, - acceptableLatency: localThresholdMS, - heartbeatFrequencyMS: options.haInterval ? options.haInterval : 10000, - logger: logger - }), - // Current servers we are connecting to - connectingServers: [], - // Ha interval - haInterval: options.haInterval ? options.haInterval : 10000, - // Minimum heartbeat frequency used if we detect a server close - minHeartbeatFrequencyMS: 500, - // Disconnect handler - disconnectHandler: options.disconnectHandler, - // Server selection index - index: 0, - // Connect function options passed in - connectOptions: {}, - // Are we running in debug mode - debug: typeof options.debug === 'boolean' ? options.debug : false - }; - - // Add handler for topology change - this.s.replicaSetState.on('topologyDescriptionChanged', function(r) { - self.emit('topologyDescriptionChanged', r); - }); - - // Log info warning if the socketTimeout < haInterval as it will cause - // a lot of recycled connections to happen. - if ( - this.s.logger.isWarn() && - this.s.options.socketTimeout !== 0 && - this.s.options.socketTimeout < this.s.haInterval - ) { - this.s.logger.warn( - f( - 'warning socketTimeout %s is less than haInterval %s. 
This might cause unnecessary server reconnections due to socket timeouts', - this.s.options.socketTimeout, - this.s.haInterval - ) - ); - } - - // Add forwarding of events from state handler - var types = ['joined', 'left']; - types.forEach(function(x) { - self.s.replicaSetState.on(x, function(t, s) { - self.emit(x, t, s); - }); - }); - - // Connect stat - this.initialConnectState = { - connect: false, - fullsetup: false, - all: false - }; - - // Disconnected state - this.state = DISCONNECTED; - this.haTimeoutId = null; - // Last ismaster - this.ismaster = null; - // Contains the intervalId - this.intervalIds = []; - - // Highest clusterTime seen in responses from the current deployment - this.clusterTime = null; -}; - -inherits(ReplSet, EventEmitter); -Object.assign(ReplSet.prototype, SessionMixins); - -Object.defineProperty(ReplSet.prototype, 'type', { - enumerable: true, - get: function() { - return 'replset'; - } -}); - -Object.defineProperty(ReplSet.prototype, 'parserType', { - enumerable: true, - get: function() { - return BSON.native ? 'c++' : 'js'; - } -}); - -Object.defineProperty(ReplSet.prototype, 'logicalSessionTimeoutMinutes', { - enumerable: true, - get: function() { - return this.s.replicaSetState.logicalSessionTimeoutMinutes || null; - } -}); - -function rexecuteOperations(self) { - // If we have a primary and a disconnect handler, execute - // buffered operations - if (self.s.replicaSetState.hasPrimaryAndSecondary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute(); - } else if (self.s.replicaSetState.hasPrimary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute({ executePrimary: true }); - } else if (self.s.replicaSetState.hasSecondary() && self.s.disconnectHandler) { - self.s.disconnectHandler.execute({ executeSecondary: true }); - } -} - -function connectNewServers(self, servers, callback) { - // No new servers - if (servers.length === 0) { - return callback(); - } - - // Count lefts - var count = servers.length; - var error = null; - - function done() { - count = count - 1; - if (count === 0) { - callback(error); - } - } - - // Handle events - var _handleEvent = function(self, event) { - return function(err) { - var _self = this; - - // Destroyed - if (self.state === DESTROYED || self.state === UNREFERENCED) { - this.destroy({ force: true }); - return done(); - } - - if (event === 'connect') { - // Update the state - var result = self.s.replicaSetState.update(_self); - // Update the state with the new server - if (result) { - // Primary lastIsMaster store it - if (_self.lastIsMaster() && _self.lastIsMaster().ismaster) { - self.ismaster = _self.lastIsMaster(); - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _self.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _self.on('error', handleEvent(self, 'error')); - _self.on('close', handleEvent(self, 'close')); - _self.on('timeout', handleEvent(self, 'timeout')); - _self.on('parseError', handleEvent(self, 'parseError')); - - // Enalbe the monitoring of the new server - monitorServer(_self.lastIsMaster().me, self, {}); - - // Rexecute any stalled operation - rexecuteOperations(self); - } else { - _self.destroy({ force: true }); - } - } else if (event === 'error') { - error = err; - } - - // Rexecute any stalled operation - rexecuteOperations(self); - done(); - }; - }; - - // Execute method - function execute(_server, i) { - setTimeout(function() { - // Destroyed - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - } 
- - // remove existing connecting server if it's failed to connect, otherwise - // wait for that server to connect - const existingServerIdx = self.s.connectingServers.findIndex(s => s.name === _server); - if (existingServerIdx >= 0) { - const connectingServer = self.s.connectingServers[existingServerIdx]; - connectingServer.destroy({ force: true }); - - self.s.connectingServers.splice(existingServerIdx, 1); - return done(); - } - - // Create a new server instance - var server = new Server( - Object.assign({}, self.s.options, { - host: _server.split(':')[0], - port: parseInt(_server.split(':')[1], 10), - reconnect: false, - monitoring: false, - parent: self - }) - ); - - // Add temp handlers - server.once('connect', _handleEvent(self, 'connect')); - server.once('close', _handleEvent(self, 'close')); - server.once('timeout', _handleEvent(self, 'timeout')); - server.once('error', _handleEvent(self, 'error')); - server.once('parseError', _handleEvent(self, 'parseError')); - - // SDAM Monitoring events - server.on('serverOpening', e => self.emit('serverOpening', e)); - server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); - server.on('serverClosed', e => self.emit('serverClosed', e)); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - self.s.connectingServers.push(server); - server.connect(self.s.connectOptions); - }, i); - } - - // Create new instances - for (var i = 0; i < servers.length; i++) { - execute(servers[i], i); - } -} - -// Ping the server -var pingServer = function(self, server, cb) { - // Measure running time - var start = new Date().getTime(); - - // Emit the server heartbeat start - emitSDAMEvent(self, 'serverHeartbeatStarted', { connectionId: server.name }); - - // Execute ismaster - // Set the socketTimeout for a monitoring message to a low number - // Ensuring ismaster calls are timed out quickly - server.command( - 'admin.$cmd', - { - ismaster: true - }, - { - monitoring: true, - socketTimeout: self.s.options.connectionTimeout || 2000 - }, - function(err, r) { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - server.destroy({ force: true }); - return cb(err, r); - } - - // Calculate latency - var latencyMS = new Date().getTime() - start; - - // Set the last updatedTime - var hrtime = process.hrtime(); - server.lastUpdateTime = (hrtime[0] * 1e9 + hrtime[1]) / 1e6; - - // We had an error, remove it from the state - if (err) { - // Emit the server heartbeat failure - emitSDAMEvent(self, 'serverHeartbeatFailed', { - durationMS: latencyMS, - failure: err, - connectionId: server.name - }); - - // Remove server from the state - self.s.replicaSetState.remove(server); - } else { - // Update the server ismaster - server.ismaster = r.result; - - // Check if we have a lastWriteDate convert it to MS - // and store on the server instance for later use - if (server.ismaster.lastWrite && server.ismaster.lastWrite.lastWriteDate) { - server.lastWriteDate = server.ismaster.lastWrite.lastWriteDate.getTime(); - } - - // Do we have a brand new server - if (server.lastIsMasterMS === -1) { - server.lastIsMasterMS = latencyMS; - } else if (server.lastIsMasterMS) { - // After the first measurement, average RTT MUST be computed using an - // exponentially-weighted moving average formula, with a weighting factor (alpha) of 0.2. 
- // If the prior average is denoted old_rtt, then the new average (new_rtt) is - // computed from a new RTT measurement (x) using the following formula: - // alpha = 0.2 - // new_rtt = alpha * x + (1 - alpha) * old_rtt - server.lastIsMasterMS = 0.2 * latencyMS + (1 - 0.2) * server.lastIsMasterMS; - } - - if (self.s.replicaSetState.update(server)) { - // Primary lastIsMaster store it - if (server.lastIsMaster() && server.lastIsMaster().ismaster) { - self.ismaster = server.lastIsMaster(); - } - } - - // Server heart beat event - emitSDAMEvent(self, 'serverHeartbeatSucceeded', { - durationMS: latencyMS, - reply: r.result, - connectionId: server.name - }); - } - - // Calculate the staleness for this server - self.s.replicaSetState.updateServerMaxStaleness(server, self.s.haInterval); - - // Callback - cb(err, r); - } - ); -}; - -// Each server is monitored in parallel in their own timeout loop -var monitorServer = function(host, self, options) { - // If this is not the initial scan - // Is this server already being monitoried, then skip monitoring - if (!options.haInterval) { - for (var i = 0; i < self.intervalIds.length; i++) { - if (self.intervalIds[i].__host === host) { - return; - } - } - } - - // Get the haInterval - var _process = options.haInterval ? Timeout : Interval; - var _haInterval = options.haInterval ? options.haInterval : self.s.haInterval; - - // Create the interval - var intervalId = new _process(function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - // clearInterval(intervalId); - intervalId.stop(); - return; - } - - // Do we already have server connection available for this host - var _server = self.s.replicaSetState.get(host); - - // Check if we have a known server connection and reuse - if (_server) { - // Ping the server - return pingServer(self, _server, function(err) { - if (err) { - // NOTE: should something happen here? 
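// Illustrative sketch (not part of the patch) of the exponentially-weighted moving average
// that the pingServer helper above uses for round-trip time. The updateRttMs name is
// hypothetical; the alpha = 0.2 weighting and the new_rtt formula come from the comment in
// the code being removed, which marks a brand-new server with lastIsMasterMS === -1.
function updateRttMs(previousRttMs, measuredRttMs) {
  const alpha = 0.2;
  // First measurement: use it directly.
  if (previousRttMs == null || previousRttMs < 0) return measuredRttMs;
  return alpha * measuredRttMs + (1 - alpha) * previousRttMs;
}

console.log(updateRttMs(-1, 40)); // 40  (first sample)
console.log(updateRttMs(40, 90)); // 50  (0.2 * 90 + 0.8 * 40)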
- return; - } - - if (self.state === DESTROYED || self.state === UNREFERENCED) { - intervalId.stop(); - return; - } - - // Filter out all called intervaliIds - self.intervalIds = self.intervalIds.filter(function(intervalId) { - return intervalId.isRunning(); - }); - - // Initial sweep - if (_process === Timeout) { - if ( - self.state === CONNECTING && - ((self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed) || - self.s.replicaSetState.hasPrimary()) - ) { - self.state = CONNECTED; - - // Emit connected sign - process.nextTick(function() { - self.emit('connect', self); - }); - - // Start topology interval check - topologyMonitor(self, {}); - } - } else { - if ( - self.state === DISCONNECTED && - ((self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed) || - self.s.replicaSetState.hasPrimary()) - ) { - self.state = CONNECTED; - - // Rexecute any stalled operation - rexecuteOperations(self); - - // Emit connected sign - process.nextTick(function() { - self.emit('reconnect', self); - }); - } - } - - if ( - self.initialConnectState.connect && - !self.initialConnectState.fullsetup && - self.s.replicaSetState.hasPrimaryAndSecondary() - ) { - // Set initial connect state - self.initialConnectState.fullsetup = true; - self.initialConnectState.all = true; - - process.nextTick(function() { - self.emit('fullsetup', self); - self.emit('all', self); - }); - } - }); - } - }, _haInterval); - - // Start the interval - intervalId.start(); - // Add the intervalId host name - intervalId.__host = host; - // Add the intervalId to our list of intervalIds - self.intervalIds.push(intervalId); -}; - -function topologyMonitor(self, options) { - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - options = options || {}; - - // Get the servers - var servers = Object.keys(self.s.replicaSetState.set); - - // Get the haInterval - var _process = options.haInterval ? Timeout : Interval; - var _haInterval = options.haInterval ? options.haInterval : self.s.haInterval; - - if (_process === Timeout) { - return connectNewServers(self, self.s.replicaSetState.unknownServers, function(err) { - // Don't emit errors if the connection was already - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - } - - if (!self.s.replicaSetState.hasPrimary() && !self.s.options.secondaryOnlyConnectionAllowed) { - if (err) { - return self.emit('error', err); - } - - self.emit( - 'error', - new MongoError('no primary found in replicaset or invalid replica set name') - ); - return self.destroy({ force: true }); - } else if ( - !self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed - ) { - if (err) { - return self.emit('error', err); - } - - self.emit( - 'error', - new MongoError('no secondary found in replicaset or invalid replica set name') - ); - return self.destroy({ force: true }); - } - - for (var i = 0; i < servers.length; i++) { - monitorServer(servers[i], self, options); - } - }); - } else { - for (var i = 0; i < servers.length; i++) { - monitorServer(servers[i], self, options); - } - } - - // Run the reconnect process - function executeReconnect(self) { - return function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return; - } - - connectNewServers(self, self.s.replicaSetState.unknownServers, function() { - var monitoringFrequencey = self.s.replicaSetState.hasPrimary() - ? 
_haInterval - : self.s.minHeartbeatFrequencyMS; - - // Create a timeout - self.intervalIds.push(new Timeout(executeReconnect(self), monitoringFrequencey).start()); - }); - }; - } - - // Decide what kind of interval to use - var intervalTime = !self.s.replicaSetState.hasPrimary() - ? self.s.minHeartbeatFrequencyMS - : _haInterval; - - self.intervalIds.push(new Timeout(executeReconnect(self), intervalTime).start()); -} - -function addServerToList(list, server) { - for (var i = 0; i < list.length; i++) { - if (list[i].name.toLowerCase() === server.name.toLowerCase()) return true; - } - - list.push(server); -} - -function handleEvent(self, event) { - return function() { - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f('handleEvent %s from server %s in replset with id %s', event, this.name, self.id) - ); - } - - // Remove from the replicaset state - self.s.replicaSetState.remove(this); - - // Are we in a destroyed state return - if (self.state === DESTROYED || self.state === UNREFERENCED) return; - - // If no primary and secondary available - if ( - !self.s.replicaSetState.hasPrimary() && - !self.s.replicaSetState.hasSecondary() && - self.s.options.secondaryOnlyConnectionAllowed - ) { - stateTransition(self, DISCONNECTED); - } else if (!self.s.replicaSetState.hasPrimary()) { - stateTransition(self, DISCONNECTED); - } - - addServerToList(self.s.connectingServers, this); - }; -} - -function shouldTriggerConnect(self) { - const isConnecting = self.state === CONNECTING; - const hasPrimary = self.s.replicaSetState.hasPrimary(); - const hasSecondary = self.s.replicaSetState.hasSecondary(); - const secondaryOnlyConnectionAllowed = self.s.options.secondaryOnlyConnectionAllowed; - const readPreferenceSecondary = - self.s.connectOptions.readPreference && - self.s.connectOptions.readPreference.equals(ReadPreference.secondary); - - return ( - (isConnecting && - ((readPreferenceSecondary && hasSecondary) || (!readPreferenceSecondary && hasPrimary))) || - (hasSecondary && secondaryOnlyConnectionAllowed) - ); -} - -function handleInitialConnectEvent(self, event) { - return function() { - var _this = this; - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f( - 'handleInitialConnectEvent %s from server %s in replset with id %s', - event, - this.name, - self.id - ) - ); - } - - // Destroy the instance - if (self.state === DESTROYED || self.state === UNREFERENCED) { - return this.destroy({ force: true }); - } - - // Check the type of server - if (event === 'connect') { - // Update the state - var result = self.s.replicaSetState.update(_this); - if (result === true) { - // Primary lastIsMaster store it - if (_this.lastIsMaster() && _this.lastIsMaster().ismaster) { - self.ismaster = _this.lastIsMaster(); - } - - // Debug log - if (self.s.logger.isDebug()) { - self.s.logger.debug( - f( - 'handleInitialConnectEvent %s from server %s in replset with id %s has state [%s]', - event, - _this.name, - self.id, - JSON.stringify(self.s.replicaSetState.set) - ) - ); - } - - // Remove the handlers - for (let i = 0; i < handlers.length; i++) { - _this.removeAllListeners(handlers[i]); - } - - // Add stable state handlers - _this.on('error', handleEvent(self, 'error')); - _this.on('close', handleEvent(self, 'close')); - _this.on('timeout', handleEvent(self, 'timeout')); - _this.on('parseError', handleEvent(self, 'parseError')); - - // Do we have a primary or primaryAndSecondary - if (shouldTriggerConnect(self)) 
{ - // We are connected - self.state = CONNECTED; - - // Set initial connect state - self.initialConnectState.connect = true; - // Emit connect event - process.nextTick(function() { - self.emit('connect', self); - }); - - topologyMonitor(self, {}); - } - } else if (result instanceof MongoError) { - _this.destroy({ force: true }); - self.destroy({ force: true }); - return self.emit('error', result); - } else { - _this.destroy({ force: true }); - } - } else { - // Emit failure to connect - self.emit('failed', this); - - addServerToList(self.s.connectingServers, this); - // Remove from the state - self.s.replicaSetState.remove(this); - } - - if ( - self.initialConnectState.connect && - !self.initialConnectState.fullsetup && - self.s.replicaSetState.hasPrimaryAndSecondary() - ) { - // Set initial connect state - self.initialConnectState.fullsetup = true; - self.initialConnectState.all = true; - - process.nextTick(function() { - self.emit('fullsetup', self); - self.emit('all', self); - }); - } - - // Remove from the list from connectingServers - for (var i = 0; i < self.s.connectingServers.length; i++) { - if (self.s.connectingServers[i].equals(this)) { - self.s.connectingServers.splice(i, 1); - } - } - - // Trigger topologyMonitor - if (self.s.connectingServers.length === 0 && self.state === CONNECTING) { - topologyMonitor(self, { haInterval: 1 }); - } - }; -} - -function connectServers(self, servers) { - // Update connectingServers - self.s.connectingServers = self.s.connectingServers.concat(servers); - - // Index used to interleaf the server connects, avoiding - // runtime issues on io constrained vm's - var timeoutInterval = 0; - - function connect(server, timeoutInterval) { - setTimeout(function() { - // Add the server to the state - if (self.s.replicaSetState.update(server)) { - // Primary lastIsMaster store it - if (server.lastIsMaster() && server.lastIsMaster().ismaster) { - self.ismaster = server.lastIsMaster(); - } - } - - // Add event handlers - server.once('close', handleInitialConnectEvent(self, 'close')); - server.once('timeout', handleInitialConnectEvent(self, 'timeout')); - server.once('parseError', handleInitialConnectEvent(self, 'parseError')); - server.once('error', handleInitialConnectEvent(self, 'error')); - server.once('connect', handleInitialConnectEvent(self, 'connect')); - - // SDAM Monitoring events - server.on('serverOpening', e => self.emit('serverOpening', e)); - server.on('serverDescriptionChanged', e => self.emit('serverDescriptionChanged', e)); - server.on('serverClosed', e => self.emit('serverClosed', e)); - - // Command Monitoring events - relayEvents(server, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Start connection - server.connect(self.s.connectOptions); - }, timeoutInterval); - } - - // Start all the servers - while (servers.length > 0) { - connect(servers.shift(), timeoutInterval++); - } -} - -/** - * Emit event if it exists - * @method - */ -function emitSDAMEvent(self, event, description) { - if (self.listeners(event).length > 0) { - self.emit(event, description); - } -} - -/** - * Initiate server connect - */ -ReplSet.prototype.connect = function(options) { - var self = this; - // Add any connect level options to the internal state - this.s.connectOptions = options || {}; - - // Set connecting state - stateTransition(this, CONNECTING); - - // Create server instances - var servers = this.s.seedlist.map(function(x) { - return new Server( - Object.assign({}, self.s.options, x, options, { - reconnect: false, - monitoring: false, 
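// Illustrative sketch (not part of the patch) of the rule implemented by the
// shouldTriggerConnect helper above: during the initial connect, a secondary read
// preference (or secondaryOnlyConnectionAllowed) can be satisfied by a secondary,
// otherwise a primary is required. connectEventShouldFire and the flags object are
// hypothetical stand-ins for the topology state queried in the code being removed.
function connectEventShouldFire(state) {
  const wantsSecondary = state.readPreferenceIsSecondary;
  return (
    (state.isConnecting &&
      ((wantsSecondary && state.hasSecondary) || (!wantsSecondary && state.hasPrimary))) ||
    (state.hasSecondary && state.secondaryOnlyConnectionAllowed)
  );
}

console.log(
  connectEventShouldFire({
    isConnecting: true,
    hasPrimary: false,
    hasSecondary: true,
    readPreferenceIsSecondary: true,
    secondaryOnlyConnectionAllowed: false
  })
); // true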
- parent: self - }) - ); - }); - - // Error out as high availbility interval must be < than socketTimeout - if ( - this.s.options.socketTimeout > 0 && - this.s.options.socketTimeout <= this.s.options.haInterval - ) { - return self.emit( - 'error', - new MongoError( - f( - 'haInterval [%s] MS must be set to less than socketTimeout [%s] MS', - this.s.options.haInterval, - this.s.options.socketTimeout - ) - ) - ); - } - - // Emit the topology opening event - emitSDAMEvent(this, 'topologyOpening', { topologyId: this.id }); - // Start all server connections - connectServers(self, servers); -}; - -/** - * Authenticate the topology. - * @method - * @param {MongoCredentials} credentials The credentials for authentication we are using - * @param {authResultCallback} callback A callback function - */ -ReplSet.prototype.auth = function(credentials, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -/** - * Destroy the server connection - * @param {boolean} [options.force=false] Force destroy the pool - * @method - */ -ReplSet.prototype.destroy = function(options, callback) { - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = options || {}; - - let destroyCount = this.s.connectingServers.length + 1; // +1 for the callback from `replicaSetState.destroy` - const serverDestroyed = () => { - destroyCount--; - if (destroyCount > 0) { - return; - } - - // Emit toplogy closing event - emitSDAMEvent(this, 'topologyClosed', { topologyId: this.id }); - - // Transition state - stateTransition(this, DESTROYED); - - if (typeof callback === 'function') { - callback(null, null); - } - }; - - // Clear out any monitoring process - if (this.haTimeoutId) clearTimeout(this.haTimeoutId); - - // Clear out all monitoring - for (var i = 0; i < this.intervalIds.length; i++) { - this.intervalIds[i].stop(); - } - - // Reset list of intervalIds - this.intervalIds = []; - - if (destroyCount === 0) { - serverDestroyed(); - return; - } - - // Destroy the replicaset - this.s.replicaSetState.destroy(options, serverDestroyed); - - // Destroy all connecting servers - this.s.connectingServers.forEach(function(x) { - x.destroy(options, serverDestroyed); - }); -}; - -/** - * Unref all connections belong to this server - * @method - */ -ReplSet.prototype.unref = function() { - // Transition state - stateTransition(this, UNREFERENCED); - - this.s.replicaSetState.allServers().forEach(function(x) { - x.unref(); - }); - - clearTimeout(this.haTimeoutId); -}; - -/** - * Returns the last known ismaster document for this server - * @method - * @return {object} - */ -ReplSet.prototype.lastIsMaster = function() { - // If secondaryOnlyConnectionAllowed and no primary but secondary - // return the secondaries ismaster result. - if ( - this.s.options.secondaryOnlyConnectionAllowed && - !this.s.replicaSetState.hasPrimary() && - this.s.replicaSetState.hasSecondary() - ) { - return this.s.replicaSetState.secondaries[0].lastIsMaster(); - } - - return this.s.replicaSetState.primary - ? 
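// Illustrative sketch (not part of the patch) of the countdown pattern the destroy method
// above uses to wait for the replica-set state and every connecting server to close before
// emitting 'topologyClosed'. destroyAll, resources and the stubbed resources are hypothetical.
function destroyAll(resources, onAllDestroyed) {
  let remaining = resources.length;
  if (remaining === 0) return onAllDestroyed();
  const oneDone = () => {
    remaining--;
    if (remaining === 0) onAllDestroyed();
  };
  resources.forEach(resource => resource.destroy(oneDone));
}

// Hypothetical usage with resources that finish destroying asynchronously:
const asyncStub = () => ({ destroy: done => setImmediate(done) });
destroyAll([asyncStub(), asyncStub(), asyncStub()], () => console.log('all resources destroyed'));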
this.s.replicaSetState.primary.lastIsMaster() - : this.ismaster; -}; - -/** - * All raw connections - * @method - * @return {Connection[]} - */ -ReplSet.prototype.connections = function() { - var servers = this.s.replicaSetState.allServers(); - var connections = []; - for (var i = 0; i < servers.length; i++) { - connections = connections.concat(servers[i].connections()); - } - - return connections; -}; - -/** - * Figure out if the server is connected - * @method - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @return {boolean} - */ -ReplSet.prototype.isConnected = function(options) { - options = options || {}; - - // If we specified a read preference check if we are connected to something - // than can satisfy this - if (options.readPreference && options.readPreference.equals(ReadPreference.secondary)) { - return this.s.replicaSetState.hasSecondary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.primary)) { - return this.s.replicaSetState.hasPrimary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.primaryPreferred)) { - return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); - } - - if (options.readPreference && options.readPreference.equals(ReadPreference.secondaryPreferred)) { - return this.s.replicaSetState.hasSecondary() || this.s.replicaSetState.hasPrimary(); - } - - if (this.s.options.secondaryOnlyConnectionAllowed && this.s.replicaSetState.hasSecondary()) { - return true; - } - - return this.s.replicaSetState.hasPrimary(); -}; - -/** - * Figure out if the replicaset instance was destroyed by calling destroy - * @method - * @return {boolean} - */ -ReplSet.prototype.isDestroyed = function() { - return this.state === DESTROYED; -}; - -const SERVER_SELECTION_TIMEOUT_MS = 10000; // hardcoded `serverSelectionTimeoutMS` for legacy topology -const SERVER_SELECTION_INTERVAL_MS = 1000; // time to wait between selection attempts -/** - * Selects a server - * - * @method - * @param {function} selector Unused - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {ClientSession} [options.session] Unused - * @param {function} callback - */ -ReplSet.prototype.selectServer = function(selector, options, callback) { - if (typeof selector === 'function' && typeof callback === 'undefined') - (callback = selector), (selector = undefined), (options = {}); - if (typeof options === 'function') (callback = options), (options = selector); - options = options || {}; - - let readPreference; - if (selector instanceof ReadPreference) { - readPreference = selector; - } else { - readPreference = options.readPreference || ReadPreference.primary; - } - - let lastError; - const start = process.hrtime(); - const _selectServer = () => { - if (calculateDurationInMs(start) >= SERVER_SELECTION_TIMEOUT_MS) { - if (lastError != null) { - callback(lastError, null); - } else { - callback(new MongoError('Server selection timed out')); - } - - return; - } - - const server = this.s.replicaSetState.pickServer(readPreference); - if (server == null) { - setTimeout(_selectServer, SERVER_SELECTION_INTERVAL_MS); - return; - } - - if (!(server instanceof Server)) { - lastError = server; - setTimeout(_selectServer, SERVER_SELECTION_INTERVAL_MS); - return; - } - - if (this.s.debug) this.emit('pickedServer', options.readPreference, server); - callback(null, server); - }; - - _selectServer(); -}; - -/** - * Get all connected 
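// Illustrative sketch (not part of the patch) of the deadline-and-retry loop used by the
// selectServer method above: attempt to pick a server, and if none is available yet, retry
// on an interval until the hardcoded 10 second selection timeout elapses. selectWithDeadline,
// pickServer and the plain Error are hypothetical stand-ins for the replicaSetState call and
// MongoError in the code being removed.
function selectWithDeadline(pickServer, callback, timeoutMs = 10000, intervalMs = 1000) {
  const start = Date.now();
  const attempt = () => {
    if (Date.now() - start >= timeoutMs) {
      return callback(new Error('Server selection timed out'));
    }
    const server = pickServer();
    if (server == null) {
      return setTimeout(attempt, intervalMs);
    }
    callback(null, server);
  };
  attempt();
}

// Hypothetical usage: a picker that only succeeds once some time has passed.
const becomesAvailableAt = Date.now() + 1500;
selectWithDeadline(
  () => (Date.now() >= becomesAvailableAt ? { name: 'rs0-primary:27017' } : null),
  (err, server) => console.log(err || server)
);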
servers - * @method - * @return {Server[]} - */ -ReplSet.prototype.getServers = function() { - return this.s.replicaSetState.allServers(); -}; - -// -// Execute write operation -function executeWriteOperation(args, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options = options || {}; - - // TODO: once we drop Node 4, use destructuring either here or in arguments. - const self = args.self; - const op = args.op; - const ns = args.ns; - const ops = args.ops; - - if (self.state === DESTROYED) { - return callback(new MongoError(f('topology was destroyed'))); - } - - const willRetryWrite = - !args.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction(); - - if (!self.s.replicaSetState.hasPrimary()) { - if (self.s.disconnectHandler) { - // Not connected but we have a disconnecthandler - return self.s.disconnectHandler.add(op, ns, ops, options, callback); - } else if (!willRetryWrite) { - // No server returned we had an error - return callback(new MongoError('no primary server found')); - } - } - - const handler = (err, result) => { - if (!err) return callback(null, result); - if (!legacyIsRetryableWriteError(err, self)) { - err = getMMAPError(err); - return callback(err); - } - - if (willRetryWrite) { - const newArgs = Object.assign({}, args, { retrying: true }); - return executeWriteOperation(newArgs, options, callback); - } - - // Per SDAM, remove primary from replicaset - if (self.s.replicaSetState.primary) { - self.s.replicaSetState.primary.destroy(); - self.s.replicaSetState.remove(self.s.replicaSetState.primary, { force: true }); - } - - return callback(err); - }; - - if (callback.operationId) { - handler.operationId = callback.operationId; - } - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - self.s.replicaSetState.primary[op](ns, ops, options, handler); -} - -/** - * Insert one or more documents - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of documents to insert - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.insert = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'insert', ns, ops }, options, callback); -}; - -/** - * Perform one or more update operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of updates - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. 
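// Illustrative sketch (not part of the patch) of the willRetryWrite eligibility check in
// executeWriteOperation above: a write is retried only once, only when retryWrites is
// enabled, a session is present, the topology supports retryable writes, and the session is
// not in a transaction. The flags object is a hypothetical stand-in for the real arguments.
function willRetryWrite(flags) {
  return (
    !flags.alreadyRetrying &&
    !!flags.retryWrites &&
    !!flags.session &&
    flags.topologySupportsRetryableWrites &&
    !flags.sessionInTransaction
  );
}

console.log(
  willRetryWrite({
    alreadyRetrying: false,
    retryWrites: true,
    session: {},
    topologySupportsRetryableWrites: true,
    sessionInTransaction: false
  })
); // true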
- * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.update = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'update', ns, ops }, options, callback); -}; - -/** - * Perform one or more remove operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of removes - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {boolean} [options.retryWrites] Enable retryable writes for this operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.remove = function(ns, ops, options, callback) { - // Execute write operation - executeWriteOperation({ self: this, op: 'remove', ns, ops }, options, callback); -}; - -const RETRYABLE_WRITE_OPERATIONS = ['findAndModify', 'insert', 'update', 'delete']; - -function isWriteCommand(command) { - return RETRYABLE_WRITE_OPERATIONS.some(op => command[op]); -} - -/** - * Execute a command - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command hash - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Connection} [options.connection] Specify connection object to execute command against - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -ReplSet.prototype.command = function(ns, cmd, options, callback) { - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - if (this.state === DESTROYED) return callback(new MongoError(f('topology was destroyed'))); - var self = this; - - // Establish readPreference - var readPreference = options.readPreference ? 
options.readPreference : ReadPreference.primary; - - // If the readPreference is primary and we have no primary, store it - if ( - readPreference.preference === 'primary' && - !this.s.replicaSetState.hasPrimary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } else if ( - readPreference.preference === 'secondary' && - !this.s.replicaSetState.hasSecondary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } else if ( - readPreference.preference !== 'primary' && - !this.s.replicaSetState.hasSecondary() && - !this.s.replicaSetState.hasPrimary() && - this.s.disconnectHandler != null - ) { - return this.s.disconnectHandler.add('command', ns, cmd, options, callback); - } - - // Pick a server - var server = this.s.replicaSetState.pickServer(readPreference); - // We received an error, return it - if (!(server instanceof Server)) return callback(server); - // Emit debug event - if (self.s.debug) self.emit('pickedServer', ReadPreference.primary, server); - - // No server returned we had an error - if (server == null) { - return callback( - new MongoError( - f('no server found that matches the provided readPreference %s', readPreference) - ) - ); - } - - const willRetryWrite = - !options.retrying && - !!options.retryWrites && - options.session && - isRetryableWritesSupported(self) && - !options.session.inTransaction() && - isWriteCommand(cmd); - - const cb = (err, result) => { - if (!err) return callback(null, result); - if (!legacyIsRetryableWriteError(err, self)) { - return callback(err); - } - - if (willRetryWrite) { - const newOptions = Object.assign({}, options, { retrying: true }); - return this.command(ns, cmd, newOptions, callback); - } - - // Per SDAM, remove primary from replicaset - if (this.s.replicaSetState.primary) { - this.s.replicaSetState.primary.destroy(); - this.s.replicaSetState.remove(this.s.replicaSetState.primary, { force: true }); - } - - return callback(err); - }; - - // increment and assign txnNumber - if (willRetryWrite) { - options.session.incrementTransactionNumber(); - options.willRetryWrite = willRetryWrite; - } - - // Execute the command - server.command(ns, cmd, options, cb); -}; - -/** - * Get a new cursor - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId - * @param {object} [options] Options for the cursor - * @param {object} [options.batchSize=0] Batchsize for the operation - * @param {array} [options.documents=[]] Initial documents list for cursor - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {object} [options.topology] The internal topology of the created cursor - * @returns {Cursor} - */ -ReplSet.prototype.cursor = function(ns, cmd, options) { - options = options || {}; - const topology = options.topology || this; - - // Set up final cursor type - var FinalCursor = options.cursorFactory || this.s.Cursor; - - // Return the cursor - return new FinalCursor(topology, ns, cmd, options); -}; - -/** - * A replset connect event, used to verify that the connection is up and running - * - * @event ReplSet#connect - * @type {ReplSet} - */ - -/** - * A replset reconnect event, used to verify that the topology reconnected - * - * @event ReplSet#reconnect - * @type {ReplSet} - */ - -/** - * A replset fullsetup event, used to signal that all topology members have been contacted. - * - * @event ReplSet#fullsetup - * @type {ReplSet} - */ - -/** - * A replset all event, used to signal that all topology members have been contacted. - * - * @event ReplSet#all - * @type {ReplSet} - */ - -/** - * A replset failed event, used to signal that initial replset connection failed. - * - * @event ReplSet#failed - * @type {ReplSet} - */ - -/** - * A server member left the replicaset - * - * @event ReplSet#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the replicaset - * - * @event ReplSet#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * A server opening SDAM monitoring event - * - * @event ReplSet#serverOpening - * @type {object} - */ - -/** - * A server closed SDAM monitoring event - * - * @event ReplSet#serverClosed - * @type {object} - */ - -/** - * A server description SDAM change monitoring event - * - * @event ReplSet#serverDescriptionChanged - * @type {object} - */ - -/** - * A topology open SDAM event - * - * @event ReplSet#topologyOpening - * @type {object} - */ - -/** - * A topology closed SDAM event - * - * @event ReplSet#topologyClosed - * @type {object} - */ - -/** - * A topology structure SDAM change event - * - * @event ReplSet#topologyDescriptionChanged - * @type {object} - */ - -/** - * A topology serverHeartbeatStarted SDAM event - * - * @event ReplSet#serverHeartbeatStarted - * @type {object} - */ - -/** - * A topology serverHeartbeatFailed SDAM event - * - * @event ReplSet#serverHeartbeatFailed - * @type {object} - */ - -/** - * A topology serverHeartbeatSucceeded SDAM change event - * - * @event ReplSet#serverHeartbeatSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event ReplSet#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event ReplSet#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event ReplSet#commandFailed - * @type {object} - */ - -module.exports = ReplSet; diff --git a/lib/core/topologies/replset_state.js b/lib/core/topologies/replset_state.js deleted file mode 100644 index 24c16d6d71..0000000000 --- a/lib/core/topologies/replset_state.js +++ /dev/null @@ -1,1121 +0,0 @@ -'use strict'; - -var inherits = require('util').inherits, 
- f = require('util').format, - diff = require('./shared').diff, - EventEmitter = require('events').EventEmitter, - Logger = require('../connection/logger'), - ReadPreference = require('./read_preference'), - MongoError = require('../error').MongoError, - Buffer = require('safe-buffer').Buffer; - -var TopologyType = { - Single: 'Single', - ReplicaSetNoPrimary: 'ReplicaSetNoPrimary', - ReplicaSetWithPrimary: 'ReplicaSetWithPrimary', - Sharded: 'Sharded', - Unknown: 'Unknown' -}; - -var ServerType = { - Standalone: 'Standalone', - Mongos: 'Mongos', - PossiblePrimary: 'PossiblePrimary', - RSPrimary: 'RSPrimary', - RSSecondary: 'RSSecondary', - RSArbiter: 'RSArbiter', - RSOther: 'RSOther', - RSGhost: 'RSGhost', - Unknown: 'Unknown' -}; - -var ReplSetState = function(options) { - options = options || {}; - // Add event listener - EventEmitter.call(this); - // Topology state - this.topologyType = TopologyType.ReplicaSetNoPrimary; - this.setName = options.setName; - - // Server set - this.set = {}; - - // Unpacked options - this.id = options.id; - this.setName = options.setName; - - // Replicaset logger - this.logger = options.logger || Logger('ReplSet', options); - - // Server selection index - this.index = 0; - // Acceptable latency - this.acceptableLatency = options.acceptableLatency || 15; - - // heartbeatFrequencyMS - this.heartbeatFrequencyMS = options.heartbeatFrequencyMS || 10000; - - // Server side - this.primary = null; - this.secondaries = []; - this.arbiters = []; - this.passives = []; - this.ghosts = []; - // Current unknown hosts - this.unknownServers = []; - // In set status - this.set = {}; - // Status - this.maxElectionId = null; - this.maxSetVersion = 0; - // Description of the Replicaset - this.replicasetDescription = { - topologyType: 'Unknown', - servers: [] - }; - - this.logicalSessionTimeoutMinutes = undefined; -}; - -inherits(ReplSetState, EventEmitter); - -ReplSetState.prototype.hasPrimaryAndSecondary = function() { - return this.primary != null && this.secondaries.length > 0; -}; - -ReplSetState.prototype.hasPrimaryOrSecondary = function() { - return this.hasPrimary() || this.hasSecondary(); -}; - -ReplSetState.prototype.hasPrimary = function() { - return this.primary != null; -}; - -ReplSetState.prototype.hasSecondary = function() { - return this.secondaries.length > 0; -}; - -ReplSetState.prototype.get = function(host) { - var servers = this.allServers(); - - for (var i = 0; i < servers.length; i++) { - if (servers[i].name.toLowerCase() === host.toLowerCase()) { - return servers[i]; - } - } - - return null; -}; - -ReplSetState.prototype.allServers = function(options) { - options = options || {}; - var servers = this.primary ? 
[this.primary] : []; - servers = servers.concat(this.secondaries); - if (!options.ignoreArbiters) servers = servers.concat(this.arbiters); - servers = servers.concat(this.passives); - return servers; -}; - -ReplSetState.prototype.destroy = function(options, callback) { - const serversToDestroy = this.secondaries - .concat(this.arbiters) - .concat(this.passives) - .concat(this.ghosts); - if (this.primary) serversToDestroy.push(this.primary); - - let serverCount = serversToDestroy.length; - const serverDestroyed = () => { - serverCount--; - if (serverCount > 0) { - return; - } - - // Clear out the complete state - this.secondaries = []; - this.arbiters = []; - this.passives = []; - this.ghosts = []; - this.unknownServers = []; - this.set = {}; - this.primary = null; - - // Emit the topology changed - emitTopologyDescriptionChanged(this); - - if (typeof callback === 'function') { - callback(null, null); - } - }; - - if (serverCount === 0) { - serverDestroyed(); - return; - } - - serversToDestroy.forEach(server => server.destroy(options, serverDestroyed)); -}; - -ReplSetState.prototype.remove = function(server, options) { - options = options || {}; - - // Get the server name and lowerCase it - var serverName = server.name.toLowerCase(); - - // Only remove if the current server is not connected - var servers = this.primary ? [this.primary] : []; - servers = servers.concat(this.secondaries); - servers = servers.concat(this.arbiters); - servers = servers.concat(this.passives); - - // Check if it's active and this is just a failed connection attempt - for (var i = 0; i < servers.length; i++) { - if ( - !options.force && - servers[i].equals(server) && - servers[i].isConnected && - servers[i].isConnected() - ) { - return; - } - } - - // If we have it in the set remove it - if (this.set[serverName]) { - this.set[serverName].type = ServerType.Unknown; - this.set[serverName].electionId = null; - this.set[serverName].setName = null; - this.set[serverName].setVersion = null; - } - - // Remove type - var removeType = null; - - // Remove from any lists - if (this.primary && this.primary.equals(server)) { - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - removeType = 'primary'; - } - - // Remove from any other server lists - removeType = removeFrom(server, this.secondaries) ? 'secondary' : removeType; - removeType = removeFrom(server, this.arbiters) ? 'arbiter' : removeType; - removeType = removeFrom(server, this.passives) ? 'secondary' : removeType; - removeFrom(server, this.ghosts); - removeFrom(server, this.unknownServers); - - // Push to unknownServers - this.unknownServers.push(serverName); - - // Do we have a removeType - if (removeType) { - this.emit('left', removeType, server); - } -}; - -const isArbiter = ismaster => ismaster.arbiterOnly && ismaster.setName; - -ReplSetState.prototype.update = function(server) { - var self = this; - // Get the current ismaster - var ismaster = server.lastIsMaster(); - - // Get the server name and lowerCase it - var serverName = server.name.toLowerCase(); - - // - // Add any hosts - // - if (ismaster) { - // Join all the possible new hosts - var hosts = Array.isArray(ismaster.hosts) ? ismaster.hosts : []; - hosts = hosts.concat(Array.isArray(ismaster.arbiters) ? ismaster.arbiters : []); - hosts = hosts.concat(Array.isArray(ismaster.passives) ? 
ismaster.passives : []); - hosts = hosts.map(function(s) { - return s.toLowerCase(); - }); - - // Add all hosts as unknownServers - for (var i = 0; i < hosts.length; i++) { - // Add to the list of unknown server - if ( - this.unknownServers.indexOf(hosts[i]) === -1 && - (!this.set[hosts[i]] || this.set[hosts[i]].type === ServerType.Unknown) - ) { - this.unknownServers.push(hosts[i].toLowerCase()); - } - - if (!this.set[hosts[i]]) { - this.set[hosts[i]] = { - type: ServerType.Unknown, - electionId: null, - setName: null, - setVersion: null - }; - } - } - } - - // - // Unknown server - // - if (!ismaster && !inList(ismaster, server, this.unknownServers)) { - self.set[serverName] = { - type: ServerType.Unknown, - setVersion: null, - electionId: null, - setName: null - }; - // Update set information about the server instance - self.set[serverName].type = ServerType.Unknown; - self.set[serverName].electionId = ismaster ? ismaster.electionId : ismaster; - self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; - self.set[serverName].setVersion = ismaster ? ismaster.setVersion : ismaster; - - if (self.unknownServers.indexOf(server.name) === -1) { - self.unknownServers.push(serverName); - } - - // Set the topology - return false; - } - - // Update logicalSessionTimeoutMinutes - if (ismaster.logicalSessionTimeoutMinutes !== undefined && !isArbiter(ismaster)) { - if ( - self.logicalSessionTimeoutMinutes === undefined || - ismaster.logicalSessionTimeoutMinutes === null - ) { - self.logicalSessionTimeoutMinutes = ismaster.logicalSessionTimeoutMinutes; - } else { - self.logicalSessionTimeoutMinutes = Math.min( - self.logicalSessionTimeoutMinutes, - ismaster.logicalSessionTimeoutMinutes - ); - } - } - - // - // Is this a mongos - // - if (ismaster && ismaster.msg === 'isdbgrid') { - if (this.primary && this.primary.name === serverName) { - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - return false; - } - - // A RSGhost instance - if (ismaster.isreplicaset) { - self.set[serverName] = { - type: ServerType.RSGhost, - setVersion: null, - electionId: null, - setName: ismaster.setName - }; - - if (this.primary && this.primary.name === serverName) { - this.primary = null; - } - - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - - // Set the topology - return false; - } - - // A RSOther instance - if ( - (ismaster.setName && ismaster.hidden) || - (ismaster.setName && - !ismaster.ismaster && - !ismaster.secondary && - !ismaster.arbiterOnly && - !ismaster.passive) - ) { - self.set[serverName] = { - type: ServerType.RSOther, - setVersion: null, - electionId: null, - setName: ismaster.setName - }; - - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - return false; - } - - // - // Standalone server, destroy and return - // - if (ismaster && ismaster.ismaster && !ismaster.setName) { - this.topologyType = this.primary ? 
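// Illustrative sketch (not part of the patch) of how the update method above tracks
// logicalSessionTimeoutMinutes across members: arbiters are ignored, a null value from any
// member forces the topology value to null, and otherwise the minimum across members wins.
// mergeSessionTimeout is a hypothetical name for that per-member step.
function mergeSessionTimeout(current, memberValue, memberIsArbiter) {
  if (memberValue === undefined || memberIsArbiter) return current;
  if (current === undefined || memberValue === null) return memberValue;
  return Math.min(current, memberValue);
}

let timeout;
timeout = mergeSessionTimeout(timeout, 30, false); // 30
timeout = mergeSessionTimeout(timeout, 20, false); // 20
timeout = mergeSessionTimeout(timeout, 10, true);  // still 20, arbiter ignored
console.log(timeout);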
TopologyType.ReplicaSetWithPrimary : TopologyType.Unknown; - this.remove(server, { force: true }); - return false; - } - - // - // Server in maintanance mode - // - if (ismaster && !ismaster.ismaster && !ismaster.secondary && !ismaster.arbiterOnly) { - this.remove(server, { force: true }); - return false; - } - - // - // If the .me field does not match the passed in server - // - if (ismaster.me && ismaster.me.toLowerCase() !== serverName) { - if (this.logger.isWarn()) { - this.logger.warn( - f( - 'the seedlist server was removed due to its address %s not matching its ismaster.me address %s', - server.name, - ismaster.me - ) - ); - } - - // Delete from the set - delete this.set[serverName]; - // Delete unknown servers - removeFrom(server, self.unknownServers); - - // Destroy the instance - server.destroy({ force: true }); - - // Set the type of topology we have - if (this.primary && !this.primary.equals(server)) { - this.topologyType = TopologyType.ReplicaSetWithPrimary; - } else { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - // - // We have a potential primary - // - if (!this.primary && ismaster.primary) { - this.set[ismaster.primary.toLowerCase()] = { - type: ServerType.PossiblePrimary, - setName: null, - electionId: null, - setVersion: null - }; - } - - return false; - } - - // - // Primary handling - // - if (!this.primary && ismaster.ismaster && ismaster.setName) { - var ismasterElectionId = server.lastIsMaster().electionId; - if (this.setName && this.setName !== ismaster.setName) { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - return new MongoError( - f( - 'setName from ismaster does not match provided connection setName [%s] != [%s]', - ismaster.setName, - this.setName - ) - ); - } - - if (!this.maxElectionId && ismasterElectionId) { - this.maxElectionId = ismasterElectionId; - } else if (this.maxElectionId && ismasterElectionId) { - var result = compareObjectIds(this.maxElectionId, ismasterElectionId); - // Get the electionIds - var ismasterSetVersion = server.lastIsMaster().setVersion; - - if (result === 1) { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - return false; - } else if (result === 0 && ismasterSetVersion) { - if (ismasterSetVersion < this.maxSetVersion) { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - return false; - } - } - - this.maxSetVersion = ismasterSetVersion; - this.maxElectionId = ismasterElectionId; - } - - // Hande normalization of server names - var normalizedHosts = ismaster.hosts.map(function(x) { - return x.toLowerCase(); - }); - var locationIndex = normalizedHosts.indexOf(serverName); - - // Validate that the server exists in the host list - if (locationIndex !== -1) { - self.primary = server; - self.set[serverName] = { - type: ServerType.RSPrimary, - setVersion: ismaster.setVersion, - electionId: ismaster.electionId, - setName: ismaster.setName - }; - - // Set the topology - this.topologyType = TopologyType.ReplicaSetWithPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - removeFrom(server, self.secondaries); - removeFrom(server, self.passives); - self.emit('joined', 'primary', server); - } else { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - emitTopologyDescriptionChanged(self); - return true; - } else if (ismaster.ismaster && ismaster.setName) { - // Get the electionIds - var currentElectionId = self.set[self.primary.name.toLowerCase()].electionId; - var currentSetVersion = 
self.set[self.primary.name.toLowerCase()].setVersion; - var currentSetName = self.set[self.primary.name.toLowerCase()].setName; - ismasterElectionId = server.lastIsMaster().electionId; - ismasterSetVersion = server.lastIsMaster().setVersion; - var ismasterSetName = server.lastIsMaster().setName; - - // Is it the same server instance - if (this.primary.equals(server) && currentSetName === ismasterSetName) { - return false; - } - - // If we do not have the same rs name - if (currentSetName && currentSetName !== ismasterSetName) { - if (!this.primary.equals(server)) { - this.topologyType = TopologyType.ReplicaSetWithPrimary; - } else { - this.topologyType = TopologyType.ReplicaSetNoPrimary; - } - - return false; - } - - // Check if we need to replace the server - if (currentElectionId && ismasterElectionId) { - result = compareObjectIds(currentElectionId, ismasterElectionId); - - if (result === 1) { - return false; - } else if (result === 0 && currentSetVersion > ismasterSetVersion) { - return false; - } - } else if (!currentElectionId && ismasterElectionId && ismasterSetVersion) { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } - - if (!this.maxElectionId && ismasterElectionId) { - this.maxElectionId = ismasterElectionId; - } else if (this.maxElectionId && ismasterElectionId) { - result = compareObjectIds(this.maxElectionId, ismasterElectionId); - - if (result === 1) { - return false; - } else if (result === 0 && currentSetVersion && ismasterSetVersion) { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } else { - if (ismasterSetVersion < this.maxSetVersion) { - return false; - } - } - - this.maxElectionId = ismasterElectionId; - this.maxSetVersion = ismasterSetVersion; - } else { - this.maxSetVersion = ismasterSetVersion; - } - - // Modify the entry to unknown - self.set[self.primary.name.toLowerCase()] = { - type: ServerType.Unknown, - setVersion: null, - electionId: null, - setName: null - }; - - // Signal primary left - self.emit('left', 'primary', this.primary); - // Destroy the instance - self.primary.destroy({ force: true }); - // Set the new instance - self.primary = server; - // Set the set information - self.set[serverName] = { - type: ServerType.RSPrimary, - setVersion: ismaster.setVersion, - electionId: ismaster.electionId, - setName: ismaster.setName - }; - - // Set the topology - this.topologyType = TopologyType.ReplicaSetWithPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - removeFrom(server, self.secondaries); - removeFrom(server, self.passives); - self.emit('joined', 'primary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // A possible instance - if (!this.primary && ismaster.primary) { - self.set[ismaster.primary.toLowerCase()] = { - type: ServerType.PossiblePrimary, - setVersion: null, - electionId: null, - setName: null - }; - } - - // - // Secondary handling - // - if ( - ismaster.secondary && - ismaster.setName && - !inList(ismaster, server, this.secondaries) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSSecondary, ismaster, server, this.secondaries); - // Set the topology - this.topologyType = this.primary - ? 
TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - - // Remove primary - if (this.primary && this.primary.name.toLowerCase() === serverName) { - server.destroy({ force: true }); - this.primary = null; - self.emit('left', 'primary', server); - } - - // Emit secondary joined replicaset - self.emit('joined', 'secondary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Arbiter handling - // - if ( - isArbiter(ismaster) && - !inList(ismaster, server, this.arbiters) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSArbiter, ismaster, server, this.arbiters); - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - self.emit('joined', 'arbiter', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Passive handling - // - if ( - ismaster.passive && - ismaster.setName && - !inList(ismaster, server, this.passives) && - this.setName && - this.setName === ismaster.setName - ) { - addToList(self, ServerType.RSSecondary, ismaster, server, this.passives); - // Set the topology - this.topologyType = this.primary - ? TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - if (ismaster.setName) this.setName = ismaster.setName; - removeFrom(server, self.unknownServers); - - // Remove primary - if (this.primary && this.primary.name.toLowerCase() === serverName) { - server.destroy({ force: true }); - this.primary = null; - self.emit('left', 'primary', server); - } - - self.emit('joined', 'secondary', server); - emitTopologyDescriptionChanged(self); - return true; - } - - // - // Remove the primary - // - if (this.set[serverName] && this.set[serverName].type === ServerType.RSPrimary) { - self.emit('left', 'primary', this.primary); - this.primary.destroy({ force: true }); - this.primary = null; - this.topologyType = TopologyType.ReplicaSetNoPrimary; - return false; - } - - this.topologyType = this.primary - ? 
TopologyType.ReplicaSetWithPrimary - : TopologyType.ReplicaSetNoPrimary; - return false; -}; - -/** - * Recalculate single server max staleness - * @method - */ -ReplSetState.prototype.updateServerMaxStaleness = function(server, haInterval) { - // Locate the max secondary lastwrite - var max = 0; - // Go over all secondaries - for (var i = 0; i < this.secondaries.length; i++) { - max = Math.max(max, this.secondaries[i].lastWriteDate); - } - - // Perform this servers staleness calculation - if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary && this.hasPrimary()) { - server.staleness = - server.lastUpdateTime - - server.lastWriteDate - - (this.primary.lastUpdateTime - this.primary.lastWriteDate) + - haInterval; - } else if (server.ismaster.maxWireVersion >= 5 && server.ismaster.secondary) { - server.staleness = max - server.lastWriteDate + haInterval; - } -}; - -/** - * Recalculate all the staleness values for secodaries - * @method - */ -ReplSetState.prototype.updateSecondariesMaxStaleness = function(haInterval) { - for (var i = 0; i < this.secondaries.length; i++) { - this.updateServerMaxStaleness(this.secondaries[i], haInterval); - } -}; - -/** - * Pick a server by the passed in ReadPreference - * @method - * @param {ReadPreference} readPreference The ReadPreference instance to use - */ -ReplSetState.prototype.pickServer = function(readPreference) { - // If no read Preference set to primary by default - readPreference = readPreference || ReadPreference.primary; - - // maxStalenessSeconds is not allowed with a primary read - if (readPreference.preference === 'primary' && readPreference.maxStalenessSeconds != null) { - return new MongoError('primary readPreference incompatible with maxStalenessSeconds'); - } - - // Check if we have any non compatible servers for maxStalenessSeconds - var allservers = this.primary ? [this.primary] : []; - allservers = allservers.concat(this.secondaries); - - // Does any of the servers not support the right wire protocol version - // for maxStalenessSeconds when maxStalenessSeconds specified on readPreference. 
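For reference, the staleness bookkeeping above reduces to a single formula; a minimal standalone sketch of it (the helper name and the sample numbers are illustrative, not driver API):

function estimateStaleness(secondary, primary, maxSecondaryLastWrite, haInterval) {
  if (primary) {
    // With a primary: how far the secondary's last write lags the primary's,
    // measured at each server's own last check time, plus one heartbeat interval.
    return (
      secondary.lastUpdateTime -
      secondary.lastWriteDate -
      (primary.lastUpdateTime - primary.lastWriteDate) +
      haInterval
    );
  }
  // Without a primary: lag behind the freshest known secondary, plus one heartbeat interval.
  return maxSecondaryLastWrite - secondary.lastWriteDate + haInterval;
}

// Example: a secondary last checked at t=100000 ms that last wrote at t=70000 ms,
// against a primary checked at t=100000 ms that last wrote at t=95000 ms,
// with haInterval = 10000 ms:
// (100000 - 70000) - (100000 - 95000) + 10000 === 35000 ms of staleness.
// Selection with maxStalenessSeconds then only keeps servers whose staleness
// is at most maxStalenessSeconds * 1000.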
Then error out - if (readPreference.maxStalenessSeconds != null) { - for (var i = 0; i < allservers.length; i++) { - if (allservers[i].ismaster.maxWireVersion < 5) { - return new MongoError( - 'maxStalenessSeconds not supported by at least one of the replicaset members' - ); - } - } - } - - // Do we have the nearest readPreference - if (readPreference.preference === 'nearest' && readPreference.maxStalenessSeconds == null) { - return pickNearest(this, readPreference); - } else if ( - readPreference.preference === 'nearest' && - readPreference.maxStalenessSeconds != null - ) { - return pickNearestMaxStalenessSeconds(this, readPreference); - } - - // Get all the secondaries - var secondaries = this.secondaries; - - // Check if we can satisfy and of the basic read Preferences - if (readPreference.equals(ReadPreference.secondary) && secondaries.length === 0) { - return new MongoError('no secondary server available'); - } - - if ( - readPreference.equals(ReadPreference.secondaryPreferred) && - secondaries.length === 0 && - this.primary == null - ) { - return new MongoError('no secondary or primary server available'); - } - - if (readPreference.equals(ReadPreference.primary) && this.primary == null) { - return new MongoError('no primary server available'); - } - - // Secondary preferred or just secondaries - if ( - readPreference.equals(ReadPreference.secondaryPreferred) || - readPreference.equals(ReadPreference.secondary) - ) { - if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { - // Pick nearest of any other servers available - var server = pickNearest(this, readPreference); - // No server in the window return primary - if (server) { - return server; - } - } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { - // Pick nearest of any other servers available - server = pickNearestMaxStalenessSeconds(this, readPreference); - // No server in the window return primary - if (server) { - return server; - } - } - - if (readPreference.equals(ReadPreference.secondaryPreferred)) { - return this.primary; - } - - return null; - } - - // Primary preferred - if (readPreference.equals(ReadPreference.primaryPreferred)) { - server = null; - - // We prefer the primary if it's available - if (this.primary) { - return this.primary; - } - - // Pick a secondary - if (secondaries.length > 0 && readPreference.maxStalenessSeconds == null) { - server = pickNearest(this, readPreference); - } else if (secondaries.length > 0 && readPreference.maxStalenessSeconds != null) { - server = pickNearestMaxStalenessSeconds(this, readPreference); - } - - // Did we find a server - if (server) return server; - } - - // Return the primary - return this.primary; -}; - -// -// Filter serves by tags -var filterByTags = function(readPreference, servers) { - if (readPreference.tags == null) return servers; - var filteredServers = []; - var tagsArray = Array.isArray(readPreference.tags) ? 
readPreference.tags : [readPreference.tags]; - - // Iterate over the tags - for (var j = 0; j < tagsArray.length; j++) { - var tags = tagsArray[j]; - - // Iterate over all the servers - for (var i = 0; i < servers.length; i++) { - var serverTag = servers[i].lastIsMaster().tags || {}; - - // Did we find the a matching server - var found = true; - // Check if the server is valid - for (var name in tags) { - if (serverTag[name] !== tags[name]) { - found = false; - } - } - - // Add to candidate list - if (found) { - filteredServers.push(servers[i]); - } - } - } - - // Returned filtered servers - return filteredServers; -}; - -function pickNearestMaxStalenessSeconds(self, readPreference) { - // Only get primary and secondaries as seeds - var servers = []; - - // Get the maxStalenessMS - var maxStalenessMS = readPreference.maxStalenessSeconds * 1000; - - // Check if the maxStalenessMS > 90 seconds - if (maxStalenessMS < 90 * 1000) { - return new MongoError('maxStalenessSeconds must be set to at least 90 seconds'); - } - - // Add primary to list if not a secondary read preference - if ( - self.primary && - readPreference.preference !== 'secondary' && - readPreference.preference !== 'secondaryPreferred' - ) { - servers.push(self.primary); - } - - // Add all the secondaries - for (var i = 0; i < self.secondaries.length; i++) { - servers.push(self.secondaries[i]); - } - - // If we have a secondaryPreferred readPreference and no server add the primary - if (self.primary && servers.length === 0 && readPreference.preference !== 'secondaryPreferred') { - servers.push(self.primary); - } - - // Filter by tags - servers = filterByTags(readPreference, servers); - - // Filter by latency - servers = servers.filter(function(s) { - return s.staleness <= maxStalenessMS; - }); - - // Sort by time - servers.sort(function(a, b) { - return a.lastIsMasterMS - b.lastIsMasterMS; - }); - - // No servers, default to primary - if (servers.length === 0) { - return null; - } - - // Ensure index does not overflow the number of available servers - self.index = self.index % servers.length; - - // Get the server - var server = servers[self.index]; - // Add to the index - self.index = self.index + 1; - // Return the first server of the sorted and filtered list - return server; -} - -function pickNearest(self, readPreference) { - // Only get primary and secondaries as seeds - var servers = []; - - // Add primary to list if not a secondary read preference - if ( - self.primary && - readPreference.preference !== 'secondary' && - readPreference.preference !== 'secondaryPreferred' - ) { - servers.push(self.primary); - } - - // Add all the secondaries - for (var i = 0; i < self.secondaries.length; i++) { - servers.push(self.secondaries[i]); - } - - // If we have a secondaryPreferred readPreference and no server add the primary - if (servers.length === 0 && self.primary && readPreference.preference !== 'secondaryPreferred') { - servers.push(self.primary); - } - - // Filter by tags - servers = filterByTags(readPreference, servers); - - // Sort by time - servers.sort(function(a, b) { - return a.lastIsMasterMS - b.lastIsMasterMS; - }); - - // Locate lowest time (picked servers are lowest time + acceptable Latency margin) - var lowest = servers.length > 0 ? 
servers[0].lastIsMasterMS : 0; - - // Filter by latency - servers = servers.filter(function(s) { - return s.lastIsMasterMS <= lowest + self.acceptableLatency; - }); - - // No servers, default to primary - if (servers.length === 0) { - return null; - } - - // Ensure index does not overflow the number of available servers - self.index = self.index % servers.length; - // Get the server - var server = servers[self.index]; - // Add to the index - self.index = self.index + 1; - // Return the first server of the sorted and filtered list - return server; -} - -function inList(ismaster, server, list) { - for (var i = 0; i < list.length; i++) { - if (list[i] && list[i].name && list[i].name.toLowerCase() === server.name.toLowerCase()) - return true; - } - - return false; -} - -function addToList(self, type, ismaster, server, list) { - var serverName = server.name.toLowerCase(); - // Update set information about the server instance - self.set[serverName].type = type; - self.set[serverName].electionId = ismaster ? ismaster.electionId : ismaster; - self.set[serverName].setName = ismaster ? ismaster.setName : ismaster; - self.set[serverName].setVersion = ismaster ? ismaster.setVersion : ismaster; - // Add to the list - list.push(server); -} - -function compareObjectIds(id1, id2) { - var a = Buffer.from(id1.toHexString(), 'hex'); - var b = Buffer.from(id2.toHexString(), 'hex'); - - if (a === b) { - return 0; - } - - if (typeof Buffer.compare === 'function') { - return Buffer.compare(a, b); - } - - var x = a.length; - var y = b.length; - var len = Math.min(x, y); - - for (var i = 0; i < len; i++) { - if (a[i] !== b[i]) { - break; - } - } - - if (i !== len) { - x = a[i]; - y = b[i]; - } - - return x < y ? -1 : y < x ? 1 : 0; -} - -function removeFrom(server, list) { - for (var i = 0; i < list.length; i++) { - if (list[i].equals && list[i].equals(server)) { - list.splice(i, 1); - return true; - } else if (typeof list[i] === 'string' && list[i].toLowerCase() === server.name.toLowerCase()) { - list.splice(i, 1); - return true; - } - } - - return false; -} - -function emitTopologyDescriptionChanged(self) { - if (self.listeners('topologyDescriptionChanged').length > 0) { - var topology = 'Unknown'; - var setName = self.setName; - - if (self.hasPrimaryAndSecondary()) { - topology = 'ReplicaSetWithPrimary'; - } else if (!self.hasPrimary() && self.hasSecondary()) { - topology = 'ReplicaSetNoPrimary'; - } - - // Generate description - var description = { - topologyType: topology, - setName: setName, - servers: [] - }; - - // Add the primary to the list - if (self.hasPrimary()) { - var desc = self.primary.getDescription(); - desc.type = 'RSPrimary'; - description.servers.push(desc); - } - - // Add all the secondaries - description.servers = description.servers.concat( - self.secondaries.map(function(x) { - var description = x.getDescription(); - description.type = 'RSSecondary'; - return description; - }) - ); - - // Add all the arbiters - description.servers = description.servers.concat( - self.arbiters.map(function(x) { - var description = x.getDescription(); - description.type = 'RSArbiter'; - return description; - }) - ); - - // Add all the passives - description.servers = description.servers.concat( - self.passives.map(function(x) { - var description = x.getDescription(); - description.type = 'RSSecondary'; - return description; - }) - ); - - // Get the diff - var diffResult = diff(self.replicasetDescription, description); - - // Create the result - var result = { - topologyId: self.id, - previousDescription: 
self.replicasetDescription, - newDescription: description, - diff: diffResult - }; - - // Emit the topologyDescription change - // if(diffResult.servers.length > 0) { - self.emit('topologyDescriptionChanged', result); - // } - - // Set the new description - self.replicasetDescription = description; - } -} - -module.exports = ReplSetState; diff --git a/lib/core/topologies/server.js b/lib/core/topologies/server.js deleted file mode 100644 index 6f6de12eaa..0000000000 --- a/lib/core/topologies/server.js +++ /dev/null @@ -1,990 +0,0 @@ -'use strict'; - -var inherits = require('util').inherits, - f = require('util').format, - EventEmitter = require('events').EventEmitter, - ReadPreference = require('./read_preference'), - Logger = require('../connection/logger'), - debugOptions = require('../connection/utils').debugOptions, - retrieveBSON = require('../connection/utils').retrieveBSON, - Pool = require('../connection/pool'), - MongoError = require('../error').MongoError, - MongoNetworkError = require('../error').MongoNetworkError, - wireProtocol = require('../wireprotocol'), - CoreCursor = require('../cursor').CoreCursor, - sdam = require('./shared'), - createCompressionInfo = require('./shared').createCompressionInfo, - resolveClusterTime = require('./shared').resolveClusterTime, - SessionMixins = require('./shared').SessionMixins, - relayEvents = require('../utils').relayEvents; - -const collationNotSupported = require('../utils').collationNotSupported; -const makeClientMetadata = require('../utils').makeClientMetadata; - -// Used for filtering out fields for loggin -var debugFields = [ - 'reconnect', - 'reconnectTries', - 'reconnectInterval', - 'emitError', - 'cursorFactory', - 'host', - 'port', - 'size', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectionTimeout', - 'checkServerIdentity', - 'socketTimeout', - 'ssl', - 'ca', - 'crl', - 'cert', - 'key', - 'rejectUnauthorized', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'servername' -]; - -// Server instance id -var id = 0; -var serverAccounting = false; -var servers = {}; -var BSON = retrieveBSON(); - -function topologyId(server) { - return server.s.parent == null ? server.id : server.s.parent.id; -} - -/** - * Creates a new Server instance - * @class - * @param {boolean} [options.reconnect=true] Server will attempt to reconnect on loss of connection - * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times - * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries - * @param {number} [options.monitoring=true] Enable the server state monitoring (calling ismaster at monitoringInterval) - * @param {number} [options.monitoringInterval=5000] The interval of calling ismaster when monitoring is enabled. 
- * @param {Cursor} [options.cursorFactory=Cursor] The cursor factory class used for all query cursors - * @param {string} options.host The server host - * @param {number} options.port The server port - * @param {number} [options.size=5] Server connection pool size - * @param {boolean} [options.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.keepAliveInitialDelay=300000] Initial delay before TCP keep alive enabled - * @param {boolean} [options.noDelay=true] TCP Connection no delay - * @param {number} [options.connectionTimeout=30000] TCP Connection timeout setting - * @param {number} [options.socketTimeout=360000] TCP Socket timeout setting - * @param {boolean} [options.ssl=false] Use SSL for connection - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {Buffer} [options.ca] SSL Certificate store binary buffer - * @param {Buffer} [options.crl] SSL Certificate revocation store binary buffer - * @param {Buffer} [options.cert] SSL Certificate binary buffer - * @param {Buffer} [options.key] SSL Key file binary buffer - * @param {string} [options.passphrase] SSL Certificate pass phrase - * @param {boolean} [options.rejectUnauthorized=true] Reject unauthorized server certificates - * @param {string} [options.servername=null] String containing the server name requested via TLS SNI. - * @param {boolean} [options.promoteLongs=true] Convert Long values from the db into Numbers if they fit into 53 bits - * @param {boolean} [options.promoteValues=true] Promotes BSON values to native types where possible, set to false to only receive wrapper types. - * @param {boolean} [options.promoteBuffers=false] Promotes Binary BSON values to native Node Buffers. - * @param {string} [options.appname=null] Application name, passed in on ismaster call and logged in mongod server logs. Maximum size 128 bytes. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @return {Server} A cursor instance - * @fires Server#connect - * @fires Server#close - * @fires Server#error - * @fires Server#timeout - * @fires Server#parseError - * @fires Server#reconnect - * @fires Server#reconnectFailed - * @fires Server#serverHeartbeatStarted - * @fires Server#serverHeartbeatSucceeded - * @fires Server#serverHeartbeatFailed - * @fires Server#topologyOpening - * @fires Server#topologyClosed - * @fires Server#topologyDescriptionChanged - * @property {string} type the topology type. - * @property {string} parserType the parser type used (c++ or js). 
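The option surface documented above was consumed internally by the driver rather than by applications, so nothing user-facing is lost by this deletion. For context, driving the legacy core Server directly looked roughly like the following sketch (the deep require path mirrors the file being deleted here, and the host/port values are illustrative):

const Server = require('mongodb/lib/core/topologies/server');

const server = new Server({ host: 'localhost', port: 27017, size: 5, reconnect: true });

server.on('error', err => console.error('server error', err));
server.on('connect', s => {
  // Run a simple command against admin.$cmd, then tear the pool down.
  s.command('admin.$cmd', { ismaster: true }, (err, res) => {
    console.log(err || res.result);
    s.destroy({ force: true });
  });
});

server.connect();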
- */ -var Server = function(options) { - options = options || {}; - - // Add event listener - EventEmitter.call(this); - - // Server instance id - this.id = id++; - - // Internal state - this.s = { - // Options - options: Object.assign({ metadata: makeClientMetadata(options) }, options), - // Logger - logger: Logger('Server', options), - // Factory overrides - Cursor: options.cursorFactory || CoreCursor, - // BSON instance - bson: - options.bson || - new BSON([ - BSON.Binary, - BSON.Code, - BSON.DBRef, - BSON.Decimal128, - BSON.Double, - BSON.Int32, - BSON.Long, - BSON.Map, - BSON.MaxKey, - BSON.MinKey, - BSON.ObjectId, - BSON.BSONRegExp, - BSON.Symbol, - BSON.Timestamp - ]), - // Pool - pool: null, - // Disconnect handler - disconnectHandler: options.disconnectHandler, - // Monitor thread (keeps the connection alive) - monitoring: typeof options.monitoring === 'boolean' ? options.monitoring : true, - // Is the server in a topology - inTopology: !!options.parent, - // Monitoring timeout - monitoringInterval: - typeof options.monitoringInterval === 'number' ? options.monitoringInterval : 5000, - compression: { compressors: createCompressionInfo(options) }, - // Optional parent topology - parent: options.parent - }; - - // If this is a single deployment we need to track the clusterTime here - if (!this.s.parent) { - this.s.clusterTime = null; - } - - // Curent ismaster - this.ismaster = null; - // Current ping time - this.lastIsMasterMS = -1; - // The monitoringProcessId - this.monitoringProcessId = null; - // Initial connection - this.initialConnect = true; - // Default type - this._type = 'server'; - - // Max Stalleness values - // last time we updated the ismaster state - this.lastUpdateTime = 0; - // Last write time - this.lastWriteDate = 0; - // Stalleness - this.staleness = 0; -}; - -inherits(Server, EventEmitter); -Object.assign(Server.prototype, SessionMixins); - -Object.defineProperty(Server.prototype, 'type', { - enumerable: true, - get: function() { - return this._type; - } -}); - -Object.defineProperty(Server.prototype, 'parserType', { - enumerable: true, - get: function() { - return BSON.native ? 'c++' : 'js'; - } -}); - -Object.defineProperty(Server.prototype, 'logicalSessionTimeoutMinutes', { - enumerable: true, - get: function() { - if (!this.ismaster) return null; - return this.ismaster.logicalSessionTimeoutMinutes || null; - } -}); - -Object.defineProperty(Server.prototype, 'clientMetadata', { - enumerable: true, - get: function() { - return this.s.options.metadata; - } -}); - -// In single server deployments we track the clusterTime directly on the topology, however -// in Mongos and ReplSet deployments we instead need to delegate the clusterTime up to the -// tracking objects so we can ensure we are gossiping the maximum time received from the -// server. -Object.defineProperty(Server.prototype, 'clusterTime', { - enumerable: true, - set: function(clusterTime) { - const settings = this.s.parent ? this.s.parent : this.s; - resolveClusterTime(settings, clusterTime); - }, - get: function() { - const settings = this.s.parent ? 
this.s.parent : this.s; - return settings.clusterTime || null; - } -}); - -Server.enableServerAccounting = function() { - serverAccounting = true; - servers = {}; -}; - -Server.disableServerAccounting = function() { - serverAccounting = false; -}; - -Server.servers = function() { - return servers; -}; - -Object.defineProperty(Server.prototype, 'name', { - enumerable: true, - get: function() { - return this.s.options.host + ':' + this.s.options.port; - } -}); - -function disconnectHandler(self, type, ns, cmd, options, callback) { - // Topology is not connected, save the call in the provided store to be - // Executed at some point when the handler deems it's reconnected - if ( - !self.s.pool.isConnected() && - self.s.options.reconnect && - self.s.disconnectHandler != null && - !options.monitoring - ) { - self.s.disconnectHandler.add(type, ns, cmd, options, callback); - return true; - } - - // If we have no connection error - if (!self.s.pool.isConnected()) { - callback(new MongoError(f('no connection available to server %s', self.name))); - return true; - } -} - -function monitoringProcess(self) { - return function() { - // Pool was destroyed do not continue process - if (self.s.pool.isDestroyed()) return; - // Emit monitoring Process event - self.emit('monitoring', self); - // Perform ismaster call - // Get start time - var start = new Date().getTime(); - - // Execute the ismaster query - self.command( - 'admin.$cmd', - { ismaster: true }, - { - socketTimeout: - typeof self.s.options.connectionTimeout !== 'number' - ? 2000 - : self.s.options.connectionTimeout, - monitoring: true - }, - (err, result) => { - // Set initial lastIsMasterMS - self.lastIsMasterMS = new Date().getTime() - start; - if (self.s.pool.isDestroyed()) return; - // Update the ismaster view if we have a result - if (result) { - self.ismaster = result.result; - } - // Re-schedule the monitoring process - self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval); - } - ); - }; -} - -var eventHandler = function(self, event) { - return function(err, conn) { - // Log information of received information if in info mode - if (self.s.logger.isInfo()) { - var object = err instanceof MongoError ? 
JSON.stringify(err) : {}; - self.s.logger.info( - f('server %s fired event %s out with message %s', self.name, event, object) - ); - } - - // Handle connect event - if (event === 'connect') { - self.initialConnect = false; - self.ismaster = conn.ismaster; - self.lastIsMasterMS = conn.lastIsMasterMS; - if (conn.agreedCompressor) { - self.s.pool.options.agreedCompressor = conn.agreedCompressor; - } - - if (conn.zlibCompressionLevel) { - self.s.pool.options.zlibCompressionLevel = conn.zlibCompressionLevel; - } - - if (conn.ismaster.$clusterTime) { - const $clusterTime = conn.ismaster.$clusterTime; - self.clusterTime = $clusterTime; - } - - // It's a proxy change the type so - // the wireprotocol will send $readPreference - if (self.ismaster.msg === 'isdbgrid') { - self._type = 'mongos'; - } - - // Have we defined self monitoring - if (self.s.monitoring) { - self.monitoringProcessId = setTimeout(monitoringProcess(self), self.s.monitoringInterval); - } - - // Emit server description changed if something listening - sdam.emitServerDescriptionChanged(self, { - address: self.name, - arbiters: [], - hosts: [], - passives: [], - type: sdam.getTopologyType(self) - }); - - if (!self.s.inTopology) { - // Emit topology description changed if something listening - sdam.emitTopologyDescriptionChanged(self, { - topologyType: 'Single', - servers: [ - { - address: self.name, - arbiters: [], - hosts: [], - passives: [], - type: sdam.getTopologyType(self) - } - ] - }); - } - - // Log the ismaster if available - if (self.s.logger.isInfo()) { - self.s.logger.info( - f('server %s connected with ismaster [%s]', self.name, JSON.stringify(self.ismaster)) - ); - } - - // Emit connect - self.emit('connect', self); - } else if ( - event === 'error' || - event === 'parseError' || - event === 'close' || - event === 'timeout' || - event === 'reconnect' || - event === 'attemptReconnect' || - 'reconnectFailed' - ) { - // Remove server instance from accounting - if ( - serverAccounting && - ['close', 'timeout', 'error', 'parseError', 'reconnectFailed'].indexOf(event) !== -1 - ) { - // Emit toplogy opening event if not in topology - if (!self.s.inTopology) { - self.emit('topologyOpening', { topologyId: self.id }); - } - - delete servers[self.id]; - } - - if (event === 'close') { - // Closing emits a server description changed event going to unknown. - sdam.emitServerDescriptionChanged(self, { - address: self.name, - arbiters: [], - hosts: [], - passives: [], - type: 'Unknown' - }); - } - - // Reconnect failed return error - if (event === 'reconnectFailed') { - self.emit('reconnectFailed', err); - // Emit error if any listeners - if (self.listeners('error').length > 0) { - self.emit('error', err); - } - // Terminate - return; - } - - // On first connect fail - if ( - ['disconnected', 'connecting'].indexOf(self.s.pool.state) !== -1 && - self.initialConnect && - ['close', 'timeout', 'error', 'parseError'].indexOf(event) !== -1 - ) { - self.initialConnect = false; - return self.emit( - 'error', - new MongoNetworkError( - f('failed to connect to server [%s] on first connect [%s]', self.name, err) - ) - ); - } - - // Reconnect event, emit the server - if (event === 'reconnect') { - // Reconnecting emits a server description changed event going from unknown to the - // current server type. 
- sdam.emitServerDescriptionChanged(self, { - address: self.name, - arbiters: [], - hosts: [], - passives: [], - type: sdam.getTopologyType(self) - }); - return self.emit(event, self); - } - - // Emit the event - self.emit(event, err); - } - }; -}; - -/** - * Initiate server connect - */ -Server.prototype.connect = function(options) { - var self = this; - options = options || {}; - - // Set the connections - if (serverAccounting) servers[this.id] = this; - - // Do not allow connect to be called on anything that's not disconnected - if (self.s.pool && !self.s.pool.isDisconnected() && !self.s.pool.isDestroyed()) { - throw new MongoError(f('server instance in invalid state %s', self.s.pool.state)); - } - - // Create a pool - self.s.pool = new Pool(this, Object.assign(self.s.options, options, { bson: this.s.bson })); - - // Set up listeners - self.s.pool.on('close', eventHandler(self, 'close')); - self.s.pool.on('error', eventHandler(self, 'error')); - self.s.pool.on('timeout', eventHandler(self, 'timeout')); - self.s.pool.on('parseError', eventHandler(self, 'parseError')); - self.s.pool.on('connect', eventHandler(self, 'connect')); - self.s.pool.on('reconnect', eventHandler(self, 'reconnect')); - self.s.pool.on('reconnectFailed', eventHandler(self, 'reconnectFailed')); - - // Set up listeners for command monitoring - relayEvents(self.s.pool, self, ['commandStarted', 'commandSucceeded', 'commandFailed']); - - // Emit toplogy opening event if not in topology - if (!self.s.inTopology) { - this.emit('topologyOpening', { topologyId: topologyId(self) }); - } - - // Emit opening server event - self.emit('serverOpening', { topologyId: topologyId(self), address: self.name }); - - self.s.pool.connect(); -}; - -/** - * Authenticate the topology. - * @method - * @param {MongoCredentials} credentials The credentials for authentication we are using - * @param {authResultCallback} callback A callback function - */ -Server.prototype.auth = function(credentials, callback) { - if (typeof callback === 'function') callback(null, null); -}; - -/** - * Get the server description - * @method - * @return {object} - */ -Server.prototype.getDescription = function() { - var ismaster = this.ismaster || {}; - var description = { - type: sdam.getTopologyType(this), - address: this.name - }; - - // Add fields if available - if (ismaster.hosts) description.hosts = ismaster.hosts; - if (ismaster.arbiters) description.arbiters = ismaster.arbiters; - if (ismaster.passives) description.passives = ismaster.passives; - if (ismaster.setName) description.setName = ismaster.setName; - return description; -}; - -/** - * Returns the last known ismaster document for this server - * @method - * @return {object} - */ -Server.prototype.lastIsMaster = function() { - return this.ismaster; -}; - -/** - * Unref all connections belong to this server - * @method - */ -Server.prototype.unref = function() { - this.s.pool.unref(); -}; - -/** - * Figure out if the server is connected - * @method - * @return {boolean} - */ -Server.prototype.isConnected = function() { - if (!this.s.pool) return false; - return this.s.pool.isConnected(); -}; - -/** - * Figure out if the server instance was destroyed by calling destroy - * @method - * @return {boolean} - */ -Server.prototype.isDestroyed = function() { - if (!this.s.pool) return false; - return this.s.pool.isDestroyed(); -}; - -function basicWriteValidations(self) { - if (!self.s.pool) return new MongoError('server instance is not connected'); - if (self.s.pool.isDestroyed()) return new 
MongoError('server instance pool was destroyed'); -} - -function basicReadValidations(self, options) { - basicWriteValidations(self, options); - - if (options.readPreference && !(options.readPreference instanceof ReadPreference)) { - throw new Error('readPreference must be an instance of ReadPreference'); - } -} - -/** - * Execute a command - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command hash - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.checkKeys=false] Specify if the bson parser should validate keys. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {Boolean} [options.fullResult=false] Return the full envelope instead of just the result document. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Server.prototype.command = function(ns, cmd, options, callback) { - var self = this; - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - var result = basicReadValidations(self, options); - if (result) return callback(result); - - // Clone the options - options = Object.assign({}, options, { wireProtocolCommand: false }); - - // Debug log - if (self.s.logger.isDebug()) - self.s.logger.debug( - f( - 'executing command [%s] against %s', - JSON.stringify({ - ns: ns, - cmd: cmd, - options: debugOptions(debugFields, options) - }), - self.name - ) - ); - - // If we are not connected or have a disconnectHandler specified - if (disconnectHandler(self, 'command', ns, cmd, options, callback)) return; - - // error if collation not supported - if (collationNotSupported(this, cmd)) { - return callback(new MongoError(`server ${this.name} does not support collation`)); - } - - wireProtocol.command(self, ns, cmd, options, callback); -}; - -/** - * Execute a query against the server - * - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cmd The command document for the query - * @param {object} options Optional settings - * @param {function} callback - */ -Server.prototype.query = function(ns, cmd, cursorState, options, callback) { - wireProtocol.query(this, ns, cmd, cursorState, options, callback); -}; - -/** - * Execute a `getMore` against the server - * - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cursorState State data associated with the cursor calling this method - * @param {object} options Optional settings - * @param {function} callback - */ -Server.prototype.getMore = function(ns, cursorState, batchSize, options, callback) { - wireProtocol.getMore(this, ns, cursorState, batchSize, options, callback); -}; - -/** - * Execute a `killCursors` command against the server - * - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object} cursorState State data associated with the cursor calling this method - * @param {function} callback - */ -Server.prototype.killCursors = function(ns, cursorState, callback) { - wireProtocol.killCursors(this, ns, cursorState, callback); -}; - -/** - * Insert one or more documents - * @method - * @param {string} ns The MongoDB 
fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of documents to insert - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Server.prototype.insert = function(ns, ops, options, callback) { - var self = this; - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - var result = basicWriteValidations(self, options); - if (result) return callback(result); - - // If we are not connected or have a disconnectHandler specified - if (disconnectHandler(self, 'insert', ns, ops, options, callback)) return; - - // Setup the docs as an array - ops = Array.isArray(ops) ? ops : [ops]; - - // Execute write - return wireProtocol.insert(self, ns, ops, options, callback); -}; - -/** - * Perform one or more update operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of updates - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Server.prototype.update = function(ns, ops, options, callback) { - var self = this; - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - var result = basicWriteValidations(self, options); - if (result) return callback(result); - - // If we are not connected or have a disconnectHandler specified - if (disconnectHandler(self, 'update', ns, ops, options, callback)) return; - - // error if collation not supported - if (collationNotSupported(this, options)) { - return callback(new MongoError(`server ${this.name} does not support collation`)); - } - - // Setup the docs as an array - ops = Array.isArray(ops) ? ops : [ops]; - // Execute write - return wireProtocol.update(self, ns, ops, options, callback); -}; - -/** - * Perform one or more remove operations - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {array} ops An array of removes - * @param {boolean} [options.ordered=true] Execute in order or out of order - * @param {object} [options.writeConcern={}] Write concern for the operation - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. 
- * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {opResultCallback} callback A callback function - */ -Server.prototype.remove = function(ns, ops, options, callback) { - var self = this; - if (typeof options === 'function') { - (callback = options), (options = {}), (options = options || {}); - } - - var result = basicWriteValidations(self, options); - if (result) return callback(result); - - // If we are not connected or have a disconnectHandler specified - if (disconnectHandler(self, 'remove', ns, ops, options, callback)) return; - - // error if collation not supported - if (collationNotSupported(this, options)) { - return callback(new MongoError(`server ${this.name} does not support collation`)); - } - - // Setup the docs as an array - ops = Array.isArray(ops) ? ops : [ops]; - // Execute write - return wireProtocol.remove(self, ns, ops, options, callback); -}; - -/** - * Get a new cursor - * @method - * @param {string} ns The MongoDB fully qualified namespace (ex: db1.collection1) - * @param {object|Long} cmd Can be either a command returning a cursor or a cursorId - * @param {object} [options] Options for the cursor - * @param {object} [options.batchSize=0] Batchsize for the operation - * @param {array} [options.documents=[]] Initial documents list for cursor - * @param {ReadPreference} [options.readPreference] Specify read preference if command supports it - * @param {Boolean} [options.serializeFunctions=false] Specify if functions on an object should be serialized. - * @param {Boolean} [options.ignoreUndefined=false] Specify if the BSON serializer should ignore undefined fields. - * @param {ClientSession} [options.session=null] Session to use for the operation - * @param {object} [options.topology] The internal topology of the created cursor - * @returns {Cursor} - */ -Server.prototype.cursor = function(ns, cmd, options) { - options = options || {}; - const topology = options.topology || this; - - // Set up final cursor type - var FinalCursor = options.cursorFactory || this.s.Cursor; - - // Return the cursor - return new FinalCursor(topology, ns, cmd, options); -}; - -/** - * Compare two server instances - * @method - * @param {Server} server Server to compare equality against - * @return {boolean} - */ -Server.prototype.equals = function(server) { - if (typeof server === 'string') return this.name.toLowerCase() === server.toLowerCase(); - if (server.name) return this.name.toLowerCase() === server.name.toLowerCase(); - return false; -}; - -/** - * All raw connections - * @method - * @return {Connection[]} - */ -Server.prototype.connections = function() { - return this.s.pool.allConnections(); -}; - -/** - * Selects a server - * @method - * @param {function} selector Unused - * @param {ReadPreference} [options.readPreference] Unused - * @param {ClientSession} [options.session] Unused - * @return {Server} - */ -Server.prototype.selectServer = function(selector, options, callback) { - if (typeof selector === 'function' && typeof callback === 'undefined') - (callback = selector), (selector = undefined), (options = {}); - if (typeof options === 'function') - (callback = options), (options = selector), (selector = undefined); - - callback(null, this); -}; - -var listeners = ['close', 'error', 'timeout', 'parseError', 'connect']; - -/** - * Destroy the server connection - * @method - * @param {boolean} [options.emitClose=false] Emit close event on destroy - * @param {boolean} [options.emitDestroy=false] Emit destroy event on destroy - * @param 
{boolean} [options.force=false] Force destroy the pool - */ -Server.prototype.destroy = function(options, callback) { - if (this._destroyed) { - if (typeof callback === 'function') callback(null, null); - return; - } - - if (typeof options === 'function') { - callback = options; - options = {}; - } - - options = options || {}; - var self = this; - - // Set the connections - if (serverAccounting) delete servers[this.id]; - - // Destroy the monitoring process if any - if (this.monitoringProcessId) { - clearTimeout(this.monitoringProcessId); - } - - // No pool, return - if (!self.s.pool) { - this._destroyed = true; - if (typeof callback === 'function') callback(null, null); - return; - } - - // Emit close event - if (options.emitClose) { - self.emit('close', self); - } - - // Emit destroy event - if (options.emitDestroy) { - self.emit('destroy', self); - } - - // Remove all listeners - listeners.forEach(function(event) { - self.s.pool.removeAllListeners(event); - }); - - // Emit opening server event - if (self.listeners('serverClosed').length > 0) - self.emit('serverClosed', { topologyId: topologyId(self), address: self.name }); - - // Emit toplogy opening event if not in topology - if (self.listeners('topologyClosed').length > 0 && !self.s.inTopology) { - self.emit('topologyClosed', { topologyId: topologyId(self) }); - } - - if (self.s.logger.isDebug()) { - self.s.logger.debug(f('destroy called on server %s', self.name)); - } - - // Destroy the pool - this.s.pool.destroy(options.force, callback); - this._destroyed = true; -}; - -/** - * A server connect event, used to verify that the connection is up and running - * - * @event Server#connect - * @type {Server} - */ - -/** - * A server reconnect event, used to verify that the server topology has reconnected - * - * @event Server#reconnect - * @type {Server} - */ - -/** - * A server opening SDAM monitoring event - * - * @event Server#serverOpening - * @type {object} - */ - -/** - * A server closed SDAM monitoring event - * - * @event Server#serverClosed - * @type {object} - */ - -/** - * A server description SDAM change monitoring event - * - * @event Server#serverDescriptionChanged - * @type {object} - */ - -/** - * A topology open SDAM event - * - * @event Server#topologyOpening - * @type {object} - */ - -/** - * A topology closed SDAM event - * - * @event Server#topologyClosed - * @type {object} - */ - -/** - * A topology structure SDAM change event - * - * @event Server#topologyDescriptionChanged - * @type {object} - */ - -/** - * Server reconnect failed - * - * @event Server#reconnectFailed - * @type {Error} - */ - -/** - * Server connection pool closed - * - * @event Server#close - * @type {object} - */ - -/** - * Server connection pool caused an error - * - * @event Server#error - * @type {Error} - */ - -/** - * Server destroyed was called - * - * @event Server#destroy - * @type {Server} - */ - -module.exports = Server; diff --git a/lib/core/utils.js b/lib/core/utils.js index f4a8222bf7..1542c71aa4 100644 --- a/lib/core/utils.js +++ b/lib/core/utils.js @@ -158,10 +158,6 @@ function eachAsync(arr, eachFn, callback) { } } -function isUnifiedTopology(topology) { - return topology.description != null; -} - function arrayStrictEqual(arr, arr2) { if (!Array.isArray(arr) || !Array.isArray(arr2)) { return false; @@ -265,7 +261,6 @@ module.exports = { maxWireVersion, isPromiseLike, eachAsync, - isUnifiedTopology, arrayStrictEqual, tagsStrictEqual, errorStrictEqual, diff --git a/lib/operations/connect.js b/lib/operations/connect.js index 
c671f34f31..d1a1fb9e54 100644 --- a/lib/operations/connect.js +++ b/lib/operations/connect.js @@ -1,28 +1,18 @@ 'use strict'; -const deprecate = require('util').deprecate; const Logger = require('../core').Logger; const MongoCredentials = require('../core').MongoCredentials; const MongoError = require('../core').MongoError; -const Mongos = require('../topologies/mongos'); const NativeTopology = require('../topologies/native_topology'); -const parse = require('../core').parseConnectionString; +const parseConnectionString = require('../connection_string').parseConnectionString; const ReadConcern = require('../read_concern'); const ReadPreference = require('../core').ReadPreference; -const ReplSet = require('../topologies/replset'); -const Server = require('../topologies/server'); const ServerSessionPool = require('../core').Sessions.ServerSessionPool; const emitDeprecationWarning = require('../utils').emitDeprecationWarning; const fs = require('fs'); const BSON = require('../core/connection/utils').retrieveBSON(); const CMAP_EVENT_NAMES = require('../cmap/events').CMAP_EVENT_NAMES; -const legacyParse = deprecate( - require('../url_parser'), - 'current URL string parser is deprecated, and will be removed in a future version. ' + - 'To use the new parser, pass option { useNewUrlParser: true } to MongoClient.connect.' -); - const AUTH_MECHANISM_INTERNAL_MAP = { DEFAULT: 'default', PLAIN: 'plain', @@ -200,22 +190,12 @@ function connect(mongoClient, url, options, callback) { let didRequestAuthentication = false; const logger = Logger('MongoClient', options); - // Did we pass in a Server/ReplSet/Mongos - if (url instanceof Server || url instanceof ReplSet || url instanceof Mongos) { - return connectWithUrl(mongoClient, url, options, connectCallback); - } - - const useNewUrlParser = options.useNewUrlParser !== false; - - const parseFn = useNewUrlParser ? parse : legacyParse; - const transform = useNewUrlParser ? 
transformUrlOptions : legacyTransformUrlOptions; - - parseFn(url, options, (err, _object) => { + parseConnectionString(url, options, (err, _object) => { // Do not attempt to connect if parsing error if (err) return callback(err); // Flatten - const object = transform(_object); + const object = transformUrlOptions(_object); // Parse the string const _finalOptions = createUnifiedOptions(object, options); @@ -256,7 +236,7 @@ function connect(mongoClient, url, options, callback) { } } - return createTopology(mongoClient, 'unified', _finalOptions, connectCallback); + return createTopology(mongoClient, _finalOptions, connectCallback); }); function connectCallback(err, topology) { @@ -279,42 +259,6 @@ function connect(mongoClient, url, options, callback) { } } -function connectWithUrl(mongoClient, url, options, connectCallback) { - // Set the topology - assignTopology(mongoClient, url); - - // Add listeners - addListeners(mongoClient, url); - - // Propagate the events to the client - relayEvents(mongoClient, url); - - let finalOptions = Object.assign({}, options); - - // If we have a readPreference passed in by the db options, convert it from a string - if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') { - finalOptions.readPreference = new ReadPreference( - options.readPreference || options.read_preference - ); - } - - const isDoingAuth = finalOptions.user || finalOptions.password || finalOptions.authMechanism; - if (isDoingAuth && !finalOptions.credentials) { - try { - finalOptions.credentials = generateCredentials( - mongoClient, - finalOptions.user, - finalOptions.password, - finalOptions - ); - } catch (err) { - return connectCallback(err, url); - } - } - - return url.connect(finalOptions, connectCallback); -} - function createListener(mongoClient, event) { const eventSet = new Set(['all', 'fullsetup', 'open', 'reconnect']); return (v1, v2) => { @@ -350,15 +294,16 @@ function registerDeprecatedEventNotifiers(client) { }); } -function createTopology(mongoClient, topologyType, options, callback) { +function createTopology(mongoClient, options, callback) { // Pass in the promise library options.promiseLibrary = mongoClient.s.promiseLibrary; - const translationOptions = {}; - if (topologyType === 'unified') translationOptions.createServers = false; + const translationOptions = { + createServers: false + }; // Set default options - const servers = translateOptions(options, translationOptions); + translateOptions(options, translationOptions); // determine CSFLE support if (options.autoEncryption != null) { @@ -417,15 +362,8 @@ function createTopology(mongoClient, topologyType, options, callback) { } // Create the topology - let topology; - if (topologyType === 'mongos') { - topology = new Mongos(servers, options); - } else if (topologyType === 'replicaset') { - topology = new ReplSet(servers, options); - } else if (topologyType === 'unified') { - topology = new NativeTopology(options.servers, options); - registerDeprecatedEventNotifiers(mongoClient); - } + const topology = new NativeTopology(options.servers, options); + registerDeprecatedEventNotifiers(mongoClient); // Add listeners addListeners(mongoClient, topology); @@ -543,10 +481,6 @@ function generateCredentials(client, username, password, options) { }); } -function legacyTransformUrlOptions(object) { - return mergeOptions(createUnifiedOptions({}, object), object, false); -} - function mergeOptions(target, source, flatten) { for (const name in source) { if (source[name] && typeof source[name] === 
'object' && flatten) { @@ -636,9 +570,7 @@ function transformUrlOptions(_object) { return object; } -function translateOptions(options, translationOptions) { - translationOptions = Object.assign({}, { createServers: true }, translationOptions); - +function translateOptions(options) { // If we have a readPreference passed in by the db options if (typeof options.readPreference === 'string' || typeof options.read_preference === 'string') { options.readPreference = new ReadPreference(options.readPreference || options.read_preference); @@ -657,17 +589,6 @@ function translateOptions(options, translationOptions) { // Set the socket and connection timeouts if (options.socketTimeoutMS == null) options.socketTimeoutMS = 360000; if (options.connectTimeoutMS == null) options.connectTimeoutMS = 10000; - - if (!translationOptions.createServers) { - return; - } - - // Create server instances - return options.servers.map(serverObj => { - return serverObj.domain_socket - ? new Server(serverObj.domain_socket, 27017, options) - : new Server(serverObj.host, serverObj.port, options); - }); } module.exports = { validOptions, connect }; diff --git a/lib/operations/execute_operation.js b/lib/operations/execute_operation.js index 80d57857e8..c71f1bbcf9 100644 --- a/lib/operations/execute_operation.js +++ b/lib/operations/execute_operation.js @@ -6,7 +6,6 @@ const OperationBase = require('./operation').OperationBase; const ReadPreference = require('../core/topologies/read_preference'); const isRetryableError = require('../core/error').isRetryableError; const maxWireVersion = require('../core/utils').maxWireVersion; -const isUnifiedTopology = require('../core/utils').isUnifiedTopology; /** * Executes the given operation with provided arguments. @@ -30,7 +29,7 @@ function executeOperation(topology, operation, callback) { throw new TypeError('This method requires a valid operation instance'); } - if (isUnifiedTopology(topology) && topology.shouldCheckForSessionSupport()) { + if (topology.shouldCheckForSessionSupport()) { return selectServerForSessionSupport(topology, operation, callback); } diff --git a/lib/topologies/mongos.js b/lib/topologies/mongos.js deleted file mode 100644 index 10e66d2151..0000000000 --- a/lib/topologies/mongos.js +++ /dev/null @@ -1,445 +0,0 @@ -'use strict'; - -const TopologyBase = require('./topology_base').TopologyBase; -const MongoError = require('../core').MongoError; -const CMongos = require('../core').Mongos; -const Cursor = require('../cursor'); -const Server = require('./server'); -const Store = require('./topology_base').Store; -const MAX_JS_INT = require('../utils').MAX_JS_INT; -const translateOptions = require('../utils').translateOptions; -const filterOptions = require('../utils').filterOptions; -const mergeOptions = require('../utils').mergeOptions; - -/** - * @fileOverview The **Mongos** class is a class that represents a Mongos Proxy topology and is - * used to construct connections. 
- * - * **Mongos Should not be used, use MongoClient.connect** - */ - -// Allowed parameters -var legalOptionNames = [ - 'ha', - 'haInterval', - 'acceptableLatencyMS', - 'poolSize', - 'ssl', - 'checkServerIdentity', - 'sslValidate', - 'sslCA', - 'sslCRL', - 'sslCert', - 'ciphers', - 'ecdhCurve', - 'sslKey', - 'sslPass', - 'socketOptions', - 'bufferMaxEntries', - 'store', - 'auto_reconnect', - 'autoReconnect', - 'emitError', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectTimeoutMS', - 'socketTimeoutMS', - 'loggerLevel', - 'logger', - 'reconnectTries', - 'appname', - 'domainsEnabled', - 'servername', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'promiseLibrary', - 'monitorCommands' -]; - -/** - * Creates a new Mongos instance - * @class - * @deprecated - * @param {Server[]} servers A seedlist of servers participating in the replicaset. - * @param {object} [options] Optional settings. - * @param {booelan} [options.ha=true] Turn on high availability monitoring. - * @param {number} [options.haInterval=5000] Time between each replicaset status check. - * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons. - * @param {number} [options.acceptableLatencyMS=15] Cutoff latency point in MS for MongoS proxy selection - * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support) - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.servername] String containing the server name requested via TLS SNI. - * @param {object} [options.socketOptions] Socket options - * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. 
- * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out - * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @fires Mongos#connect - * @fires Mongos#ha - * @fires Mongos#joined - * @fires Mongos#left - * @fires Mongos#fullsetup - * @fires Mongos#open - * @fires Mongos#close - * @fires Mongos#error - * @fires Mongos#timeout - * @fires Mongos#parseError - * @fires Mongos#commandStarted - * @fires Mongos#commandSucceeded - * @fires Mongos#commandFailed - * @property {string} parserType the parser type used (c++ or js). - * @return {Mongos} a Mongos instance. - */ -class Mongos extends TopologyBase { - constructor(servers, options) { - super(); - - options = options || {}; - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Ensure all the instances are Server - for (var i = 0; i < servers.length; i++) { - if (!(servers[i] instanceof Server)) { - throw MongoError.create({ - message: 'all seed list instances must be of the Server type', - driver: true - }); - } - } - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Build seed list - var seedlist = servers.map(function(x) { - return { host: x.host, port: x.port }; - }); - - // Get the reconnect option - var reconnect = typeof options.auto_reconnect === 'boolean' ? options.auto_reconnect : true; - reconnect = typeof options.autoReconnect === 'boolean' ? options.autoReconnect : reconnect; - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: reconnect, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? 
options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Internal state - this.s = { - // Create the Mongos - coreTopology: new CMongos(seedlist, clonedOptions), - // Server capabilities - sCapabilities: null, - // Debug turned on - debug: clonedOptions.debug, - // Store option defaults - storeOptions: storeOptions, - // Cloned options - clonedOptions: clonedOptions, - // Actual store of callbacks - store: store, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: options.promiseLibrary || Promise - }; - } - - // Connect - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = {}; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? _options.bufferMaxEntries : -1; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - // Remove all event handlers - var events = ['timeout', 'error', 'close']; - events.forEach(function(e) { - self.removeListener(e, connectErrorHandler); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - // Force close the topology - self.close(true); - - // Try to callback - try { - callback(err); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Error handler - var reconnectHandler = function() { - self.emit('reconnect'); - self.s.store.execute(); - }; - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Connect handler - var connectHandler = function() { - // Clear out all the current handlers left over - var events = ['timeout', 'error', 'close', 'fullsetup']; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up listeners - self.s.coreTopology.on('timeout', errorHandler('timeout')); - self.s.coreTopology.on('error', errorHandler('error')); - self.s.coreTopology.on('close', errorHandler('close')); - - // Set up serverConfig listeners - self.s.coreTopology.on('fullsetup', function() { - self.emit('fullsetup', self); - }); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Clear out all the current handlers left over - var events = [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed' - ]; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', 
relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - - // Set up listeners - self.s.coreTopology.once('timeout', connectErrorHandler('timeout')); - self.s.coreTopology.once('error', connectErrorHandler('error')); - self.s.coreTopology.once('close', connectErrorHandler('close')); - self.s.coreTopology.once('connect', connectHandler); - // Join and leave events - self.s.coreTopology.on('joined', relay('joined')); - self.s.coreTopology.on('left', relay('left')); - - // Reconnect server - self.s.coreTopology.on('reconnect', reconnectHandler); - - // Start connection - self.s.coreTopology.connect(_options); - } -} - -Object.defineProperty(Mongos.prototype, 'haInterval', { - enumerable: true, - get: function() { - return this.s.coreTopology.s.haInterval; - } -}); - -/** - * A mongos connect event, used to verify that the connection is up and running - * - * @event Mongos#connect - * @type {Mongos} - */ - -/** - * The mongos high availability event - * - * @event Mongos#ha - * @type {function} - * @param {string} type The stage in the high availability event (start|end) - * @param {boolean} data.norepeat This is a repeating high availability process or a single execution only - * @param {number} data.id The id for this high availability request - * @param {object} data.state An object containing the information about the current replicaset - */ - -/** - * A server member left the mongos set - * - * @event Mongos#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the mongos set - * - * @event Mongos#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * Mongos fullsetup event, emitted when all proxies in the topology have been connected to. - * - * @event Mongos#fullsetup - * @type {Mongos} - */ - -/** - * Mongos open event, emitted when mongos can start processing commands. - * - * @event Mongos#open - * @type {Mongos} - */ - -/** - * Mongos close event - * - * @event Mongos#close - * @type {object} - */ - -/** - * Mongos error event, emitted if there is an error listener. 
- * - * @event Mongos#error - * @type {MongoError} - */ - -/** - * Mongos timeout event - * - * @event Mongos#timeout - * @type {object} - */ - -/** - * Mongos parseError event - * - * @event Mongos#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Mongos#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Mongos#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Mongos#commandFailed - * @type {object} - */ - -module.exports = Mongos; diff --git a/lib/topologies/replset.js b/lib/topologies/replset.js deleted file mode 100644 index 69df26d19e..0000000000 --- a/lib/topologies/replset.js +++ /dev/null @@ -1,489 +0,0 @@ -'use strict'; - -const Server = require('./server'); -const Cursor = require('../cursor'); -const MongoError = require('../core').MongoError; -const TopologyBase = require('./topology_base').TopologyBase; -const Store = require('./topology_base').Store; -const CReplSet = require('../core').ReplSet; -const MAX_JS_INT = require('../utils').MAX_JS_INT; -const translateOptions = require('../utils').translateOptions; -const filterOptions = require('../utils').filterOptions; -const mergeOptions = require('../utils').mergeOptions; - -/** - * @fileOverview The **ReplSet** class is a class that represents a Replicaset topology and is - * used to construct connections. - * - * **ReplSet Should not be used, use MongoClient.connect** - */ - -// Allowed parameters -var legalOptionNames = [ - 'ha', - 'haInterval', - 'replicaSet', - 'rs_name', - 'secondaryAcceptableLatencyMS', - 'connectWithNoPrimary', - 'poolSize', - 'ssl', - 'checkServerIdentity', - 'sslValidate', - 'sslCA', - 'sslCert', - 'ciphers', - 'ecdhCurve', - 'sslCRL', - 'sslKey', - 'sslPass', - 'socketOptions', - 'bufferMaxEntries', - 'store', - 'auto_reconnect', - 'autoReconnect', - 'emitError', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectTimeoutMS', - 'socketTimeoutMS', - 'strategy', - 'debug', - 'family', - 'loggerLevel', - 'logger', - 'reconnectTries', - 'appname', - 'domainsEnabled', - 'servername', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'maxStalenessSeconds', - 'promiseLibrary', - 'minSize', - 'monitorCommands' -]; - -/** - * Creates a new ReplSet instance - * @class - * @deprecated - * @param {Server[]} servers A seedlist of servers participating in the replicaset. - * @param {object} [options] Optional settings. - * @param {boolean} [options.ha=true] Turn on high availability monitoring. - * @param {number} [options.haInterval=10000] Time between each replicaset status check. - * @param {string} [options.replicaSet] The name of the replicaset to connect to. - * @param {number} [options.secondaryAcceptableLatencyMS=15] Sets the range of servers to pick when using NEAREST (lowest ping ms + the latency fence, ex: range of 1 to (1 + 15) ms) - * @param {boolean} [options.connectWithNoPrimary=false] Sets if the driver should connect even if no primary is available - * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons. 
- * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support) - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. - * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher. - * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.servername] String containing the server name requested via TLS SNI. - * @param {object} [options.socketOptions] Socket options - * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. - * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out - * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {number} [options.maxStalenessSeconds=undefined] The max staleness to secondary reads (values under 10 seconds cannot be guaranteed); - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @fires ReplSet#connect - * @fires ReplSet#ha - * @fires ReplSet#joined - * @fires ReplSet#left - * @fires ReplSet#fullsetup - * @fires ReplSet#open - * @fires ReplSet#close - * @fires ReplSet#error - * @fires ReplSet#timeout - * @fires ReplSet#parseError - * @fires ReplSet#commandStarted - * @fires ReplSet#commandSucceeded - * @fires ReplSet#commandFailed - * @property {string} parserType the parser type used (c++ or js). - * @return {ReplSet} a ReplSet instance. 
- */ -class ReplSet extends TopologyBase { - constructor(servers, options) { - super(); - - options = options || {}; - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Ensure all the instances are Server - for (var i = 0; i < servers.length; i++) { - if (!(servers[i] instanceof Server)) { - throw MongoError.create({ - message: 'all seed list instances must be of the Server type', - driver: true - }); - } - } - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Build seed list - var seedlist = servers.map(function(x) { - return { host: x.host, port: x.port }; - }); - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: false, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Create the ReplSet - var coreTopology = new CReplSet(seedlist, clonedOptions); - - // Listen to reconnect event - coreTopology.on('reconnect', function() { - self.emit('reconnect'); - store.execute(); - }); - - // Internal state - this.s = { - // Replicaset - coreTopology: coreTopology, - // Server capabilities - sCapabilities: null, - // Debug tag - tag: options.tag, - // Store options - storeOptions: storeOptions, - // Cloned options - clonedOptions: clonedOptions, - // Store - store: store, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: options.promiseLibrary || Promise - }; - - // Debug - if (clonedOptions.debug) { - // Last ismaster - Object.defineProperty(this, 'replset', { - enumerable: true, - get: function() { - return coreTopology; - } - }); - } - } - - // Connect method - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = {}; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? 
_options.bufferMaxEntries : -1; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Clear out all the current handlers left over - var events = [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed', - 'joined', - 'left', - 'ping', - 'ha' - ]; - events.forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Replset events relay - var replsetRelay = function(event) { - return function(t, server) { - self.emit(event, t, server.lastIsMaster(), server); - }; - }; - - // Relay ha - var relayHa = function(t, state) { - self.emit('ha', t, state); - - if (t === 'start') { - self.emit('ha_connect', t, state); - } else if (t === 'end') { - self.emit('ha_ismaster', t, state); - } - }; - - // Set up serverConfig listeners - self.s.coreTopology.on('joined', replsetRelay('joined')); - self.s.coreTopology.on('left', relay('left')); - self.s.coreTopology.on('ping', relay('ping')); - self.s.coreTopology.on('ha', relayHa); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - - self.s.coreTopology.on('fullsetup', function() { - self.emit('fullsetup', self, self); - }); - - self.s.coreTopology.on('all', function() { - self.emit('all', null, self); - }); - - // Connect handler - var connectHandler = function() { - // Set up listeners - self.s.coreTopology.once('timeout', errorHandler('timeout')); - self.s.coreTopology.once('error', errorHandler('error')); - self.s.coreTopology.once('close', errorHandler('close')); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - ['timeout', 'error', 'close'].forEach(function(e) { - self.s.coreTopology.removeListener(e, connectErrorHandler); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - // Destroy the replset - self.s.coreTopology.destroy(); - - // Try to callback - try { - callback(err); - } catch (err) { - if (!self.s.coreTopology.isConnected()) - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Set 
up listeners - self.s.coreTopology.once('timeout', connectErrorHandler('timeout')); - self.s.coreTopology.once('error', connectErrorHandler('error')); - self.s.coreTopology.once('close', connectErrorHandler('close')); - self.s.coreTopology.once('connect', connectHandler); - - // Start connection - self.s.coreTopology.connect(_options); - } - - close(forceClosed, callback) { - ['timeout', 'error', 'close', 'joined', 'left'].forEach(e => this.removeAllListeners(e)); - super.close(forceClosed, callback); - } -} - -Object.defineProperty(ReplSet.prototype, 'haInterval', { - enumerable: true, - get: function() { - return this.s.coreTopology.s.haInterval; - } -}); - -/** - * A replset connect event, used to verify that the connection is up and running - * - * @event ReplSet#connect - * @type {ReplSet} - */ - -/** - * The replset high availability event - * - * @event ReplSet#ha - * @type {function} - * @param {string} type The stage in the high availability event (start|end) - * @param {boolean} data.norepeat This is a repeating high availability process or a single execution only - * @param {number} data.id The id for this high availability request - * @param {object} data.state An object containing the information about the current replicaset - */ - -/** - * A server member left the replicaset - * - * @event ReplSet#left - * @type {function} - * @param {string} type The type of member that left (primary|secondary|arbiter) - * @param {Server} server The server object that left - */ - -/** - * A server member joined the replicaset - * - * @event ReplSet#joined - * @type {function} - * @param {string} type The type of member that joined (primary|secondary|arbiter) - * @param {Server} server The server object that joined - */ - -/** - * ReplSet open event, emitted when replicaset can start processing commands. - * - * @event ReplSet#open - * @type {Replset} - */ - -/** - * ReplSet fullsetup event, emitted when all servers in the topology have been connected to. - * - * @event ReplSet#fullsetup - * @type {Replset} - */ - -/** - * ReplSet close event - * - * @event ReplSet#close - * @type {object} - */ - -/** - * ReplSet error event, emitted if there is an error listener. 
- * - * @event ReplSet#error - * @type {MongoError} - */ - -/** - * ReplSet timeout event - * - * @event ReplSet#timeout - * @type {object} - */ - -/** - * ReplSet parseError event - * - * @event ReplSet#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event ReplSet#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event ReplSet#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event ReplSet#commandFailed - * @type {object} - */ - -module.exports = ReplSet; diff --git a/lib/topologies/server.js b/lib/topologies/server.js deleted file mode 100644 index 3079cb9953..0000000000 --- a/lib/topologies/server.js +++ /dev/null @@ -1,448 +0,0 @@ -'use strict'; - -const CServer = require('../core').Server; -const Cursor = require('../cursor'); -const TopologyBase = require('./topology_base').TopologyBase; -const Store = require('./topology_base').Store; -const MongoError = require('../core').MongoError; -const MAX_JS_INT = require('../utils').MAX_JS_INT; -const translateOptions = require('../utils').translateOptions; -const filterOptions = require('../utils').filterOptions; -const mergeOptions = require('../utils').mergeOptions; - -/** - * @fileOverview The **Server** class is a class that represents a single server topology and is - * used to construct connections. - * - * **Server Should not be used, use MongoClient.connect** - */ - -// Allowed parameters -var legalOptionNames = [ - 'ha', - 'haInterval', - 'acceptableLatencyMS', - 'poolSize', - 'ssl', - 'checkServerIdentity', - 'sslValidate', - 'sslCA', - 'sslCRL', - 'sslCert', - 'ciphers', - 'ecdhCurve', - 'sslKey', - 'sslPass', - 'socketOptions', - 'bufferMaxEntries', - 'store', - 'auto_reconnect', - 'autoReconnect', - 'emitError', - 'keepAlive', - 'keepAliveInitialDelay', - 'noDelay', - 'connectTimeoutMS', - 'socketTimeoutMS', - 'family', - 'loggerLevel', - 'logger', - 'reconnectTries', - 'reconnectInterval', - 'monitoring', - 'appname', - 'domainsEnabled', - 'servername', - 'promoteLongs', - 'promoteValues', - 'promoteBuffers', - 'compression', - 'promiseLibrary', - 'monitorCommands' -]; - -/** - * Creates a new Server instance - * @class - * @deprecated - * @param {string} host The host for the server, can be either an IP4, IP6 or domain socket style host. - * @param {number} [port] The server port if IP4. - * @param {object} [options] Optional settings. - * @param {number} [options.poolSize=5] Number of connections in the connection pool for each server instance, set to 5 as default for legacy reasons. - * @param {boolean} [options.ssl=false] Use ssl connection (needs to have a mongod server with ssl support) - * @param {boolean} [options.sslValidate=false] Validate mongod server certificate against ca (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {boolean|function} [options.checkServerIdentity=true] Ensure we check server identify during SSL, set to false to disable checking. Only works for Node 0.12.x or higher. You can pass in a boolean or your own checkServerIdentity override function. 
- * @param {array} [options.sslCA] Array of valid certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {array} [options.sslCRL] Array of revocation certificates either as Buffers or Strings (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslCert] String or buffer containing the certificate we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.ciphers] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {string} [options.ecdhCurve] Passed directly through to tls.createSecureContext. See https://nodejs.org/dist/latest-v9.x/docs/api/tls.html#tls_tls_createsecurecontext_options for more info. - * @param {(Buffer|string)} [options.sslKey] String or buffer containing the certificate private key we wish to present (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {(Buffer|string)} [options.sslPass] String or buffer containing the certificate password (needs to have a mongod server with ssl support, 2.4 or higher) - * @param {string} [options.servername] String containing the server name requested via TLS SNI. - * @param {object} [options.socketOptions] Socket options - * @param {boolean} [options.socketOptions.autoReconnect=true] Reconnect on error. - * @param {boolean} [options.socketOptions.noDelay=true] TCP Socket NoDelay option. - * @param {boolean} [options.socketOptions.keepAlive=true] TCP Connection keep alive enabled - * @param {number} [options.socketOptions.keepAliveInitialDelay=30000] The number of milliseconds to wait before initiating keepAlive on the TCP socket - * @param {number} [options.socketOptions.connectTimeoutMS=10000] How long to wait for a connection to be established before timing out - * @param {number} [options.socketOptions.socketTimeoutMS=360000] How long a send or receive on a socket can take before timing out - * @param {number} [options.reconnectTries=30] Server attempt to reconnect #times - * @param {number} [options.reconnectInterval=1000] Server will wait # milliseconds between retries - * @param {boolean} [options.monitoring=true] Triggers the server instance to call ismaster - * @param {number} [options.haInterval=10000] The interval of calling ismaster when monitoring is enabled. - * @param {boolean} [options.domainsEnabled=false] Enable the wrapping of the callback in the current domain, disabled by default to avoid perf hit. - * @param {boolean} [options.monitorCommands=false] Enable command monitoring for this topology - * @fires Server#connect - * @fires Server#close - * @fires Server#error - * @fires Server#timeout - * @fires Server#parseError - * @fires Server#reconnect - * @fires Server#commandStarted - * @fires Server#commandSucceeded - * @fires Server#commandFailed - * @property {string} parserType the parser type used (c++ or js). - * @return {Server} a Server instance. - */ -class Server extends TopologyBase { - constructor(host, port, options) { - super(); - var self = this; - - // Filter the options - options = filterOptions(options, legalOptionNames); - - // Promise library - const promiseLibrary = options.promiseLibrary; - - // Stored options - var storeOptions = { - force: false, - bufferMaxEntries: - typeof options.bufferMaxEntries === 'number' ? 
options.bufferMaxEntries : MAX_JS_INT - }; - - // Shared global store - var store = options.store || new Store(self, storeOptions); - - // Detect if we have a socket connection - if (host.indexOf('/') !== -1) { - if (port != null && typeof port === 'object') { - options = port; - port = null; - } - } else if (port == null) { - throw MongoError.create({ message: 'port must be specified', driver: true }); - } - - // Get the reconnect option - var reconnect = typeof options.auto_reconnect === 'boolean' ? options.auto_reconnect : true; - reconnect = typeof options.autoReconnect === 'boolean' ? options.autoReconnect : reconnect; - - // Clone options - var clonedOptions = mergeOptions( - {}, - { - host: host, - port: port, - disconnectHandler: store, - cursorFactory: Cursor, - reconnect: reconnect, - emitError: typeof options.emitError === 'boolean' ? options.emitError : true, - size: typeof options.poolSize === 'number' ? options.poolSize : 5, - monitorCommands: - typeof options.monitorCommands === 'boolean' ? options.monitorCommands : false - } - ); - - // Translate any SSL options and other connectivity options - clonedOptions = translateOptions(clonedOptions, options); - - // Socket options - var socketOptions = - options.socketOptions && Object.keys(options.socketOptions).length > 0 - ? options.socketOptions - : options; - - // Translate all the options to the core types - clonedOptions = translateOptions(clonedOptions, socketOptions); - - // Define the internal properties - this.s = { - // Create an instance of a server instance from core module - coreTopology: new CServer(clonedOptions), - // Server capabilities - sCapabilities: null, - // Cloned options - clonedOptions: clonedOptions, - // Reconnect - reconnect: clonedOptions.reconnect, - // Emit error - emitError: clonedOptions.emitError, - // Pool size - poolSize: clonedOptions.size, - // Store Options - storeOptions: storeOptions, - // Store - store: store, - // Host - host: host, - // Port - port: port, - // Options - options: options, - // Server Session Pool - sessionPool: null, - // Active client sessions - sessions: new Set(), - // Promise library - promiseLibrary: promiseLibrary || Promise - }; - } - - // Connect - connect(_options, callback) { - var self = this; - if ('function' === typeof _options) (callback = _options), (_options = {}); - if (_options == null) _options = this.s.clonedOptions; - if (!('function' === typeof callback)) callback = null; - _options = Object.assign({}, this.s.clonedOptions, _options); - self.s.options = _options; - - // Update bufferMaxEntries - self.s.storeOptions.bufferMaxEntries = - typeof _options.bufferMaxEntries === 'number' ? 
_options.bufferMaxEntries : -1; - - // Error handler - var connectErrorHandler = function() { - return function(err) { - // Remove all event handlers - var events = ['timeout', 'error', 'close']; - events.forEach(function(e) { - self.s.coreTopology.removeListener(e, connectHandlers[e]); - }); - - self.s.coreTopology.removeListener('connect', connectErrorHandler); - - // Try to callback - try { - callback(err); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - }; - - // Actual handler - var errorHandler = function(event) { - return function(err) { - if (event !== 'error') { - self.emit(event, err); - } - }; - }; - - // Error handler - var reconnectHandler = function() { - self.emit('reconnect', self); - self.s.store.execute(); - }; - - // Reconnect failed - var reconnectFailedHandler = function(err) { - self.emit('reconnectFailed', err); - self.s.store.flush(err); - }; - - // Destroy called on topology, perform cleanup - var destroyHandler = function() { - self.s.store.flush(); - }; - - // relay the event - var relay = function(event) { - return function(t, server) { - self.emit(event, t, server); - }; - }; - - // Connect handler - var connectHandler = function() { - // Clear out all the current handlers left over - ['timeout', 'error', 'close', 'destroy'].forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Set up listeners - self.s.coreTopology.on('timeout', errorHandler('timeout')); - self.s.coreTopology.once('error', errorHandler('error')); - self.s.coreTopology.on('close', errorHandler('close')); - // Only called on destroy - self.s.coreTopology.on('destroy', destroyHandler); - - // Emit open event - self.emit('open', null, self); - - // Return correctly - try { - callback(null, self); - } catch (err) { - process.nextTick(function() { - throw err; - }); - } - }; - - // Set up listeners - var connectHandlers = { - timeout: connectErrorHandler('timeout'), - error: connectErrorHandler('error'), - close: connectErrorHandler('close') - }; - - // Clear out all the current handlers left over - [ - 'timeout', - 'error', - 'close', - 'serverOpening', - 'serverDescriptionChanged', - 'serverHeartbeatStarted', - 'serverHeartbeatSucceeded', - 'serverHeartbeatFailed', - 'serverClosed', - 'topologyOpening', - 'topologyClosed', - 'topologyDescriptionChanged', - 'commandStarted', - 'commandSucceeded', - 'commandFailed' - ].forEach(function(e) { - self.s.coreTopology.removeAllListeners(e); - }); - - // Add the event handlers - self.s.coreTopology.once('timeout', connectHandlers.timeout); - self.s.coreTopology.once('error', connectHandlers.error); - self.s.coreTopology.once('close', connectHandlers.close); - self.s.coreTopology.once('connect', connectHandler); - // Reconnect server - self.s.coreTopology.on('reconnect', reconnectHandler); - self.s.coreTopology.on('reconnectFailed', reconnectFailedHandler); - - // Set up SDAM listeners - self.s.coreTopology.on('serverDescriptionChanged', relay('serverDescriptionChanged')); - self.s.coreTopology.on('serverHeartbeatStarted', relay('serverHeartbeatStarted')); - self.s.coreTopology.on('serverHeartbeatSucceeded', relay('serverHeartbeatSucceeded')); - self.s.coreTopology.on('serverHeartbeatFailed', relay('serverHeartbeatFailed')); - self.s.coreTopology.on('serverOpening', relay('serverOpening')); - self.s.coreTopology.on('serverClosed', relay('serverClosed')); - self.s.coreTopology.on('topologyOpening', relay('topologyOpening')); - self.s.coreTopology.on('topologyClosed', relay('topologyClosed')); - 
self.s.coreTopology.on('topologyDescriptionChanged', relay('topologyDescriptionChanged')); - self.s.coreTopology.on('commandStarted', relay('commandStarted')); - self.s.coreTopology.on('commandSucceeded', relay('commandSucceeded')); - self.s.coreTopology.on('commandFailed', relay('commandFailed')); - self.s.coreTopology.on('attemptReconnect', relay('attemptReconnect')); - self.s.coreTopology.on('monitoring', relay('monitoring')); - - // Start connection - self.s.coreTopology.connect(_options); - } -} - -Object.defineProperty(Server.prototype, 'poolSize', { - enumerable: true, - get: function() { - return this.s.coreTopology.connections().length; - } -}); - -Object.defineProperty(Server.prototype, 'autoReconnect', { - enumerable: true, - get: function() { - return this.s.reconnect; - } -}); - -Object.defineProperty(Server.prototype, 'host', { - enumerable: true, - get: function() { - return this.s.host; - } -}); - -Object.defineProperty(Server.prototype, 'port', { - enumerable: true, - get: function() { - return this.s.port; - } -}); - -/** - * Server connect event - * - * @event Server#connect - * @type {object} - */ - -/** - * Server close event - * - * @event Server#close - * @type {object} - */ - -/** - * Server reconnect event - * - * @event Server#reconnect - * @type {object} - */ - -/** - * Server error event - * - * @event Server#error - * @type {MongoError} - */ - -/** - * Server timeout event - * - * @event Server#timeout - * @type {object} - */ - -/** - * Server parseError event - * - * @event Server#parseError - * @type {object} - */ - -/** - * An event emitted indicating a command was started, if command monitoring is enabled - * - * @event Server#commandStarted - * @type {object} - */ - -/** - * An event emitted indicating a command succeeded, if command monitoring is enabled - * - * @event Server#commandSucceeded - * @type {object} - */ - -/** - * An event emitted indicating a command failed, if command monitoring is enabled - * - * @event Server#commandFailed - * @type {object} - */ - -module.exports = Server; diff --git a/lib/url_parser.js b/lib/url_parser.js deleted file mode 100644 index c0f10b467d..0000000000 --- a/lib/url_parser.js +++ /dev/null @@ -1,623 +0,0 @@ -'use strict'; - -const ReadPreference = require('./core').ReadPreference, - parser = require('url'), - f = require('util').format, - Logger = require('./core').Logger, - dns = require('dns'); -const ReadConcern = require('./read_concern'); - -module.exports = function(url, options, callback) { - if (typeof options === 'function') (callback = options), (options = {}); - options = options || {}; - - let result; - try { - result = parser.parse(url, true); - } catch (e) { - return callback(new Error('URL malformed, cannot be parsed')); - } - - if (result.protocol !== 'mongodb:' && result.protocol !== 'mongodb+srv:') { - return callback(new Error('Invalid schema, expected `mongodb` or `mongodb+srv`')); - } - - if (result.protocol === 'mongodb:') { - return parseHandler(url, options, callback); - } - - // Otherwise parse this as an SRV record - if (result.hostname.split('.').length < 3) { - return callback(new Error('URI does not have hostname, domain name and tld')); - } - - result.domainLength = result.hostname.split('.').length; - - if (result.pathname && result.pathname.match(',')) { - return callback(new Error('Invalid URI, cannot contain multiple hostnames')); - } - - if (result.port) { - return callback(new Error('Ports not accepted with `mongodb+srv` URIs')); - } - - let srvAddress = 
`_mongodb._tcp.${result.host}`; - dns.resolveSrv(srvAddress, function(err, addresses) { - if (err) return callback(err); - - if (addresses.length === 0) { - return callback(new Error('No addresses found at host')); - } - - for (let i = 0; i < addresses.length; i++) { - if (!matchesParentDomain(addresses[i].name, result.hostname, result.domainLength)) { - return callback(new Error('Server record does not share hostname with parent URI')); - } - } - - let base = result.auth ? `mongodb://${result.auth}@` : `mongodb://`; - let connectionStrings = addresses.map(function(address, i) { - if (i === 0) return `${base}${address.name}:${address.port}`; - else return `${address.name}:${address.port}`; - }); - - let connectionString = connectionStrings.join(',') + '/'; - let connectionStringOptions = []; - - // Add the default database if needed - if (result.path) { - let defaultDb = result.path.slice(1); - if (defaultDb.indexOf('?') !== -1) { - defaultDb = defaultDb.slice(0, defaultDb.indexOf('?')); - } - - connectionString += defaultDb; - } - - // Default to SSL true - if (!options.ssl && !result.search) { - connectionStringOptions.push('ssl=true'); - } else if (!options.ssl && result.search && !result.search.match('ssl')) { - connectionStringOptions.push('ssl=true'); - } - - // Keep original uri options - if (result.search) { - connectionStringOptions.push(result.search.replace('?', '')); - } - - dns.resolveTxt(result.host, function(err, record) { - if (err && err.code !== 'ENODATA') return callback(err); - if (err && err.code === 'ENODATA') record = null; - - if (record) { - if (record.length > 1) { - return callback(new Error('Multiple text records not allowed')); - } - - record = record[0]; - if (record.length > 1) record = record.join(''); - else record = record[0]; - - if (!record.includes('authSource') && !record.includes('replicaSet')) { - return callback(new Error('Text record must only set `authSource` or `replicaSet`')); - } - - connectionStringOptions.push(record); - } - - // Add any options to the connection string - if (connectionStringOptions.length) { - connectionString += `?${connectionStringOptions.join('&')}`; - } - - parseHandler(connectionString, options, callback); - }); - }); -}; - -function matchesParentDomain(srvAddress, parentDomain) { - let regex = /^.*?\./; - let srv = `.${srvAddress.replace(regex, '')}`; - let parent = `.${parentDomain.replace(regex, '')}`; - if (srv.endsWith(parent)) return true; - else return false; -} - -function parseHandler(address, options, callback) { - let result, err; - try { - result = parseConnectionString(address, options); - } catch (e) { - err = e; - } - - return err ? 
callback(err, null) : callback(null, result); -} - -function parseConnectionString(url, options) { - // Variables - let connection_part = ''; - let auth_part = ''; - let query_string_part = ''; - let dbName = 'admin'; - - // Url parser result - let result = parser.parse(url, true); - if ((result.hostname == null || result.hostname === '') && url.indexOf('.sock') === -1) { - throw new Error('No hostname or hostnames provided in connection string'); - } - - if (result.port === '0') { - throw new Error('Invalid port (zero) with hostname'); - } - - if (!isNaN(parseInt(result.port, 10)) && parseInt(result.port, 10) > 65535) { - throw new Error('Invalid port (larger than 65535) with hostname'); - } - - if ( - result.path && - result.path.length > 0 && - result.path[0] !== '/' && - url.indexOf('.sock') === -1 - ) { - throw new Error('Missing delimiting slash between hosts and options'); - } - - if (result.query) { - for (let name in result.query) { - if (name.indexOf('::') !== -1) { - throw new Error('Double colon in host identifier'); - } - - if (result.query[name] === '') { - throw new Error('Query parameter ' + name + ' is an incomplete value pair'); - } - } - } - - if (result.auth) { - let parts = result.auth.split(':'); - if (url.indexOf(result.auth) !== -1 && parts.length > 2) { - throw new Error('Username with password containing an unescaped colon'); - } - - if (url.indexOf(result.auth) !== -1 && result.auth.indexOf('@') !== -1) { - throw new Error('Username containing an unescaped at-sign'); - } - } - - // Remove query - let clean = url.split('?').shift(); - - // Extract the list of hosts - let strings = clean.split(','); - let hosts = []; - - for (let i = 0; i < strings.length; i++) { - let hostString = strings[i]; - - if (hostString.indexOf('mongodb') !== -1) { - if (hostString.indexOf('@') !== -1) { - hosts.push(hostString.split('@').pop()); - } else { - hosts.push(hostString.substr('mongodb://'.length)); - } - } else if (hostString.indexOf('/') !== -1) { - hosts.push(hostString.split('/').shift()); - } else if (hostString.indexOf('/') === -1) { - hosts.push(hostString.trim()); - } - } - - for (let i = 0; i < hosts.length; i++) { - let r = parser.parse(f('mongodb://%s', hosts[i].trim())); - if (r.path && r.path.indexOf('.sock') !== -1) continue; - if (r.path && r.path.indexOf(':') !== -1) { - // Not connecting to a socket so check for an extra slash in the hostname. - // Using String#split as perf is better than match. - if (r.path.split('/').length > 1 && r.path.indexOf('::') === -1) { - throw new Error('Slash in host identifier'); - } else { - throw new Error('Double colon in host identifier'); - } - } - } - - // If we have a ? 
mark cut the query elements off - if (url.indexOf('?') !== -1) { - query_string_part = url.substr(url.indexOf('?') + 1); - connection_part = url.substring('mongodb://'.length, url.indexOf('?')); - } else { - connection_part = url.substring('mongodb://'.length); - } - - // Check if we have auth params - if (connection_part.indexOf('@') !== -1) { - auth_part = connection_part.split('@')[0]; - connection_part = connection_part.split('@')[1]; - } - - // Check there is not more than one unescaped slash - if (connection_part.split('/').length > 2) { - throw new Error( - "Unsupported host '" + - connection_part.split('?')[0] + - "', hosts must be URL encoded and contain at most one unencoded slash" - ); - } - - // Check if the connection string has a db - if (connection_part.indexOf('.sock') !== -1) { - if (connection_part.indexOf('.sock/') !== -1) { - dbName = connection_part.split('.sock/')[1]; - // Check if multiple database names provided, or just an illegal trailing backslash - if (dbName.indexOf('/') !== -1) { - if (dbName.split('/').length === 2 && dbName.split('/')[1].length === 0) { - throw new Error('Illegal trailing backslash after database name'); - } - throw new Error('More than 1 database name in URL'); - } - connection_part = connection_part.split( - '/', - connection_part.indexOf('.sock') + '.sock'.length - ); - } - } else if (connection_part.indexOf('/') !== -1) { - // Check if multiple database names provided, or just an illegal trailing backslash - if (connection_part.split('/').length > 2) { - if (connection_part.split('/')[2].length === 0) { - throw new Error('Illegal trailing backslash after database name'); - } - throw new Error('More than 1 database name in URL'); - } - dbName = connection_part.split('/')[1]; - connection_part = connection_part.split('/')[0]; - } - - // URI decode the host information - connection_part = decodeURIComponent(connection_part); - - // Result object - let object = {}; - - // Pick apart the authentication part of the string - let authPart = auth_part || ''; - let auth = authPart.split(':', 2); - - // Decode the authentication URI components and verify integrity - let user = decodeURIComponent(auth[0]); - if (auth[0] !== encodeURIComponent(user)) { - throw new Error('Username contains an illegal unescaped character'); - } - auth[0] = user; - - if (auth[1]) { - let pass = decodeURIComponent(auth[1]); - if (auth[1] !== encodeURIComponent(pass)) { - throw new Error('Password contains an illegal unescaped character'); - } - auth[1] = pass; - } - - // Add auth to final object if we have 2 elements - if (auth.length === 2) object.auth = { user: auth[0], password: auth[1] }; - // if user provided auth options, use that - if (options && options.auth != null) object.auth = options.auth; - - // Variables used for temporary storage - let hostPart; - let urlOptions; - let servers; - let compression; - let serverOptions = { socketOptions: {} }; - let dbOptions = { read_preference_tags: [] }; - let replSetServersOptions = { socketOptions: {} }; - let mongosOptions = { socketOptions: {} }; - // Add server options to final object - object.server_options = serverOptions; - object.db_options = dbOptions; - object.rs_options = replSetServersOptions; - object.mongos_options = mongosOptions; - - // Let's check if we are using a domain socket - if (url.match(/\.sock/)) { - // Split out the socket part - let domainSocket = url.substring( - url.indexOf('mongodb://') + 'mongodb://'.length, - url.lastIndexOf('.sock') + '.sock'.length - ); - // Clean out any auth stuff if 
any - if (domainSocket.indexOf('@') !== -1) domainSocket = domainSocket.split('@')[1]; - domainSocket = decodeURIComponent(domainSocket); - servers = [{ domain_socket: domainSocket }]; - } else { - // Split up the db - hostPart = connection_part; - // Deduplicate servers - let deduplicatedServers = {}; - - // Parse all server results - servers = hostPart - .split(',') - .map(function(h) { - let _host, _port, ipv6match; - //check if it matches [IPv6]:port, where the port number is optional - if ((ipv6match = /\[([^\]]+)\](?::(.+))?/.exec(h))) { - _host = ipv6match[1]; - _port = parseInt(ipv6match[2], 10) || 27017; - } else { - //otherwise assume it's IPv4, or plain hostname - let hostPort = h.split(':', 2); - _host = hostPort[0] || 'localhost'; - _port = hostPort[1] != null ? parseInt(hostPort[1], 10) : 27017; - // Check for localhost?safe=true style case - if (_host.indexOf('?') !== -1) _host = _host.split(/\?/)[0]; - } - - // No entry returned for duplicate server - if (deduplicatedServers[_host + '_' + _port]) return null; - deduplicatedServers[_host + '_' + _port] = 1; - - // Return the mapped object - return { host: _host, port: _port }; - }) - .filter(function(x) { - return x != null; - }); - } - - // Get the db name - object.dbName = dbName || 'admin'; - // Split up all the options - urlOptions = (query_string_part || '').split(/[&;]/); - // Ugh, we have to figure out which options go to which constructor manually. - urlOptions.forEach(function(opt) { - if (!opt) return; - var splitOpt = opt.split('='), - name = splitOpt[0], - value = splitOpt[1]; - - // Options implementations - switch (name) { - case 'slaveOk': - case 'slave_ok': - serverOptions.slave_ok = value === 'true'; - dbOptions.slaveOk = value === 'true'; - break; - case 'maxPoolSize': - case 'poolSize': - serverOptions.poolSize = parseInt(value, 10); - replSetServersOptions.poolSize = parseInt(value, 10); - break; - case 'appname': - object.appname = decodeURIComponent(value); - break; - case 'autoReconnect': - case 'auto_reconnect': - serverOptions.auto_reconnect = value === 'true'; - break; - case 'ssl': - if (value === 'prefer') { - serverOptions.ssl = value; - replSetServersOptions.ssl = value; - mongosOptions.ssl = value; - break; - } - serverOptions.ssl = value === 'true'; - replSetServersOptions.ssl = value === 'true'; - mongosOptions.ssl = value === 'true'; - break; - case 'sslValidate': - serverOptions.sslValidate = value === 'true'; - replSetServersOptions.sslValidate = value === 'true'; - mongosOptions.sslValidate = value === 'true'; - break; - case 'replicaSet': - case 'rs_name': - replSetServersOptions.rs_name = value; - break; - case 'reconnectWait': - replSetServersOptions.reconnectWait = parseInt(value, 10); - break; - case 'retries': - replSetServersOptions.retries = parseInt(value, 10); - break; - case 'readSecondary': - case 'read_secondary': - replSetServersOptions.read_secondary = value === 'true'; - break; - case 'fsync': - dbOptions.fsync = value === 'true'; - break; - case 'journal': - dbOptions.j = value === 'true'; - break; - case 'safe': - dbOptions.safe = value === 'true'; - break; - case 'nativeParser': - case 'native_parser': - dbOptions.native_parser = value === 'true'; - break; - case 'readConcernLevel': - dbOptions.readConcern = new ReadConcern(value); - break; - case 'connectTimeoutMS': - serverOptions.socketOptions.connectTimeoutMS = parseInt(value, 10); - replSetServersOptions.socketOptions.connectTimeoutMS = parseInt(value, 10); - mongosOptions.socketOptions.connectTimeoutMS = 
parseInt(value, 10); - break; - case 'socketTimeoutMS': - serverOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - replSetServersOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - mongosOptions.socketOptions.socketTimeoutMS = parseInt(value, 10); - break; - case 'w': - dbOptions.w = parseInt(value, 10); - if (isNaN(dbOptions.w)) dbOptions.w = value; - break; - case 'authSource': - dbOptions.authSource = value; - break; - case 'gssapiServiceName': - dbOptions.gssapiServiceName = value; - break; - case 'authMechanism': - if (value === 'GSSAPI') { - // If no password provided decode only the principal - if (object.auth == null) { - let urlDecodeAuthPart = decodeURIComponent(authPart); - if (urlDecodeAuthPart.indexOf('@') === -1) - throw new Error('GSSAPI requires a provided principal'); - object.auth = { user: urlDecodeAuthPart, password: null }; - } else { - object.auth.user = decodeURIComponent(object.auth.user); - } - } else if (value === 'MONGODB-X509') { - object.auth = { user: decodeURIComponent(authPart) }; - } - - // Only support GSSAPI or MONGODB-CR for now - if ( - value !== 'GSSAPI' && - value !== 'MONGODB-X509' && - value !== 'MONGODB-CR' && - value !== 'DEFAULT' && - value !== 'SCRAM-SHA-1' && - value !== 'SCRAM-SHA-256' && - value !== 'PLAIN' - ) - throw new Error( - 'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism' - ); - - // Authentication mechanism - dbOptions.authMechanism = value; - break; - case 'authMechanismProperties': - { - // Split up into key, value pairs - let values = value.split(','); - let o = {}; - // For each value split into key, value - values.forEach(function(x) { - let v = x.split(':'); - o[v[0]] = v[1]; - }); - - // Set all authMechanismProperties - dbOptions.authMechanismProperties = o; - // Set the service name value - if (typeof o.SERVICE_NAME === 'string') dbOptions.gssapiServiceName = o.SERVICE_NAME; - if (typeof o.SERVICE_REALM === 'string') dbOptions.gssapiServiceRealm = o.SERVICE_REALM; - if (typeof o.CANONICALIZE_HOST_NAME === 'string') - dbOptions.gssapiCanonicalizeHostName = - o.CANONICALIZE_HOST_NAME === 'true' ? 
true : false; - } - break; - case 'wtimeoutMS': - dbOptions.wtimeout = parseInt(value, 10); - break; - case 'readPreference': - if (!ReadPreference.isValid(value)) - throw new Error( - 'readPreference must be either primary/primaryPreferred/secondary/secondaryPreferred/nearest' - ); - dbOptions.readPreference = value; - break; - case 'maxStalenessSeconds': - dbOptions.maxStalenessSeconds = parseInt(value, 10); - break; - case 'readPreferenceTags': - { - // Decode the value - value = decodeURIComponent(value); - // Contains the tag object - let tagObject = {}; - if (value == null || value === '') { - dbOptions.read_preference_tags.push(tagObject); - break; - } - - // Split up the tags - let tags = value.split(/,/); - for (let i = 0; i < tags.length; i++) { - let parts = tags[i].trim().split(/:/); - tagObject[parts[0]] = parts[1]; - } - - // Set the preferences tags - dbOptions.read_preference_tags.push(tagObject); - } - break; - case 'compressors': - { - compression = serverOptions.compression || {}; - let compressors = value.split(','); - if ( - !compressors.every(function(compressor) { - return compressor === 'snappy' || compressor === 'zlib'; - }) - ) { - throw new Error('Compressors must be at least one of snappy or zlib'); - } - - compression.compressors = compressors; - serverOptions.compression = compression; - } - break; - case 'zlibCompressionLevel': - { - compression = serverOptions.compression || {}; - let zlibCompressionLevel = parseInt(value, 10); - if (zlibCompressionLevel < -1 || zlibCompressionLevel > 9) { - throw new Error('zlibCompressionLevel must be an integer between -1 and 9'); - } - - compression.zlibCompressionLevel = zlibCompressionLevel; - serverOptions.compression = compression; - } - break; - case 'retryWrites': - dbOptions.retryWrites = value === 'true'; - break; - case 'minSize': - dbOptions.minSize = parseInt(value, 10); - break; - default: - { - let logger = Logger('URL Parser'); - logger.warn(`${name} is not supported as a connection string option`); - } - break; - } - }); - - // No tags: should be null (not []) - if (dbOptions.read_preference_tags.length === 0) { - dbOptions.read_preference_tags = null; - } - - // Validate if there are an invalid write concern combinations - if ( - (dbOptions.w === -1 || dbOptions.w === 0) && - (dbOptions.journal === true || dbOptions.fsync === true || dbOptions.safe === true) - ) - throw new Error('w set to -1 or 0 cannot be combined with safe/w/journal/fsync'); - - // If no read preference set it to primary - if (!dbOptions.readPreference) { - dbOptions.readPreference = 'primary'; - } - - // make sure that user-provided options are applied with priority - dbOptions = Object.assign(dbOptions, options); - - // Add servers to result - object.servers = servers; - - // Returned parsed object - return object; -} diff --git a/test/functional/disconnect_handler.test.js b/test/disabled/disconnect_handler.test.js similarity index 100% rename from test/functional/disconnect_handler.test.js rename to test/disabled/disconnect_handler.test.js diff --git a/test/unit/core/mongos/events.test.js b/test/disabled/mongos/events.test.js similarity index 100% rename from test/unit/core/mongos/events.test.js rename to test/disabled/mongos/events.test.js diff --git a/test/unit/core/mongos/reconnect.test.js b/test/disabled/mongos/reconnect.test.js similarity index 100% rename from test/unit/core/mongos/reconnect.test.js rename to test/disabled/mongos/reconnect.test.js diff --git a/test/unit/core/mongos/retryable_writes.test.js 
b/test/disabled/mongos/retryable_writes.test.js similarity index 100% rename from test/unit/core/mongos/retryable_writes.test.js rename to test/disabled/mongos/retryable_writes.test.js diff --git a/test/unit/core/mongos/sessions.test.js b/test/disabled/mongos/sessions.test.js similarity index 100% rename from test/unit/core/mongos/sessions.test.js rename to test/disabled/mongos/sessions.test.js diff --git a/test/functional/core/mongos_mocks/mixed_seed_list.test.js b/test/disabled/mongos_mocks/mixed_seed_list.test.js similarity index 100% rename from test/functional/core/mongos_mocks/mixed_seed_list.test.js rename to test/disabled/mongos_mocks/mixed_seed_list.test.js diff --git a/test/functional/core/mongos_mocks/multiple_proxies.test.js b/test/disabled/mongos_mocks/multiple_proxies.test.js similarity index 100% rename from test/functional/core/mongos_mocks/multiple_proxies.test.js rename to test/disabled/mongos_mocks/multiple_proxies.test.js diff --git a/test/functional/core/mongos_mocks/proxy_failover.test.js b/test/disabled/mongos_mocks/proxy_failover.test.js similarity index 100% rename from test/functional/core/mongos_mocks/proxy_failover.test.js rename to test/disabled/mongos_mocks/proxy_failover.test.js diff --git a/test/functional/core/mongos_mocks/proxy_read_preference.test.js b/test/disabled/mongos_mocks/proxy_read_preference.test.js similarity index 100% rename from test/functional/core/mongos_mocks/proxy_read_preference.test.js rename to test/disabled/mongos_mocks/proxy_read_preference.test.js diff --git a/test/functional/core/mongos_mocks/single_proxy_connection.test.js b/test/disabled/mongos_mocks/single_proxy_connection.test.js similarity index 100% rename from test/functional/core/mongos_mocks/single_proxy_connection.test.js rename to test/disabled/mongos_mocks/single_proxy_connection.test.js diff --git a/test/unit/core/pool.test.js b/test/disabled/pool.test.js similarity index 100% rename from test/unit/core/pool.test.js rename to test/disabled/pool.test.js diff --git a/test/functional/reconnect.test.js b/test/disabled/reconnect.test.js similarity index 100% rename from test/functional/reconnect.test.js rename to test/disabled/reconnect.test.js diff --git a/test/functional/core/replset.test.js b/test/disabled/replset.test.js similarity index 100% rename from test/functional/core/replset.test.js rename to test/disabled/replset.test.js diff --git a/test/unit/core/replset/auth.test.js b/test/disabled/replset/auth.test.js similarity index 100% rename from test/unit/core/replset/auth.test.js rename to test/disabled/replset/auth.test.js diff --git a/test/unit/core/replset/compression.test.js b/test/disabled/replset/compression.test.js similarity index 100% rename from test/unit/core/replset/compression.test.js rename to test/disabled/replset/compression.test.js diff --git a/test/unit/core/replset/read_preference.test.js b/test/disabled/replset/read_preference.test.js similarity index 100% rename from test/unit/core/replset/read_preference.test.js rename to test/disabled/replset/read_preference.test.js diff --git a/test/unit/core/replset/retryable_writes.test.js b/test/disabled/replset/retryable_writes.test.js similarity index 100% rename from test/unit/core/replset/retryable_writes.test.js rename to test/disabled/replset/retryable_writes.test.js diff --git a/test/unit/core/replset/sessions.test.js b/test/disabled/replset/sessions.test.js similarity index 100% rename from test/unit/core/replset/sessions.test.js rename to test/disabled/replset/sessions.test.js diff --git 
a/test/unit/core/replset/step_down.test.js b/test/disabled/replset/step_down.test.js similarity index 100% rename from test/unit/core/replset/step_down.test.js rename to test/disabled/replset/step_down.test.js diff --git a/test/unit/core/replset/transactions_feature_decoration.test.js b/test/disabled/replset/transactions_feature_decoration.test.js similarity index 100% rename from test/unit/core/replset/transactions_feature_decoration.test.js rename to test/disabled/replset/transactions_feature_decoration.test.js diff --git a/test/unit/core/replset/utils.test.js b/test/disabled/replset/utils.test.js similarity index 100% rename from test/unit/core/replset/utils.test.js rename to test/disabled/replset/utils.test.js diff --git a/test/functional/replset_connection.test.js b/test/disabled/replset_connection.test.js similarity index 100% rename from test/functional/replset_connection.test.js rename to test/disabled/replset_connection.test.js diff --git a/test/functional/replset_failover.test.js b/test/disabled/replset_failover.test.js similarity index 100% rename from test/functional/replset_failover.test.js rename to test/disabled/replset_failover.test.js diff --git a/test/functional/replset_operations.test.js b/test/disabled/replset_operations.test.js similarity index 100% rename from test/functional/replset_operations.test.js rename to test/disabled/replset_operations.test.js diff --git a/test/functional/replset_read_preference.test.js b/test/disabled/replset_read_preference.test.js similarity index 100% rename from test/functional/replset_read_preference.test.js rename to test/disabled/replset_read_preference.test.js diff --git a/test/functional/core/rs_mocks/add_remove.test.js b/test/disabled/rs_mocks/add_remove.test.js similarity index 100% rename from test/functional/core/rs_mocks/add_remove.test.js rename to test/disabled/rs_mocks/add_remove.test.js diff --git a/test/functional/core/rs_mocks/all_servers_close.test.js b/test/disabled/rs_mocks/all_servers_close.test.js similarity index 100% rename from test/functional/core/rs_mocks/all_servers_close.test.js rename to test/disabled/rs_mocks/all_servers_close.test.js diff --git a/test/functional/core/rs_mocks/connection.test.js b/test/disabled/rs_mocks/connection.test.js similarity index 100% rename from test/functional/core/rs_mocks/connection.test.js rename to test/disabled/rs_mocks/connection.test.js diff --git a/test/functional/core/rs_mocks/failover.test.js b/test/disabled/rs_mocks/failover.test.js similarity index 100% rename from test/functional/core/rs_mocks/failover.test.js rename to test/disabled/rs_mocks/failover.test.js diff --git a/test/functional/core/rs_mocks/maintanance_mode.test.js b/test/disabled/rs_mocks/maintanance_mode.test.js similarity index 100% rename from test/functional/core/rs_mocks/maintanance_mode.test.js rename to test/disabled/rs_mocks/maintanance_mode.test.js diff --git a/test/functional/core/rs_mocks/monitoring.test.js b/test/disabled/rs_mocks/monitoring.test.js similarity index 100% rename from test/functional/core/rs_mocks/monitoring.test.js rename to test/disabled/rs_mocks/monitoring.test.js diff --git a/test/functional/core/rs_mocks/no_primary_found.test.js b/test/disabled/rs_mocks/no_primary_found.test.js similarity index 100% rename from test/functional/core/rs_mocks/no_primary_found.test.js rename to test/disabled/rs_mocks/no_primary_found.test.js diff --git a/test/functional/core/rs_mocks/operation.test.js b/test/disabled/rs_mocks/operation.test.js similarity index 100% rename from 
test/functional/core/rs_mocks/operation.test.js rename to test/disabled/rs_mocks/operation.test.js diff --git a/test/functional/core/rs_mocks/primary_loses_network.test.js b/test/disabled/rs_mocks/primary_loses_network.test.js similarity index 100% rename from test/functional/core/rs_mocks/primary_loses_network.test.js rename to test/disabled/rs_mocks/primary_loses_network.test.js diff --git a/test/functional/core/rs_mocks/read_preferences.test.js b/test/disabled/rs_mocks/read_preferences.test.js similarity index 100% rename from test/functional/core/rs_mocks/read_preferences.test.js rename to test/disabled/rs_mocks/read_preferences.test.js diff --git a/test/functional/core/rs_mocks/step_down.test.js b/test/disabled/rs_mocks/step_down.test.js similarity index 100% rename from test/functional/core/rs_mocks/step_down.test.js rename to test/disabled/rs_mocks/step_down.test.js diff --git a/test/functional/sdam.test.js b/test/disabled/sdam.test.js similarity index 100% rename from test/functional/sdam.test.js rename to test/disabled/sdam.test.js diff --git a/test/functional/core/server.test.js b/test/disabled/server.test.js similarity index 100% rename from test/functional/core/server.test.js rename to test/disabled/server.test.js diff --git a/test/functional/sharding_failover.test.js b/test/disabled/sharding_failover.test.js similarity index 100% rename from test/functional/sharding_failover.test.js rename to test/disabled/sharding_failover.test.js diff --git a/test/functional/sharding_read_preference.test.js b/test/disabled/sharding_read_preference.test.js similarity index 100% rename from test/functional/sharding_read_preference.test.js rename to test/disabled/sharding_read_preference.test.js diff --git a/test/unit/core/single/sessions.test.js b/test/disabled/single/sessions.test.js similarity index 100% rename from test/unit/core/single/sessions.test.js rename to test/disabled/single/sessions.test.js diff --git a/test/functional/core/single_mocks/compression.test.js b/test/disabled/single_mocks/compression.test.js similarity index 100% rename from test/functional/core/single_mocks/compression.test.js rename to test/disabled/single_mocks/compression.test.js diff --git a/test/functional/apm.test.js b/test/functional/apm.test.js index 3f229b751a..5b9c33b9af 100644 --- a/test/functional/apm.test.js +++ b/test/functional/apm.test.js @@ -223,15 +223,9 @@ describe('APM', function() { ) .then(() => { expect(started).to.have.lengthOf(2); - - if (self.configuration.usingUnifiedTopology()) { - expect(started[0]) - .property('address') - .to.not.equal(started[1].address); - } else { - // Ensure command was not sent to the primary - expect(started[0].connectionId).to.not.equal(started[1].connectionId); - } + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); return client.close(); }); @@ -279,15 +273,9 @@ describe('APM', function() { ) .then(() => { expect(started).to.have.lengthOf(2); - - // Ensure command was not sent to the primary - if (self.configuration.usingUnifiedTopology()) { - expect(started[0]) - .property('address') - .to.not.equal(started[1].address); - } else { - expect(started[0].connectionId).to.not.equal(started[1].connectionId); - } + expect(started[0]) + .property('address') + .to.not.equal(started[1].address); return client.close(); }); diff --git a/test/functional/connection.test.js b/test/functional/connection.test.js index aa0f7d3051..ccc73efc68 100644 --- a/test/functional/connection.test.js +++ b/test/functional/connection.test.js @@ -163,32 +163,6 @@ 
describe('Connection', function() { } }); - /** - * @ignore - */ - it('should fail to connect using non-domain socket with undefined port', { - metadata: { requires: { topology: 'single' } }, - - // The actual test we wish to run - test: function(done) { - var configuration = this.configuration, - Server = configuration.require.Server, - MongoClient = configuration.require.MongoClient; - - var error; - try { - var client = new MongoClient(new Server('localhost', undefined), { w: 0 }); - client.connect(function() {}); - } catch (err) { - error = err; - } - - test.ok(error instanceof Error); - test.ok(/port must be specified/.test(error)); - done(); - } - }); - /** * @ignore */ diff --git a/test/functional/connection_string_spec.test.js b/test/functional/connection_string_spec.test.js deleted file mode 100644 index 2a8fb961aa..0000000000 --- a/test/functional/connection_string_spec.test.js +++ /dev/null @@ -1,32 +0,0 @@ -'use strict'; - -const parse = require('../../lib/url_parser'); -const expect = require('chai').expect; -const loadSpecTests = require('../spec').loadSpecTests; - -describe('Connection String (spec)', function() { - loadSpecTests('connection-string').forEach(suite => { - describe(suite.name, function() { - suite.tests.forEach(test => { - it(test.description, { - metadata: { requires: { topology: 'single' } }, - test: function(done) { - const valid = test.valid; - - parse(test.uri, {}, function(err, result) { - if (valid === false) { - expect(err).to.exist; - expect(result).to.not.exist; - } else { - expect(err).to.not.exist; - expect(result).to.exist; - } - - done(); - }); - } - }); - }); - }); - }); -}); diff --git a/test/functional/core/basic_replset_server_auth.test.js b/test/functional/core/basic_replset_server_auth.test.js deleted file mode 100644 index 17846ced65..0000000000 --- a/test/functional/core/basic_replset_server_auth.test.js +++ /dev/null @@ -1,491 +0,0 @@ -'use strict'; - -var expect = require('chai').expect, - f = require('util').format, - locateAuthMethod = require('./shared').locateAuthMethod, - executeCommand = require('./shared').executeCommand, - ReplSet = require('../../../lib/core/topologies/replset'), - Connection = require('../../../lib/core/connection/connection'); - -const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials; - -var setUp = function(configuration, options, callback) { - var ReplSetManager = require('mongodb-topology-manager').ReplSet; - - // Check if we have any options - if (typeof options === 'function') { - callback = options; - options = null; - } - - // Override options - var rsOptions; - if (options) { - rsOptions = options; - } else { - rsOptions = { - server: { - keyFile: __dirname + '/key/keyfile.key', - auth: null, - replSet: 'rs' - }, - client: { replSet: 'rs' } - }; - } - - // Set up the nodes - var nodes = [ - { - options: { - bind_ip: 'localhost', - port: 31000, - dbpath: f('%s/../db/31000', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31001, - dbpath: f('%s/../db/31001', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31002, - dbpath: f('%s/../db/31002', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31003, - dbpath: f('%s/../db/31003', __dirname) - } - }, - { - options: { - bind_ip: 'localhost', - port: 31004, - dbpath: f('%s/../db/31004', __dirname) - } - } - ]; - - // Merge in any node start up options - for (var i = 0; i < nodes.length; i++) { - for (var name in rsOptions.server) { - nodes[i].options[name] = 
rsOptions.server[name]; - } - } - - // Create a manager - var replicasetManager = new ReplSetManager('mongod', nodes, rsOptions.client); - // Purge the set - replicasetManager.purge().then(function() { - // Start the server - replicasetManager - .start() - .then(function() { - setTimeout(function() { - callback(null, replicasetManager); - }, 10000); - }) - .catch(function(e) { - console.dir(e); - }); - }); -}; - -describe.skip('Basic replica set server auth tests', function() { - it('should fail to authenticat emitting an error due to it being the initial connect', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - // Get right auth method - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - }, - { - host: 'localhost', - port: 31001 - } - ], - { - setName: 'rs' - } - ); - - server.on('error', function() { - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserErr).to.not.exist; - expect(dropUserRes).to.exist; - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - - server.connect({ credentials }); - } - ); - }); - }); - } - }); - - it('should correctly authenticate server using scram-sha-1 using connect auth', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - }, - { - host: 'localhost', - port: 31001 - } - ], - { - setName: 'rs' - } - ); - - server.on('connect', function(_server) { - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(err).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - 
credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - - server.connect({ credentials }); - } - ); - }); - }); - } - }); - - it('should correctly authenticate using auth method instead of connect', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - } - ], - { - setName: 'rs' - } - ); - - server.on('connect', function(_server) { - //{auth: [method, 'admin', 'root', 'root']} - // Attempt authentication - _server.auth(credentials, function(authErr, authRes) { - expect(authRes).to.exist; - expect(authErr).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(insertErr).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - }); - - server.connect(); - } - ); - }); - }); - } - }); - - it('should correctly authenticate using auth method instead of connect and logout user', { - metadata: { requires: { topology: 'auth' } }, - - test: function(done) { - var self = this; - - setUp(self.configuration, function(err, replicasetManager) { - // console.log('------------------------------ -2') - // Enable connections accounting - Connection.enableConnectionAccounting(); - - locateAuthMethod(self.configuration, function(locateErr, method) { - expect(locateErr).to.not.exist; - - const credentials = new MongoCredentials({ - mechanism: method, - source: 'admin', - username: 'root', - password: 'root' - }); - - executeCommand( - self.configuration, - 'admin', - { - createUser: 'root', - pwd: 'root', - roles: [{ role: 'root', db: 'admin' }], - digestPassword: true - }, - { - host: 'localhost', - port: 31000 - }, - function(createUserErr, createUserRes) { - expect(createUserRes).to.exist; - expect(createUserErr).to.not.exist; - - // Attempt to connect - var server = new ReplSet( - [ - { - host: 'localhost', - port: 31000 - } - ], - { - setName: 'rs' - } - ); - - 
server.on('connect', function(_server) { - // Attempt authentication - _server.auth(credentials, function(authErr, authRes) { - expect(authErr).to.exist; - expect(authRes).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function(insertErr, insertRes) { - expect(insertErr).to.not.exist; - expect(insertRes.result.n).to.equal(1); - - _server.logout('admin', function(logoutErr, logoutRes) { - expect(logoutRes).to.exist; - expect(logoutErr).to.not.exist; - - _server.insert('test.test', [{ a: 1 }], function( - secondInsertErr, - secondInsertRes - ) { - if (secondInsertRes) console.dir(secondInsertRes.result); - - executeCommand( - self.configuration, - 'admin', - { - dropUser: 'root' - }, - { - credentials, - host: 'localhost', - port: 31000 - }, - function(dropUserErr, dropUserRes) { - expect(dropUserRes).to.exist; - expect(dropUserErr).to.not.exist; - - _server.destroy(); - // console.log('=================== ' + Object.keys(Connection.connections()).length) - expect(Object.keys(Connection.connections()).length).to.equal(0); - Connection.disableConnectionAccounting(); - - replicasetManager.stop().then(function() { - done(); - }); - } - ); - }); - }); - }); - }); - }); - - server.connect(); - } - ); - }); - }); - } - }); -}); diff --git a/test/functional/core/client_metadata.test.js b/test/functional/core/client_metadata.test.js deleted file mode 100644 index 05660d2075..0000000000 --- a/test/functional/core/client_metadata.test.js +++ /dev/null @@ -1,97 +0,0 @@ -'use strict'; - -const expect = require('chai').expect; - -const core = require('../../../lib/core'); -const BSON = core.BSON; -const Mongos = core.Mongos; -const ReplSet = core.ReplSet; - -describe('Client metadata tests', function() { - it('should correctly pass the configuration settings to server', { - metadata: { requires: { topology: 'single' } }, - - test: function(done) { - // Attempt to connect - var server = this.configuration.newTopology( - this.configuration.host, - this.configuration.port, - { - bson: new BSON(), - appname: 'My application name' - } - ); - - expect(server.clientMetadata.application.name).to.equal('My application name'); - done(); - } - }); - - // Skipped due to use of topology manager - it.skip('should correctly pass the configuration settings to replset', { - metadata: { requires: { topology: 'replicaset' } }, - - test: function(done) { - const self = this; - const manager = this.configuration.manager; - - // Get the primary server - manager.primary().then(function(_manager) { - // Attempt to connect - var server = new ReplSet( - [ - { - host: _manager.host, - port: _manager.port - } - ], - { - setName: self.configuration.setName, - appname: 'My application name' - } - ); - - server.on('connect', function(_server) { - _server.s.replicaSetState.allServers().forEach(function(x) { - expect(x.clientMetadata.application.name).to.equal('My application name'); - expect(x.clientMetadata.platform.split('mongodb-core').length).to.equal(2); - }); - - _server.destroy(done); - }); - - server.connect(); - }); - } - }); - - it('should correctly pass the configuration settings to mongos', { - metadata: { requires: { topology: 'sharded' } }, - - test: function(done) { - // Attempt to connect - var _server = new Mongos( - [ - { - host: 'localhost', - port: 51000 - } - ], - { - appname: 'My application name' - } - ); - - // Add event listeners - _server.once('connect', function(server) { - server.connectedProxies.forEach(function(x) { - expect(x.clientMetadata.application.name).to.equal('My application name'); - 
}); - - server.destroy(done); - }); - - _server.connect(); - } - }); -}); diff --git a/test/functional/core/max_staleness.test.js b/test/functional/core/max_staleness.test.js deleted file mode 100644 index 2dbccd3253..0000000000 --- a/test/functional/core/max_staleness.test.js +++ /dev/null @@ -1,145 +0,0 @@ -'use strict'; - -const expect = require('chai').expect, - p = require('path'), - fs = require('fs'), - Server = require('../../../lib/core/topologies/server'), - ReplSetState = require('../../../lib/core/topologies/replset_state'), - MongoError = require('../../../lib/core/error').MongoError, - ReadPreference = require('../../../lib/core/topologies/read_preference'); - -const rsWithPrimaryPath = p.resolve(__dirname, '../../spec/max-staleness/ReplicaSetWithPrimary'); -const rsWithoutPrimaryPath = p.resolve(__dirname, '../../spec/max-staleness/ReplicaSetNoPrimary'); - -describe('Max Staleness', function() { - describe('ReplicaSet without primary', function() { - fs.readdirSync(rsWithoutPrimaryPath) - .filter(x => x.indexOf('.json') !== -1) - .forEach(x => { - it(p.basename(x, '.json'), function(done) { - executeEntry(`${rsWithoutPrimaryPath}/${x}`, done); - }); - }); - }); - - describe('ReplicaSet with primary', function() { - fs.readdirSync(rsWithPrimaryPath) - .filter(x => x.indexOf('.json') !== -1) - .filter(x => x.indexOf('LongHeartbeat2.jwson') === -1) - .forEach(x => { - it(p.basename(x, '.json'), function(done) { - executeEntry(`${rsWithPrimaryPath}/${x}`, done); - }); - }); - }); -}); - -function convert(mode) { - if (mode === undefined) return 'primary'; - if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; - if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; - return mode.toLowerCase(); -} - -function executeEntry(path, callback) { - // Read and parse the json file - var file = require(path); - - // Let's pick out the parts of the selection specification - var error = file.error; - var heartbeatFrequencyMS = file.heartbeatFrequencyMS || 10000; - var inLatencyWindow = file.in_latency_window; - var readPreference = file.read_preference; - var topologyDescription = file.topology_description; - - try { - // Create a Replset and populate it with dummy topology servers - var replset = new ReplSetState({ - heartbeatFrequencyMS: heartbeatFrequencyMS - }); - - replset.topologyType = topologyDescription.type; - // For each server add them to the state - topologyDescription.servers.forEach(function(s) { - var server = new Server({ - host: s.address.split(':')[0], - port: parseInt(s.address.split(':')[1], 10) - }); - - // Add additional information - if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; - if (s.lastUpdateTime) server.lastUpdateTime = s.lastUpdateTime; - // Set the last write - if (s.lastWrite) { - server.lastWriteDate = s.lastWrite.lastWriteDate.$numberLong; - } - - server.ismaster = {}; - if (s.tags) server.ismaster.tags = s.tags; - if (s.maxWireVersion) server.ismaster.maxWireVersion = s.maxWireVersion; - // Ensure the server looks connected - server.isConnected = function() { - return true; - }; - - if (s.type === 'RSSecondary') { - server.ismaster.secondary = true; - replset.secondaries.push(server); - } else if (s.type === 'RSPrimary') { - server.ismaster.ismaster = true; - replset.primary = server; - } else if (s.type === 'RSArbiter') { - server.ismaster.arbiterOnly = true; - replset.arbiters.push(server); - } - }); - - // Calculate staleness - replset.updateSecondariesMaxStaleness(heartbeatFrequencyMS); - - // 
Create read preference - var rp = new ReadPreference(convert(readPreference.mode), readPreference.tag_sets, { - maxStalenessSeconds: readPreference.maxStalenessSeconds - }); - - // Perform a pickServer - var server = replset.pickServer(rp); - var foundWindow = null; - - // We expect an error - if (error) { - expect(server).to.be.an.instanceof(MongoError); - return callback(null, null); - } - - // server should be in the latency window - for (var i = 0; i < inLatencyWindow.length; i++) { - var w = inLatencyWindow[i]; - - if (server.name === w.address) { - foundWindow = w; - break; - } - } - - if ( - ['ReplicaSetNoPrimary', 'Primary', 'ReplicaSetWithPrimary'].indexOf( - topologyDescription.type - ) !== -1 && - inLatencyWindow.length === 0 - ) { - if (server instanceof MongoError) { - expect(server.message).to.equal('maxStalenessSeconds must be set to at least 90 seconds'); - } else { - expect(server).to.be.null; - } - } else { - expect(foundWindow).to.not.be.null; - } - } catch (err) { - if (file.error) return callback(null, null); - return callback(err, null); - } - - callback(null, null); -} diff --git a/test/functional/core/mongos_server_selection.test.js b/test/functional/core/mongos_server_selection.test.js deleted file mode 100644 index db9ff6cbd4..0000000000 --- a/test/functional/core/mongos_server_selection.test.js +++ /dev/null @@ -1,85 +0,0 @@ -'use strict'; - -const expect = require('chai').expect; -const path = require('path'); -const fs = require('fs'); -const Mongos = require('../../../lib/core/topologies/mongos'); -const ReadPreference = require('../../../lib/core/topologies/read_preference'); -const Server = require('../../../lib/core/topologies/server'); - -describe('Mongos server selection tests', function() { - var specPath = `${__dirname}/../../spec/server-selection/server_selection/Sharded/read`; - var entries = fs.readdirSync(specPath).filter(function(x) { - return x.indexOf('.json') !== -1; - }); - - entries.forEach(entry => { - it(path.basename(entry, '.json'), function(done) { - executeEntry(entry, `${specPath}/${entry}`, done); - }); - }); -}); - -function convert(mode) { - if (mode.toLowerCase() === 'primarypreferred') return 'primaryPreferred'; - if (mode.toLowerCase() === 'secondarypreferred') return 'secondaryPreferred'; - return mode.toLowerCase(); -} - -function executeEntry(file, path, done) { - // Read and parse the json file - file = require(path); - // Let's pick out the parts of the selection specification - var topologyDescription = file.topology_description; - var inLatencyWindow = file.in_latency_window; - var readPreferenceSpec = file.read_preference; - - try { - // Create a Replset and populate it with dummy topology servers - var topology = new Mongos(); - // For each server add them to the state - topologyDescription.servers.forEach(function(s) { - var server = new Server({ - host: s.address.split(':')[0], - port: parseInt(s.address.split(':')[1], 10) - }); - - // Add additional information - if (s.avg_rtt_ms) server.lastIsMasterMS = s.avg_rtt_ms; - if (s.tags) server.ismaster = { tags: s.tags }; - // Ensure the server looks connected - server.isConnected = function() { - return true; - }; - // Add server to topology - topology.connectedProxies.push(server); - }); - - // Create read preference - var readPreference = new ReadPreference( - convert(readPreferenceSpec.mode), - readPreferenceSpec.tag_sets - ); - - // Perform a pickServer - topology.selectServer({ readPreference }, (err, server) => { - if (err) return done(err); - var foundWindow = null; 
- - // server should be in the latency window - for (var i = 0; i < inLatencyWindow.length; i++) { - var w = inLatencyWindow[i]; - - if (server.name === w.address) { - foundWindow = w; - break; - } - } - - expect(foundWindow).to.not.be.null; - done(); - }); - } catch (err) { - done(err); - } -} diff --git a/test/functional/core/operation_example.test.js b/test/functional/core/operation_example.test.js index e614e39fa8..d7a78c0322 100644 --- a/test/functional/core/operation_example.test.js +++ b/test/functional/core/operation_example.test.js @@ -2,8 +2,7 @@ const expect = require('chai').expect; const core = require('../../../lib/core'); -const ReplSet = core.ReplSet; -const Mongos = core.Mongos; +const Topology = core.Topology; /************************************************************************** * @@ -329,15 +328,15 @@ describe('Server operation example tests', function() { * *************************************************************************/ -describe('Replset operation example tests', function() { +describe('Topology operation example tests', function() { /** - * Correctly insert a document using the ReplSet insert method + * Correctly insert a document using the Topology insert method * - * @example-class ReplSet + * @example-class Topology * @example-method insert * @ignore */ - it('simple insert into db using ReplSet', { + it('simple insert into db using Topology', { metadata: { requires: { topology: 'replicaset' @@ -357,11 +356,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -393,11 +392,11 @@ describe('Replset operation example tests', function() { /** * Correctly update a document using the Server update method * - * @example-class ReplSet + * @example-class Topology * @example-method update * @ignore */ - it('update using ReplSet instance', { + it('update using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -417,11 +416,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -470,13 +469,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly remove a document using the ReplSet remove method + * Correctly remove a document using the Topology remove method * - * @example-class ReplSet + * @example-class Topology * @example-method remove * @ignore */ - it('remove using ReplSet instance', { + it('remove using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -496,11 +495,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = 
require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -549,13 +548,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly find a document on the ReplSet using the cursor + * Correctly find a document on the Topology using the cursor * - * @example-class ReplSet + * @example-class Topology * @example-method cursor * @ignore */ - it('cursor using ReplSet instance', { + it('cursor using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -575,11 +574,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -621,13 +620,13 @@ describe('Replset operation example tests', function() { }); /** - * Correctly execute ismaster command on the ReplSet using the cursor + * Correctly execute ismaster command on the Topology using the cursor * - * @example-class ReplSet + * @example-class Topology * @example-method command * @ignore */ - it('command using ReplSet instance', { + it('command using Topology instance', { metadata: { requires: { topology: 'replicaset' @@ -647,11 +646,11 @@ describe('Replset operation example tests', function() { }; // Attempt to connect - var server = new ReplSet(config, options); + var server = new Topology(config, options); // LINE var Server = require('mongodb-core').Server, // LINE test = require('assert'); - // LINE var server = new ReplSet([{host: 'localhost', port:31000}], {setName:'rs'}); + // LINE var server = new Topology([{host: 'localhost', port:31000}], {setName:'rs'}); // REMOVE-LINE done(); // BEGIN // Add event listeners @@ -691,7 +690,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -730,7 +729,7 @@ describe.skip('Mongos operation example tests', function() { * @example-method update * @ignore */ - it('update using ReplSet instance', { + it('update using Topology instance', { metadata: { requires: { topology: 'mongos' @@ -739,7 +738,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -806,7 +805,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -873,7 +872,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var server = new Topology([ { host: this.configuration.host, port: this.configuration.port @@ -933,7 +932,7 @@ describe.skip('Mongos operation example tests', function() { test: function(done) { // Attempt to connect - var server = new Mongos([ + var 
server = new Topology([ { host: this.configuration.host, port: this.configuration.port diff --git a/test/functional/core/replset_state.test.js b/test/functional/core/replset_state.test.js deleted file mode 100644 index 1c9e757540..0000000000 --- a/test/functional/core/replset_state.test.js +++ /dev/null @@ -1,131 +0,0 @@ -'use strict'; - -const expect = require('chai').expect, - f = require('util').format, - p = require('path'), - fs = require('fs'), - ObjectId = require('bson').ObjectId, - ReplSetState = require('../../../lib/core/topologies/replset_state'); - -describe('ReplicaSet state', function() { - const path = p.resolve(__dirname, '../../spec/server-discovery-and-monitoring/rs'); - - fs.readdirSync(path) - .filter(x => x.indexOf('.json') !== -1) - .forEach(x => { - var testData = require(f('%s/%s', path, x)); - - it(testData.description, function(done) { - executeEntry(testData, done); - }); - }); -}); - -function executeEntry(testData, callback) { - var uri = testData.uri; - var phases = testData.phases; - - // Get replicaset name if any - var match = uri.match(/replicaSet=[a-z|A-Z|0-9]*/); - var replicaSet = match ? match.toString().split(/=/)[1] : null; - - // Replicaset - // Create a replset state - var state = new ReplSetState({ setName: replicaSet }); - - // Get all the server instances - var parts = uri - .split('mongodb://')[1] - .split('/')[0] - .split(','); - - // For each of the servers - parts.forEach(function(x) { - var params = x.split(':'); - state.update({ - name: f('%s:%s', params[0], params[1] ? parseInt(params[1], 10) : 27017), - lastIsMaster: function() { - return null; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - }); - - // Run each phase - executePhases(phases, state, callback); -} - -function executePhases(phases, state, callback) { - if (phases.length === 0) { - return callback(null, null); - } - - executePhase(phases.shift(), state, err => { - if (err) return callback(err, null); - return executePhases(phases, state, callback); - }); -} - -function executePhase(phase, state, callback) { - var responses = phase.responses; - var outcome = phase.outcome; - - // Apply all the responses - responses.forEach(function(x) { - if (Object.keys(x[1]).length === 0) { - state.remove({ - name: x[0], - lastIsMaster: function() { - return null; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - } else { - var ismaster = x[1]; - if (ismaster.electionId) ismaster.electionId = new ObjectId(ismaster.electionId.$oid); - - state.update({ - name: x[0], - lastIsMaster: function() { - return ismaster; - }, - equals: function(s) { - if (typeof s === 'string') return s === this.name; - return s.name === this.name; - }, - destroy: function() {} - }); - } - }); - - // Validate the state of the final outcome - for (var name in outcome.servers) { - try { - if (outcome.servers[name].electionId) { - outcome.servers[name].electionId = new ObjectId(outcome.servers[name].electionId.$oid); - } - - expect(state.set[name]).to.exist; - for (var n in outcome.servers[name]) { - if (outcome.servers[name][n]) { - expect(state.set[name][n]).to.eql(outcome.servers[name][n]); - } - } - } catch (e) { - return callback(e); - } - } - - // // Check the topology type - expect(state.topologyType).to.equal(outcome.topologyType); - expect(state.setName).to.equal(outcome.setName); - callback(null, null); -} 
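With the replica-set state machinery above gone, every deployment type now goes through the single SDAM-backed Topology exported from lib/core, which is what the updated topology.test.js and operation_example hunks below exercise. The sketch that follows is illustrative only and mirrors those hunks; the seed list, set name, namespace, and the close() call are placeholders rather than lines taken from this patch.

    const Topology = require('../../lib/core').Topology;

    // One constructor for standalone, replica set and sharded deployments:
    // a seed list plus a single options bag replaces the per-topology classes.
    const topology = new Topology(
      [{ host: 'localhost', port: 31000 }, { host: 'localhost', port: 31001 }],
      { replicaSet: 'rs' }
    );

    topology.connect(function(err) {
      if (err) throw err;

      // The wire-level helpers the migrated tests rely on are still called the same way.
      topology.insert('test.test', [{ a: 1 }], {}, function(insertErr) {
        if (insertErr) throw insertErr;
        topology.close(function() {});
      });
    });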
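The same consolidation applies to connection-string handling: the hand-rolled lib/url_parser.js logic whose remaining body is removed above, together with the url_parser, connection_string_spec and mongodb_srv spec tests this patch deletes, is superseded by parseConnectionString from lib/connection_string.js, which the uri_options_spec hunk below now imports directly. A minimal sketch of the replacement call is shown here; the URI is a placeholder, the two-argument form assumes the options bag is optional, and the field names read off result are illustrative rather than copied from this patch.

    const parseConnectionString = require('../../lib/connection_string').parseConnectionString;

    parseConnectionString('mongodb://fred:foobar@localhost:27017/baz?replicaSet=rs', (err, result) => {
      if (err) throw err;
      // A single normalized result is expected to replace the old
      // server_options / db_options / rs_options / mongos_options split.
      console.log(result.hosts, result.options);
    });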
diff --git a/test/functional/core/topology.test.js b/test/functional/core/topology.test.js index 06bb36c695..017f9a746e 100644 --- a/test/functional/core/topology.test.js +++ b/test/functional/core/topology.test.js @@ -1,7 +1,7 @@ 'use strict'; const expect = require('chai').expect; -describe('Topology', { requires: { unifiedTopology: true } }, function() { +describe('Topology', function() { it('should correctly track states of a topology', function(done) { const topology = this.configuration.newTopology(); diff --git a/test/functional/cursor.test.js b/test/functional/cursor.test.js index 9fc0133f3f..5a4ad34d60 100644 --- a/test/functional/cursor.test.js +++ b/test/functional/cursor.test.js @@ -5,7 +5,6 @@ const fs = require('fs'); const expect = require('chai').expect; const Long = require('bson').Long; const sinon = require('sinon'); -const Buffer = require('safe-buffer').Buffer; const Writable = require('stream').Writable; const core = require('../../lib/core'); @@ -296,15 +295,8 @@ describe('Cursor', function() { expect(err).to.not.exist; const db = client.db(configuration.db); - let internalClientCursor; - if (configuration.usingUnifiedTopology()) { - internalClientCursor = sinon.spy(client.topology, 'cursor'); - } else { - internalClientCursor = sinon.spy(client.topology.s.coreTopology, 'cursor'); - } - + const internalClientCursor = sinon.spy(client.topology, 'cursor'); const expectedReadPreference = new ReadPreference(ReadPreference.SECONDARY); - const cursor = db.collection('countTEST').find({ qty: { $gt: 4 } }); cursor.count(true, { readPreference: ReadPreference.SECONDARY }, err => { expect(err).to.be.null; diff --git a/test/functional/mongo_client_options.test.js b/test/functional/mongo_client_options.test.js index eff199c602..b71f5daea0 100644 --- a/test/functional/mongo_client_options.test.js +++ b/test/functional/mongo_client_options.test.js @@ -35,27 +35,4 @@ describe('MongoClient Options', function() { ); } }); - - /** - * @ignore - */ - function connectionTester(configuration, testName, callback) { - return function(err, client) { - test.equal(err, null); - var db = client.db(configuration.db); - - db.collection(testName, function(err, collection) { - test.equal(err, null); - - collection.insert({ foo: 123 }, { w: 1 }, function(err) { - test.equal(err, null); - db.dropDatabase(function(err, dropped) { - test.equal(err, null); - test.ok(dropped); - if (callback) return callback(client); - }); - }); - }); - }; - } }); diff --git a/test/functional/mongodb_srv.test.js b/test/functional/mongodb_srv.test.js deleted file mode 100644 index 2b9310818a..0000000000 --- a/test/functional/mongodb_srv.test.js +++ /dev/null @@ -1,61 +0,0 @@ -'use strict'; - -var fs = require('fs'); -var path = require('path'); - -var parse = require('../../lib/url_parser'); -var expect = require('chai').expect; - -function getTests() { - return fs - .readdirSync(path.resolve(__dirname, '../spec/dns-txt-records')) - .filter(x => x.indexOf('json') !== -1) - .map(x => [x, fs.readFileSync(path.resolve(__dirname, '../spec/dns-txt-records', x), 'utf8')]) - .map(x => [path.basename(x[0], '.json'), JSON.parse(x[1])]); -} - -describe('mongodb+srv (spec)', function() { - it('should parse a default database', function(done) { - parse('mongodb+srv://test5.test.build.10gen.cc/somedb', (err, result) => { - expect(err).to.not.exist; - expect(result.dbName).to.eql('somedb'); - done(); - }); - }); - - getTests().forEach(function(test) { - if (!test[1].comment) test[1].comment = test[0]; - - it(test[1].comment, { - 
metadata: { - requires: { topology: ['single'] } - }, - test: function(done) { - parse(test[1].uri, function(err, object) { - if (test[1].error) { - expect(err).to.exist; - expect(object).to.not.exist; - } else { - expect(err).to.be.null; - expect(object).to.exist; - if (test[1].options && test[1].options.replicaSet) { - expect(object.rs_options.rs_name).to.equal(test[1].options.replicaSet); - } - if (test[1].options && test[1].options.ssl) { - expect(object.server_options.ssl).to.equal(test[1].options.ssl); - } - if ( - test[1].parsed_options && - test[1].parsed_options.user && - test[1].parsed_options.password - ) { - expect(object.auth.user).to.equal(test[1].parsed_options.user); - expect(object.auth.password).to.equal(test[1].parsed_options.password); - } - } - done(); - }); - } - }); - }); -}); diff --git a/test/functional/operation_example.test.js b/test/functional/operation_example.test.js index 6fa883dc52..45d1619e7c 100644 --- a/test/functional/operation_example.test.js +++ b/test/functional/operation_example.test.js @@ -3,6 +3,7 @@ const test = require('./shared').assert; const setupDatabase = require('./shared').setupDatabase; const f = require('util').format; const Buffer = require('safe-buffer').Buffer; +const Topology = require('../../lib/core').Topology; const chai = require('chai'); const expect = chai.expect; @@ -4601,23 +4602,19 @@ describe('Operation Examples', function() { // The actual test we wish to run test: function(done) { var configuration = this.configuration; - var ReplSet = configuration.require.ReplSet, - MongoClient = configuration.require.MongoClient, - Server = configuration.require.Server; // Replica configuration - var replSet = new ReplSet( + var client = new Topology( [ - new Server(configuration.host, configuration.port), - new Server(configuration.host, configuration.port + 1), - new Server(configuration.host, configuration.port + 2) + { host: configuration.host, port: configuration.port }, + { host: configuration.host, port: configuration.port + 1 }, + { host: configuration.host, port: configuration.port + 2 } ], - { rs_name: configuration.replicasetName } + { replicaSet: configuration.replicasetName } ); - var client = new MongoClient(replSet, { w: 0 }); client.connect(function(err, client) { - test.equal(null, err); + expect(err).to.not.exist; // LINE var MongoClient = require('mongodb').MongoClient, // LINE test = require('assert'); // LINE const client = new MongoClient('mongodb://localhost:27017/test'); diff --git a/test/functional/operation_promises_example.test.js b/test/functional/operation_promises_example.test.js index 102ab5e8aa..f7dc2fd2cf 100644 --- a/test/functional/operation_promises_example.test.js +++ b/test/functional/operation_promises_example.test.js @@ -3773,50 +3773,6 @@ describe('Operation (Promises)', function() { } }); - /** - * Simple replicaset connection setup, requires a running replicaset on the correct ports using a Promise. 
- * - * @example-class Db - * @example-method open - * @ignore - */ - it('Should correctly connect with default replicasetNoOption With Promises', { - metadata: { requires: { topology: 'replicaset' } }, - - // The actual test we wish to run - test: function() { - var configuration = this.configuration; - var ReplSet = configuration.require.ReplSet, - MongoClient = configuration.require.MongoClient, - Server = configuration.require.Server; - - // Replica configuration - var replSet = new ReplSet( - [ - new Server(configuration.host, configuration.port), - new Server(configuration.host, configuration.port + 1), - new Server(configuration.host, configuration.port + 2) - ], - { rs_name: configuration.replicasetName } - ); - - var client = new MongoClient(replSet, { w: 0 }); - return client.connect().then(function() { - // LINE var MongoClient = require('mongodb').MongoClient, - // LINE test = require('assert'); - // LINE const client = new MongoClient('mongodb://localhost:27017/test'); - // LINE client.connect().then(() => { - // LINE var db = client.db('test); - // REPLACE configuration.writeConcernMax() WITH {w:1} - // REMOVE-LINE restartAndDone - // REMOVE-LINE done(); - // BEGIN - return client.close(); - }); - // END - } - }); - /************************************************************************** * * ADMIN TESTS diff --git a/test/functional/sessions.test.js b/test/functional/sessions.test.js index c79510001d..45e708f424 100644 --- a/test/functional/sessions.test.js +++ b/test/functional/sessions.test.js @@ -187,11 +187,6 @@ describe('Sessions', function() { after(() => testContext.teardown()); before(function() { - if (!this.configuration.usingUnifiedTopology()) { - this.test.parent.pending = true; // https://github.com/mochajs/mocha/issues/2683 - this.skip(); - return; - } return testContext.setup(this.configuration); }); diff --git a/test/functional/spec-runner/index.js b/test/functional/spec-runner/index.js index 5dd159f4e8..3c25ee2d9b 100644 --- a/test/functional/spec-runner/index.js +++ b/test/functional/spec-runner/index.js @@ -233,6 +233,7 @@ function runTestSuiteTest(configuration, spec, context) { // test-specific client options clientOptions.autoReconnect = false; clientOptions.haInterval = 100; + clientOptions.minHeartbeatFrequencyMS = 100; clientOptions.useRecoveryToken = true; const url = resolveConnectionString(configuration, spec); diff --git a/test/functional/transactions.test.js b/test/functional/transactions.test.js index d661660338..ec49a1d4b0 100644 --- a/test/functional/transactions.test.js +++ b/test/functional/transactions.test.js @@ -49,8 +49,8 @@ describe('Transactions', function() { describe('withTransaction', function() { let session, sessionPool; beforeEach(() => { - const topology = new core.Server(); - sessionPool = new sessions.ServerSessionPool(topology); + const topology = new core.Topology('localhost:27017'); + sessionPool = topology.s.sessionPool; session = new sessions.ClientSession(topology, sessionPool); }); diff --git a/test/functional/uri.test.js b/test/functional/uri.test.js index b6c8f2baf7..88c278216b 100644 --- a/test/functional/uri.test.js +++ b/test/functional/uri.test.js @@ -2,7 +2,6 @@ const expect = require('chai').expect; const sinon = require('sinon'); -const ReplSet = require('../../lib/topologies/replset'); const NativeTopology = require('../../lib/topologies/native_topology'); describe('URI', function() { @@ -150,10 +149,7 @@ describe('URI', function() { done(); } - const topologyPrototype = 
this.configuration.usingUnifiedTopology() - ? NativeTopology.prototype - : ReplSet.prototype; - + const topologyPrototype = NativeTopology.prototype; const connectStub = sinon.stub(topologyPrototype, 'connect').callsFake(validateConnect); const uri = 'mongodb://some-hostname/test?ssl=true&authMechanism=MONGODB-X509&replicaSet=rs0'; const client = this.configuration.newClient(uri, { useNewUrlParser: true }); diff --git a/test/functional/uri_options_spec.test.js b/test/functional/uri_options_spec.test.js index 19804decd0..aa278ad7ac 100644 --- a/test/functional/uri_options_spec.test.js +++ b/test/functional/uri_options_spec.test.js @@ -4,9 +4,8 @@ const chai = require('chai'); const expect = chai.expect; chai.use(require('chai-subset')); -const core = require('../../lib/core'); -const parse = core.parseConnectionString; -const MongoParseError = core.MongoParseError; +const parse = require('../../lib/connection_string').parseConnectionString; +const MongoParseError = require('../../lib/core').MongoParseError; const loadSpecTests = require('../spec').loadSpecTests; describe('URI Options (spec)', function() { diff --git a/test/functional/url_parser.test.js b/test/functional/url_parser.test.js deleted file mode 100644 index df25d45a90..0000000000 --- a/test/functional/url_parser.test.js +++ /dev/null @@ -1,1030 +0,0 @@ -'use strict'; - -/*! - * Module dependencies. - */ -var parse = require('../../lib/url_parser'); -var expect = require('chai').expect; - -describe('Url Parser', function() { - /** - * @ignore - */ - it('should correctly parse mongodb://localhost', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:27017', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:27017/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:27017test?appname=hello%20world', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:27017/test?appname=hello%20world', {}, function(err, object) { - expect(err).to.be.null; - expect(object.appname).to.equal('hello world'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost/?safe=true&readPreference=secondary', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?safe=true&readPreference=secondary', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - 
expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost:28101/', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost:28101/', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(28101); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foobar@localhost/baz', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - parse('mongodb://fred:foobar@localhost/baz', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('baz'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foobar'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo%20bar@localhost/baz', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - // The actual test we wish to run - test: function(done) { - parse('mongodb://fred:foo%20bar@localhost/baz', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('baz'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo bar'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://%2Ftmp%2Fmongodb-27017.sock', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://%2Ftmp%2Fmongodb-27017.sock', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('admin'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - 
expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('somedb'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://fred:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].domain_socket).to.equal('/tmp/mongodb-27017.sock'); - expect(object.dbName).to.equal('somedb'); - expect(object.auth.user).to.equal('fred'); - expect(object.auth.password).to.equal('foo'); - expect(object.db_options.safe).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://example1.com:27017,example2.com:27018', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://example1.com:27017,example2.com:27018', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(2); - expect(object.servers[0].host).to.equal('example1.com'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('example2.com'); - expect(object.servers[1].port).to.equal(27018); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost,localhost:27018,localhost:27019', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost,localhost:27018,localhost:27019', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('localhost'); - expect(object.servers[1].port).to.equal(27018); - expect(object.servers[2].host).to.equal('localhost'); - expect(object.servers[2].port).to.equal(27019); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://host1,host2,host3/?slaveOk=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://host1,host2,host3/?slaveOk=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.slave_ok).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should correctly parse mongodb://host1,host2,host3,host1/?slaveOk=true and de-duplicate names', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - 
parse('mongodb://host1,host2,host3,host1/?slaveOk=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.slave_ok).to.be.true; - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost/?safe=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?safe=true', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.db_options.safe).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://host1,host2,host3/?safe=true;w=2;wtimeoutMS=2000', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://host1,host2,host3/?safe=true;w=2;wtimeoutMS=2000', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('host1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('host2'); - expect(object.servers[1].port).to.equal(27017); - expect(object.servers[2].host).to.equal('host3'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.db_options.safe).to.be.true; - expect(object.db_options.w).to.equal(2); - expect(object.db_options.wtimeout).to.equal(2000); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should parse mongodb://localhost/db?replicaSet=hello&ssl=prefer&connectTimeoutMS=1000&socketTimeoutMS=2000', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse( - 'mongodb://localhost/db?replicaSet=hello&ssl=prefer&connectTimeoutMS=1000&socketTimeoutMS=2000', - {}, - function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.rs_name).to.equal('hello'); - expect(object.server_options.socketOptions.connectTimeoutMS).to.equal(1000); - expect(object.server_options.socketOptions.socketTimeoutMS).to.equal(2000); - expect(object.rs_options.socketOptions.connectTimeoutMS).to.equal(1000); - expect(object.rs_options.socketOptions.socketTimeoutMS).to.equal(2000); - expect(object.rs_options.ssl).to.equal('prefer'); - expect(object.server_options.ssl).to.equal('prefer'); - done(); - } - ); - } - } - ); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?ssl=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?ssl=true', {}, function(err, object) { - 
expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.ssl).to.be.true; - expect(object.server_options.ssl).to.be.true; - done(); - }); - } - }); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?maxPoolSize=100', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?maxPoolSize=100', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.rs_options.poolSize).to.equal(100); - expect(object.server_options.poolSize).to.equal(100); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should parse mongodb://localhost/db?w=-1', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?w=-1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('db'); - expect(object.db_options.w).to.equal(-1); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy, with one compressor specified', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?zlibCompressionLevel=-1 without issuing a warning', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?zlibCompressionLevel=-1 ', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(-1); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy&zlibCompressionLevel=3 without issuing a warning', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy&zlibCompressionLevel=3', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - 
expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(3); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should be able to parse mongodb://localhost/?compressors=snappy,zlib&zlibCompressionLevel=-1', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/?compressors=snappy,zlib&zlibCompressionLevel=-1', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - expect(object.server_options.compression.compressors[0]).to.equal('snappy'); - expect(object.server_options.compression.compressors[1]).to.equal('zlib'); - expect(object.server_options.compression.zlibCompressionLevel).to.equal(-1); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should throw an error when parsing mongodb://localhost/?compressors=foo, where foo is an unsupported compressor', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - // Should throw due to unsupported compressor - parse('mongodb://localhost/?compressors=foo', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Compressors must be at least one of snappy or zlib'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should throw an error when parsing mongodb://localhost/?zlibCompressionLevel=10, where the integer is out of the specified bounds', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - // Should throw due to unsupported compressor - parse('mongodb://localhost/?zlibCompressionLevel=10', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('zlibCompressionLevel must be an integer between -1 and 9'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should log when unsupported options are used in url', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - var self = this, - Logger = self.configuration.require.Logger, - logged = false; - - const logger = Logger.currentLogger(); - Logger.setCurrentLogger(function(msg, context) { - expect(msg).to.exist; - expect(msg).to.contain('not supported'); - expect(context.type).to.equal('warn'); - expect(context.className).to.equal('URL Parser'); - logged = true; - }); - - Logger.setLevel('warn'); - - parse('mongodb://localhost/db?minPoolSize=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?maxIdleTimeMS=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?waitQueueMultiple=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?waitQueueTimeoutMS=100', {}, function() { - expect(logged).to.be.true; - parse('mongodb://localhost/db?uuidRepresentation=1', {}, function() { - expect(logged).to.be.true; - - Logger.setCurrentLogger(logger); - done(); - }); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should write concerns parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - 
test: function(done) { - parse('mongodb://localhost/db?safe=true&w=1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.safe).to.be.true; - parse('mongodb://localhost/db?safe=false&w=1', {}, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.safe).to.be.false; - // should throw as fireAndForget is set aswell as safe or any other - // write concerns - parse('mongodb://localhost/db?safe=true&w=0', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'w set to -1 or 0 cannot be combined with safe/w/journal/fsync' - ); - parse('mongodb://localhost/db?fsync=true&w=-1', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'w set to -1 or 0 cannot be combined with safe/w/journal/fsync' - ); - done(); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should parse GSSAPI', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://dev1%4010GEN.ME@kdc.10gen.com/test?authMechanism=GSSAPI', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth).to.eql({ user: 'dev1@10GEN.ME', password: null }); - expect(object.db_options.authMechanism).to.equal('GSSAPI'); - // Should throw due to missing principal - parse('mongodb://kdc.10gen.com/test?authMechanism=GSSAPI', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('GSSAPI requires a provided principal'); - // Should throw due to unsupported mechanism - parse('mongodb://kdc.10gen.com/test?authMechanism=NONE', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'Only DEFAULT, GSSAPI, PLAIN, MONGODB-X509, or SCRAM-SHA-1 is supported by authMechanism' - ); - parse( - 'mongodb://dev1%4010GEN.ME:test@kdc.10gen.com/test?authMechanism=GSSAPI', - {}, - function(err, object) { - expect(err).to.be.null; - expect(object.auth).to.eql({ user: 'dev1@10GEN.ME', password: 'test' }); - expect(object.db_options.authMechanism).to.equal('GSSAPI'); - done(); - } - ); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('Read preferences parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db?slaveOk=true', {}, function(err, object) { - expect(object.server_options.slave_ok).to.be.true; - parse('mongodb://localhost/db?readPreference=primary', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('primary'); - parse('mongodb://localhost/db?readPreference=primaryPreferred', {}, function( - err, - object - ) { - expect(object.db_options.readPreference).to.equal('primaryPreferred'); - parse('mongodb://localhost/db?readPreference=secondary', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('secondary'); - parse('mongodb://localhost/db?readPreference=secondaryPreferred', {}, function( - err, - object - ) { - expect(object.db_options.readPreference).to.equal('secondaryPreferred'); - parse('mongodb://localhost/db?readPreference=nearest', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('nearest'); - parse('mongodb://localhost/db', {}, function(err, object) { - expect(object.db_options.readPreference).to.equal('primary'); - parse('mongodb://localhost/db?readPreference=blah', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal( - 'readPreference must be 
either primary/primaryPreferred/secondary/secondaryPreferred/nearest' - ); - done(); - }); - }); - }); - }); - }); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('Read preferences tag parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost/db', {}, function(err, object) { - expect(object.db_options.read_preference_tags).to.be.null; - parse('mongodb://localhost/db?readPreferenceTags=dc:ny', {}, function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([{ dc: 'ny' }]); - parse('mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1', {}, function( - err, - object - ) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([{ dc: 'ny', rack: '1' }]); - parse( - 'mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:sf,rack:2', - {}, - function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([ - { dc: 'ny', rack: '1' }, - { dc: 'sf', rack: '2' } - ]); - parse( - 'mongodb://localhost/db?readPreferenceTags=dc:ny,rack:1&readPreferenceTags=dc:sf,rack:2&readPreferenceTags=', - {}, - function(err, object) { - expect(err).to.not.exist; - expect(object.db_options.read_preference_tags).to.eql([ - { dc: 'ny', rack: '1' }, - { dc: 'sf', rack: '2' }, - {} - ]); - done(); - } - ); - } - ); - }); - }); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://[::1]:1234', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://[::1]:1234', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('::1'); - expect(object.servers[0].port).to.equal(1234); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://[::1]', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://[::1]', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(1); - expect(object.servers[0].host).to.equal('::1'); - expect(object.servers[0].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://localhost,[::1]:27018,[2607:f0d0:1002:51::41]', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://localhost,[::1]:27018,[2607:f0d0:1002:51::41]', {}, function(err, object) { - expect(err).to.be.null; - expect(object.servers).to.have.length(3); - expect(object.servers[0].host).to.equal('localhost'); - expect(object.servers[0].port).to.equal(27017); - expect(object.servers[1].host).to.equal('::1'); - expect(object.servers[1].port).to.equal(27018); - expect(object.servers[2].host).to.equal('2607:f0d0:1002:51::41'); - expect(object.servers[2].port).to.equal(27017); - expect(object.dbName).to.equal('admin'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should correctly parse mongodb://k?y:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 
'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('k?y'); - done(); - }); - } - }); - - /** - * @ignore - */ - it( - 'should correctly parse uriencoded k?y mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://k%3Fy:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('k?y'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it( - 'should correctly parse username kay:kay mongodb://kay%3Akay:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', - { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - - test: function(done) { - parse('mongodb://kay%3Akay:foo@%2Ftmp%2Fmongodb-27017.sock/somedb?safe=true', {}, function( - err, - object - ) { - expect(err).to.be.null; - expect(object.auth.user).to.equal('kay:kay'); - done(); - }); - } - } - ); - - /** - * @ignore - */ - it('should use options passed into url parsing', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://localhost/', { readPreference: 'secondary' }, function(err, object) { - expect(err).to.be.null; - expect(object.db_options.readPreference).to.equal('secondary'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should raise exceptions on invalid hostnames with double colon in host identifier', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://invalid::host:27017/db', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Double colon in host identifier'); - done(); - }); - } - }); - - /** - * @ignore - */ - it('should raise exceptions on invalid hostnames with slash in host identifier', { - metadata: { - requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] } - }, - test: function(done) { - parse('mongodb://invalid/host:27017/db', {}, function(err) { - expect(err).to.exist; - expect(err.message).to.equal('Slash in host identifier'); - done(); - }); - } - }); -}); diff --git a/test/spec/dns-txt-records/README.rst b/test/spec/dns-txt-records/README.rst deleted file mode 100644 index 5999557948..0000000000 --- a/test/spec/dns-txt-records/README.rst +++ /dev/null @@ -1,92 +0,0 @@ -==================================== -Initial DNS Seedlist Discovery tests -==================================== - -This directory contains platform-independent tests that drivers can use -to prove their conformance to the Initial DNS Seedlist Discovery spec. - -Test Setup ----------- - -Start a three-node replica set on localhost, on ports 27017, 27018, and 27019, -with replica set name "repl0". The replica set MUST be started with SSL -enabled. - -To run the tests that accompany this spec, you need to configure the SRV and -TXT records with a real name server. The following records are required for -these tests:: - - Record TTL Class Address - localhost.test.build.10gen.cc. 86400 IN A 127.0.0.1 - localhost.sub.test.build.10gen.cc. 
86400 IN A 127.0.0.1 - - Record TTL Class Port Target - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test1.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27018 localhost.test.build.10gen.cc. - _mongodb._tcp.test2.test.build.10gen.cc. 86400 IN SRV 27019 localhost.test.build.10gen.cc. - _mongodb._tcp.test3.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test5.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test6.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test7.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test8.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test10.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test11.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - _mongodb._tcp.test12.test.build.10gen.cc. 86400 IN SRV 27017 localhost.build.10gen.cc. - _mongodb._tcp.test13.test.build.10gen.cc. 86400 IN SRV 27017 test.build.10gen.cc. - _mongodb._tcp.test14.test.build.10gen.cc. 86400 IN SRV 27017 localhost.not-test.build.10gen.cc. - _mongodb._tcp.test15.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.not-build.10gen.cc. - _mongodb._tcp.test16.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.not-10gen.cc. - _mongodb._tcp.test17.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.not-cc. - _mongodb._tcp.test18.test.build.10gen.cc. 86400 IN SRV 27017 localhost.sub.test.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.evil.build.10gen.cc. - _mongodb._tcp.test19.test.build.10gen.cc. 86400 IN SRV 27017 localhost.test.build.10gen.cc. - - Record TTL Class Text - test5.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0&authSource=thisDB" - test6.test.build.10gen.cc. 86400 IN TXT "replicaSet=repl0" - test6.test.build.10gen.cc. 86400 IN TXT "authSource=otherDB" - test7.test.build.10gen.cc. 86400 IN TXT "ssl=false" - test8.test.build.10gen.cc. 86400 IN TXT "authSource" - test10.test.build.10gen.cc. 86400 IN TXT "socketTimeoutMS=500" - test11.test.build.10gen.cc. 86400 IN TXT "replicaS" "et=rep" "l0" - -Note that ``test4`` is omitted deliberately to test what happens with no SRV -record. ``test9`` is missing because it was deleted during the development of -the tests. The missing ``test.`` sub-domain in the SRV record target for -``test12`` is deliberate. - -In our tests we have used ``localhost.test.build.10gen.cc`` as the domain, and -then configured ``localhost.test.build.10gen.cc`` to resolve to 127.0.0.1. - -You need to adapt the records shown above to replace ``test.build.10gen.cc`` -with your own domain name, and update the "uri" field in the YAML or JSON files -in this directory with the actual domain. 
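A minimal sketch (not part of this patch) of the lookup these records drive, using Node's built-in dns module; the helper name and error handling are illustrative, and the only mongodb+srv rules it encodes are the ones described above:

'use strict';
const dns = require('dns').promises;

// Resolve the seed list and URI options for a mongodb+srv host,
// e.g. resolveSeedlist('test1.test.build.10gen.cc').
async function resolveSeedlist(srvHost) {
  const srvRecords = await dns.resolveSrv(`_mongodb._tcp.${srvHost}`);
  const seeds = srvRecords.map(r => `${r.name}:${r.port}`);

  let txtRecords = [];
  try {
    txtRecords = await dns.resolveTxt(srvHost);
  } catch (err) {
    // A missing TXT record is fine; any other DNS failure is a real error.
    if (err.code !== 'ENODATA' && err.code !== 'ENOTFOUND') throw err;
  }

  if (txtRecords.length > 1) {
    throw new Error('multiple TXT records are not allowed');
  }

  // A single TXT record may be split into several strings (see test11);
  // the chunks are concatenated before the options are parsed.
  const options = txtRecords.length === 1 ? txtRecords[0].join('') : '';
  return { seeds, options }; // e.g. { seeds: [...], options: 'replicaSet=repl0' }
}
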
- -Test Format and Use -------------------- - -These YAML and JSON files contain the following fields: - -- ``uri``: a mongodb+srv connection string -- ``seeds``: the expected set of initial seeds discovered from the SRV record -- ``hosts``: the discovered topology's list of hosts once SDAM completes a scan -- ``options``: the parsed connection string options as discovered from URI and - TXT records -- ``parsed_options``: additional options present in the URI such as user/password -credentials -- ``error``: indicates that the parsing of the URI, or the resolving or - contents of the SRV or TXT records included errors. -- ``comment``: a comment to indicate why a test would fail. - -For each file, create MongoClient initialized with the mongodb+srv connection -string. You SHOULD verify that the client's initial seed list matches the list of -seeds. You MUST verify that the set of ServerDescriptions in the client's -TopologyDescription eventually matches the list of hosts. You MUST verify that -each of the values of the Connection String Options under ``options`` match the -Client's parsed value for that option. There may be other options parsed by -the Client as well, which a test does not verify. In ``uri-with-auth`` the URI -contains a user/password set and additional options are provided in -``parsed_options`` so that tests can verify authentication is maintained when -evaluating URIs. You MUST verify that an error has been thrown if ``error`` is -present. diff --git a/test/spec/dns-txt-records/longer-parent-in-return.json b/test/spec/dns-txt-records/longer-parent-in-return.json deleted file mode 100644 index 9a8267eaeb..0000000000 --- a/test/spec/dns-txt-records/longer-parent-in-return.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.sub.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - }, - "comment": "Is correct, as returned host name shared the URI root \"test.build.10gen.cc\"." -} diff --git a/test/spec/dns-txt-records/longer-parent-in-return.yml b/test/spec/dns-txt-records/longer-parent-in-return.yml deleted file mode 100644 index e77c4570d3..0000000000 --- a/test/spec/dns-txt-records/longer-parent-in-return.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test18.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.sub.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true -comment: Is correct, as returned host name shared the URI root "test.build.10gen.cc". diff --git a/test/spec/dns-txt-records/misformatted-option.json b/test/spec/dns-txt-records/misformatted-option.json deleted file mode 100644 index 3c8c29ace6..0000000000 --- a/test/spec/dns-txt-records/misformatted-option.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test8.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the options in the TXT record are incorrectly formatted (misses value)." 
-} diff --git a/test/spec/dns-txt-records/misformatted-option.yml b/test/spec/dns-txt-records/misformatted-option.yml deleted file mode 100644 index 9669772cb3..0000000000 --- a/test/spec/dns-txt-records/misformatted-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test8.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because the options in the TXT record are incorrectly formatted (misses value). diff --git a/test/spec/dns-txt-records/no-results.json b/test/spec/dns-txt-records/no-results.json deleted file mode 100644 index c1dc02d281..0000000000 --- a/test/spec/dns-txt-records/no-results.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test4.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because no SRV records are present for this URI." -} diff --git a/test/spec/dns-txt-records/no-results.yml b/test/spec/dns-txt-records/no-results.yml deleted file mode 100644 index e09bd060c2..0000000000 --- a/test/spec/dns-txt-records/no-results.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test4.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because no SRV records are present for this URI. diff --git a/test/spec/dns-txt-records/not-enough-parts.json b/test/spec/dns-txt-records/not-enough-parts.json deleted file mode 100644 index 7cfce2ec57..0000000000 --- a/test/spec/dns-txt-records/not-enough-parts.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because host in URI does not have {hostname}, {domainname} and {tld}." -} diff --git a/test/spec/dns-txt-records/not-enough-parts.yml b/test/spec/dns-txt-records/not-enough-parts.yml deleted file mode 100644 index b36fa4a5de..0000000000 --- a/test/spec/dns-txt-records/not-enough-parts.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because host in URI does not have {hostname}, {domainname} and {tld}. 
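The fixture files in this directory all follow the format described in the README removed above; a hedged sketch of how a runner might assert one of them (the parsedResult shape and the surrounding parse/resolve plumbing are assumed, not taken from this patch; the eventual-hosts check via SDAM is omitted):

'use strict';
const { expect } = require('chai');

// testCase is one parsed JSON fixture, e.g. one-result-default-port.json.
function runSeedlistFixture(testCase, parsedResult) {
  if (testCase.error) {
    // For error cases the parse/resolve step is expected to have failed.
    expect(parsedResult.error).to.exist;
    return;
  }

  // Initial seeds discovered from the SRV record.
  expect(parsedResult.seeds).to.have.members(testCase.seeds);

  // Every option listed in the fixture must match the client's parsed value;
  // the client may also parse options the fixture does not list.
  Object.keys(testCase.options || {}).forEach(key => {
    expect(parsedResult.options[key]).to.equal(testCase.options[key]);
  });

  // parsed_options carries credentials for the uri-with-auth case.
  if (testCase.parsed_options) {
    expect(parsedResult.auth).to.deep.equal(testCase.parsed_options);
  }
}
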
diff --git a/test/spec/dns-txt-records/one-result-default-port.json b/test/spec/dns-txt-records/one-result-default-port.json deleted file mode 100644 index cebb3b1ec3..0000000000 --- a/test/spec/dns-txt-records/one-result-default-port.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "uri": "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-result-default-port.yml b/test/spec/dns-txt-records/one-result-default-port.yml deleted file mode 100644 index 395bcdc968..0000000000 --- a/test/spec/dns-txt-records/one-result-default-port.yml +++ /dev/null @@ -1,10 +0,0 @@ -uri: "mongodb+srv://test3.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/one-txt-record-multiple-strings.json b/test/spec/dns-txt-records/one-txt-record-multiple-strings.json deleted file mode 100644 index 622668c351..0000000000 --- a/test/spec/dns-txt-records/one-txt-record-multiple-strings.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "uri": "mongodb+srv://test11.test.build.10gen.cc/", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml b/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml deleted file mode 100644 index 90a702cdbe..0000000000 --- a/test/spec/dns-txt-records/one-txt-record-multiple-strings.yml +++ /dev/null @@ -1,10 +0,0 @@ -uri: "mongodb+srv://test11.test.build.10gen.cc/" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/one-txt-record.json b/test/spec/dns-txt-records/one-txt-record.json deleted file mode 100644 index 2385021ad4..0000000000 --- a/test/spec/dns-txt-records/one-txt-record.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "thisDB", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/one-txt-record.yml b/test/spec/dns-txt-records/one-txt-record.yml deleted file mode 100644 index 9356eaa2c2..0000000000 --- a/test/spec/dns-txt-records/one-txt-record.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: thisDB - ssl: true diff --git a/test/spec/dns-txt-records/parent-part-mismatch1.json b/test/spec/dns-txt-records/parent-part-mismatch1.json deleted file mode 100644 index 8d0147a48b..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch1.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test14.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-test\" mismatches URI parent part 
\"test\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch1.yml b/test/spec/dns-txt-records/parent-part-mismatch1.yml deleted file mode 100644 index e35dfdf6d5..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch1.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test14.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-test" mismatches URI parent part "test". diff --git a/test/spec/dns-txt-records/parent-part-mismatch2.json b/test/spec/dns-txt-records/parent-part-mismatch2.json deleted file mode 100644 index 996249eb99..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch2.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test15.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-build\" mismatches URI parent part \"build\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch2.yml b/test/spec/dns-txt-records/parent-part-mismatch2.yml deleted file mode 100644 index 595e5493c4..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch2.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test15.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-build" mismatches URI parent part "build". diff --git a/test/spec/dns-txt-records/parent-part-mismatch3.json b/test/spec/dns-txt-records/parent-part-mismatch3.json deleted file mode 100644 index 69e724af6c..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch3.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test16.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's part \"not-10gen\" mismatches URI parent part \"10gen\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch3.yml b/test/spec/dns-txt-records/parent-part-mismatch3.yml deleted file mode 100644 index 64ca2e708d..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch3.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test16.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's part "not-10gen" mismatches URI parent part "10gen". diff --git a/test/spec/dns-txt-records/parent-part-mismatch4.json b/test/spec/dns-txt-records/parent-part-mismatch4.json deleted file mode 100644 index 254168e34c..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch4.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test17.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's TLD \"not-cc\" mismatches URI TLD \"cc\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch4.yml b/test/spec/dns-txt-records/parent-part-mismatch4.yml deleted file mode 100644 index 226d6fa3bc..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch4.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test17.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's TLD "not-cc" mismatches URI TLD "cc". 
diff --git a/test/spec/dns-txt-records/parent-part-mismatch5.json b/test/spec/dns-txt-records/parent-part-mismatch5.json deleted file mode 100644 index 92c024b4f3..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch5.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test19.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because one of the returned host names' domain name parts \"evil\" mismatches \"test\"." -} diff --git a/test/spec/dns-txt-records/parent-part-mismatch5.yml b/test/spec/dns-txt-records/parent-part-mismatch5.yml deleted file mode 100644 index 1ed2bda4eb..0000000000 --- a/test/spec/dns-txt-records/parent-part-mismatch5.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test19.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because one of the returned host names' domain name parts "evil" mismatches "test". diff --git a/test/spec/dns-txt-records/returned-parent-too-short.json b/test/spec/dns-txt-records/returned-parent-too-short.json deleted file mode 100644 index 676eb0c0d0..0000000000 --- a/test/spec/dns-txt-records/returned-parent-too-short.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test13.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name's parent (build.10gen.cc) misses \"test.\"" -} diff --git a/test/spec/dns-txt-records/returned-parent-too-short.yml b/test/spec/dns-txt-records/returned-parent-too-short.yml deleted file mode 100644 index 397aec8953..0000000000 --- a/test/spec/dns-txt-records/returned-parent-too-short.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test13.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name's parent (build.10gen.cc) misses "test." diff --git a/test/spec/dns-txt-records/returned-parent-wrong.json b/test/spec/dns-txt-records/returned-parent-wrong.json deleted file mode 100644 index 3aabfd8196..0000000000 --- a/test/spec/dns-txt-records/returned-parent-wrong.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test12.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because returned host name is too short and mismatches a parent." -} diff --git a/test/spec/dns-txt-records/returned-parent-wrong.yml b/test/spec/dns-txt-records/returned-parent-wrong.yml deleted file mode 100644 index 1fc3867a0e..0000000000 --- a/test/spec/dns-txt-records/returned-parent-wrong.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test12.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because returned host name is too short and mismatches a parent. 
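The not-enough-parts, parent-part-mismatch, and returned-parent fixtures around this point all exercise a single validation rule; a rough reconstruction of that check, inferred only from the fixture comments rather than from the driver's code:

'use strict';

// Validate an SRV target against the mongodb+srv URI host.
function validateSrvTarget(uriHost, srvTargetHost) {
  const uriParts = uriHost.split('.');
  if (uriParts.length < 3) {
    // not-enough-parts: the URI host needs {hostname}.{domainname}.{tld}
    throw new Error('URI host must have hostname, domain name and TLD');
  }

  // The returned host must end with the URI host's parent domain (everything
  // after the first label), so longer targets such as
  // localhost.sub.test.build.10gen.cc are accepted, while targets that swap
  // or drop a part (not-test, build.10gen.cc, ...) are rejected.
  const parentDomain = uriParts.slice(1).join('.');
  if (!srvTargetHost.endsWith(`.${parentDomain}`)) {
    throw new Error(`SRV target ${srvTargetHost} is not in parent domain ${parentDomain}`);
  }
}

// e.g. validateSrvTarget('test12.test.build.10gen.cc', 'localhost.build.10gen.cc') throws.
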
diff --git a/test/spec/dns-txt-records/two-results-default-port.json b/test/spec/dns-txt-records/two-results-default-port.json deleted file mode 100644 index 66028310a6..0000000000 --- a/test/spec/dns-txt-records/two-results-default-port.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017", - "localhost.test.build.10gen.cc:27018" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/two-results-default-port.yml b/test/spec/dns-txt-records/two-results-default-port.yml deleted file mode 100644 index 61d38b5e82..0000000000 --- a/test/spec/dns-txt-records/two-results-default-port.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test1.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 - - localhost.test.build.10gen.cc:27018 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/two-results-nonstandard-port.json b/test/spec/dns-txt-records/two-results-nonstandard-port.json deleted file mode 100644 index 4900f7cff1..0000000000 --- a/test/spec/dns-txt-records/two-results-nonstandard-port.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27018", - "localhost.test.build.10gen.cc:27019" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/two-results-nonstandard-port.yml b/test/spec/dns-txt-records/two-results-nonstandard-port.yml deleted file mode 100644 index 7185f52cd6..0000000000 --- a/test/spec/dns-txt-records/two-results-nonstandard-port.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test2.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27018 - - localhost.test.build.10gen.cc:27019 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - ssl: true diff --git a/test/spec/dns-txt-records/two-txt-records.json b/test/spec/dns-txt-records/two-txt-records.json deleted file mode 100644 index f0654ef6cb..0000000000 --- a/test/spec/dns-txt-records/two-txt-records.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test6.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because there are two TXT records." -} diff --git a/test/spec/dns-txt-records/two-txt-records.yml b/test/spec/dns-txt-records/two-txt-records.yml deleted file mode 100644 index c6093613d4..0000000000 --- a/test/spec/dns-txt-records/two-txt-records.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test6.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because there are two TXT records. diff --git a/test/spec/dns-txt-records/txt-record-not-allowed-option.json b/test/spec/dns-txt-records/txt-record-not-allowed-option.json deleted file mode 100644 index 2a5cf2f007..0000000000 --- a/test/spec/dns-txt-records/txt-record-not-allowed-option.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because socketTimeoutMS is not an allowed option." 
-} diff --git a/test/spec/dns-txt-records/txt-record-not-allowed-option.yml b/test/spec/dns-txt-records/txt-record-not-allowed-option.yml deleted file mode 100644 index f4ff1cfd15..0000000000 --- a/test/spec/dns-txt-records/txt-record-not-allowed-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test10.test.build.10gen.cc/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because socketTimeoutMS is not an allowed option. diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json b/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json deleted file mode 100644 index 0ebc737bd5..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/?ssl=false", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "thisDB", - "ssl": false - } -} diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml b/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml deleted file mode 100644 index 2a922aa234..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-ssl-option.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/?ssl=false" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: thisDB - ssl: false diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json b/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json deleted file mode 100644 index 2626ba6083..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB", - "seeds": [ - "localhost.test.build.10gen.cc:27017" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "options": { - "replicaSet": "repl0", - "authSource": "otherDB", - "ssl": true - } -} diff --git a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml b/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml deleted file mode 100644 index a9015599e7..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-overridden-uri-option.yml +++ /dev/null @@ -1,11 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc/?authSource=otherDB" -seeds: - - localhost.test.build.10gen.cc:27017 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -options: - replicaSet: repl0 - authSource: otherDB - ssl: true diff --git a/test/spec/dns-txt-records/txt-record-with-unallowed-option.json b/test/spec/dns-txt-records/txt-record-with-unallowed-option.json deleted file mode 100644 index 0d333a459d..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-unallowed-option.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test7.test.build.10gen.cc/", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because \"ssl\" is not an allowed option." 
-} diff --git a/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml b/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml deleted file mode 100644 index ba3877ee9f..0000000000 --- a/test/spec/dns-txt-records/txt-record-with-unallowed-option.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test7.test.build.10gen.cc/" -seeds: [] -hosts: [] -error: true -comment: Should fail because "ssl" is not an allowed option. diff --git a/test/spec/dns-txt-records/uri-with-auth.json b/test/spec/dns-txt-records/uri-with-auth.json deleted file mode 100644 index cc7257d85b..0000000000 --- a/test/spec/dns-txt-records/uri-with-auth.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "uri": "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [ - "localhost.test.build.10gen.cc:27017", - "localhost.test.build.10gen.cc:27018" - ], - "hosts": [ - "localhost:27017", - "localhost:27018", - "localhost:27019" - ], - "parsed_options": { - "user": "auser", - "password": "apass" - }, - "comment": "Should preserve auth credentials" -} diff --git a/test/spec/dns-txt-records/uri-with-auth.yml b/test/spec/dns-txt-records/uri-with-auth.yml deleted file mode 100644 index 9ecfca73ea..0000000000 --- a/test/spec/dns-txt-records/uri-with-auth.yml +++ /dev/null @@ -1,12 +0,0 @@ -uri: "mongodb+srv://auser:apass@test1.test.build.10gen.cc/?replicaSet=repl0" -seeds: - - localhost.test.build.10gen.cc:27017 - - localhost.test.build.10gen.cc:27018 -hosts: - - localhost:27017 - - localhost:27018 - - localhost:27019 -parsed_options: - user: auser - password: apass -comment: Should preserve auth credentials diff --git a/test/spec/dns-txt-records/uri-with-port.json b/test/spec/dns-txt-records/uri-with-port.json deleted file mode 100644 index b981e2a1bf..0000000000 --- a/test/spec/dns-txt-records/uri-with-port.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the mongodb+srv URI includes a port." -} diff --git a/test/spec/dns-txt-records/uri-with-port.yml b/test/spec/dns-txt-records/uri-with-port.yml deleted file mode 100644 index f1944dcdd9..0000000000 --- a/test/spec/dns-txt-records/uri-with-port.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc:8123/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because the mongodb+srv URI includes a port. diff --git a/test/spec/dns-txt-records/uri-with-two-hosts.json b/test/spec/dns-txt-records/uri-with-two-hosts.json deleted file mode 100644 index 5261a39cfa..0000000000 --- a/test/spec/dns-txt-records/uri-with-two-hosts.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "uri": "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0", - "seeds": [], - "hosts": [], - "error": true, - "comment": "Should fail because the mongodb+srv URI includes two host names." -} diff --git a/test/spec/dns-txt-records/uri-with-two-hosts.yml b/test/spec/dns-txt-records/uri-with-two-hosts.yml deleted file mode 100644 index 3b2189d48b..0000000000 --- a/test/spec/dns-txt-records/uri-with-two-hosts.yml +++ /dev/null @@ -1,5 +0,0 @@ -uri: "mongodb+srv://test5.test.build.10gen.cc,test6.test.build.10gen.cc/?replicaSet=repl0" -seeds: [] -hosts: [] -error: true -comment: Should fail because the mongodb+srv URI includes two host names. 
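The runner and unit-test hunks below finish the migration onto the unified Topology and the relocated connection string parser; a consolidated sketch of that surface as the tests in this patch use it (require paths are as seen from test/functional; the close callback and the parsed result's hosts field are assumptions, not shown in this diff):

'use strict';
const { Topology } = require('../../lib/core');
const { parseConnectionString } = require('../../lib/connection_string');

// The unified Topology takes a seed list of { host, port } pairs plus options,
// replacing the removed Server/ReplSet/Mongos constructors.
const topology = new Topology([{ host: 'localhost', port: 27017 }], { replicaSet: 'rs0' });

topology.connect(err => {
  if (err) throw err;

  // Sessions come from the topology's own pool now, as in the
  // transactions.test.js hunk above.
  const sessionPool = topology.s.sessionPool;
  console.log('session pool ready:', sessionPool != null);

  topology.close(() => console.log('closed'));
});

// lib/core/uri_parser moved to lib/connection_string and is consumed as
// parseConnectionString, as in the uri_options_spec and runner hunks.
parseConnectionString('mongodb://localhost:27017/test?replicaSet=rs0', (err, result) => {
  if (err) throw err;
  console.log(result.hosts);
});
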
diff --git a/test/tools/runner/config.js b/test/tools/runner/config.js
index 6ce05145f0..cdc8688774 100644
--- a/test/tools/runner/config.js
+++ b/test/tools/runner/config.js
@@ -62,18 +62,12 @@ class NativeConfiguration {
     return this.options.replicaSet;
   }
 
-  usingUnifiedTopology() {
-    return true;
-  }
-
   newClient(dbOptions, serverOptions) {
     // support MongoClient contructor form (url, options) for `newClient`
     if (typeof dbOptions === 'string') {
       return new MongoClient(
         dbOptions,
-        this.usingUnifiedTopology()
-          ? Object.assign({ minHeartbeatFrequencyMS: 100 }, serverOptions)
-          : serverOptions
+        Object.assign({ minHeartbeatFrequencyMS: 100 }, serverOptions)
       );
     }
 
@@ -138,21 +132,7 @@ class NativeConfiguration {
     options = Object.assign({}, options);
     const hosts = host == null ? [].concat(this.options.hosts) : [{ host, port }];
 
-    if (this.usingUnifiedTopology()) {
-      return new core.Topology(hosts, options);
-    }
-
-    if (this.topologyType === TopologyType.ReplicaSetWithPrimary) {
-      options.poolSize = 1;
-      options.autoReconnect = false;
-      return new core.ReplSet(hosts, options);
-    }
-
-    if (this.topologyType === TopologyType.Sharded) {
-      return new core.Mongos(hosts, options);
-    }
-
-    return new core.Server(Object.assign({ host, port }, options));
+    return new core.Topology(hosts, options);
   }
 
   url(username, password, options) {
diff --git a/test/tools/runner/filters/unified_filter.js b/test/tools/runner/filters/unified_filter.js
deleted file mode 100644
index 67b155c909..0000000000
--- a/test/tools/runner/filters/unified_filter.js
+++ /dev/null
@@ -1,25 +0,0 @@
-'use strict';
-
-/**
- * Filter for tests that require the unified topology
- *
- * example:
- * metadata: {
- *   requires: {
- *     unifiedTopology:
- *   }
- * }
- */
-class UnifiedTopologyFilter {
-  filter(test) {
-    const unifiedTopology =
-      test.metadata && test.metadata.requires && test.metadata.requires.unifiedTopology;
-
-    return (
-      typeof unifiedTopology !== 'boolean' ||
-      unifiedTopology === !!process.env.MONGODB_UNIFIED_TOPOLOGY
-    );
-  }
-}
-
-module.exports = UnifiedTopologyFilter;
diff --git a/test/tools/runner/index.js b/test/tools/runner/index.js
index 15fcac4a06..49ee9870c1 100644
--- a/test/tools/runner/index.js
+++ b/test/tools/runner/index.js
@@ -4,7 +4,7 @@ const path = require('path');
 const fs = require('fs');
 const MongoClient = require('../../..').MongoClient;
 const TestConfiguration = require('./config');
-const parseConnectionString = require('../../../lib/core/uri_parser');
+const parseConnectionString = require('../../../lib/connection_string').parseConnectionString;
 const eachAsync = require('../../../lib/core/utils').eachAsync;
 const mock = require('mongodb-mock-server');
 
diff --git a/test/tools/runner/plugins/client_leak_checker.js b/test/tools/runner/plugins/client_leak_checker.js
index ac2863df50..6ee18fa062 100644
--- a/test/tools/runner/plugins/client_leak_checker.js
+++ b/test/tools/runner/plugins/client_leak_checker.js
@@ -33,14 +33,9 @@ function unifiedTopologyIsConnected(client) {
   after(function() {
     wtfnode.dump();
 
-    const isUnifiedTopology = this.configuration.usingUnifiedTopology;
     const traces = [];
     const openClientCount = activeClients.reduce((count, client) => {
-      const isConnected = isUnifiedTopology
-        ? unifiedTopologyIsConnected(client)
-        : client.isConnected();
-
-      if (isConnected) {
+      if (unifiedTopologyIsConnected(client)) {
        traces.push(client.trace);
        return count + 1;
      }
diff --git a/test/tools/runner/plugins/session_leak_checker.js b/test/tools/runner/plugins/session_leak_checker.js
index 96c0167d7a..5787ab6020 100644
--- a/test/tools/runner/plugins/session_leak_checker.js
+++ b/test/tools/runner/plugins/session_leak_checker.js
@@ -54,7 +54,7 @@ beforeEach('Session Leak Before Each - setup session tracking', function() {
     return _endAllPooledSessions.apply(this, arguments);
   });
 
-  [core.Server, core.ReplSet, core.Mongos].forEach(topology => {
+  [core.Topology].forEach(topology => {
    const _endSessions = topology.prototype.endSessions;
    sandbox.stub(topology.prototype, 'endSessions').callsFake(function(sessions) {
      sessions = Array.isArray(sessions) ? sessions : [sessions];
diff --git a/test/unit/client_metadata.test.js b/test/unit/client_metadata.test.js
deleted file mode 100644
index 21b5127418..0000000000
--- a/test/unit/client_metadata.test.js
+++ /dev/null
@@ -1,51 +0,0 @@
-'use strict';
-const mock = require('mongodb-mock-server');
-const expect = require('chai').expect;
-
-describe('Client Metadata', function() {
-  let mockServer;
-  before(() => mock.createServer().then(server => (mockServer = server)));
-  after(() => mock.cleanup());
-
-  it('should report the correct platform in client metadata', function(done) {
-    const ismasters = [];
-    mockServer.setMessageHandler(request => {
-      const doc = request.document;
-      if (doc.ismaster) {
-        ismasters.push(doc);
-        request.reply(mock.DEFAULT_ISMASTER);
-      } else {
-        request.reply({ ok: 1 });
-      }
-    });
-
-    const isUnifiedTopology = this.configuration.usingUnifiedTopology();
-    const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`);
-    client.connect(err => {
-      expect(err).to.not.exist;
-      this.defer(() => client.close());
-
-      client.db().command({ ping: 1 }, err => {
-        expect(err).to.not.exist;
-
-        if (isUnifiedTopology) {
-          expect(ismasters).to.have.length.greaterThan(1);
-          ismasters.forEach(ismaster =>
-            expect(ismaster)
-              .nested.property('client.platform')
-              .to.match(/unified/)
-          );
-        } else {
-          expect(ismasters).to.have.length(1);
-          ismasters.forEach(ismaster =>
-            expect(ismaster)
-              .nested.property('client.platform')
-              .to.match(/legacy/)
-          );
-        }
-
-        done();
-      });
-    });
-  });
-});
diff --git a/test/unit/core/connection_string.test.js b/test/unit/core/connection_string.test.js
index 6e5c914216..30b194f034 100644
--- a/test/unit/core/connection_string.test.js
+++ b/test/unit/core/connection_string.test.js
@@ -1,6 +1,6 @@
 'use strict';
 
-const parseConnectionString = require('../../../lib/core/uri_parser');
+const parseConnectionString = require('../../../lib/connection_string').parseConnectionString;
 const punycode = require('punycode');
 const MongoParseError = require('../../../lib/core/error').MongoParseError;
 const loadSpecTests = require('../../spec').loadSpecTests;
diff --git a/test/unit/core/mongodb_srv.test.js b/test/unit/core/mongodb_srv.test.js
index 846d3715d2..6d45fd554c 100644
--- a/test/unit/core/mongodb_srv.test.js
+++ b/test/unit/core/mongodb_srv.test.js
@@ -1,7 +1,7 @@
 'use strict';
 const fs = require('fs');
 const path = require('path');
-const parseConnectionString = require('../../../lib/core/uri_parser');
+const parseConnectionString = require('../../../lib/connection_string').parseConnectionString;
 const expect = require('chai').expect;
 
 describe('mongodb+srv', function() {
diff --git a/test/unit/core/response_test.js.test.js b/test/unit/core/response_test.js.test.js
index 4a6dd68d48..6bec67bcb6 100644
--- a/test/unit/core/response_test.js.test.js
+++ b/test/unit/core/response_test.js.test.js
@@ -3,7 +3,7 @@
 const expect = require('chai').expect;
 const MongoError = require('../../../lib/core/error').MongoError;
 const mock = require('mongodb-mock-server');
-const Server = require('../../../lib/core/topologies/server');
+const Topology = require('../../../lib/core').Topology;
 const Long = require('bson').Long;
 
 const test = {};
@@ -22,7 +22,7 @@ describe('Response', function() {
      errmsg: 'Cursor not found (namespace: "liveearth.entityEvents", id: 2018648316188432590).'
    };
 
-    const client = new Server(test.server.address());
+    const client = new Topology(test.server.address());
 
    test.server.setMessageHandler(request => {
      const doc = request.document;
diff --git a/test/unit/core/scram_iterations.test.js b/test/unit/core/scram_iterations.test.js
index 1464fe3b7e..358293a71d 100644
--- a/test/unit/core/scram_iterations.test.js
+++ b/test/unit/core/scram_iterations.test.js
@@ -2,7 +2,7 @@
 
 const expect = require('chai').expect;
 const mock = require('mongodb-mock-server');
-const Server = require('../../../lib/core/topologies/server');
+const Topology = require('../../../lib/core').Topology;
 const Buffer = require('safe-buffer').Buffer;
 const MongoCredentials = require('../../../lib/core/auth/mongo_credentials').MongoCredentials;
 
@@ -48,7 +48,7 @@ describe('SCRAM Iterations Tests', function() {
      }
    });
 
-    const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+    const client = new Topology(test.server.uri(), { credentials });
    client.on('error', err => {
      let testErr;
      try {
@@ -100,7 +100,7 @@ describe('SCRAM Iterations Tests', function() {
      }
    });
 
-    const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+    const client = new Topology(test.server.uri(), { credentials });
    client.on('error', err => {
      expect(err).to.not.be.null;
      expect(err)
@@ -143,12 +143,12 @@ describe('SCRAM Iterations Tests', function() {
      }
    });
 
-    const client = new Server(Object.assign({}, test.server.address(), { credentials }));
+    const client = new Topology(test.server.uri(), { credentials });
    client.on('error', err => {
      expect(err).to.not.be.null;
      expect(err)
        .to.have.property('message')
-        .that.matches(/failed to connect to server/);
+        .that.matches(/connection(.+)closed/);
 
      client.destroy(done);
    });
diff --git a/test/unit/core/sessions.test.js b/test/unit/core/sessions.test.js
index bf58185ba4..6495020255 100644
--- a/test/unit/core/sessions.test.js
+++ b/test/unit/core/sessions.test.js
@@ -6,7 +6,7 @@ const genClusterTime = require('./common').genClusterTime;
 const sessionCleanupHandler = require('./common').sessionCleanupHandler;
 
 const core = require('../../../lib/core');
-const Server = core.Server;
+const Topology = core.Topology;
 const ServerSessionPool = core.Sessions.ServerSessionPool;
 const ServerSession = core.Sessions.ServerSession;
 const ClientSession = core.Sessions.ClientSession;
@@ -34,8 +34,8 @@ describe('Sessions', function() {
    it('should default to `null` for `clusterTime`', {
      metadata: { requires: { topology: 'single' } },
      test: function(done) {
-        const client = new Server();
-        const sessionPool = new ServerSessionPool(client);
+        const client = new Topology('localhost:27017');
+        const sessionPool = client.s.sessionPool;
        const session = new ClientSession(client, sessionPool);
        done = sessionCleanupHandler(session, sessionPool, done);
 
@@ -48,8 +48,8 @@ describe('Sessions', function() {
      metadata: { requires: { topology: 'single' } },
      test: function(done) {
        const clusterTime = genClusterTime(Date.now());
-        const client = new Server();
-        const sessionPool = new ServerSessionPool(client);
+        const client = new Topology('localhost:27017');
+        const sessionPool = client.s.sessionPool;
        const session = new ClientSession(client, sessionPool, { initialClusterTime: clusterTime });
        done = sessionCleanupHandler(session, sessionPool, done);
 
@@ -80,7 +80,7 @@ describe('Sessions', function() {
        });
      })
      .then(() => {
-        test.client = new Server(test.server.address());
+        test.client = new Topology(test.server.address());
 
        return new Promise((resolve, reject) => {
          test.client.once('error', reject);
diff --git a/test/unit/core/write_concern_error.test.js b/test/unit/core/write_concern_error.test.js
index b7374d44f4..40dd8722b4 100644
--- a/test/unit/core/write_concern_error.test.js
+++ b/test/unit/core/write_concern_error.test.js
@@ -1,5 +1,5 @@
 'use strict';
-const ReplSet = require('../../../lib/core/topologies/replset');
+const Topology = require('../../../lib/core').Topology;
 const mock = require('mongodb-mock-server');
 const ReplSetFixture = require('./common').ReplSetFixture;
 const MongoWriteConcernError = require('../../../lib/core/error').MongoWriteConcernError;
@@ -7,14 +7,6 @@ const expect = require('chai').expect;
 
 describe('WriteConcernError', function() {
   let test;
-
-  // mock ops store from node-mongodb-native
-  const mockDisconnectHandler = {
-    add: () => {},
-    execute: () => {},
-    flush: () => {}
-  };
-
   const RAW_USER_WRITE_CONCERN_CMD = {
     createUser: 'foo2',
     pwd: 'pwd',
@@ -44,16 +36,9 @@ describe('WriteConcernError', function() {
 
   function makeAndConnectReplSet(cb) {
     let invoked = false;
-    const replSet = new ReplSet(
+    const replSet = new Topology(
      [test.primaryServer.address(), test.firstSecondaryServer.address()],
-      {
-        setName: 'rs',
-        haInterval: 10000,
-        connectionTimeout: 3000,
-        disconnectHandler: mockDisconnectHandler,
-        secondaryOnlyConnectionAllowed: true,
-        size: 1
-      }
+      { replicaSet: 'rs' }
    );
 
    replSet.once('error', err => {
@@ -61,14 +46,16 @@ describe('WriteConcernError', function() {
        return;
      }
      invoked = true;
-      cb(err, null);
+      cb(err);
    });
+
    replSet.on('connect', () => {
-      if (invoked || !replSet.s.replicaSetState.hasPrimary()) {
+      if (invoked) {
        return;
      }
+
      invoked = true;
-      cb(null, replSet);
+      cb(undefined, replSet);
    });
 
    replSet.connect();
@@ -86,7 +73,7 @@ describe('WriteConcernError', function() {
 
    makeAndConnectReplSet((err, replSet) => {
      // cleanup the server before calling done
-      const cleanup = err => replSet.destroy(err2 => done(err || err2));
+      const cleanup = err => replSet.close({ force: true }, err2 => done(err || err2));
 
      if (err) {
        return cleanup(err);
diff --git a/test/unit/db.test.js b/test/unit/db.test.js
index aef7e4b4c4..b28c889a48 100644
--- a/test/unit/db.test.js
+++ b/test/unit/db.test.js
@@ -24,6 +24,10 @@ class MockTopology extends EventEmitter {
     return false;
   }
 
+  shouldCheckForSessionSupport() {
+    return false;
+  }
+
   command(namespace, command, options, callback) {
     callback(null, { result: { ok: 1 } });
   }
diff --git a/test/unit/sdam/server_selection/select_servers.test.js b/test/unit/sdam/server_selection/select_servers.test.js
index 218b3f76f6..b85b2de874 100644
--- a/test/unit/sdam/server_selection/select_servers.test.js
+++ b/test/unit/sdam/server_selection/select_servers.test.js
@@ -85,7 +85,6 @@ describe('selectServer', function() {
      let completed = 0;
      function finish() {
        completed++;
-        console.log(completed);
        if (completed === toSelect) done();
      }
 
diff --git a/test/unit/sdam/spec.test.js b/test/unit/sdam/spec.test.js
index 8d8eaa3f70..ab738ecd86 100644
--- a/test/unit/sdam/spec.test.js
+++ b/test/unit/sdam/spec.test.js
@@ -5,7 +5,7 @@ const Topology = require('../../../lib/core/sdam/topology').Topology;
 const Server = require('../../../lib/core/sdam/server').Server;
 const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
 const sdamEvents = require('../../../lib/core/sdam/events');
-const parse = require('../../../lib/core/uri_parser');
+const parse = require('../../../lib/connection_string').parseConnectionString;
 const sinon = require('sinon');
 const EJSON = require('mongodb-extjson');
 
diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js
index b84502ef75..6e7ac43ee8 100644
--- a/test/unit/sdam/topology.test.js
+++ b/test/unit/sdam/topology.test.js
@@ -1,12 +1,72 @@
 'use strict';
-const Topology = require('../../../lib/core/sdam/topology').Topology;
+
 const Server = require('../../../lib/core/sdam/server').Server;
 const ServerDescription = require('../../../lib/core/sdam/server_description').ServerDescription;
 const mock = require('mongodb-mock-server');
 const expect = require('chai').expect;
 const sinon = require('sinon');
 
+const core = require('../../../lib/core');
+const BSON = core.BSON;
+const Topology = core.Topology;
+
 describe('Topology (unit)', function() {
+  describe('client metadata', function() {
+    let mockServer;
+    before(() => mock.createServer().then(server => (mockServer = server)));
+    after(() => mock.cleanup());
+
+    it('should correctly pass appname', {
+      metadata: { requires: { topology: 'single' } },
+
+      test: function(done) {
+        // Attempt to connect
+        var server = new Topology(
+          [{ host: this.configuration.host, port: this.configuration.port }],
+          {
+            bson: new BSON(),
+            appname: 'My application name'
+          }
+        );
+
+        expect(server.clientMetadata.application.name).to.equal('My application name');
+        done();
+      }
+    });
+
+    it('should report the correct platform in client metadata', function(done) {
+      const ismasters = [];
+      mockServer.setMessageHandler(request => {
+        const doc = request.document;
+        if (doc.ismaster) {
+          ismasters.push(doc);
+          request.reply(mock.DEFAULT_ISMASTER);
+        } else {
+          request.reply({ ok: 1 });
+        }
+      });
+
+      const client = this.configuration.newClient(`mongodb://${mockServer.uri()}/`);
+      client.connect(err => {
+        expect(err).to.not.exist;
+        this.defer(() => client.close());
+
+        client.db().command({ ping: 1 }, err => {
+          expect(err).to.not.exist;
+
+          expect(ismasters).to.have.length.greaterThan(1);
+          ismasters.forEach(ismaster =>
+            expect(ismaster)
+              .nested.property('client.platform')
+              .to.match(/unified/)
+          );
+
+          done();
+        });
+      });
+    });
+  });
+
   describe('shouldCheckForSessionSupport', function() {
     beforeEach(function() {
       this.sinon = sinon.sandbox.create();