diff --git a/src/bulk/common.ts b/src/bulk/common.ts index 00e5dee4e6..068a1b4611 100644 --- a/src/bulk/common.ts +++ b/src/bulk/common.ts @@ -1,6 +1,6 @@ import { PromiseProvider } from '../promise_provider'; import { Long, ObjectId, Document, BSONSerializeOptions, resolveBSONOptions } from '../bson'; -import { MongoError, MongoWriteConcernError, AnyError } from '../error'; +import { MongoError, MongoWriteConcernError, AnyError, MONGODB_ERROR_CODES } from '../error'; import { applyRetryableWrites, executeLegacyOperation, @@ -20,9 +20,6 @@ import type { Topology } from '../sdam/topology'; import type { CommandOperationOptions, CollationOptions } from '../operations/command'; import type { Hint } from '../operations/operation'; -// Error codes -const WRITE_CONCERN_ERROR = 64; - /** @public */ export const BatchType = { INSERT: 1, @@ -307,7 +304,9 @@ export class BulkWriteResult { if (i === 0) errmsg = errmsg + ' and '; } - return new WriteConcernError(new MongoError({ errmsg: errmsg, code: WRITE_CONCERN_ERROR })); + return new WriteConcernError( + new MongoError({ errmsg: errmsg, code: MONGODB_ERROR_CODES.WriteConcernFailed }) + ); } } diff --git a/src/cmap/commands.ts b/src/cmap/commands.ts index eadbf3d1c3..8ae14da271 100644 --- a/src/cmap/commands.ts +++ b/src/cmap/commands.ts @@ -30,7 +30,7 @@ export type WriteProtocolMessageType = Query | Msg | GetMore | KillCursor; /** @internal */ export interface OpQueryOptions extends CommandOptions { - socketTimeout?: number; + socketTimeoutMS?: number; session?: ClientSession; documentsReturnedIn?: string; numberToSkip?: number; diff --git a/src/cmap/connect.ts b/src/cmap/connect.ts index 615e4d9f95..cbb38d03b4 100644 --- a/src/cmap/connect.ts +++ b/src/cmap/connect.ts @@ -93,7 +93,7 @@ function performInitialHandshake( const handshakeOptions: Document = Object.assign({}, options); if (typeof options.connectTimeoutMS === 'number') { // The handshake technically is a monitoring check, so its socket timeout should be 
connectTimeoutMS - handshakeOptions.socketTimeout = options.connectTimeoutMS; + handshakeOptions.socketTimeoutMS = options.connectTimeoutMS; } const start = new Date().getTime(); @@ -262,13 +262,13 @@ const SOCKET_ERROR_EVENTS = new Set(SOCKET_ERROR_EVENT_LIST); function makeConnection(options: ConnectionOptions, _callback: CallbackWithType) { const useTLS = options.tls ?? false; const keepAlive = options.keepAlive ?? true; - const socketTimeout = options.socketTimeout ?? 0; + const socketTimeoutMS = options.socketTimeoutMS ?? Reflect.get(options, 'socketTimeout') ?? 0; const noDelay = options.noDelay ?? true; const connectionTimeout = options.connectTimeoutMS ?? 30000; const rejectUnauthorized = options.rejectUnauthorized ?? true; const keepAliveInitialDelay = - ((options.keepAliveInitialDelay ?? 120000) > socketTimeout - ? Math.round(socketTimeout / 2) + ((options.keepAliveInitialDelay ?? 120000) > socketTimeoutMS + ? Math.round(socketTimeoutMS / 2) : options.keepAliveInitialDelay) ?? 
120000; let socket: Stream; @@ -320,7 +320,7 @@ function makeConnection(options: ConnectionOptions, _callback: CallbackWithType< } } - socket.setTimeout(socketTimeout); + socket.setTimeout(socketTimeoutMS); callback(undefined, socket); } diff --git a/src/cmap/connection.ts b/src/cmap/connection.ts index a8924f86f7..50f3cd099e 100644 --- a/src/cmap/connection.ts +++ b/src/cmap/connection.ts @@ -80,7 +80,7 @@ export interface CommandOptions extends BSONSerializeOptions { raw?: boolean; monitoring?: boolean; fullResult?: boolean; - socketTimeout?: number; + socketTimeoutMS?: number; /** Session to use for the operation */ session?: ClientSession; documentsReturnedIn?: string; @@ -120,7 +120,7 @@ export interface ConnectionOptions keepAlive?: boolean; keepAliveInitialDelay?: number; noDelay?: boolean; - socketTimeout?: number; + socketTimeoutMS?: number; cancellationToken?: EventEmitter; metadata: ClientMetadata; @@ -136,7 +136,7 @@ export interface DestroyOptions { export class Connection extends EventEmitter { id: number | ''; address: string; - socketTimeout: number; + socketTimeoutMS: number; monitorCommands: boolean; closed: boolean; destroyed: boolean; @@ -172,7 +172,7 @@ export class Connection extends EventEmitter { super(); this.id = options.id; this.address = streamIdentifier(stream); - this.socketTimeout = options.socketTimeout ?? 0; + this.socketTimeoutMS = options.socketTimeoutMS ?? 
0; this.monitorCommands = options.monitorCommands; this.serverApi = options.serverApi; this.closed = false; @@ -674,7 +674,7 @@ function messageHandler(conn: Connection) { // requeue the callback for next synthetic request conn[kQueue].set(message.requestId, operationDescription); } else if (operationDescription.socketTimeoutOverride) { - conn[kStream].setTimeout(conn.socketTimeout); + conn[kStream].setTimeout(conn.socketTimeoutMS); } try { @@ -764,9 +764,9 @@ function write( } } - if (typeof options.socketTimeout === 'number') { + if (typeof options.socketTimeoutMS === 'number') { operationDescription.socketTimeoutOverride = true; - conn[kStream].setTimeout(options.socketTimeout); + conn[kStream].setTimeout(options.socketTimeoutMS); } // if command monitoring is enabled we need to modify the callback here diff --git a/src/error.ts b/src/error.ts index aae4985bd7..6d2dcd2105 100644 --- a/src/error.ts +++ b/src/error.ts @@ -7,26 +7,55 @@ export type AnyError = MongoError | Error; const kErrorLabels = Symbol('errorLabels'); +/** @internal MongoDB Error Codes */ +export const MONGODB_ERROR_CODES = Object.freeze({ + HostUnreachable: 6, + HostNotFound: 7, + NetworkTimeout: 89, + ShutdownInProgress: 91, + PrimarySteppedDown: 189, + ExceededTimeLimit: 262, + SocketException: 9001, + NotMaster: 10107, + InterruptedAtShutdown: 11600, + InterruptedDueToReplStateChange: 11602, + NotMasterNoSlaveOk: 13435, + NotMasterOrSecondary: 13436, + StaleShardVersion: 63, + StaleEpoch: 150, + StaleConfig: 13388, + RetryChangeStream: 234, + FailedToSatisfyReadPreference: 133, + CursorNotFound: 43, + LegacyNotPrimary: 10058, + WriteConcernFailed: 64, + NamespaceNotFound: 26, + IllegalOperation: 20, + MaxTimeMSExpired: 50, + UnknownReplWriteConcern: 79, + UnsatisfiableWriteConcern: 100 +} as const); + // From spec@https://github.com/mongodb/specifications/blob/f93d78191f3db2898a59013a7ed5650352ef6da8/source/change-streams/change-streams.rst#resumable-error -export const 
GET_MORE_RESUMABLE_CODES = new Set([ - 6, // HostUnreachable - 7, // HostNotFound - 89, // NetworkTimeout - 91, // ShutdownInProgress - 189, // PrimarySteppedDown - 262, // ExceededTimeLimit - 9001, // SocketException - 10107, // NotMaster - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 13435, // NotMasterNoSlaveOk - 13436, // NotMasterOrSecondary - 63, // StaleShardVersion - 150, // StaleEpoch - 13388, // StaleConfig - 234, // RetryChangeStream - 133, // FailedToSatisfyReadPreference - 43 // CursorNotFound +export const GET_MORE_RESUMABLE_CODES = new Set([ + MONGODB_ERROR_CODES.HostUnreachable, + MONGODB_ERROR_CODES.HostNotFound, + MONGODB_ERROR_CODES.NetworkTimeout, + MONGODB_ERROR_CODES.ShutdownInProgress, + MONGODB_ERROR_CODES.PrimarySteppedDown, + MONGODB_ERROR_CODES.ExceededTimeLimit, + MONGODB_ERROR_CODES.SocketException, + MONGODB_ERROR_CODES.NotMaster, + MONGODB_ERROR_CODES.InterruptedAtShutdown, + MONGODB_ERROR_CODES.InterruptedDueToReplStateChange, + MONGODB_ERROR_CODES.NotMasterNoSlaveOk, + MONGODB_ERROR_CODES.NotMasterOrSecondary, + MONGODB_ERROR_CODES.StaleShardVersion, + MONGODB_ERROR_CODES.StaleEpoch, + MONGODB_ERROR_CODES.StaleConfig, + MONGODB_ERROR_CODES.RetryChangeStream, + MONGODB_ERROR_CODES.FailedToSatisfyReadPreference, + MONGODB_ERROR_CODES.CursorNotFound ]); /** @public */ @@ -244,33 +273,33 @@ export class MongoWriteConcernError extends MongoError { } // see: https://github.com/mongodb/specifications/blob/master/source/retryable-writes/retryable-writes.rst#terms -const RETRYABLE_ERROR_CODES = new Set([ - 6, // HostUnreachable - 7, // HostNotFound - 89, // NetworkTimeout - 91, // ShutdownInProgress - 189, // PrimarySteppedDown - 9001, // SocketException - 10107, // NotMaster - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 13435, // NotMasterNoSlaveOk - 13436 // NotMasterOrSecondary +const RETRYABLE_ERROR_CODES = new Set([ + MONGODB_ERROR_CODES.HostUnreachable, + 
MONGODB_ERROR_CODES.HostNotFound, + MONGODB_ERROR_CODES.NetworkTimeout, + MONGODB_ERROR_CODES.ShutdownInProgress, + MONGODB_ERROR_CODES.PrimarySteppedDown, + MONGODB_ERROR_CODES.SocketException, + MONGODB_ERROR_CODES.NotMaster, + MONGODB_ERROR_CODES.InterruptedAtShutdown, + MONGODB_ERROR_CODES.InterruptedDueToReplStateChange, + MONGODB_ERROR_CODES.NotMasterNoSlaveOk, + MONGODB_ERROR_CODES.NotMasterOrSecondary ]); -const RETRYABLE_WRITE_ERROR_CODES = new Set([ - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 10107, // NotMaster - 13435, // NotMasterNoSlaveOk - 13436, // NotMasterOrSecondary - 189, // PrimarySteppedDown - 91, // ShutdownInProgress - 7, // HostNotFound - 6, // HostUnreachable - 89, // NetworkTimeout - 9001, // SocketException - 262 // ExceededTimeLimit +const RETRYABLE_WRITE_ERROR_CODES = new Set([ + MONGODB_ERROR_CODES.InterruptedAtShutdown, + MONGODB_ERROR_CODES.InterruptedDueToReplStateChange, + MONGODB_ERROR_CODES.NotMaster, + MONGODB_ERROR_CODES.NotMasterNoSlaveOk, + MONGODB_ERROR_CODES.NotMasterOrSecondary, + MONGODB_ERROR_CODES.PrimarySteppedDown, + MONGODB_ERROR_CODES.ShutdownInProgress, + MONGODB_ERROR_CODES.HostNotFound, + MONGODB_ERROR_CODES.HostUnreachable, + MONGODB_ERROR_CODES.NetworkTimeout, + MONGODB_ERROR_CODES.SocketException, + MONGODB_ERROR_CODES.ExceededTimeLimit ]); export function isRetryableWriteError(error: MongoError): boolean { @@ -291,42 +320,45 @@ export function isRetryableError(error: MongoError): boolean { ); } -const SDAM_RECOVERING_CODES = new Set([ - 91, // ShutdownInProgress - 189, // PrimarySteppedDown - 11600, // InterruptedAtShutdown - 11602, // InterruptedDueToReplStateChange - 13436 // NotMasterOrSecondary +const SDAM_RECOVERING_CODES = new Set([ + MONGODB_ERROR_CODES.ShutdownInProgress, + MONGODB_ERROR_CODES.PrimarySteppedDown, + MONGODB_ERROR_CODES.InterruptedAtShutdown, + MONGODB_ERROR_CODES.InterruptedDueToReplStateChange, + MONGODB_ERROR_CODES.NotMasterOrSecondary ]); -const 
SDAM_NOTMASTER_CODES = new Set([ - 10107, // NotMaster - 13435 // NotMasterNoSlaveOk +const SDAM_NOTMASTER_CODES = new Set([ + MONGODB_ERROR_CODES.NotMaster, + MONGODB_ERROR_CODES.NotMasterNoSlaveOk, + MONGODB_ERROR_CODES.LegacyNotPrimary ]); -const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([ - 11600, // InterruptedAtShutdown - 91 // ShutdownInProgress +const SDAM_NODE_SHUTTING_DOWN_ERROR_CODES = new Set([ + MONGODB_ERROR_CODES.InterruptedAtShutdown, + MONGODB_ERROR_CODES.ShutdownInProgress ]); function isRecoveringError(err: MongoError) { - if (err.code && SDAM_RECOVERING_CODES.has(err.code)) { - return true; + if (typeof err.code !== 'undefined') { + // If any error code exists, we ignore the error.message + return SDAM_RECOVERING_CODES.has(err.code); } - return err.message.match(/not master or secondary/) || err.message.match(/node is recovering/); + return /not master or secondary/.test(err.message) || /node is recovering/.test(err.message); } function isNotMasterError(err: MongoError) { - if (err.code && SDAM_NOTMASTER_CODES.has(err.code)) { - return true; + if (typeof err.code !== 'undefined') { + // If any error code exists, we ignore the error.message + return SDAM_NOTMASTER_CODES.has(err.code); } if (isRecoveringError(err)) { return false; } - return err.message.match(/not master/); + return /not master/.test(err.message); } export function isNodeShuttingDownError(err: MongoError): boolean { @@ -347,6 +379,9 @@ export function isSDAMUnrecoverableError(error: MongoError): boolean { return true; } + if (typeof error.code !== 'undefined') { + return isRecoveringError(error) || isNotMasterError(error); + } if (isRecoveringError(error) || isNotMasterError(error)) { return true; } diff --git a/src/gridfs-stream/upload.ts b/src/gridfs-stream/upload.ts index c9b29ed928..cadca413ed 100644 --- a/src/gridfs-stream/upload.ts +++ b/src/gridfs-stream/upload.ts @@ -1,6 +1,6 @@ import * as crypto from 'crypto'; import { Writable } from 'stream'; -import { MongoError, 
AnyError } from '../error'; +import { MongoError, AnyError, MONGODB_ERROR_CODES } from '../error'; import { WriteConcern } from './../write_concern'; import { PromiseProvider } from '../promise_provider'; import { ObjectId } from '../bson'; @@ -11,8 +11,6 @@ import type { GridFSBucket } from './index'; import type { GridFSFile } from './download'; import type { WriteConcernOptions } from '../write_concern'; -const ERROR_NAMESPACE_NOT_FOUND = 26; - /** @public */ export type TFileId = string | number | Document | ObjectId; @@ -256,7 +254,7 @@ function checkChunksIndex(stream: GridFSBucketWriteStream, callback: Callback): let index: { files_id: number; n: number }; if (error) { // Collection doesn't exist so create index - if (error instanceof MongoError && error.code === ERROR_NAMESPACE_NOT_FOUND) { + if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { index = { files_id: 1, n: 1 }; stream.chunks.createIndex(index, { background: false, unique: true }, error => { if (error) { @@ -349,7 +347,7 @@ function checkIndexes(stream: GridFSBucketWriteStream, callback: Callback): void let index: { filename: number; uploadDate: number }; if (error) { // Collection doesn't exist so create index - if (error instanceof MongoError && error.code === ERROR_NAMESPACE_NOT_FOUND) { + if (error instanceof MongoError && error.code === MONGODB_ERROR_CODES.NamespaceNotFound) { index = { filename: 1, uploadDate: 1 }; stream.files.createIndex(index, { background: false }, (error?: AnyError) => { if (error) { diff --git a/src/operations/execute_operation.ts b/src/operations/execute_operation.ts index ece250e424..ab0d966c29 100644 --- a/src/operations/execute_operation.ts +++ b/src/operations/execute_operation.ts @@ -1,5 +1,5 @@ import { ReadPreference } from '../read_preference'; -import { MongoError, isRetryableError } from '../error'; +import { MongoError, isRetryableError, MONGODB_ERROR_CODES } from '../error'; import { Aspect, AbstractOperation } from 
'./operation'; import { maxWireVersion, maybePromise, Callback } from '../utils'; import { ServerType } from '../sdam/common'; @@ -8,7 +8,7 @@ import type { Topology } from '../sdam/topology'; import type { ClientSession } from '../sessions'; import type { Document } from '../bson'; -const MMAPv1_RETRY_WRITES_ERROR_CODE = 20; +const MMAPv1_RETRY_WRITES_ERROR_CODE = MONGODB_ERROR_CODES.IllegalOperation; const MMAPv1_RETRY_WRITES_ERROR_MESSAGE = 'This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.'; diff --git a/src/operations/indexes.ts b/src/operations/indexes.ts index fd6ca54f6c..97c91f3d34 100644 --- a/src/operations/indexes.ts +++ b/src/operations/indexes.ts @@ -1,6 +1,6 @@ import { indexInformation, IndexInformationOptions } from './common_functions'; import { AbstractOperation, Aspect, defineAspects } from './operation'; -import { MongoError } from '../error'; +import { MONGODB_ERROR_CODES, MongoError } from '../error'; import { maxWireVersion, parseIndexOptions, @@ -280,7 +280,7 @@ export class EnsureIndexOperation extends CreateIndexOperation { const cursor = this.db.collection(this.collectionName).listIndexes({ session }); cursor.toArray((err, indexes) => { /// ignore "NamespaceNotFound" errors - if (err && (err as MongoError).code !== 26) { + if (err && (err as MongoError).code !== MONGODB_ERROR_CODES.NamespaceNotFound) { return callback(err); } diff --git a/src/sdam/monitor.ts b/src/sdam/monitor.ts index c8147b7aaa..9f1e7a4084 100644 --- a/src/sdam/monitor.ts +++ b/src/sdam/monitor.ts @@ -222,10 +222,10 @@ function checkServer(monitor: Monitor, callback: Callback) { const options = isAwaitable ? { - socketTimeout: connectTimeoutMS ? connectTimeoutMS + maxAwaitTimeMS : 0, + socketTimeoutMS: connectTimeoutMS ? 
connectTimeoutMS + maxAwaitTimeMS : 0, exhaustAllowed: true } - : { socketTimeout: connectTimeoutMS }; + : { socketTimeoutMS: connectTimeoutMS }; if (isAwaitable && monitor[kRTTPinger] == null) { monitor[kRTTPinger] = new RTTPinger( @@ -343,10 +343,8 @@ function monitorServer(monitor: Monitor) { function makeTopologyVersion(tv: TopologyVersion) { return { processId: tv.processId, - - // NOTE: The casting here is a bug, `counter` should always be a `Long` - // but it was not at the time of typing. Further investigation needed - counter: Long.fromNumber((tv.counter as unknown) as number) + // tests mock counter as just number, but in a real situation counter should always be a Long + counter: Long.isLong(tv.counter) ? tv.counter : Long.fromNumber(tv.counter) }; } diff --git a/src/sdam/server.ts b/src/sdam/server.ts index 77d85a8e03..56aa015081 100644 --- a/src/sdam/server.ts +++ b/src/sdam/server.ts @@ -60,7 +60,7 @@ const DEBUG_FIELDS = [ 'noDelay', 'connectionTimeout', 'checkServerIdentity', - 'socketTimeout', + 'socketTimeoutMS', 'ssl', 'ca', 'crl', diff --git a/src/sdam/server_description.ts b/src/sdam/server_description.ts index 4fd9248ca6..65695e76d5 100644 --- a/src/sdam/server_description.ts +++ b/src/sdam/server_description.ts @@ -1,6 +1,6 @@ import { arrayStrictEqual, errorStrictEqual, now, HostAddress } from '../utils'; import { ServerType, ServerTypeId } from './common'; -import type { ObjectId, Long, Document } from '../bson'; +import { ObjectId, Long, Document } from '../bson'; import type { ClusterTime } from './common'; const WRITABLE_SERVER_TYPES = new Set([ @@ -256,14 +256,10 @@ export function compareTopologyVersion(lhs?: TopologyVersion, rhs?: TopologyVers } if (lhs.processId.equals(rhs.processId)) { - // TODO: handle counters as Longs - if (lhs.counter === rhs.counter) { - return 0; - } else if (lhs.counter < rhs.counter) { - return -1; - } - - return 1; + // tests mock counter as just number, but in a real situation counter should always be a 
Long + const lhsCounter = Long.isLong(lhs.counter) ? lhs.counter : Long.fromNumber(lhs.counter); + const rhsCounter = Long.isLong(rhs.counter) ? rhs.counter : Long.fromNumber(rhs.counter); + return lhsCounter.compare(rhsCounter); } return -1; diff --git a/src/sdam/topology.ts b/src/sdam/topology.ts index 1f2dddf656..3c4df02104 100644 --- a/src/sdam/topology.ts +++ b/src/sdam/topology.ts @@ -1,7 +1,7 @@ import Denque = require('denque'); import { EventEmitter } from 'events'; import { ReadPreference, ReadPreferenceLike } from '../read_preference'; -import { ServerDescription } from './server_description'; +import { compareTopologyVersion, ServerDescription } from './server_description'; import { TopologyDescription } from './topology_description'; import { Server, ServerOptions } from './server'; import { @@ -612,6 +612,11 @@ export class Topology extends EventEmitter { return; } + // ignore this server update if it's from an outdated topologyVersion + if (isStaleServerDescription(this.s.description, serverDescription)) { + return; + } + // these will be used for monitoring events later const previousTopologyDescription = this.s.description; const previousServerDescription = this.s.description.servers.get(serverDescription.address); @@ -965,6 +970,19 @@ function processWaitQueue(topology: Topology) { } } +function isStaleServerDescription( + topologyDescription: TopologyDescription, + incomingServerDescription: ServerDescription +) { + const currentServerDescription = topologyDescription.servers.get( + incomingServerDescription.address + ); + const currentTopologyVersion = currentServerDescription?.topologyVersion; + return ( + compareTopologyVersion(currentTopologyVersion, incomingServerDescription.topologyVersion) > 0 + ); +} + /** @public */ export class ServerCapabilities { maxWireVersion: number; diff --git a/src/sessions.ts b/src/sessions.ts index fe6d708416..08c057d30a 100644 --- a/src/sessions.ts +++ b/src/sessions.ts @@ -11,7 +11,13 @@ import { } from 
'./transactions'; import { resolveClusterTime, ClusterTime } from './sdam/common'; import { isSharded } from './cmap/wire_protocol/shared'; -import { MongoError, isRetryableError, MongoNetworkError, MongoWriteConcernError } from './error'; +import { + MongoError, + isRetryableError, + MongoNetworkError, + MongoWriteConcernError, + MONGODB_ERROR_CODES +} from './error'; import { now, calculateDurationInMs, @@ -329,9 +335,6 @@ class ClientSession extends EventEmitter { } const MAX_WITH_TRANSACTION_TIMEOUT = 120000; -const UNSATISFIABLE_WRITE_CONCERN_CODE = 100; -const UNKNOWN_REPL_WRITE_CONCERN_CODE = 79; -const MAX_TIME_MS_EXPIRED_CODE = 50; const NON_DETERMINISTIC_WRITE_CONCERN_ERRORS = new Set([ 'CannotSatisfyWriteConcern', 'UnknownReplWriteConcern', @@ -349,8 +352,8 @@ function isUnknownTransactionCommitResult(err: MongoError) { return ( isMaxTimeMSExpiredError(err) || (!isNonDeterministicWriteConcernError && - err.code !== UNSATISFIABLE_WRITE_CONCERN_CODE && - err.code !== UNKNOWN_REPL_WRITE_CONCERN_CODE) + err.code !== MONGODB_ERROR_CODES.UnsatisfiableWriteConcern && + err.code !== MONGODB_ERROR_CODES.UnknownReplWriteConcern) ); } @@ -360,8 +363,8 @@ function isMaxTimeMSExpiredError(err: MongoError) { } return ( - err.code === MAX_TIME_MS_EXPIRED_CODE || - (err.writeConcernError && err.writeConcernError.code === MAX_TIME_MS_EXPIRED_CODE) + err.code === MONGODB_ERROR_CODES.MaxTimeMSExpired || + (err.writeConcernError && err.writeConcernError.code === MONGODB_ERROR_CODES.MaxTimeMSExpired) ); } diff --git a/test/functional/mongo_client_options.test.js b/test/functional/mongo_client_options.test.js index 2088236ef4..814e541480 100644 --- a/test/functional/mongo_client_options.test.js +++ b/test/functional/mongo_client_options.test.js @@ -67,7 +67,7 @@ describe('MongoClient Options', function () { const options = args[2]; if (ns.toString() === 'admin.$cmd' && command.ismaster && options.exhaustAllowed) { stub.restore(); - 
expect(options).property('socketTimeout').to.equal(0); + expect(options).property('socketTimeoutMS').to.equal(0); client.close(done); } @@ -93,7 +93,7 @@ describe('MongoClient Options', function () { const options = args[2]; if (ns.toString() === 'admin.$cmd' && command.ismaster && options.exhaustAllowed) { stub.restore(); - expect(options).property('socketTimeout').to.equal(510); + expect(options).property('socketTimeoutMS').to.equal(510); client.close(done); } diff --git a/test/functional/spec-runner/index.js b/test/functional/spec-runner/index.js index 6828db318e..a18338793c 100644 --- a/test/functional/spec-runner/index.js +++ b/test/functional/spec-runner/index.js @@ -95,7 +95,7 @@ function parseRunOn(runOn) { } const mongodb = version.join(' '); - return { topology, mongodb }; + return { topology, mongodb, authEnabled: !!config.authEnabled }; }); } @@ -121,6 +121,21 @@ function generateTopologyTests(testSuites, testContext, filter) { afterEach(() => testContext.cleanupAfterSuite()); testSuite.tests.forEach(spec => { it(spec.description, function () { + if (requires.authEnabled && process.env.AUTH !== 'auth') { + // TODO: We do not have a way to determine if auth is enabled in our mocha metadata + // We need to do a admin.command({getCmdLineOpts: 1}) if it errors (code=13) auth is on + this.skip(); + } + + if ( + spec.operations.some( + op => op.name === 'waitForEvent' && op.arguments.event === 'PoolReadyEvent' + ) + ) { + // TODO(NODE-2994): Connection storms work will add new events to connection pool + this.skip(); + } + if ( spec.skipReason || (filter && typeof filter === 'function' && !filter(spec, this.configuration)) diff --git a/test/spec/server-discovery-and-monitoring/errors/generate-error-tests.py b/test/spec/server-discovery-and-monitoring/errors/generate-error-tests.py index 8ff3c69104..877363996e 100644 --- a/test/spec/server-discovery-and-monitoring/errors/generate-error-tests.py +++ 
b/test/spec/server-discovery-and-monitoring/errors/generate-error-tests.py @@ -40,6 +40,7 @@ def write_test(filename, data): 'ShutdownInProgress': (91,), 'NotMaster': (10107,), 'NotMasterNoSlaveOk': (13435,), + 'LegacyNotPrimary': (10058,), } @@ -129,6 +130,7 @@ def create_stale_generation_tests(): # Stale network errors for network_error_type, when in itertools.product( ['network', 'timeout'], WHEN): + error_name = network_error_type test_name = f'stale-generation-{when}-{network_error_type}' stale_error = STALE_GENERATION_NETWORK_ERROR.format(**locals()) data = tmp.format(**locals()) diff --git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json new file mode 100644 index 0000000000..e1f33b81bb --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion greater LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion greater LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + 
"address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.yml new file mode 100644 index 0000000000..daa7925b1e --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-greater-LegacyNotPrimary.yml @@ -0,0 +1,60 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Non-stale topologyVersion greater LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Non-stale topologyVersion greater LegacyNotPrimary error marks server Unknown + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + 
response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": "2" + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": "2" + pool: + generation: 0 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs diff --git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json new file mode 100644 index 0000000000..ccaacd1cfe --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.json @@ -0,0 +1,84 @@ +{ + "description": "Non-stale topologyVersion missing LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion missing LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 
+ } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.yml new file mode 100644 index 0000000000..04c725e446 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-missing-LegacyNotPrimary.yml @@ -0,0 +1,51 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Non-stale topologyVersion missing LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Non-stale topologyVersion missing LegacyNotPrimary error marks server Unknown + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: null + pool: + generation: 0 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs diff --git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json 
b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json new file mode 100644 index 0000000000..da36e9b33c --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.json @@ -0,0 +1,99 @@ +{ + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale topologyVersion proccessId changed LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff 
--git a/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.yml new file mode 100644 index 0000000000..b333863c15 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/non-stale-topologyVersion-proccessId-changed-LegacyNotPrimary.yml @@ -0,0 +1,60 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Non-stale topologyVersion proccessId changed LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Non-stale topologyVersion proccessId changed LegacyNotPrimary error marks server Unknown + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000002' + counter: + "$numberLong": "1" + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: + processId: + "$oid": '000000000000000000000002' + counter: + "$numberLong": "1" + pool: + generation: 0 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs diff --git a/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.json new file mode 100644 index 
0000000000..731da196b5 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.json @@ -0,0 +1,69 @@ +{ + "description": "Post-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 8 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Post-4.2 LegacyNotPrimary error marks server Unknown", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 8, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.yml new file mode 100644 index 0000000000..56de3a23a9 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/post-42-LegacyNotPrimary.yml @@ -0,0 +1,46 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Post-4.2 LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 8 + outcome: &outcome + servers: + a:27017: + 
type: RSPrimary + setName: rs + topologyVersion: null + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Post-4.2 LegacyNotPrimary error marks server Unknown + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 8 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: null + pool: + generation: 0 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs diff --git a/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.json new file mode 100644 index 0000000000..db5acd718d --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.json @@ -0,0 +1,69 @@ +{ + "description": "Pre-4.2 LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 7 + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 7, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": 
"ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.yml new file mode 100644 index 0000000000..3757474151 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/pre-42-LegacyNotPrimary.yml @@ -0,0 +1,46 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Pre-4.2 LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 7 + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: null + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Pre-4.2 LegacyNotPrimary error marks server Unknown and clears the pool + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 7 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: null + pool: + generation: 1 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs diff --git a/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.json b/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.json new file mode 100644 index 0000000000..486103a457 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.json @@ -0,0 +1,130 @@ +{ + "description": "Do not check errmsg when code exists", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + 
"setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"not master\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "not master", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "errmsg \"node is recovering\" gets ignored when error code exists", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "node is recovering", + "code": 1 + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.yml 
b/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.yml new file mode 100644 index 0000000000..f7c76b9e31 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/prefer-error-code.yml @@ -0,0 +1,53 @@ +description: Do not check errmsg when code exists +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: errmsg "not master" gets ignored when error code exists + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: "not master" + code: 1 # Not a "not master" error code. + outcome: *outcome + +- description: errmsg "node is recovering" gets ignored when error code exists + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: "node is recovering" + code: 1 # Not a "node is recovering" error code. 
+ outcome: *outcome diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 0000000000..2169c4a6fb --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation LegacyNotPrimary error afterHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { 
+ "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.yml new file mode 100644 index 0000000000..f7ccdf8d4a --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-LegacyNotPrimary.yml @@ -0,0 +1,89 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Stale generation LegacyNotPrimary error afterHandshakeCompletes +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - 
a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +# Process a network error which increments the pool generation. +- description: Non-stale application network error + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: network + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: null + pool: + generation: 1 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Primary A is rediscovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: *topologyVersion_1_1 + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 1 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Ignore stale LegacyNotPrimary error (stale generation) + applicationErrors: + - address: a:27017 + generation: 0 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": "2" + outcome: *outcome diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.json index 4e11c48eb2..94b8322d2d 100644 --- 
a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.json +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "description": "Stale generation network error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale network error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.yml index ec06490f98..7f3b104cc5 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.yml +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-network.yml @@ -1,5 +1,5 @@ # Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes +description: Stale generation network error afterHandshakeCompletes uri: mongodb://a/?replicaSet=rs phases: - description: Primary A is discovered @@ -70,7 +70,7 @@ phases: logicalSessionTimeoutMinutes: null setName: rs -- description: Ignore stale NotMasterNoSlaveOk error (stale generation) +- description: Ignore stale network error (stale generation) applicationErrors: - address: a:27017 generation: 0 diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json index 27bac32443..490703de90 100644 --- 
a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes", + "description": "Stale generation timeout error afterHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale timeout error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.yml index a65304c282..2a42ffb65e 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.yml +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-afterHandshakeCompletes-timeout.yml @@ -1,5 +1,5 @@ # Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Stale generation NotMasterNoSlaveOk error afterHandshakeCompletes +description: Stale generation timeout error afterHandshakeCompletes uri: mongodb://a/?replicaSet=rs phases: - description: Primary A is discovered @@ -70,7 +70,7 @@ phases: logicalSessionTimeoutMinutes: null setName: rs -- description: Ignore stale NotMasterNoSlaveOk error (stale generation) +- description: Ignore stale timeout error (stale generation) applicationErrors: - address: a:27017 generation: 0 diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json new file mode 100644 index 
0000000000..674cb994cd --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.json @@ -0,0 +1,174 @@ +{ + "description": "Stale generation LegacyNotPrimary error beforeHandshakeCompletes", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Non-stale application network error", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "network" + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Primary A is rediscovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + 
}, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (stale generation)", + "applicationErrors": [ + { + "address": "a:27017", + "generation": 0, + "when": "beforeHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 1 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.yml new file mode 100644 index 0000000000..a37ca73442 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-LegacyNotPrimary.yml @@ -0,0 +1,89 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Stale generation LegacyNotPrimary error beforeHandshakeCompletes +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: + servers: + a:27017: + type: RSPrimary 
+ setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +# Process a network error which increments the pool generation. +- description: Non-stale application network error + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: network + outcome: + servers: + a:27017: + type: Unknown + topologyVersion: null + pool: + generation: 1 + topologyType: ReplicaSetNoPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Primary A is rediscovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: *topologyVersion_1_1 + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 1 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Ignore stale LegacyNotPrimary error (stale generation) + applicationErrors: + - address: a:27017 + generation: 0 + when: beforeHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": "2" + outcome: *outcome diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json index 9734776f22..3e581773eb 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + 
"description": "Stale generation network error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale network error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.yml index 1ef43508d7..997fb76ac5 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.yml +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-network.yml @@ -1,5 +1,5 @@ # Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes +description: Stale generation network error beforeHandshakeCompletes uri: mongodb://a/?replicaSet=rs phases: - description: Primary A is discovered @@ -70,7 +70,7 @@ phases: logicalSessionTimeoutMinutes: null setName: rs -- description: Ignore stale NotMasterNoSlaveOk error (stale generation) +- description: Ignore stale network error (stale generation) applicationErrors: - address: a:27017 generation: 0 diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json index af8730e5ca..24c8c6e507 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.json @@ -1,5 +1,5 @@ { - "description": "Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes", + "description": 
"Stale generation timeout error beforeHandshakeCompletes", "uri": "mongodb://a/?replicaSet=rs", "phases": [ { @@ -124,7 +124,7 @@ } }, { - "description": "Ignore stale NotMasterNoSlaveOk error (stale generation)", + "description": "Ignore stale timeout error (stale generation)", "applicationErrors": [ { "address": "a:27017", diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.yml b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.yml index 40471d9441..1f9ae8180b 100644 --- a/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.yml +++ b/test/spec/server-discovery-and-monitoring/errors/stale-generation-beforeHandshakeCompletes-timeout.yml @@ -1,5 +1,5 @@ # Autogenerated tests for SDAM error handling, see generate-error-tests.py -description: Stale generation NotMasterNoSlaveOk error beforeHandshakeCompletes +description: Stale generation timeout error beforeHandshakeCompletes uri: mongodb://a/?replicaSet=rs phases: - description: Primary A is discovered @@ -70,7 +70,7 @@ phases: logicalSessionTimeoutMinutes: null setName: rs -- description: Ignore stale NotMasterNoSlaveOk error (stale generation) +- description: Ignore stale timeout error (stale generation) applicationErrors: - address: a:27017 generation: 0 diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json b/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json new file mode 100644 index 0000000000..beee51e666 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.json @@ -0,0 +1,146 @@ +{ + "description": "Stale topologyVersion LegacyNotPrimary error", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "description": "Primary A is discovered", + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": 
true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion less)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "description": "Ignore stale LegacyNotPrimary error (topologyVersion equal)", + "applicationErrors": [ + { + "address": "a:27017", + "when": "afterHandshakeCompletes", + "maxWireVersion": 9, + "type": "command", + "response": { + "ok": 0, + "errmsg": "LegacyNotPrimary", + "code": 10058, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + } + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": 
"000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + }, + "pool": { + "generation": 0 + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.yml b/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.yml new file mode 100644 index 0000000000..df0a2ac5fa --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/errors/stale-topologyVersion-LegacyNotPrimary.yml @@ -0,0 +1,64 @@ +# Autogenerated tests for SDAM error handling, see generate-error-tests.py +description: Stale topologyVersion LegacyNotPrimary error +uri: mongodb://a/?replicaSet=rs +phases: +- description: Primary A is discovered + responses: + - - a:27017 + - ok: 1 + ismaster: true + hosts: + - a:27017 + setName: rs + minWireVersion: 0 + maxWireVersion: 9 + topologyVersion: &topologyVersion_1_1 + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '1' + outcome: &outcome + servers: + a:27017: + type: RSPrimary + setName: rs + topologyVersion: *topologyVersion_1_1 + pool: + generation: 0 + topologyType: ReplicaSetWithPrimary + logicalSessionTimeoutMinutes: null + setName: rs + +- description: Ignore stale LegacyNotPrimary error (topologyVersion less) + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000001' + counter: + "$numberLong": '0' + outcome: *outcome + +- description: Ignore stale LegacyNotPrimary error (topologyVersion equal) + applicationErrors: + - address: a:27017 + when: afterHandshakeCompletes + maxWireVersion: 9 + type: command + response: + ok: 0 + errmsg: LegacyNotPrimary + code: 10058 + topologyVersion: + processId: + "$oid": '000000000000000000000001' + 
counter: + "$numberLong": '1' + outcome: *outcome diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-error.json b/test/spec/server-discovery-and-monitoring/integration/auth-error.json new file mode 100644 index 0000000000..064d660e32 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after AuthenticationFailure error", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authErrorTest", + "errorCode": 18 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": 
"sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-error.yml b/test/spec/server-discovery-and-monitoring/integration/auth-error.yml new file mode 100644 index 0000000000..9c646543e0 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-error.yml @@ -0,0 +1,81 @@ +# Test SDAM error handling. +runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + authEnabled: true + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "auth-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Reset server and pool after AuthenticationFailure error + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["saslContinue"] + appName: authErrorTest + errorCode: 18 # AuthenticationFailure + clientOptions: + retryWrites: false + appname: authErrorTest + operations: + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + error: true + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform another operation to ensure the node is rediscovered. + - name: insertMany + object: collection + arguments: + documents: + - _id: 5 + - _id: 6 + # Assert the server was marked Unknown and pool was cleared exactly once. + - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + expectations: + # Note: The first insert command is never attempted because connection + # checkout fails. 
+ - command_started_event: + command: + insert: *collection_name + documents: + - _id: 5 + - _id: 6 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 5} + - {_id: 6} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.json b/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.json new file mode 100644 index 0000000000..70dd59251d --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-misc-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after misc command error", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authMiscErrorTest", + "errorCode": 1 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authMiscErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + 
"count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-misc-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.yml b/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.yml new file mode 100644 index 0000000000..20eae4533e --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-misc-command-error.yml @@ -0,0 +1,81 @@ +# Test SDAM error handling. +runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + authEnabled: true + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "auth-misc-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Reset server and pool after misc command error + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["saslContinue"] + appName: authMiscErrorTest + errorCode: 1 # InternalError + clientOptions: + retryWrites: false + appname: authMiscErrorTest + operations: + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + error: true + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform another operation to ensure the node is rediscovered. + - name: insertMany + object: collection + arguments: + documents: + - _id: 5 + - _id: 6 + # Assert the server was marked Unknown and pool was cleared exactly once. 
+ - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + expectations: + # Note: The first insert command is never attempted because connection + # checkout fails. + - command_started_event: + command: + insert: *collection_name + documents: + - _id: 5 + - _id: 6 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 5} + - {_id: 6} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-network-error.json b/test/spec/server-discovery-and-monitoring/integration/auth-network-error.json new file mode 100644 index 0000000000..a75a398c5e --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-network-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-network-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "closeConnection": true, + "appName": "authNetworkErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authNetworkErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + 
"arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-network-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-network-error.yml b/test/spec/server-discovery-and-monitoring/integration/auth-network-error.yml new file mode 100644 index 0000000000..abb2b1471e --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-network-error.yml @@ -0,0 +1,81 @@ +# Test SDAM error handling. 
+runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + authEnabled: true + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "auth-network-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Reset server and pool after network error during authentication + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["saslContinue"] + closeConnection: true + appName: authNetworkErrorTest + clientOptions: + retryWrites: false + appname: authNetworkErrorTest + operations: + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + error: true + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform another operation to ensure the node is rediscovered. + - name: insertMany + object: collection + arguments: + documents: + - _id: 5 + - _id: 6 + # Assert the server was marked Unknown and pool was cleared exactly once. + - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + expectations: + # Note: The first insert command is never attempted because connection + # checkout fails. 
+ - command_started_event: + command: + insert: *collection_name + documents: + - _id: 5 + - _id: 6 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 5} + - {_id: 6} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.json b/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.json new file mode 100644 index 0000000000..a4ee7d9eff --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.json @@ -0,0 +1,143 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-network-timeout-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after network timeout error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "authNetworkTimeoutErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authNetworkTimeoutErrorTest", + "connectTimeoutMS": 250, + "socketTimeoutMS": 250 + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": 
"ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-network-timeout-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.yml b/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.yml new file mode 100644 index 0000000000..e065ec5abf --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-network-timeout-error.yml @@ -0,0 +1,86 @@ +# Test SDAM error handling. +runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + authEnabled: true + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "auth-network-timeout-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Reset server and pool after network timeout error during authentication + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["saslContinue"] + blockConnection: true + blockTimeMS: 500 + appName: authNetworkTimeoutErrorTest + clientOptions: + retryWrites: false + appname: authNetworkTimeoutErrorTest + # Set a short connect/socket timeout to ensure the fail point causes the + # connection establishment to timeout. 
+ connectTimeoutMS: 250 + socketTimeoutMS: 250 + operations: + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + error: true + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform another operation to ensure the node is rediscovered. + - name: insertMany + object: collection + arguments: + documents: + - _id: 5 + - _id: 6 + # Assert the server was marked Unknown and pool was cleared exactly once. + - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + expectations: + # Note: The first insert command is never attempted because connection + # checkout fails. + - command_started_event: + command: + insert: *collection_name + documents: + - _id: 5 + - _id: 6 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 5} + - {_id: 6} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.json b/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.json new file mode 100644 index 0000000000..2dab90e1c5 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.json @@ -0,0 +1,140 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4", + "authEnabled": true + } + ], + "database_name": "sdam-tests", + "collection_name": "auth-shutdown-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Reset server and pool after shutdown error during authentication", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "saslContinue" + ], + "appName": "authShutdownErrorTest", + "errorCode": 
91 + } + }, + "clientOptions": { + "retryWrites": false, + "appname": "authShutdownErrorTest" + }, + "operations": [ + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 3 + }, + { + "_id": 4 + } + ] + }, + "error": true + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertMany", + "object": "collection", + "arguments": { + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "insert": "auth-shutdown-error", + "documents": [ + { + "_id": 5 + }, + { + "_id": 6 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 5 + }, + { + "_id": 6 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.yml b/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.yml new file mode 100644 index 0000000000..4efe25f3be --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/auth-shutdown-error.yml @@ -0,0 +1,81 @@ +# Test SDAM error handling. 
+runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + authEnabled: true + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "auth-shutdown-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Reset server and pool after shutdown error during authentication + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["saslContinue"] + appName: authShutdownErrorTest + errorCode: 91 + clientOptions: + retryWrites: false + appname: authShutdownErrorTest + operations: + - name: insertMany + object: collection + arguments: + documents: + - _id: 3 + - _id: 4 + error: true + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform another operation to ensure the node is rediscovered. + - name: insertMany + object: collection + arguments: + documents: + - _id: 5 + - _id: 6 + # Assert the server was marked Unknown and pool was cleared exactly once. + - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + expectations: + # Note: The first insert command is never attempted because connection + # checkout fails. 
+ - command_started_event: + command: + insert: *collection_name + documents: + - _id: 5 + - _id: 6 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 5} + - {_id: 6} diff --git a/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.json b/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.json new file mode 100644 index 0000000000..c4e10b3a76 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.json @@ -0,0 +1,119 @@ +{ + "runOn": [ + { + "minServerVersion": "4.4" + } + ], + "database_name": "sdam-tests", + "collection_name": "find-network-timeout-error", + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + } + ], + "tests": [ + { + "description": "Ignore network timeout error on find", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "find" + ], + "blockConnection": true, + "blockTimeMS": 500, + "appName": "findNetworkTimeoutErrorTest" + } + }, + "clientOptions": { + "retryWrites": false, + "retryReads": false, + "appname": "findNetworkTimeoutErrorTest", + "socketTimeoutMS": 250 + }, + "operations": [ + { + "name": "find", + "object": "collection", + "arguments": { + "filter": { + "_id": 1 + } + }, + "error": true + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 0 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 0 + } + } + ], + "expectations": [ + { + "command_started_event": { + "command": { + "find": "find-network-timeout-error" + }, + "command_name": "find", + "database_name": "sdam-tests" + } + }, + { + "command_started_event": { + "command": { + 
"insert": "find-network-timeout-error", + "documents": [ + { + "_id": 3 + } + ] + }, + "command_name": "insert", + "database_name": "sdam-tests" + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.yml b/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.yml new file mode 100644 index 0000000000..d4ce8e39a7 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/find-network-timeout-error.yml @@ -0,0 +1,73 @@ +# Test SDAM error handling. +runOn: + # failCommand appName requirements + - minServerVersion: "4.4" + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "find-network-timeout-error" + +data: &data + - {_id: 1} + - {_id: 2} + +tests: + - description: Ignore network timeout error on find + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["find"] + blockConnection: true + blockTimeMS: 500 + appName: findNetworkTimeoutErrorTest + clientOptions: + retryWrites: false + retryReads: false + appname: findNetworkTimeoutErrorTest + # Set a short socket timeout to ensure the find command times out. + socketTimeoutMS: 250 + operations: + - name: find + object: collection + arguments: + filter: + _id: 1 + error: true + # Perform another operation to ensure the node is still usable. + - name: insertOne + object: collection + arguments: + document: + _id: 3 + # Assert the server was not marked Unknown and the pool was not cleared. 
+ - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 0 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 0 + + expectations: + - command_started_event: + command: + find: *collection_name + command_name: find + database_name: *database_name + - command_started_event: + command: + insert: *collection_name + documents: + - _id: 3 + command_name: insert + database_name: *database_name + + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 3} diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.json b/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.json index 4bdfd9adff..0a735dc334 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.json +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.json @@ -39,14 +39,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.yml b/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.yml index 192f079bc2..e1eb13348f 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.yml +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-command-error.yml @@ -27,17 +27,12 @@ tests: appname: commandErrorHandshakeTest operations: # The command error on the initial handshake should mark the server - # Unknown and clear the pool. + # Unknown (emitting a ServerDescriptionChangedEvent) and clear the pool. 
- name: waitForEvent object: testRunner arguments: event: ServerMarkedUnknownEvent count: 1 - - name: waitForEvent - object: testRunner - arguments: - event: PoolClearedEvent - count: 1 # Perform an operation to ensure the node is discovered. - name: insertMany object: collection diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.json b/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.json index eb1f3eac19..2385a41646 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.json +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.json @@ -38,14 +38,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.yml b/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.yml index 0a0d4dd5f3..0414005b7f 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.yml +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-network-error.yml @@ -26,17 +26,12 @@ tests: appname: networkErrorHandshakeTest operations: # The network error on the initial handshake should mark the server - # Unknown and clear the pool. + # Unknown (emitting a ServerDescriptionChangedEvent) and clear the pool. - name: waitForEvent object: testRunner arguments: event: ServerMarkedUnknownEvent count: 1 - - name: waitForEvent - object: testRunner - arguments: - event: PoolClearedEvent - count: 1 # Perform an operation to ensure the node is discovered. 
- name: insertMany object: collection diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.json b/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.json index eeee612be8..50ad482778 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.json +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.json @@ -39,14 +39,6 @@ "count": 1 } }, - { - "name": "waitForEvent", - "object": "testRunner", - "arguments": { - "event": "PoolClearedEvent", - "count": 1 - } - }, { "name": "insertMany", "object": "collection", diff --git a/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.yml b/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.yml index 269b681be3..715d23abfb 100644 --- a/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.yml +++ b/test/spec/server-discovery-and-monitoring/integration/isMaster-timeout.yml @@ -27,17 +27,12 @@ tests: appname: timeoutMonitorHandshakeTest operations: # The network error on the initial handshake should mark the server - # Unknown and clear the pool. + # Unknown (emitting a ServerDescriptionChangedEvent) and clear the pool. - name: waitForEvent object: testRunner arguments: event: ServerMarkedUnknownEvent count: 1 - - name: waitForEvent - object: testRunner - arguments: - event: PoolClearedEvent - count: 1 # Perform an operation to ensure the node is discovered. - name: insertMany object: collection @@ -101,6 +96,9 @@ tests: failCommands: ["isMaster"] appName: timeoutMonitorCheckTest blockConnection: true + # blockTimeMS is evaluated after the waiting for heartbeatFrequencyMS server-side, so this value only + # needs to be greater than connectTimeoutMS. The driver will wait for (500+750)ms and the server will + # respond after (500+1000)ms. blockTimeMS: 1000 # The network error on the next check should mark the server Unknown and # clear the pool. 
diff --git a/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.json b/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.json new file mode 100644 index 0000000000..9605ee4f5f --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.json @@ -0,0 +1,101 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9" + } + ], + "database_name": "sdam-tests", + "collection_name": "sdam-minPoolSize-error", + "data": [], + "tests": [ + { + "description": "Network error on minPoolSize background creation", + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "skip": 3 + }, + "data": { + "failCommands": [ + "isMaster" + ], + "appName": "SDAMminPoolSizeError", + "closeConnection": true + } + }, + "clientOptions": { + "heartbeatFrequencyMS": 10000, + "appname": "SDAMminPoolSizeError", + "minPoolSize": 10, + "serverSelectionTimeoutMS": 1000, + "directConnection": true + }, + "operations": [ + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolReadyEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": {} + } + }, + "error": true + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": "off" + } + } + }, + { + "name": "runCommand", + "object": "database", + "command_name": "ping", + "arguments": { + "command": { + "ping": 1 + } + }, + "error": false + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolReadyEvent", + "count": 2 + } + } + ] + } + ] +} diff --git 
a/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.yml b/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.yml new file mode 100644 index 0000000000..bf2666a54a --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/minPoolSize-error.yml @@ -0,0 +1,77 @@ +# Test SDAM error handling. +runOn: + # failCommand appName requirements + - minServerVersion: "4.9" + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "sdam-minPoolSize-error" + +data: [] + +tests: + - description: Network error on minPoolSize background creation + # Configure the initial monitor handshake to succeed but the + # first or second background minPoolSize establishments to fail. + failPoint: + configureFailPoint: failCommand + mode: { skip: 3 } + data: + failCommands: ["isMaster"] + appName: SDAMminPoolSizeError + closeConnection: true + clientOptions: + heartbeatFrequencyMS: 10000 + appname: SDAMminPoolSizeError + minPoolSize: 10 + serverSelectionTimeoutMS: 1000 + directConnection: true + operations: + # Wait for monitor to succeed handshake and mark the pool as ready. + - name: waitForEvent + object: testRunner + arguments: + event: PoolReadyEvent + count: 1 + # Background connection establishment ensuring minPoolSize should fail, + # causing the pool to be cleared. + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # The server should be marked as Unknown as part of this. + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + # Executing a command should fail server selection due to not being able + # to find the primary. + - name: runCommand + object: database + command_name: ping + arguments: + command: + ping: {} + error: true + # Disable the failpoint, allowing the monitor to discover the primary again. 
+ - name: configureFailPoint + object: testRunner + arguments: + failPoint: + configureFailPoint: failCommand + mode: off + # Perform an operation to ensure the node is discovered. + - name: runCommand + object: database + command_name: ping + arguments: + command: + ping: 1 + error: false + # Assert that the monitor discovered the primary and mark the pool as ready again. + - name: assertEventCount + object: testRunner + arguments: + event: PoolReadyEvent + count: 2 diff --git a/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.json b/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.json new file mode 100644 index 0000000000..061503c259 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.json @@ -0,0 +1,307 @@ +{ + "runOn": [ + { + "minServerVersion": "4.9", + "topology": [ + "replicaset", + "sharded" + ] + } + ], + "database_name": "sdam-tests", + "collection_name": "pool-cleared-error", + "data": [], + "tests": [ + { + "description": "PoolClearedError does not mark server unknown", + "clientOptions": { + "retryWrites": true, + "maxPoolSize": 1, + "appname": "poolClearedErrorTest" + }, + "operations": [ + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 1 + } + } + }, + { + "name": "configureFailPoint", + "object": "testRunner", + "arguments": { + "failPoint": { + "configureFailPoint": "failCommand", + "mode": { + "times": 1 + }, + "data": { + "failCommands": [ + "insert" + ], + "blockConnection": true, + "blockTimeMS": 100, + "closeConnection": true, + "appName": "poolClearedErrorTest" + } + } + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread3" + } + }, + { + "name": "startThread", + 
"object": "testRunner", + "arguments": { + "name": "thread4" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread5" + } + }, + { + "name": "startThread", + "object": "testRunner", + "arguments": { + "name": "thread6" + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 2 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 3 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 4 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread1", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 5 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 6 + } + } + } + } + }, + { + "name": "runOnThread", + "object": "testRunner", + "arguments": { + "name": "thread2", + "operation": { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 7 + } + } + } + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread1" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread2" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread3" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { 
+ "name": "thread4" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread5" + } + }, + { + "name": "waitForThread", + "object": "testRunner", + "arguments": { + "name": "thread6" + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "waitForEvent", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + }, + { + "name": "insertOne", + "object": "collection", + "arguments": { + "document": { + "_id": 8 + } + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "ServerMarkedUnknownEvent", + "count": 1 + } + }, + { + "name": "assertEventCount", + "object": "testRunner", + "arguments": { + "event": "PoolClearedEvent", + "count": 1 + } + } + ], + "outcome": { + "collection": { + "data": [ + { + "_id": 1 + }, + { + "_id": 2 + }, + { + "_id": 3 + }, + { + "_id": 4 + }, + { + "_id": 5 + }, + { + "_id": 6 + }, + { + "_id": 7 + }, + { + "_id": 8 + } + ] + } + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.yml b/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.yml new file mode 100644 index 0000000000..13147fd5b5 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/integration/pool-cleared-error.yml @@ -0,0 +1,201 @@ +# Test SDAM error handling. +runOn: + # This test requires retryable writes, failCommand appName, and + # failCommand blockConnection with closeConnection:true (SERVER-53512). 
+ - minServerVersion: "4.9" + topology: ["replicaset", "sharded"] + +database_name: &database_name "sdam-tests" +collection_name: &collection_name "pool-cleared-error" + +data: [] + +tests: + - description: PoolClearedError does not mark server unknown + clientOptions: + retryWrites: true + maxPoolSize: 1 + appname: poolClearedErrorTest + operations: + # Perform an operation to ensure the node is discovered. + - name: insertOne + object: collection + arguments: + document: + _id: 1 + # Configure the next insert to fail with a network error which will + # clear the pool leaving it paused until the server is rediscovered. + - name: configureFailPoint + object: testRunner + arguments: + failPoint: + configureFailPoint: failCommand + mode: { times: 1 } + data: + failCommands: ["insert"] + blockConnection: true + blockTimeMS: 100 + closeConnection: true + appName: poolClearedErrorTest + # Start threads. + - name: startThread + object: testRunner + arguments: + name: thread1 + - name: startThread + object: testRunner + arguments: + name: thread2 + - name: startThread + object: testRunner + arguments: + name: thread3 + - name: startThread + object: testRunner + arguments: + name: thread4 + - name: startThread + object: testRunner + arguments: + name: thread5 + - name: startThread + object: testRunner + arguments: + name: thread6 + # Perform concurrent insert operations. The first one to execute will + # fail with a network error, mark the server Unknown, clear the pool, + # and retry. + # The other operations will either: + # - Notice the pool is paused, fail with a PoolClearedError, and retry. + # - Or block waiting in server selection until the server is + # rediscovered. + # + # Note that this test does not guarantee that a PoolClearedError will be + # raised but it is likely since the initial insert is delayed. 
+ - name: runOnThread + object: testRunner + arguments: + name: thread1 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 2 + - name: runOnThread + object: testRunner + arguments: + name: thread2 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 3 + - name: runOnThread + object: testRunner + arguments: + name: thread2 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 4 + - name: runOnThread + object: testRunner + arguments: + name: thread1 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 5 + - name: runOnThread + object: testRunner + arguments: + name: thread2 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 6 + - name: runOnThread + object: testRunner + arguments: + name: thread2 + operation: + name: insertOne + object: collection + arguments: + document: + _id: 7 + # Stop threads. + - name: waitForThread + object: testRunner + arguments: + name: thread1 + - name: waitForThread + object: testRunner + arguments: + name: thread2 + - name: waitForThread + object: testRunner + arguments: + name: thread3 + - name: waitForThread + object: testRunner + arguments: + name: thread4 + - name: waitForThread + object: testRunner + arguments: + name: thread5 + - name: waitForThread + object: testRunner + arguments: + name: thread6 + # The first shutdown error should mark the server Unknown and then clear + # the pool. + - name: waitForEvent + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: waitForEvent + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + # Perform an operation to ensure the node still useable. + - name: insertOne + object: collection + arguments: + document: + _id: 8 + # Assert the server was marked Unknown and pool was cleared exactly once. 
+ - name: assertEventCount + object: testRunner + arguments: + event: ServerMarkedUnknownEvent + count: 1 + - name: assertEventCount + object: testRunner + arguments: + event: PoolClearedEvent + count: 1 + + # Order of operations is non-deterministic so we cannot check events. + outcome: + collection: + data: + - {_id: 1} + - {_id: 2} + - {_id: 3} + - {_id: 4} + - {_id: 5} + - {_id: 6} + - {_id: 7} + - {_id: 8} diff --git a/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.json b/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.json new file mode 100644 index 0000000000..c3ab59834f --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.json @@ -0,0 +1,104 @@ +{ + "description": "Monitoring a discovered standalone connection", + "uri": "mongodb://a:27017/?directConnection=false", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "minWireVersion": 0, + "maxWireVersion": 4 + } + ] + ], + "outcome": { + "events": [ + { + "topology_opening_event": { + "topologyId": "42" + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": "Unknown", + "servers": [] + }, + "newDescription": { + "topologyType": "Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + } + } + }, + { + "server_opening_event": { + "topologyId": "42", + "address": "a:27017" + } + }, + { + "server_description_changed_event": { + "topologyId": "42", + "address": "a:27017", + "previousDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + }, + "newDescription": { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + } + }, + { + "topology_description_changed_event": { + "topologyId": "42", + "previousDescription": { + "topologyType": 
"Unknown", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Unknown" + } + ] + }, + "newDescription": { + "topologyType": "Single", + "servers": [ + { + "address": "a:27017", + "arbiters": [], + "hosts": [], + "passives": [], + "type": "Standalone" + } + ] + } + } + } + ] + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.yml b/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.yml new file mode 100644 index 0000000000..ad4ab14c31 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/monitoring/discovered_standalone.yml @@ -0,0 +1,70 @@ +description: "Monitoring a discovered standalone connection" +uri: "mongodb://a:27017/?directConnection=false" +phases: + - + responses: + - + - "a:27017" + - { ok: 1, ismaster: true, minWireVersion: 0, maxWireVersion: 4 } + + outcome: + events: + - + topology_opening_event: + topologyId: "42" + - + topology_description_changed_event: + topologyId: "42" + previousDescription: + topologyType: "Unknown" + servers: [] + newDescription: + topologyType: "Unknown" + servers: + - + address: "a:27017" + arbiters: [] + hosts: [] + passives: [] + type: "Unknown" + - + server_opening_event: + topologyId: "42" + address: "a:27017" + - + server_description_changed_event: + topologyId: "42" + address: "a:27017" + previousDescription: + address: "a:27017" + arbiters: [] + hosts: [] + passives: [] + type: "Unknown" + newDescription: + address: "a:27017" + arbiters: [] + hosts: [] + passives: [] + type: "Standalone" + - + topology_description_changed_event: + topologyId: "42" + previousDescription: + topologyType: "Unknown" + servers: + - + address: "a:27017" + arbiters: [] + hosts: [] + passives: [] + type: "Unknown" + newDescription: + topologyType: "Single" + servers: + - + address: "a:27017" + arbiters: [] + hosts: [] + passives: [] + type: "Standalone" diff --git 
a/test/spec/server-discovery-and-monitoring/monitoring/standalone.json b/test/spec/server-discovery-and-monitoring/monitoring/standalone.json index 5d40286c97..3ff10f820f 100644 --- a/test/spec/server-discovery-and-monitoring/monitoring/standalone.json +++ b/test/spec/server-discovery-and-monitoring/monitoring/standalone.json @@ -1,6 +1,6 @@ { - "description": "Monitoring a standalone connection", - "uri": "mongodb://a:27017", + "description": "Monitoring a direct connection", + "uri": "mongodb://a:27017/?directConnection=true", "phases": [ { "responses": [ diff --git a/test/spec/server-discovery-and-monitoring/monitoring/standalone.yml b/test/spec/server-discovery-and-monitoring/monitoring/standalone.yml index aff3f7322c..8787edce34 100644 --- a/test/spec/server-discovery-and-monitoring/monitoring/standalone.yml +++ b/test/spec/server-discovery-and-monitoring/monitoring/standalone.yml @@ -1,5 +1,5 @@ -description: "Monitoring a standalone connection" -uri: "mongodb://a:27017" +description: "Monitoring a direct connection" +uri: "mongodb://a:27017/?directConnection=true" phases: - responses: diff --git a/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.json b/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.json index a4b2d10da8..ceab1449cc 100644 --- a/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.json +++ b/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.json @@ -1,6 +1,6 @@ { - "description": "Monitoring a standalone connection - suppress update events for equal server descriptions", - "uri": "mongodb://a:27017", + "description": "Monitoring a direct connection - suppress update events for equal server descriptions", + "uri": "mongodb://a:27017/?directConnection=true", "phases": [ { "responses": [ diff --git 
a/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.yml b/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.yml index 0cffa8fc79..229f8d7c69 100644 --- a/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.yml +++ b/test/spec/server-discovery-and-monitoring/monitoring/standalone_suppress_equal_description_changes.yml @@ -1,5 +1,5 @@ -description: "Monitoring a standalone connection - suppress update events for equal server descriptions" -uri: "mongodb://a:27017" +description: "Monitoring a direct connection - suppress update events for equal server descriptions" +uri: "mongodb://a:27017/?directConnection=true" phases: - responses: diff --git a/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.json b/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.json index 6d3033eeee..a951da9f8d 100644 --- a/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.json +++ b/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.json @@ -1,5 +1,5 @@ { - "description": "New primary", + "description": "New primary secondary ignore ok equal 0", "uri": "mongodb://a,b/?replicaSet=rs", "phases": [ { diff --git a/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.yml b/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.yml index a77166927f..e408cd0d3f 100644 --- a/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.yml +++ b/test/spec/server-discovery-and-monitoring/rs/secondary_ignore_ok_0.yml @@ -1,4 +1,4 @@ -description: "New primary" +description: "New primary secondary ignore ok equal 0" uri: "mongodb://a,b/?replicaSet=rs" diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.json b/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.json new file mode 100644 index 0000000000..ba84e059a0 --- 
/dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.json @@ -0,0 +1,99 @@ +{ + "description": "Primary with equal topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.yml b/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.yml new file mode 100644 index 0000000000..104194ac84 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_equal.yml @@ -0,0 +1,66 @@ +description: "Primary with equal topologyVersion" + +uri: "mongodb://a/?replicaSet=rs" + +phases: [ 
+ + # Primary A is discovered + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with an equal topologyVersion, we should process the response. + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "b:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + }, + "b:27017": { + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + } +] diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.json b/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.json new file mode 100644 index 0000000000..afa8108ea2 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.json @@ -0,0 +1,254 @@ +{ + "description": "Primary with newer topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": 
{ + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "2" + } + } + }, + "b:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "c:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000002" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "c:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "d:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9 + } + 
] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": null + }, + "d:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "e:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000003" + }, + "counter": { + "$numberLong": "0" + } + } + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + {} + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "Unknown", + "topologyVersion": null + }, + "e:27017": { + "type": "Unknown", + "topologyVersion": null + } + }, + "topologyType": "ReplicaSetNoPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.yml b/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.yml new file mode 100644 index 0000000000..4edf99ba8c --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_greater.yml @@ -0,0 +1,189 @@ +description: "Primary with newer topologyVersion" + +uri: "mongodb://a/?replicaSet=rs" + +phases: [ + + # Primary A is discovered + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": 
"000000000000000000000001"}, "counter": {"$numberLong": "1"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with a greater topologyVersion counter, we should process the response. + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "b:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "2"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "2"}} + }, + "b:27017": { + + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with a different topologyVersion processId, we should process the response. + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "c:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000002"}, "counter": {"$numberLong": "0"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000002"}, "counter": {"$numberLong": "0"}} + }, + "c:27017": { + + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds without a topologyVersion, we should process the response. 
+ { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "d:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9 + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: null + }, + "d:27017": { + + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with a topologyVersion again, we should process the response. + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "e:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000003"}, "counter": {"$numberLong": "0"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000003"}, "counter": {"$numberLong": "0"}} + }, + "e:27017": { + + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with a network error, we should process the response. 
+ { + responses: [ + ["a:27017", {}] + ], + + outcome: { + servers: { + "a:27017": { + type: "Unknown", + topologyVersion: null + }, + "e:27017": { + + type: "Unknown", + topologyVersion: null + } + }, + topologyType: "ReplicaSetNoPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + } +] diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_less.json b/test/spec/server-discovery-and-monitoring/rs/topology_version_less.json new file mode 100644 index 0000000000..ae45f803d4 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_less.json @@ -0,0 +1,95 @@ +{ + "description": "Primary with older topologyVersion", + "uri": "mongodb://a/?replicaSet=rs", + "phases": [ + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, + "setName": "rs" + } + }, + { + "responses": [ + [ + "a:27017", + { + "ok": 1, + "ismaster": true, + "hosts": [ + "a:27017", + "b:27017" + ], + "setName": "rs", + "minWireVersion": 0, + "maxWireVersion": 9, + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "0" + } + } + } + ] + ], + "outcome": { + "servers": { + "a:27017": { + "type": "RSPrimary", + "setName": "rs", + "topologyVersion": { + "processId": { + "$oid": "000000000000000000000001" + }, + "counter": { + "$numberLong": "1" + } + } + } + }, + "topologyType": "ReplicaSetWithPrimary", + "logicalSessionTimeoutMinutes": null, 
+ "setName": "rs" + } + } + ] +} diff --git a/test/spec/server-discovery-and-monitoring/rs/topology_version_less.yml b/test/spec/server-discovery-and-monitoring/rs/topology_version_less.yml new file mode 100644 index 0000000000..1faac9b9a7 --- /dev/null +++ b/test/spec/server-discovery-and-monitoring/rs/topology_version_less.yml @@ -0,0 +1,62 @@ +description: "Primary with older topologyVersion" + +uri: "mongodb://a/?replicaSet=rs" + +phases: [ + + # Primary A is discovered + { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + }, + + # A responds with an older topologyVersion, we should ignore the response. 
+ { + responses: [ + ["a:27017", { + ok: 1, + ismaster: true, + hosts: ["a:27017", "b:27017"], + setName: "rs", + minWireVersion: 0, + maxWireVersion: 9, + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "0"}} + }] + ], + + outcome: { + servers: { + "a:27017": { + type: "RSPrimary", + setName: "rs", + topologyVersion: {'processId': {"$oid": "000000000000000000000001"}, "counter": {"$numberLong": "1"}} + } + }, + topologyType: "ReplicaSetWithPrimary", + logicalSessionTimeoutMinutes: null, + setName: "rs", + } + } +] diff --git a/test/tools/sdam_viz b/test/tools/sdam_viz index 67e71151f2..2ac76a670f 100755 --- a/test/tools/sdam_viz +++ b/test/tools/sdam_viz @@ -56,7 +56,7 @@ async function scheduleWorkload(client) { const result = await client .db('test') .collection('test') - .find({}, { socketTimeout: 2000 }) + .find({}, { socketTimeoutMS: 2000 }) .limit(1) .toArray(); @@ -89,16 +89,19 @@ async function scheduleWriteWorkload(client) { completedWriteWorkloads++; if (completedWriteWorkloads % writeWorkloadSampleSize === 0) { - print(`${chalk.yellow(`workload#${currentWriteWorkload}`)} completed ${completedWriteWorkloads} writes with average time: ${averageWriteMS}`); + print( + `${chalk.yellow( + `workload#${currentWriteWorkload}` + )} completed ${completedWriteWorkloads} writes with average time: ${averageWriteMS}` + ); } - } catch (e) { print(`${chalk.yellow(`workload#${currentWriteWorkload}`)} write failed: ${e.message}`); } } let exitRequestCount = 0; -process.on('SIGINT', async function() { +process.on('SIGINT', async function () { exitRequestCount++; if (exitRequestCount > 3) { console.log('force quitting...'); diff --git a/test/unit/cmap/connection.test.js b/test/unit/cmap/connection.test.js index 28606c376f..70edfb8f68 100644 --- a/test/unit/cmap/connection.test.js +++ b/test/unit/cmap/connection.test.js @@ -48,7 +48,7 @@ describe('Connection - unit/cmap', function () { expect(err).to.not.exist; 
expect(conn).to.exist; - conn.command(ns('$admin.cmd'), { ping: 1 }, { socketTimeout: 50 }, (err, result) => { + conn.command(ns('$admin.cmd'), { ping: 1 }, { socketTimeoutMS: 50 }, (err, result) => { expect(err).to.exist; expect(result).to.not.exist; diff --git a/test/unit/cmap/connection_pool.test.js b/test/unit/cmap/connection_pool.test.js index 20dbc6cd5b..c11d9b9255 100644 --- a/test/unit/cmap/connection_pool.test.js +++ b/test/unit/cmap/connection_pool.test.js @@ -95,7 +95,7 @@ describe('Connection Pool', function () { const pool = new ConnectionPool({ maxPoolSize: 1, - socketTimeout: 200, + socketTimeoutMS: 200, hostAddress: server.hostAddress() }); diff --git a/test/unit/sdam/topology.test.js b/test/unit/sdam/topology.test.js index 0f46e7a6e6..71964ed918 100644 --- a/test/unit/sdam/topology.test.js +++ b/test/unit/sdam/topology.test.js @@ -162,7 +162,7 @@ describe('Topology (unit)', function () { topology.selectServer('primary', (err, server) => { expect(err).to.not.exist; - server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeout: 250 }, (err, result) => { + server.command(ns('admin.$cmd'), { ping: 1 }, { socketTimeoutMS: 250 }, (err, result) => { expect(result).to.not.exist; expect(err).to.exist; expect(err).to.match(/timed out/);