diff --git a/cli/CHANGELOG.md b/cli/CHANGELOG.md
index d1004bea03ab..bd61069c8dc1 100644
--- a/cli/CHANGELOG.md
+++ b/cli/CHANGELOG.md
@@ -3,6 +3,10 @@
 
 _Released 4/2/2024 (PENDING)_
 
+**Performance:**
+
+- Improvements to Test Replay upload resiliency. Fixes [#28890](https://github.com/cypress-io/cypress/issues/28890). Addressed in [#29174](https://github.com/cypress-io/cypress/pull/29174).
+
 **Bugfixes:**
 
 - Fixed an issue where Cypress was not executing beyond the first spec in `cypress run` for versions of Firefox 124 and up when a custom user agent was provided. Fixes [#29190](https://github.com/cypress-io/cypress/issues/29190).
diff --git a/packages/server/lib/cloud/api/http_error.ts b/packages/server/lib/cloud/api/http_error.ts
new file mode 100644
index 000000000000..14c4501695c7
--- /dev/null
+++ b/packages/server/lib/cloud/api/http_error.ts
@@ -0,0 +1,33 @@
+const SENSITIVE_KEYS = Object.freeze(['x-amz-credential', 'x-amz-signature', 'Signature', 'AWSAccessKeyId'])
+const scrubUrl = (url: string, sensitiveKeys: readonly string[]): string => {
+  const parsedUrl = new URL(url)
+
+  for (const [key, value] of parsedUrl.searchParams) {
+    if (sensitiveKeys.includes(key)) {
+      parsedUrl.searchParams.set(key, 'X'.repeat(value.length))
+    }
+  }
+
+  return parsedUrl.href
+}
+
+export class HttpError extends Error {
+  constructor (
+    message: string,
+    public readonly originalResponse: Response,
+  ) {
+    super(message)
+  }
+
+  public static async fromResponse (response: Response): Promise<HttpError> {
+    const status = response.status
+    const statusText = await (response.json().catch(() => {
+      return response.statusText
+    }))
+
+    return new HttpError(
+      `${status} ${statusText} (${scrubUrl(response.url, SENSITIVE_KEYS)})`,
+      response,
+    )
+  }
+}
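A quick illustration of the credential-scrubbing behavior `http_error.ts` introduces above — a minimal sketch (the URL and credential values are invented for illustration; `scrubUrl` itself is module-private, so this mirrors its logic rather than importing it):

```ts
// Mirrors the scrubUrl logic above; not an export of http_error.ts.
const mask = (url: string, sensitiveKeys: readonly string[]): string => {
  const parsed = new URL(url)

  // setting an existing key does not add or remove entries, so mutating
  // searchParams while iterating it is safe here
  for (const [key, value] of parsed.searchParams) {
    if (sensitiveKeys.includes(key)) {
      parsed.searchParams.set(key, 'X'.repeat(value.length))
    }
  }

  return parsed.href
}

// hypothetical pre-signed upload URL:
console.log(mask(
  'https://uploads.example.com/archive.tar?x-amz-credential=AKIA1234&x-amz-signature=deadbeef',
  ['x-amz-credential', 'x-amz-signature'],
))
// -> https://uploads.example.com/archive.tar?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXX
```

Masking to the original value's length keeps the URL's shape in error reports without leaking credentials.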
diff --git a/packages/server/lib/cloud/api.ts b/packages/server/lib/cloud/api/index.ts
similarity index 97%
rename from packages/server/lib/cloud/api.ts
rename to packages/server/lib/cloud/api/index.ts
index 82a7fdc4fad9..2b089ff6a223 100644
--- a/packages/server/lib/cloud/api.ts
+++ b/packages/server/lib/cloud/api/index.ts
@@ -9,19 +9,19 @@ const RequestErrors = require('@cypress/request-promise/errors')
 const { agent } = require('@packages/network')
 const pkg = require('@packages/root')
 
-const machineId = require('./machine_id')
-const errors = require('../errors')
-const { apiUrl, apiRoutes, makeRoutes } = require('./routes')
+const machineId = require('../machine_id')
+const errors = require('../../errors')
+const { apiUrl, apiRoutes, makeRoutes } = require('../routes')
 
 import Bluebird from 'bluebird'
-import { getText } from '../util/status_code'
-import * as enc from './encryption'
-import getEnvInformationForProjectRoot from './environment'
+import { getText } from '../../util/status_code'
+import * as enc from '../encryption'
+import getEnvInformationForProjectRoot from '../environment'
 
 import type { OptionsWithUrl } from 'request-promise'
-import { fs } from '../util/fs'
-import ProtocolManager from './protocol'
-import type { ProjectBase } from '../project-base'
+import { fs } from '../../util/fs'
+import ProtocolManager from '../protocol'
+import type { ProjectBase } from '../../project-base'
 
 const THIRTY_SECONDS = humanInterval('30 seconds')
 const SIXTY_SECONDS = humanInterval('60 seconds')
diff --git a/packages/server/lib/cloud/api/put_protocol_artifact.ts b/packages/server/lib/cloud/api/put_protocol_artifact.ts
new file mode 100644
index 000000000000..6457d6cb0a30
--- /dev/null
+++ b/packages/server/lib/cloud/api/put_protocol_artifact.ts
@@ -0,0 +1,35 @@
+import fsAsync from 'fs/promises'
+import fs from 'fs'
+import Debug from 'debug'
+import { uploadStream, geometricRetry } from '../upload/upload_stream'
+import { StreamActivityMonitor } from '../upload/stream_activity_monitor'
+
+const debug = Debug('cypress:server:cloud:api:protocol-artifact')
+
+// the upload will get canceled if the source stream does not
+// begin flowing within 5 seconds, or if the stream pipeline
+// stalls (does not push data to the `fetch` sink) for more
+// than 5 seconds
+const MAX_START_DWELL_TIME = 5000
+const MAX_ACTIVITY_DWELL_TIME = 5000
+
+export const putProtocolArtifact = async (artifactPath: string, maxFileSize: number, destinationUrl: string) => {
+  debug(`Attempting to upload Test Replay archive from ${artifactPath} to ${destinationUrl}`)
+  const { size } = await fsAsync.stat(artifactPath)
+
+  if (size > maxFileSize) {
+    throw new Error(`Spec recording too large: artifact is ${size} bytes, limit is ${maxFileSize} bytes`)
+  }
+
+  const activityMonitor = new StreamActivityMonitor(MAX_START_DWELL_TIME, MAX_ACTIVITY_DWELL_TIME)
+  const fileStream = fs.createReadStream(artifactPath)
+
+  await uploadStream(
+    fileStream,
+    destinationUrl,
+    size, {
+      retryDelay: geometricRetry,
+      activityMonitor,
+    },
+  )
+}
diff --git a/packages/server/lib/cloud/protocol.ts b/packages/server/lib/cloud/protocol.ts
index 65b57855625c..051fa022c1cb 100644
--- a/packages/server/lib/cloud/protocol.ts
+++ b/packages/server/lib/cloud/protocol.ts
@@ -12,6 +12,8 @@ import { agent } from '@packages/network'
 import pkg from '@packages/root'
 import env from '../util/env'
 
+import { putProtocolArtifact } from './api/put_protocol_artifact'
+
 import type { Readable } from 'stream'
 import type { ProtocolManagerShape, AppCaptureProtocolInterface, CDPClient, ProtocolError, CaptureArtifact, ProtocolErrorReport, ProtocolCaptureMethod, ProtocolManagerOptions, ResponseStreamOptions, ResponseEndedWithEmptyBodyOptions, ResponseStreamTimedOutOptions } from '@packages/types'
 
@@ -23,9 +25,6 @@ const debugVerbose = Debug('cypress-verbose:server:protocol')
 const CAPTURE_ERRORS = !process.env.CYPRESS_LOCAL_PROTOCOL_PATH
 const DELETE_DB = !process.env.CYPRESS_LOCAL_PROTOCOL_PATH
 
-// Timeout for upload
-const TWO_MINUTES = 120000
-const RETRY_DELAYS = [500, 1000]
 const DB_SIZE_LIMIT = 5000000000
 
 const dbSizeLimit = () => {
@@ -51,13 +50,6 @@ const requireScript = (script: string) => {
   return mod.exports
 }
 
-class CypressRetryableError extends Error {
-  constructor (message: string) {
-    super(message)
-    this.name = 'CypressRetryableError'
-  }
-}
-
 export class ProtocolManager implements ProtocolManagerShape {
   private _runId?: string
   private _instanceId?: string
@@ -267,7 +259,7 @@ export class ProtocolManager implements ProtocolManagerShape {
     return this._errors.filter((e) => !e.fatal)
   }
 
-  async getArchiveInfo (): Promise<{ stream: Readable, fileSize: number } | void> {
+  async getArchiveInfo (): Promise<{ filePath: string, fileSize: number } | void> {
     const archivePath = this._archivePath
 
     debug('reading archive from', archivePath)
@@ -276,91 +268,36 @@
     }
 
     return {
-      stream: fs.createReadStream(archivePath),
+      filePath: archivePath,
       fileSize: (await fs.stat(archivePath)).size,
     }
   }
 
-  async uploadCaptureArtifact ({ uploadUrl, payload, fileSize }: CaptureArtifact, timeout) {
-    const archivePath = this._archivePath
+  async uploadCaptureArtifact ({ uploadUrl, fileSize, filePath }: CaptureArtifact): Promise<{
+    success: boolean
+    fileSize: number
+    specAccess?: ReturnType<AppCaptureProtocolInterface['getDbMetadata']>
+  } | void> {
+    if (!this._protocol || !filePath || !this._db) {
+      debug('not uploading due to one of the following being falsy: %O', {
+        _protocol: !!this._protocol,
+        archivePath: !!filePath,
+        _db: !!this._db,
+      })
-
-    if (!this._protocol || !archivePath || !this._db) {
       return
     }
 
-    debug(`uploading %s to %s with a file size of %s`, archivePath, uploadUrl, fileSize)
-
-    const retryRequest = async (retryCount: number, errors: Error[]) => {
-      try {
-        if (fileSize > dbSizeLimit()) {
-          throw new Error(`Spec recording too large: db is ${fileSize} bytes, limit is ${dbSizeLimit()} bytes`)
-        }
-
-        const controller = new AbortController()
-
-        setTimeout(() => {
-          controller.abort()
-        }, timeout ?? TWO_MINUTES)
-
-        const res = await fetch(uploadUrl, {
-          agent,
-          method: 'PUT',
-          // @ts-expect-error - this is supported
-          body: payload,
-          headers: {
-            'Accept': 'application/json',
-            'Content-Type': 'application/x-tar',
-            'Content-Length': `${fileSize}`,
-          },
-          signal: controller.signal,
-        })
+    debug(`uploading %s to %s with a file size of %s`, filePath, uploadUrl, fileSize)
 
-        if (res.ok) {
-          return {
-            fileSize,
-            success: true,
-            specAccess: this._protocol?.getDbMetadata(),
-          }
-        }
-
-        const errorMessage = await res.json().catch(() => {
-          const url = new URL(uploadUrl)
-
-          for (const [key, value] of url.searchParams) {
-            if (['x-amz-credential', 'x-amz-signature'].includes(key.toLowerCase())) {
-              url.searchParams.set(key, 'X'.repeat(value.length))
-            }
-          }
-
-          return `${res.status} ${res.statusText} (${url.href})`
-        })
-
-        debug(`error response: %O`, errorMessage)
-
-        if (res.status >= 500 && res.status < 600) {
-          throw new CypressRetryableError(errorMessage)
-        }
-
-        throw new Error(errorMessage)
-      } catch (e) {
-        // Only retry errors that are network related (e.g. connection reset or timeouts)
-        if (['FetchError', 'AbortError', 'CypressRetryableError'].includes(e.name)) {
-          if (retryCount < RETRY_DELAYS.length) {
-            debug(`retrying upload %o`, { retryCount })
-            await new Promise((resolve) => setTimeout(resolve, RETRY_DELAYS[retryCount]))
-
-            return await retryRequest(retryCount + 1, [...errors, e])
-          }
-        }
-
-        const totalErrors = [...errors, e]
+    try {
+      await putProtocolArtifact(filePath, dbSizeLimit(), uploadUrl)
 
-        throw new AggregateError(totalErrors, e.message)
+      return {
+        fileSize,
+        success: true,
+        specAccess: this._protocol?.getDbMetadata(),
      }
-    }
-
-    try {
-      return await retryRequest(0, [])
     } catch (e) {
       if (CAPTURE_ERRORS) {
        this._errors.push({
@@ -368,15 +305,15 @@
          captureMethod: 'uploadCaptureArtifact',
          fatal: true,
        })
-      }
 
-      throw e
+        throw e
+      }
     } finally {
-      await (
-        DELETE_DB ?
-        fs.unlink(archivePath).catch((e) => {
-          debug(`Error unlinking db %o`, e)
-        }) : Promise.resolve()
-      )
+      if (DELETE_DB) {
+        await fs.unlink(filePath).catch((e) => {
+          debug('Error unlinking db %o', e)
+        })
+      }
     }
   }
diff --git a/packages/server/lib/cloud/upload/stream_activity_monitor.ts b/packages/server/lib/cloud/upload/stream_activity_monitor.ts
new file mode 100644
index 000000000000..f0b49b786c47
--- /dev/null
+++ b/packages/server/lib/cloud/upload/stream_activity_monitor.ts
@@ -0,0 +1,103 @@
+import Debug from 'debug'
+import { Transform, Readable } from 'stream'
+
+const debug = Debug('cypress:server:cloud:stream-activity-monitor')
+const debugVerbose = Debug('cypress-verbose:server:cloud:stream-activity-monitor')
+
+export class StreamStartTimedOutError extends Error {
+  constructor (maxStartDwellTime: number) {
+    super(`Source stream failed to begin sending data after ${maxStartDwellTime}ms`)
+  }
+}
+
+export class StreamStalledError extends Error {
+  constructor (maxActivityDwellTime: number) {
+    super(`Stream stalled: no activity detected in the previous ${maxActivityDwellTime}ms`)
+  }
+}
+
+/**
+ * `StreamActivityMonitor` encapsulates state with regard to monitoring a stream
+ * for flow failure states. Given a maxStartDwellTime and a maxActivityDwellTime, this class
+ * can `monitor` a Node Readable stream and signal if the sink (e.g., a `fetch`) should be
+ * aborted via an AbortController that can be retrieved via `getController`. It does this
+ * by creating an identity Transform stream and piping the source stream through it. The
+ * transform stream receives each chunk that the source emits, and orchestrates some timeouts
+ * to determine if the stream has failed to start, or if the data flow has stalled.
+ *
+ * Example usage:
+ *
+ *   const MAX_START_DWELL_TIME = 5000
+ *   const MAX_ACTIVITY_DWELL_TIME = 5000
+ *   const stallDetection = new StreamActivityMonitor(MAX_START_DWELL_TIME, MAX_ACTIVITY_DWELL_TIME)
+ *   try {
+ *     const source = fs.createReadStream('/some/source/file')
+ *     await fetch('/destination/url', {
+ *       method: 'PUT',
+ *       body: stallDetection.monitor(source),
+ *       signal: stallDetection.getController().signal,
+ *     })
+ *   } catch (e) {
+ *     if (stallDetection.getController().signal.reason) {
+ *       // the `fetch` was aborted by the signal that `stallDetection` controlled
+ *     }
+ *   }
+ *
+ */
+export class StreamActivityMonitor {
+  private streamMonitor: Transform | undefined
+  private startTimeout: NodeJS.Timeout | undefined
+  private activityTimeout: NodeJS.Timeout | undefined
+  private controller: AbortController
+
+  constructor (private maxStartDwellTime: number, private maxActivityDwellTime: number) {
+    this.controller = new AbortController()
+  }
+
+  public getController () {
+    return this.controller
+  }
+
+  public monitor (stream: Readable): Readable {
+    debug('monitoring stream')
+    if (this.streamMonitor || this.startTimeout || this.activityTimeout) {
+      this.reset()
+    }
+
+    this.streamMonitor = new Transform({
+      transform: (chunk, _, callback) => {
+        debugVerbose('Received chunk from File ReadableStream; Enqueuing to network: ', chunk.length)
+
+        clearTimeout(this.startTimeout)
+        this.markActivityInterval()
+        callback(null, chunk)
+      },
+    })
+
+    this.startTimeout = setTimeout(() => {
+      this.controller?.abort(new StreamStartTimedOutError(this.maxStartDwellTime))
+    }, this.maxStartDwellTime)
+
+    return stream.pipe(this.streamMonitor)
+  }
+
+  private reset () {
+    debug('Resetting Stream Activity Monitor')
+    clearTimeout(this.startTimeout)
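+    // (clearing both the start and activity watchdogs here keeps a stale
+    // timeout from a previous monitor() call from aborting the new upload)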
+    clearTimeout(this.activityTimeout)
+
+    this.streamMonitor = undefined
+    this.startTimeout = undefined
+    this.activityTimeout = undefined
+
+    this.controller = new AbortController()
+  }
+
+  private markActivityInterval () {
+    debug('marking activity interval')
+    clearTimeout(this.activityTimeout)
+    this.activityTimeout = setTimeout(() => {
+      this.controller?.abort(new StreamStalledError(this.maxActivityDwellTime))
+    }, this.maxActivityDwellTime)
+  }
+}
diff --git a/packages/server/lib/cloud/upload/upload_stream.ts b/packages/server/lib/cloud/upload/upload_stream.ts
new file mode 100644
index 000000000000..0a3126fd4125
--- /dev/null
+++ b/packages/server/lib/cloud/upload/upload_stream.ts
@@ -0,0 +1,138 @@
+import crossFetch from 'cross-fetch'
+import fetchCreator from 'fetch-retry-ts'
+import type { ReadStream } from 'fs'
+import type { StreamActivityMonitor } from './stream_activity_monitor'
+import Debug from 'debug'
+import { HttpError } from '../api/http_error'
+import { agent } from '@packages/network'
+
+const debug = Debug('cypress:server:cloud:uploadStream')
+const debugVerbose = Debug('cypress-verbose:server:cloud:uploadStream')
+/**
+ * These are retryable status codes. Other status codes are not valid for automatic
+ * retries: they indicate some issue with the client making the request, or that
+ * the server can never fulfill the request. Some of these status codes should only
+ * be retried if the request is idempotent, but I think it's fine for S3 for now.
+ * - 408 Request Timeout
+ * - 429 Too Many Requests (S3 can return this)
+ * - 502 Bad Gateway
+ * - 503 Service Unavailable
+ * - 504 Gateway Timeout
+ */
+const RETRYABLE_STATUS_CODES = [408, 429, 502, 503, 504]
+
+/**
+ * `geometricRetry` is expected to be passed into uploadStream: nock + delay is very difficult to
+ * use fake timers for, as the callback to generate a nock response (expectedly)
+ * executes before any retry is attempted, and there is no way to "await" that
+ * retry hook to advance fake timers. If a method of using fake timers with nock
+ * is known, this can be refactored to simplify the uploadStream signature and
+ * bake the geometricRetry logic into the args passed to fetchCreator.
+ * Without passing in a noop delay in the tests, or some way of advancing sinon's
+ * clock, the tests for uploadStream would take too long to execute.
+ */
+export const geometricRetry = (n) => {
+  return (n + 1) * 500
+}
+
+const identity = <T>(arg: T) => arg
+
+type UploadStreamOptions = {
+  retryDelay?: (count: number) => number
+  activityMonitor?: StreamActivityMonitor
+}
+
+export const uploadStream = async (fileStream: ReadStream, destinationUrl: string, fileSize: number, options?: UploadStreamOptions): Promise<void> => {
+  debug(`Uploading file stream (${fileSize} bytes) to ${destinationUrl}`)
+  const retryDelay = options?.retryDelay ?? identity
+  const timeoutMonitor = options?.activityMonitor ?? undefined
+
+  /**
+   * To support more robust error messages from the server than statusText, we attempt to
+   * retrieve response.json(). This is async, so a list of error promises is stored here
+   * that must be resolved before throwing an aggregate error. This is necessary because
+   * fetch-retry-ts's retryOn fn does not support returning a promise.
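+   * (Each retryOn invocation below may push one pending error promise; they are
+   * awaited together only after the final attempt, when the AggregateError is built.)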
+   */
+  const errorPromises: Promise<Error>[] = []
+  const abortController = timeoutMonitor?.getController()
+
+  debug('PUT %s: %d byte file upload initiated', destinationUrl, fileSize)
+
+  const retryableFetch = fetchCreator(crossFetch as typeof crossFetch, {
+    retries: 2,
+    retryDelay,
+
+    retryOn: (attempt, retries, error, response) => {
+      debugVerbose('PUT %s Response: %O', destinationUrl, response)
+      debugVerbose('PUT %s Error: %O', destinationUrl, error)
+      // Record all HTTP errors encountered
+      if (response?.status && response?.status >= 400) {
+        errorPromises.push(HttpError.fromResponse(response))
+      }
+
+      // Record network errors
+      if (error) {
+        errorPromises.push(Promise.resolve(error))
+      }
+
+      const isUnderRetryLimit = attempt < retries
+      const isNetworkError = !!error
+      const isRetryableHttpError = (!!response?.status && RETRYABLE_STATUS_CODES.includes(response.status))
+
+      debug('checking if should retry: %s %O', destinationUrl, {
+        attempt,
+        retries,
+        networkError: error,
+        status: response?.status,
+        statusText: response?.statusText,
+      })
+
+      return (
+        isUnderRetryLimit && // the `retries` param is ignored when `retryOn` is a fn, so the limit is enforced here
+        (isNetworkError || isRetryableHttpError)
+      )
+    },
+  })
+
+  return new Promise(async (resolve, reject) => {
+    debug(`${destinationUrl}: PUT ${fileSize}`)
+    try {
+      const response = await retryableFetch(destinationUrl, {
+        agent,
+        method: 'PUT',
+        headers: {
+          'content-length': String(fileSize),
+          'content-type': 'application/x-tar',
+          'accept': 'application/json',
+        },
+        // ts thinks this is a web fetch, which only expects ReadableStreams.
+        // But, this is a node fetch, which supports ReadStreams.
+        // @ts-expect-error
+        body: timeoutMonitor ? timeoutMonitor.monitor(fileStream) : fileStream,
+        ...(abortController && { signal: abortController.signal }),
+      })
+
+      debug('PUT %s: HTTP %d %s', destinationUrl, response.status, response.statusText)
+
+      if (response.status >= 400) {
+        const errors = await Promise.all(errorPromises)
+
+        reject(
+          errors.length > 1 ?
+            new AggregateError(errors, `${errors.length} errors encountered during upload`) :
+            errors[0],
+        )
+      } else {
+        // S3 does not include a response body - if the request succeeds,
+        // a simple 200 response is returned
+        resolve()
+      }
+    } catch (e) {
+      const error = abortController?.signal.reason ?? e
+
+      debug('PUT %s: %s', destinationUrl, error)
+      reject(error)
+    }
+  })
+}
diff --git a/packages/server/lib/modes/record.js b/packages/server/lib/modes/record.js
index dbaeef11dade..5eb9f6864ba9 100644
--- a/packages/server/lib/modes/record.js
+++ b/packages/server/lib/modes/record.js
@@ -190,8 +190,7 @@ const uploadArtifactBatch = async (artifacts, protocolManager, quiet) => {
 
     return {
       ...artifact,
-      fileSize: archiveInfo.fileSize,
-      payload: archiveInfo.stream,
+      ...archiveInfo,
     }
   } catch (err) {
     debug('failed to prepare protocol artifact', {
@@ -375,8 +374,10 @@ const uploadArtifactBatch = async (artifacts, protocolManager, quiet) => {
     let { error, errorStack, allErrors } = report
 
     if (allErrors) {
-      error = `Failed to upload after ${allErrors.length} attempts. Errors: ${allErrors.map((error) => error.message).join(', ')}`
+      error = `Failed to upload Test Replay after ${allErrors.length} attempts. Errors: ${allErrors.map((error) => error.message).join(', ')}`
       errorStack = allErrors.map((error) => error.stack).join(', ')
+    } else if (error) {
+      error = `Failed to upload Test Replay: ${error}`
+    }
 
     return skipped && !report.error ?
acc : { diff --git a/packages/server/lib/util/print-run.ts b/packages/server/lib/util/print-run.ts index d2ef8f12dce2..200f726f6841 100644 --- a/packages/server/lib/util/print-run.ts +++ b/packages/server/lib/util/print-run.ts @@ -600,7 +600,7 @@ export const printPendingArtifactUpload = (artifact: T, process.stdout.write(`- ${formatFileSize(Number(artifact.fileSize))}`) } - if (artifact.filePath) { + if (artifact.filePath && artifact.reportKey !== 'protocol') { process.stdout.write(` ${formatPath(artifact.filePath, undefined, 'cyan')}`) } diff --git a/packages/server/package.json b/packages/server/package.json index 2d2b47fe0fff..4dcbb60bb53d 100644 --- a/packages/server/package.json +++ b/packages/server/package.json @@ -66,6 +66,7 @@ "evil-dns": "0.2.0", "execa": "1.0.0", "express": "4.17.3", + "fetch-retry-ts": "^1.3.1", "find-process": "1.4.7", "firefox-profile": "4.3.2", "fluent-ffmpeg": "2.1.2", @@ -189,7 +190,7 @@ "proxyquire": "2.1.3", "react": "16.8.6", "repl.history": "0.1.4", - "sinon": "5.1.1", + "sinon": "17.0.1", "snap-shot-it": "7.9.3", "ssestream": "1.0.1", "supertest": "4.0.2", diff --git a/packages/server/test/mockery_helper.ts b/packages/server/test/mockery_helper.ts new file mode 100644 index 000000000000..e54e5934548b --- /dev/null +++ b/packages/server/test/mockery_helper.ts @@ -0,0 +1,22 @@ +import mockery from 'mockery' +import path from 'path' + +export const mockElectron = (mockeryInstance: typeof mockery) => { + // stub out the entire electron object for our stub + // we must use an absolute path here because of the way mockery internally loads this + // module - meaning the first time electron is required it'll use this path string + // so because its required from a separate module we must use an absolute reference to it + mockeryInstance.registerSubstitute( + 'electron', + path.join(__dirname, './support/helpers/electron_stub'), + ) + + // stub out electron's original-fs module which is available when running in electron + mockeryInstance.registerMock('original-fs', {}) +} + +export const enable = (mockeryInstance: typeof mockery) => { + mockeryInstance.enable({ + warnOnUnregistered: false, + }) +} diff --git a/packages/server/test/spec_helper.js b/packages/server/test/spec_helper.js index 6fa70c36bab6..1b45b4f45a5b 100644 --- a/packages/server/test/spec_helper.js +++ b/packages/server/test/spec_helper.js @@ -1,6 +1,8 @@ /* eslint-disable no-console */ require('../lib/environment') +const { enable, mockElectron } = require('./mockery_helper') + const chai = require('chai') chai.use(require('chai-subset')) @@ -14,7 +16,6 @@ global.proxyquire = require('proxyquire') global.sinon = require('sinon') const _ = require('lodash') const Promise = require('bluebird') -const path = require('path') const cache = require('../lib/cache') require('chai') @@ -60,7 +61,11 @@ const { } = sinon sinon.useFakeTimers = function (...args) { - sinon._clock = useFakeTimers.apply(sinon, args) + const clock = useFakeTimers.apply(sinon, args) + + sinon._clock = clock + + return clock } sinon.restore = function (...args) { @@ -75,21 +80,9 @@ sinon.restore = function (...args) { return restore.apply(sinon, args) } -mockery.enable({ - warnOnUnregistered: false, -}) +enable(mockery) -// stub out the entire electron object for our stub -// we must use an absolute path here because of the way mockery internally loads this -// module - meaning the first time electron is required it'll use this path string -// so because its required from a separate module we must use an absolute reference to 
it
-mockery.registerSubstitute(
-  'electron',
-  path.join(__dirname, './support/helpers/electron_stub'),
-)
-
-// stub out electron's original-fs module which is available when running in electron
-mockery.registerMock('original-fs', {})
+mockElectron(mockery)
 
 before(function () {
   if (hasOnly) {
diff --git a/packages/server/test/unit/cloud/api_spec.js b/packages/server/test/unit/cloud/api/api_spec.js
similarity index 99%
rename from packages/server/test/unit/cloud/api_spec.js
rename to packages/server/test/unit/cloud/api/api_spec.js
index abd281289e6b..9555d2724541 100644
--- a/packages/server/test/unit/cloud/api_spec.js
+++ b/packages/server/test/unit/cloud/api/api_spec.js
@@ -3,20 +3,20 @@ const jose = require('jose')
 const base64Url = require('base64url')
 const stealthyRequire = require('stealthy-require')
 
-require('../../spec_helper')
+require('../../../spec_helper')
 
 const _ = require('lodash')
 const os = require('os')
-const encryption = require('../../../lib/cloud/encryption')
+const encryption = require('../../../../lib/cloud/encryption')
 
 const {
   agent,
 } = require('@packages/network')
 const pkg = require('@packages/root')
-const api = require('../../../lib/cloud/api')
-const cache = require('../../../lib/cache')
-const errors = require('../../../lib/errors')
-const machineId = require('../../../lib/cloud/machine_id')
+const api = require('../../../../lib/cloud/api')
+const cache = require('../../../../lib/cache')
+const errors = require('../../../../lib/errors')
+const machineId = require('../../../../lib/cloud/machine_id')
 const Promise = require('bluebird')
 
 const API_BASEURL = 'http://localhost:1234'
@@ -237,9 +237,9 @@ describe('lib/cloud/api', () => {
 
       if (!prodApi) {
         prodApi = stealthyRequire(require.cache, () => {
-          return require('../../../lib/cloud/api')
+          return require('../../../../lib/cloud/api')
         }, () => {
-          require('../../../lib/cloud/encryption')
+          require('../../../../lib/cloud/encryption')
         }, module)
       }
     })
diff --git a/packages/server/test/unit/cloud/api/put_protocol_artifact_spec.ts b/packages/server/test/unit/cloud/api/put_protocol_artifact_spec.ts
new file mode 100644
index 000000000000..feb198b836de
--- /dev/null
+++ b/packages/server/test/unit/cloud/api/put_protocol_artifact_spec.ts
@@ -0,0 +1,194 @@
+import mockery from 'mockery'
+import { enable as enableMockery, mockElectron } from '../../../mockery_helper'
+import sinon from 'sinon'
+import chai, { expect } from 'chai'
+import sinonChai from 'sinon-chai'
+import chaiAsPromised from 'chai-as-promised'
+
+import { ReadStream } from 'fs'
+import { StreamActivityMonitor } from '../../../../lib/cloud/upload/stream_activity_monitor'
+import { uploadStream } from '../../../../lib/cloud/upload/upload_stream'
+import { HttpError } from '../../../../lib/cloud/api/http_error'
+
+chai.use(chaiAsPromised).use(sinonChai)
+
+describe('putProtocolArtifact', () => {
+  let filePath: string
+  let maxFileSize: number
+  let fileSize: number
+  let mockStreamMonitor
+  let mockReadStream
+  let destinationUrl
+
+  let statStub: sinon.SinonStub
+  let uploadStreamStub: sinon.SinonStub<Parameters<typeof uploadStream>, ReturnType<typeof uploadStream>>
+  let geometricRetryStub: sinon.SinonStub
+
+  let putProtocolArtifact: (artifactPath: string, maxFileSize: number, destinationUrl: string) => Promise<void>
+
+  /**
+   * global.mockery is defined the first time `test/spec_helper.js` is required by any spec.
+   * Unfortunately, the only way to fully reset a mocked dependency with mockery is to
+   * disable it in an `afterEach` (see the afterEach hook of this describe).
+   * Because global mockery is enabled once for the entire spec run, this kind of post-spec
+   * state reset fails in dramatic ways, often in specs unrelated to this spec.
+   *
+   * To get around this suite-wide failure condition, this spec disables the global mockery
+   * once before running any of its tests, and re-enables it after it finishes. This is kind
+   * of an inverse of setting/clearing state before/after tests: we're clearing global state,
+   * and then re-setting global state. The alternative, in order to use mockery in this spec,
+   * was to refactor all other specs to not rely on spec_helper. Some specs rely on spec_helper
+   * only implicitly, though, so the scope of that refactor is difficult to determine.
+   */
+  before(() => {
+    if (global.mockery) {
+      global.mockery.disable()
+    }
+  })
+
+  after(() => {
+    if (global.mockery) {
+      enableMockery(global.mockery)
+      mockElectron(global.mockery)
+    }
+  })
+
+  beforeEach(() => {
+    mockery.enable({
+      useCleanCache: true,
+      warnOnUnregistered: false,
+    })
+
+    maxFileSize = 20000
+    filePath = '/some/file/path'
+    fileSize = 20
+    destinationUrl = 'https://some/destination/url'
+
+    mockReadStream = sinon.createStubInstance(ReadStream)
+    mockery.registerMock('fs', {
+      createReadStream: sinon.stub().returns(mockReadStream),
+    })
+
+    geometricRetryStub = sinon.stub()
+
+    uploadStreamStub = sinon.stub<Parameters<typeof uploadStream>, ReturnType<typeof uploadStream>>()
+
+    // these paths need to match what `put_protocol_artifact` uses to import them
+    mockery.registerMock('../upload/upload_stream', {
+      geometricRetry: geometricRetryStub,
+      uploadStream: uploadStreamStub,
+    })
+
+    mockStreamMonitor = sinon.createStubInstance(StreamActivityMonitor)
+    mockery.registerMock('../upload/stream_activity_monitor', {
+      StreamActivityMonitor: sinon.stub().callsFake(() => {
+        return mockStreamMonitor
+      }),
+    })
+
+    statStub = sinon.stub()
+
+    mockery.registerMock('fs/promises', {
+      stat: statStub,
+    })
+
+    putProtocolArtifact = require('../../../../lib/cloud/api/put_protocol_artifact').putProtocolArtifact
+  })
+
+  afterEach(() => {
+    mockery.deregisterAll()
+    mockery.disable()
+  })
+
+  describe('when provided an artifact path that does not exist', () => {
+    let invalidPath: string
+
+    beforeEach(() => {
+      invalidPath = '/some/invalid/path'
+
+      statStub.withArgs(invalidPath).callsFake((path) => {
+        const e = new Error(`ENOENT: no such file or directory, stat '${path}'`)
+
+        // no way to instantiate system errors like ENOENT in TS -
+        // there is no exported system error interface
+        // @ts-ignore
+        e.errno = -2
+        // @ts-ignore
+        e.code = 'ENOENT'
+        // @ts-ignore
+        e.syscall = 'stat'
+        // @ts-ignore
+        e.path = path
+
+        return Promise.reject(e)
+      })
+    })
+
+    it('rejects with a file does not exist error', () => {
+      return expect(putProtocolArtifact(invalidPath, maxFileSize, destinationUrl)).to.be.rejectedWith(`ENOENT: no such file or directory, stat '/some/invalid/path'`)
+    })
+  })
+
+  describe('when provided a valid artifact path', () => {
+    beforeEach(() => {
+      statStub.withArgs(filePath).resolves({ size: fileSize })
+    })
+
+    describe('and the artifact is too large', () => {
+      beforeEach(() => {
+        maxFileSize = fileSize - 1
+      })
+
+      it('rejects with a file too large error', () => {
+        return expect(putProtocolArtifact(filePath, maxFileSize, destinationUrl))
+        .to.be.rejectedWith('Spec recording too large: artifact is 20 bytes, limit is 19 bytes')
+      })
+    })
+
+    describe('and fetch completes successfully', () => {
+      it('resolves', () => {
+        uploadStreamStub.withArgs(
+          mockReadStream,
+          destinationUrl,
+          fileSize, {
retryDelay: geometricRetryStub, + activityMonitor: mockStreamMonitor, + }, + ).resolves() + + return expect(putProtocolArtifact(filePath, maxFileSize, destinationUrl)).to.be.fulfilled + }) + }) + + describe('and uploadStream rejects', () => { + let httpErr: HttpError + let res: Response + + beforeEach(() => { + res = sinon.createStubInstance(Response) + + httpErr = new HttpError(`403 Forbidden (${destinationUrl})`, res) + + uploadStreamStub.withArgs( + mockReadStream, + destinationUrl, + fileSize, { + retryDelay: geometricRetryStub, + activityMonitor: mockStreamMonitor, + }, + ).rejects(httpErr) + }) + + it('rethrows', async () => { + let error: Error | undefined + + try { + await putProtocolArtifact(filePath, maxFileSize, destinationUrl) + } catch (e) { + error = e + } + expect(error).to.eq(httpErr) + }) + }) + }) +}) diff --git a/packages/server/test/unit/cloud/upload/stream_activity_monitor_spec.ts b/packages/server/test/unit/cloud/upload/stream_activity_monitor_spec.ts new file mode 100644 index 000000000000..ae72666e460c --- /dev/null +++ b/packages/server/test/unit/cloud/upload/stream_activity_monitor_spec.ts @@ -0,0 +1,90 @@ +const { sinon, expect } = require('../../../spec_helper') + +import { StreamActivityMonitor, StreamStalledError, StreamStartTimedOutError } from '../../../../lib/cloud/upload/stream_activity_monitor' +import { Readable, Writable } from 'stream' + +describe('StreamTimeoutController', () => { + const maxStartDwellTime = 1000 + const maxActivityDwellTime = 500 + + let monitor: StreamActivityMonitor + let clock: sinon.SinonFakeTimers + let fakeWebReadableStream: ReadableStream + let fakeNodeReadableStream: Readable + let streamSink: Writable + let streamController: ReadableStreamDefaultController + + let writtenValues: string + + beforeEach(() => { + writtenValues = '' + monitor = new StreamActivityMonitor(maxStartDwellTime, maxActivityDwellTime) + clock = sinon.useFakeTimers() + + // oddly, it's easier to asynchronously emit data from a ReadableStream than + // it is to asynchronously emit data from a Readable, so to test this we are + // converting a ReadableStream to a Writable + fakeWebReadableStream = new ReadableStream({ + start (controller) { + streamController = controller + }, + }) + + // @ts-ignore + fakeNodeReadableStream = Readable.fromWeb(fakeWebReadableStream) + + streamSink = new Writable() + streamSink._write = (chunk, _, callback) => { + writtenValues += chunk + callback() + } + }) + + afterEach(() => { + clock.restore() + }) + + describe('when monitoring a stream', () => { + beforeEach(() => { + monitor.monitor(fakeNodeReadableStream).pipe(streamSink) + }) + + it('signals an abort if no initial activity happens within maxStartDwellTime', async () => { + await clock.tickAsync(maxStartDwellTime + 1) + expect(monitor.getController().signal.aborted).to.be.true + expect(monitor.getController().signal.reason).to.be.an.instanceOf(StreamStartTimedOutError) + }) + + it('signals an abort if activity fails to happen after maxActivityDwellTime', async () => { + streamController.enqueue('some data') + await clock.tickAsync(maxActivityDwellTime + 1) + expect(monitor.getController().signal.aborted).to.be.true + expect(monitor.getController().signal.reason).to.be.an.instanceOf(StreamStalledError) + }) + + it('does not signal an abort if initial activity happens within maxStartDwellTime', async () => { + await clock.tickAsync(maxStartDwellTime - 10) + streamController.enqueue('some data') + expect(monitor.getController().signal.aborted).not.to.be.true + 
+      expect(monitor.getController().signal.reason).to.be.undefined
+    })
+
+    it('does not signal an abort if subsequent activity happens within maxActivityDwellTime', async () => {
+      streamController.enqueue('some data')
+      await clock.tickAsync(maxActivityDwellTime - 10)
+      streamController.enqueue('some more data')
+      await clock.tickAsync(maxActivityDwellTime - 10)
+      expect(monitor.getController().signal.aborted).not.to.be.true
+      expect(monitor.getController().signal.reason).to.be.undefined
+    })
+
+    it('passes data through', async () => {
+      const value = 'some data'
+
+      streamController.enqueue(value)
+      streamController.enqueue(value)
+      await clock.tickAsync(maxActivityDwellTime)
+      expect(writtenValues).to.equal(value + value)
+    })
+  })
+})
diff --git a/packages/server/test/unit/cloud/upload/upload_stream_spec.ts b/packages/server/test/unit/cloud/upload/upload_stream_spec.ts
new file mode 100644
index 000000000000..8a3142e41a12
--- /dev/null
+++ b/packages/server/test/unit/cloud/upload/upload_stream_spec.ts
@@ -0,0 +1,242 @@
+/// <reference lib="dom" />
+
+import fs, { ReadStream } from 'fs'
+import { Readable } from 'stream'
+import sinon from 'sinon'
+import chai, { expect } from 'chai'
+import sinonChai from 'sinon-chai'
+import chaiAsPromised from 'chai-as-promised'
+import { uploadStream, geometricRetry } from '../../../../lib/cloud/upload/upload_stream'
+import { HttpError } from '../../../../lib/cloud/api/http_error'
+import { StreamActivityMonitor, StreamStalledError, StreamStartTimedOutError } from '../../../../lib/cloud/upload/stream_activity_monitor'
+
+chai.use(chaiAsPromised).use(sinonChai)
+
+import nock from 'nock'
+
+describe('geometricRetry', () => {
+  it('returns a linearly increasing delay', () => {
+    expect(geometricRetry(0)).to.eq(500)
+    expect(geometricRetry(1)).to.eq(1000)
+    expect(geometricRetry(2)).to.eq(1500)
+  })
+})
+
+describe('uploadStream', () => {
+  let destinationUrl: string
+  let destinationPath: string
+  let destinationDomain: string
+  let uploadPromise: Promise<void>
+  let scope: nock.Scope
+  let fileSize: number
+  let fsReadStream: ReadStream
+  let fileContents: string
+
+  function execSimpleStream () {
+    fsReadStream.push(fileContents)
+    fsReadStream.push(null)
+  }
+
+  function mockUpload () {
+    return scope.put(destinationPath, undefined, {
+      reqheaders: {
+        'Accept': 'application/json',
+        'Content-Type': 'application/x-tar',
+        'Content-Length': fileSize.toString(),
+      },
+    })
+  }
+
+  beforeEach(() => {
+    fileContents = 'lorem ipsum dolor set'
+    fileSize = fileContents.length
+
+    fsReadStream = new Readable() as ReadStream
+    sinon.stub(fs, 'createReadStream').callsFake(() => {
+      return fsReadStream
+    })
+
+    destinationDomain = 'http://somedomain.test'
+    destinationPath = '/upload'
+    destinationUrl = `${destinationDomain}${destinationPath}`
+    scope = nock(destinationDomain)
+  })
+
+  afterEach(() => {
+    (fs.createReadStream as sinon.SinonStub).restore()
+    nock.cleanAll()
+  })
+
+  describe('when fetch resolves with a 200 OK', () => {
+    beforeEach(() => {
+      mockUpload().reply(200, 'OK')
+    })
+
+    it(`resolves`, async () => {
+      uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize)
+      execSimpleStream()
+
+      await expect(uploadPromise).to.be.fulfilled
+    })
+  })
+
+  describe('when fetch resolves with a 4xx/5xx response', () => {
+    const status = 403
+
+    beforeEach(() => {
+      mockUpload().reply(status)
+    })
+
+    it('rejects with an appropriate HttpError', async () => {
+      const uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize)
+
+      execSimpleStream()
+
+      let err:
Error | undefined + + try { + await uploadPromise + } catch (e) { + err = e + } + expect(err).to.be.instanceOf(HttpError) + expect(err?.message).to.eq(`403 Forbidden (${destinationUrl})`) + }) + }) + + describe('retry behavior', () => { + describe('when fetch resolves with a retryable http status code 3 times', () => { + const callCount = 3 + + let retryDelay + + beforeEach(() => { + retryDelay = sinon.stub().returns((n: number) => { + return n + }) + }) + + ;[408, 429, 502, 503, 504].forEach((status) => { + it(`makes a total of ${callCount} calls for HTTP ${status} and eventually rejects`, async () => { + let count = 0 + + const inc = () => { + count++ + } + + mockUpload().times(4).reply(status, inc) + + const uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize, { + retryDelay, + }) + + execSimpleStream() + + await expect(uploadPromise).to.eventually.be.rejectedWith(AggregateError) + expect(retryDelay).to.have.been.called + expect(count).to.eq(callCount) + }) + }) + + it('throws an aggregate error containing all of the errors encountered', async () => { + let uploadPromise + + mockUpload().reply(503) + mockUpload().reply(408) + mockUpload().reply(502) + + let error: AggregateError | undefined + + try { + uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize) + + execSimpleStream() + await uploadPromise + } catch (e) { + error = e + } + + expect(error).not.be.undefined + expect(error?.message).to.eq('3 errors encountered during upload') + expect(error?.errors[0]?.message).to.eq(`503 Service Unavailable (${destinationUrl})`) + expect(error?.errors[1]?.message).to.eq(`408 Request Timeout (${destinationUrl})`) + expect(error?.errors[2]?.message).to.eq(`502 Bad Gateway (${destinationUrl})`) + }) + }) + + describe('when fetch resolves with a retryable status code 2x, and then a 200', () => { + const callCount = 3 + + ;[408, 429, 502, 503, 504].forEach((status) => { + it(`makes a total of ${callCount} requests after HTTP ${status} and eventually resolves`, async () => { + let count = 0 + + const inc = () => count++ + + mockUpload().reply(status, inc) + mockUpload().reply(status, inc) + mockUpload().reply(200, inc) + + const uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize) + + execSimpleStream() + await expect(uploadPromise).to.be.fulfilled + expect(count).to.eq(callCount) + }) + }) + }) + }) + + describe('when passed a timeout controller', () => { + let activityMonitor: StreamActivityMonitor + const maxStartDwellTime = 1000 + const maxActivityDwellTime = 1000 + let abortController: AbortController + + beforeEach(() => { + abortController = new AbortController() + activityMonitor = new StreamActivityMonitor(maxStartDwellTime, maxActivityDwellTime) + sinon.stub(activityMonitor, 'getController').callsFake(() => abortController) + sinon.stub(activityMonitor, 'monitor').callsFake((arg) => arg) + }) + + it('pipes the readstream through the timeout controller monitoring method', async () => { + mockUpload().reply(200) + const uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize, { + activityMonitor, + }) + + execSimpleStream() + await expect(uploadPromise).to.be.fulfilled + expect(activityMonitor.monitor).to.be.calledWith(fsReadStream) + }) + + describe('and the timeout monitor\'s signal aborts with a StreamStartTimedOut error', () => { + beforeEach(() => { + abortController.abort(new StreamStartTimedOutError(maxStartDwellTime)) + }) + + it('rejects with a StreamStartFailed error', async () => { + const uploadPromise = 
uploadStream(fsReadStream, destinationUrl, fileSize, { + activityMonitor, + }) + + await expect(uploadPromise).to.be.rejectedWith(StreamStartTimedOutError) + }) + }) + + describe('and the timeout monitor\'s signal aborts with a StreamStalled error', () => { + beforeEach(() => { + abortController.abort(new StreamStalledError(maxActivityDwellTime)) + }) + + it('rejects with a StreamStalled error', async () => { + const uploadPromise = uploadStream(fsReadStream, destinationUrl, fileSize, { + activityMonitor, + }) + + await expect(uploadPromise).to.be.rejectedWith(StreamStalledError) + }) + }) + }) +}) diff --git a/packages/types/src/protocol.ts b/packages/types/src/protocol.ts index e6402ab51f28..4e5d4c07a6a1 100644 --- a/packages/types/src/protocol.ts +++ b/packages/types/src/protocol.ts @@ -74,7 +74,7 @@ export type ProtocolErrorReport = { export type CaptureArtifact = { uploadUrl: string fileSize: number - payload: Readable + filePath: string } export type ProtocolManagerOptions = { diff --git a/scripts/after-pack-hook.js b/scripts/after-pack-hook.js index 23376c023728..c8cbea39ec1a 100644 --- a/scripts/after-pack-hook.js +++ b/scripts/after-pack-hook.js @@ -77,7 +77,7 @@ module.exports = async function (params) { const encryptionFileSource = await getEncryptionFileSource(encryptionFilePath) const cloudEnvironmentFilePath = path.join(CY_ROOT_DIR, 'packages/server/lib/cloud/environment.ts') const cloudEnvironmentFileSource = await getCloudEnvironmentFileSource(cloudEnvironmentFilePath) - const cloudApiFilePath = path.join(CY_ROOT_DIR, 'packages/server/lib/cloud/api.ts') + const cloudApiFilePath = path.join(CY_ROOT_DIR, 'packages/server/lib/cloud/api/index.ts') const cloudApiFileSource = await getProtocolFileSource(cloudApiFilePath) const cloudProtocolFilePath = path.join(CY_ROOT_DIR, 'packages/server/lib/cloud/protocol.ts') const cloudProtocolFileSource = await getProtocolFileSource(cloudProtocolFilePath) diff --git a/system-tests/__snapshots__/record_spec.js b/system-tests/__snapshots__/record_spec.js index 20897f5b579e..bdafa9d8c10c 100644 --- a/system-tests/__snapshots__/record_spec.js +++ b/system-tests/__snapshots__/record_spec.js @@ -3047,7 +3047,7 @@ exports['e2e record capture-protocol enabled protocol runtime errors db size too (Uploaded Cloud Artifacts) - Screenshot - Done Uploading 1 kB in Xm, Ys ZZ.ZZms 1/2 /XXX/XXX/XXX/cypress/screenshots/record_pass.cy.js/yay it passes.png - - Test Replay - Failed Uploading after Xm, Ys ZZ.ZZms 2/2 - Spec recording too large: db is 1024 bytes, limit is 200 bytes + - Test Replay - Failed Uploading after Xm, Ys ZZ.ZZms 2/2 - Spec recording too large: artifact is 1024 bytes, limit is 200 bytes ==================================================================================================== @@ -3658,7 +3658,7 @@ exports['e2e record capture-protocol enabled passing retrieves the capture proto ` -exports['capture-protocol api errors upload 500 - retries 3 times and fails continues 1'] = ` +exports['capture-protocol api errors upload 500 - does not retry continues 1'] = ` ==================================================================================================== @@ -3742,7 +3742,7 @@ exports['capture-protocol api errors upload 500 - retries 3 times and fails cont ` -exports['capture-protocol api errors upload 500 - retries 2 times and succeeds on the last call continues 1'] = ` +exports['capture-protocol api errors upload 503 - retries 2 times and succeeds on the last call continues 1'] = ` 
==================================================================================================== @@ -3825,3 +3825,87 @@ exports['capture-protocol api errors upload 500 - retries 2 times and succeeds o ` + +exports['capture-protocol api errors upload 503 - tries 3 times and fails continues 1'] = ` + +==================================================================================================== + + (Run Starting) + + ┌────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ Cypress: 1.2.3 │ + │ Browser: FooBrowser 88 │ + │ Specs: 1 found (record_pass.cy.js) │ + │ Searched: cypress/e2e/record_pass* │ + │ Params: Tag: false, Group: false, Parallel: false │ + │ Run URL: https://dashboard.cypress.io/projects/cjvoj7/runs/12 │ + └────────────────────────────────────────────────────────────────────────────────────────────────┘ + + +──────────────────────────────────────────────────────────────────────────────────────────────────── + + Running: record_pass.cy.js (1 of 1) + Estimated: X second(s) + + + record pass + ✓ passes + - is pending + + + 1 passing + 1 pending + + + (Results) + + ┌────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ Tests: 2 │ + │ Passing: 1 │ + │ Failing: 0 │ + │ Pending: 1 │ + │ Skipped: 0 │ + │ Screenshots: 1 │ + │ Video: false │ + │ Duration: X seconds │ + │ Estimated: X second(s) │ + │ Spec Ran: record_pass.cy.js │ + └────────────────────────────────────────────────────────────────────────────────────────────────┘ + + + (Screenshots) + + - /XXX/XXX/XXX/cypress/screenshots/record_pass.cy.js/yay it passes.png (400x1022) + + + (Uploading Cloud Artifacts) + + - Video - Nothing to upload + - Screenshot - 1 kB /XXX/XXX/XXX/cypress/screenshots/record_pass.cy.js/yay it passes.png + - Test Replay + + Uploading Cloud Artifacts: . . . . . + + (Uploaded Cloud Artifacts) + + - Screenshot - Done Uploading 1 kB in Xm, Ys ZZ.ZZms 1/2 /XXX/XXX/XXX/cypress/screenshots/record_pass.cy.js/yay it passes.png + - Test Replay - Failed Uploading after Xm, Ys ZZ.ZZms 2/2 - 503 Service Unavailable (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX) + +==================================================================================================== + + (Run Finished) + + + Spec Tests Passing Failing Pending Skipped + ┌────────────────────────────────────────────────────────────────────────────────────────────────┐ + │ ✔ record_pass.cy.js XX:XX 2 1 - 1 - │ + └────────────────────────────────────────────────────────────────────────────────────────────────┘ + ✔ All specs passed! 
XX:XX 2 1 - 1 - + + +─────────────────────────────────────────────────────────────────────────────────────────────────────── + + Recorded Run: https://dashboard.cypress.io/projects/cjvoj7/runs/12 + + +` diff --git a/system-tests/test/record_spec.js b/system-tests/test/record_spec.js index 3938e1f3e5af..0035ea991c5a 100644 --- a/system-tests/test/record_spec.js +++ b/system-tests/test/record_spec.js @@ -2524,7 +2524,7 @@ describe('capture-protocol api errors', () => { enableCaptureProtocol() - const stubbedServerWithErrorOn = (endpoint, numberOfFailuresBeforeSuccess = Number.MAX_SAFE_INTEGER) => { + const stubbedServerWithErrorOn = (endpoint, numberOfFailuresBeforeSuccess = Number.MAX_SAFE_INTEGER, status = 500, statusText = 'Internal Server Error') => { let failures = 0 return setupStubbedServer(createRoutes({ @@ -2532,7 +2532,7 @@ describe('capture-protocol api errors', () => { res: (req, res) => { if (failures < numberOfFailuresBeforeSuccess) { failures += 1 - res.status(500).send('500 - Internal Server Error') + res.status(status).send(`${status} - ${statusText}`) } else { routeHandlers[endpoint].res(req, res) } @@ -2541,7 +2541,7 @@ describe('capture-protocol api errors', () => { })) } - describe('upload 500 - retries 3 times and fails', () => { + describe('upload 500 - does not retry', () => { stubbedServerWithErrorOn('putCaptureProtocolUpload') it('continues', function () { process.env.API_RETRY_INTERVALS = '1000' @@ -2561,7 +2561,33 @@ describe('capture-protocol api errors', () => { expect(artifactReport?.protocol).to.exist() expect(artifactReport?.protocol?.error).to.equal( - 'Failed to upload after 3 attempts. Errors: 500 Internal Server Error (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX), 500 Internal Server Error (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX), 500 Internal Server Error (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX)', + 'Failed to upload Test Replay: 500 Internal Server Error (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX)', + ) + }) + }) + }) + + describe('upload 503 - tries 3 times and fails', () => { + stubbedServerWithErrorOn('putCaptureProtocolUpload', Number.MAX_SAFE_INTEGER, 503, 'Service Unavailable') + it('continues', function () { + process.env.API_RETRY_INTERVALS = '1000' + + return systemTests.exec(this, { + key: 'f858a2bc-b469-4e48-be67-0876339ee7e1', + configFile: 'cypress-with-project-id.config.js', + spec: 'record_pass*', + record: true, + snapshot: true, + }).then(() => { + const urls = getRequestUrls() + + expect(urls).to.include.members([`PUT /instances/${instanceId}/artifacts`]) + + const artifactReport = getRequests().find(({ url }) => url === `PUT /instances/${instanceId}/artifacts`)?.body + + expect(artifactReport?.protocol).to.exist() + expect(artifactReport?.protocol?.error).to.equal( + 'Failed to upload Test Replay after 3 attempts. 
Errors: 503 Service Unavailable (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX), 503 Service Unavailable (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX), 503 Service Unavailable (http://localhost:1234/capture-protocol/upload/?x-amz-credential=XXXXXXXX&x-amz-signature=XXXXXXXXXXXXX)', ) expect(artifactReport?.protocol?.errorStack).to.exist().and.not.to.be.empty() @@ -2569,8 +2595,8 @@ describe('capture-protocol api errors', () => { }) }) - describe('upload 500 - retries 2 times and succeeds on the last call', () => { - stubbedServerWithErrorOn('putCaptureProtocolUpload', 2) + describe('upload 503 - retries 2 times and succeeds on the last call', () => { + stubbedServerWithErrorOn('putCaptureProtocolUpload', 2, 503, 'Internal Server Error') let archiveFile = '' diff --git a/tooling/v8-snapshot/cache/darwin/snapshot-meta.json b/tooling/v8-snapshot/cache/darwin/snapshot-meta.json index 51c632c38f7d..bc7113a8992b 100644 --- a/tooling/v8-snapshot/cache/darwin/snapshot-meta.json +++ b/tooling/v8-snapshot/cache/darwin/snapshot-meta.json @@ -751,7 +751,7 @@ "./packages/server/lib/browsers/firefox.ts", "./packages/server/lib/browsers/memory/index.ts", "./packages/server/lib/cache.js", - "./packages/server/lib/cloud/api.ts", + "./packages/server/lib/cloud/api/index.ts", "./packages/server/lib/cloud/auth.ts", "./packages/server/lib/cloud/routes.ts", "./packages/server/lib/cloud/upload.ts", diff --git a/tooling/v8-snapshot/cache/linux/snapshot-meta.json b/tooling/v8-snapshot/cache/linux/snapshot-meta.json index d7bf9051b529..4a8476affac8 100644 --- a/tooling/v8-snapshot/cache/linux/snapshot-meta.json +++ b/tooling/v8-snapshot/cache/linux/snapshot-meta.json @@ -750,7 +750,7 @@ "./packages/server/lib/browsers/firefox.ts", "./packages/server/lib/browsers/memory/index.ts", "./packages/server/lib/cache.js", - "./packages/server/lib/cloud/api.ts", + "./packages/server/lib/cloud/api/index.ts", "./packages/server/lib/cloud/auth.ts", "./packages/server/lib/cloud/routes.ts", "./packages/server/lib/cloud/upload.ts", diff --git a/tooling/v8-snapshot/cache/win32/snapshot-meta.json b/tooling/v8-snapshot/cache/win32/snapshot-meta.json index 08caf1536229..4c857b341ac5 100644 --- a/tooling/v8-snapshot/cache/win32/snapshot-meta.json +++ b/tooling/v8-snapshot/cache/win32/snapshot-meta.json @@ -755,7 +755,7 @@ "./packages/server/lib/browsers/firefox.ts", "./packages/server/lib/browsers/memory/index.ts", "./packages/server/lib/cache.js", - "./packages/server/lib/cloud/api.ts", + "./packages/server/lib/cloud/api/index.ts", "./packages/server/lib/cloud/auth.ts", "./packages/server/lib/cloud/routes.ts", "./packages/server/lib/cloud/upload.ts", diff --git a/yarn.lock b/yarn.lock index cb74c76dbfb9..2725c14aca0d 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6334,10 +6334,10 @@ dependencies: "@sinonjs/commons" "^1.7.0" -"@sinonjs/fake-timers@^10.0.2": - version "10.3.0" - resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-10.3.0.tgz#55fdff1ecab9f354019129daf4df0dd4d923ea66" - integrity sha512-V4BG07kuYSUkTCSBHG8G8TNhM+F19jXFWnQtzj+we8DrkpSBCee9Z3Ms8yiGer/dlmhe35/Xdgyo3/0rQKg7YA== +"@sinonjs/fake-timers@^11.2.2": + version "11.2.2" + resolved "https://registry.yarnpkg.com/@sinonjs/fake-timers/-/fake-timers-11.2.2.tgz#50063cc3574f4a27bd8453180a04171c85cc9699" + integrity sha512-G2piCSxQ7oWOxwGSAyFHfPIsyeJGXYtc6mFbnFA+kRXkiEnTl8c/8jul2S329iFBnDI9HGoeWWAZvuvOkZccgw== dependencies: "@sinonjs/commons" 
"^3.0.0" @@ -6422,10 +6422,19 @@ lodash.get "^4.4.2" type-detect "^4.0.8" -"@sinonjs/text-encoding@^0.7.1": - version "0.7.1" - resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.1.tgz#8da5c6530915653f3a1f38fd5f101d8c3f8079c5" - integrity sha512-+iTbntw2IZPb/anVDbypzfQa+ay64MW0Zo8aJ8gZPWMMK6/OubMVb6lUPMagqjOPnmtauXnFCACVl3O7ogjeqQ== +"@sinonjs/samsam@^8.0.0": + version "8.0.0" + resolved "https://registry.yarnpkg.com/@sinonjs/samsam/-/samsam-8.0.0.tgz#0d488c91efb3fa1442e26abea81759dfc8b5ac60" + integrity sha512-Bp8KUVlLp8ibJZrnvq2foVhP0IVX2CIprMJPK0vqGqgrDa0OHVKeZyBykqskkrdxV6yKBPmGasO8LVjAKR3Gew== + dependencies: + "@sinonjs/commons" "^2.0.0" + lodash.get "^4.4.2" + type-detect "^4.0.8" + +"@sinonjs/text-encoding@^0.7.1", "@sinonjs/text-encoding@^0.7.2": + version "0.7.2" + resolved "https://registry.yarnpkg.com/@sinonjs/text-encoding/-/text-encoding-0.7.2.tgz#5981a8db18b56ba38ef0efb7d995b12aa7b51918" + integrity sha512-sXXKG+uL9IrKqViTtao2Ws6dy0znu9sOaP1di/jKGW1M6VssO8vlpXCQcpZ+jisQ1tTFAC5Jo/EOzFbggBagFQ== "@smithy/abort-controller@^2.0.16": version "2.0.16" @@ -13707,10 +13716,10 @@ diff@^4.0.1, diff@^4.0.2: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^5.0.0: - version "5.1.0" - resolved "https://registry.yarnpkg.com/diff/-/diff-5.1.0.tgz#bc52d298c5ea8df9194800224445ed43ffc87e40" - integrity sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw== +diff@^5.0.0, diff@^5.1.0: + version "5.2.0" + resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" + integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== diffie-hellman@^5.0.0: version "5.0.3" @@ -15760,6 +15769,11 @@ fd-slicer@~1.1.0: dependencies: pend "~1.2.0" +fetch-retry-ts@^1.3.1: + version "1.3.1" + resolved "https://registry.yarnpkg.com/fetch-retry-ts/-/fetch-retry-ts-1.3.1.tgz#a1572ebe28657fe8b89af0e130820a01feb1e753" + integrity sha512-0QW6UP1nnnBkN6WWo4s0FmvsmdF8PfB1bY4qrAwp5JC4dn/Ad93SMRvdMQLCm9LxjaLx+OnkUEV8pUcsi5lz6g== + figgy-pudding@^3.5.1: version "3.5.2" resolved "https://registry.yarnpkg.com/figgy-pudding/-/figgy-pudding-3.5.2.tgz#b4eee8148abb01dcf1d1ac34367d59e12fa61d6e" @@ -19815,6 +19829,11 @@ just-extend@^4.0.2: resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-4.1.1.tgz#158f1fdb01f128c411dc8b286a7b4837b3545282" integrity sha512-aWgeGFW67BP3e5181Ep1Fv2v8z//iBJfrvyTnq8wG86vEESwmonn1zPBJ0VfmT9CJq2FIT0VsETtrNFm2a+SHA== +just-extend@^6.2.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/just-extend/-/just-extend-6.2.0.tgz#b816abfb3d67ee860482e7401564672558163947" + integrity sha512-cYofQu2Xpom82S6qD778jBDpwvvy39s1l/hrYij2u9AMdQcGRpaBu6kY4mVhuno5kJVi1DAz4aiphA2WI1/OAw== + just-my-luck@3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/just-my-luck/-/just-my-luck-3.0.0.tgz#a5567eb4fad51100436e1bfd6492f2d9613aaf4e" @@ -22550,16 +22569,16 @@ nise@^4.0.1, nise@^4.1.0: just-extend "^4.0.2" path-to-regexp "^1.7.0" -nise@^5.1.1: - version "5.1.4" - resolved "https://registry.yarnpkg.com/nise/-/nise-5.1.4.tgz#491ce7e7307d4ec546f5a659b2efe94a18b4bbc0" - integrity sha512-8+Ib8rRJ4L0o3kfmyVCL7gzrohyDe0cMFTBa2d364yIrEGMEoetznKJx899YxjybU6bL9SQkYPSBBs1gyYs8Xg== +nise@^5.1.1, nise@^5.1.5: + version "5.1.9" + resolved 
"https://registry.yarnpkg.com/nise/-/nise-5.1.9.tgz#0cb73b5e4499d738231a473cd89bd8afbb618139" + integrity sha512-qOnoujW4SV6e40dYxJOb3uvuoPHtmLzIk4TFo+j0jPJoC+5Z9xja5qH5JZobEPsa8+YYphMrOSwnrshEhG2qww== dependencies: - "@sinonjs/commons" "^2.0.0" - "@sinonjs/fake-timers" "^10.0.2" - "@sinonjs/text-encoding" "^0.7.1" - just-extend "^4.0.2" - path-to-regexp "^1.7.0" + "@sinonjs/commons" "^3.0.0" + "@sinonjs/fake-timers" "^11.2.2" + "@sinonjs/text-encoding" "^0.7.2" + just-extend "^6.2.0" + path-to-regexp "^6.2.1" no-case@^2.2.0, no-case@^2.3.2: version "2.3.2" @@ -24466,10 +24485,10 @@ path-to-regexp@^1.7.0: dependencies: isarray "0.0.1" -path-to-regexp@^6.2.0: - version "6.2.0" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.0.tgz#f7b3803336104c346889adece614669230645f38" - integrity sha512-f66KywYG6+43afgE/8j/GoiNyygk/bnoCbps++3ErRKsIYkGGupyv07R2Ok5m9i67Iqc+T2g1eAUGUPzWhYTyg== +path-to-regexp@^6.2.0, path-to-regexp@^6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-6.2.1.tgz#d54934d6798eb9e5ef14e7af7962c945906918e5" + integrity sha512-JLyh7xT1kizaEvcaXOQwOc2/Yhw6KZOvPf1S8401UyLk86CU79LN3vl7ztXGm/pZ+YjoyAJ4rxmHwbkBXJX+yw== path-type@^1.0.0: version "1.1.0" @@ -27575,6 +27594,18 @@ sinon@14.0.0: nise "^5.1.1" supports-color "^7.2.0" +sinon@17.0.1: + version "17.0.1" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.1.tgz#26b8ef719261bf8df43f925924cccc96748e407a" + integrity sha512-wmwE19Lie0MLT+ZYNpDymasPHUKTaZHUH/pKEubRXIzySv9Atnlw+BUMGCzWgV7b7wO+Hw6f1TEOr0IUnmU8/g== + dependencies: + "@sinonjs/commons" "^3.0.0" + "@sinonjs/fake-timers" "^11.2.2" + "@sinonjs/samsam" "^8.0.0" + diff "^5.1.0" + nise "^5.1.5" + supports-color "^7.2.0" + sinon@5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/sinon/-/sinon-5.1.1.tgz#19c59810ffb733ea6e76a28b94a71fc4c2f523b8"