diff --git a/README.md b/README.md index 51557069a..bef959966 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ const client = require('twilio')(accountSid, authToken, { ### Enable Auto-Retry with Exponential Backoff -`twilio-node` supports automatic retry with exponential backoff when API requests receive an [Error 429 response](https://support.twilio.com/hc/en-us/articles/360044308153-Twilio-API-response-Error-429-Too-Many-Requests-). This retry with exponential backoff feature is disabled by default. To enable this feature, instantiate the Twilio client with the `autoRetry` flag set to `true`. +`twilio-node` supports automatic retry with exponential backoff when API requests receive an [Error 429 response](https://support.twilio.com/hc/en-us/articles/360044308153-Twilio-API-response-Error-429-Too-Many-Requests-) or encounter network errors (such as `ECONNRESET`, `ETIMEDOUT`, or `ECONNABORTED`). This retry with exponential backoff feature is disabled by default. To enable this feature, instantiate the Twilio client with the `autoRetry` flag set to `true`. Optionally, the maximum number of retries performed by this feature can be set with the `maxRetries` flag. The default maximum number of retries is `3`. diff --git a/spec/unit/base/ExponentialBackoffErrorHandler.spec.js b/spec/unit/base/ExponentialBackoffErrorHandler.spec.js new file mode 100644 index 000000000..ef2875597 --- /dev/null +++ b/spec/unit/base/ExponentialBackoffErrorHandler.spec.js @@ -0,0 +1,394 @@ +import axios from "axios"; +import RequestClient from "../../../src/base/RequestClient"; +import sinon from "sinon"; + +describe("Exponential Backoff Error Handler", function () { + let axiosMock; + let clock; + let originalSetTimeout; + + beforeEach(function () { + // Mock axios instance + axiosMock = { + create: jest.fn().mockReturnValue({ + defaults: { headers: { post: {} } }, + interceptors: { + response: { use: jest.fn(), handlers: [] }, + request: { use: jest.fn() }, + }, + }), + }; + + // Use sinon to fake timers for testing delays + originalSetTimeout = global.setTimeout; + clock = sinon.useFakeTimers(); + + // Mock axios module + jest.spyOn(axios, "create").mockImplementation(axiosMock.create); + }); + + afterEach(function () { + jest.restoreAllMocks(); + clock.restore(); + global.setTimeout = originalSetTimeout; + }); + + describe("getExponentialBackoffErrorHandler function", function () { + let errorHandler; + let axiosInstance; + + beforeEach(function () { + // Create RequestClient with auto-retry enabled + const requestClient = new RequestClient({ autoRetry: true }); + + // Extract error handler from axios interceptors + errorHandler = + requestClient.axios.interceptors.response.use.mock.calls[0][1]; + axiosInstance = requestClient.axios; + }); + + it("should reject with the original error for non-retryable errors", async function () { + const nonRetryableError = new Error("Not retryable"); + nonRetryableError.code = "ENOTFOUND"; // Not in the retryable list + nonRetryableError.config = { method: "GET", url: "/test" }; + + try { + await errorHandler(nonRetryableError); + fail("Should have rejected"); + } catch (error) { + expect(error).toBe(nonRetryableError); + } + }); + + it("should reject with the original error if config is missing", async function () { + const errorWithoutConfig = new Error("No config"); + errorWithoutConfig.code = "ECONNRESET"; // Retryable but no config + + try { + await errorHandler(errorWithoutConfig); + fail("Should have rejected"); + } catch (error) { + 
expect(error).toBe(errorWithoutConfig); + } + }); + + it("should retry retryable network errors (ECONNRESET)", async function () { + const retryableError = new Error("Connection reset"); + retryableError.code = "ECONNRESET"; + retryableError.config = { + method: "GET", + url: "/test", + retryCount: 0, + }; + + // Setup axios mock to resolve on retry + jest.spyOn(axiosInstance, "request").mockResolvedValueOnce({ + status: 200, + data: "success", + headers: {}, + }); + + // Start the retry process + const retryPromise = errorHandler(retryableError); + + // Fast-forward time to handle the setTimeout + clock.tick(1000); + + // Wait for the promise to resolve + const result = await retryPromise; + + // Verify axios was called with updated config + expect(axiosInstance.request).toHaveBeenCalledWith( + expect.objectContaining({ + method: "GET", + url: "/test", + retryCount: 1, + }) + ); + + expect(result).toEqual({ + status: 200, + data: "success", + headers: {}, + }); + }); + + it("should retry retryable network errors (ETIMEDOUT)", async function () { + const retryableError = new Error("Timeout"); + retryableError.code = "ETIMEDOUT"; + retryableError.config = { + method: "GET", + url: "/test", + retryCount: 0, + }; + + // Setup axios mock to resolve on retry + jest.spyOn(axiosInstance, "request").mockResolvedValueOnce({ + status: 200, + data: "success", + headers: {}, + }); + + // Start the retry process + const retryPromise = errorHandler(retryableError); + + // Fast-forward time to handle the setTimeout + clock.tick(1000); + + // Wait for the promise to resolve + const result = await retryPromise; + + // Verify axios was called with updated config + expect(axiosInstance.request).toHaveBeenCalledWith( + expect.objectContaining({ + method: "GET", + url: "/test", + retryCount: 1, + }) + ); + + expect(result).toEqual({ + status: 200, + data: "success", + headers: {}, + }); + }); + + it("should retry retryable network errors (ECONNABORTED)", async function () { + const retryableError = new Error("Connection aborted"); + retryableError.code = "ECONNABORTED"; + retryableError.config = { + method: "GET", + url: "/test", + retryCount: 0, + }; + + // Setup axios mock to resolve on retry + jest.spyOn(axiosInstance, "request").mockResolvedValueOnce({ + status: 200, + data: "success", + headers: {}, + }); + + // Start the retry process + const retryPromise = errorHandler(retryableError); + + // Fast-forward time to handle the setTimeout + clock.tick(1000); + + // Wait for the promise to resolve + const result = await retryPromise; + + // Verify axios was called with updated config + expect(axiosInstance.request).toHaveBeenCalledWith( + expect.objectContaining({ + method: "GET", + url: "/test", + retryCount: 1, + }) + ); + + expect(result).toEqual({ + status: 200, + data: "success", + headers: {}, + }); + }); + + it("should increment retry count with each retry", async function () { + const retryableError = new Error("Connection reset"); + retryableError.code = "ECONNRESET"; + retryableError.config = { + method: "GET", + url: "/test", + retryCount: 0, + }; + + // First retry fails again with same error + const secondError = new Error("Connection reset again"); + secondError.code = "ECONNRESET"; + secondError.config = { + method: "GET", + url: "/test", + retryCount: 1, + }; + + // Setup axios mock to fail on first retry, then succeed + jest + .spyOn(axiosInstance, "request") + .mockRejectedValueOnce(secondError) + .mockResolvedValueOnce({ + status: 200, + data: "success finally", + headers: {}, + }); + + // 
Start the retry process + const retryPromise = errorHandler(retryableError); + + // Fast-forward time to handle first setTimeout + clock.tick(1000); + + // Need to wait for the first promise to be handled + await Promise.resolve(); + + // Now handle the second error's setTimeout + clock.tick(2000); + + // Wait for the final promise to resolve + const result = await retryPromise; + + // Verify axios was called twice with updated configs + expect(axiosInstance.request).toHaveBeenNthCalledWith( + 1, + expect.objectContaining({ + method: "GET", + url: "/test", + retryCount: 1, + }) + ); + + expect(axiosInstance.request).toHaveBeenNthCalledWith( + 2, + expect.objectContaining({ + method: "GET", + url: "/test", + retryCount: 2, + }) + ); + + expect(result).toEqual({ + status: 200, + data: "success finally", + headers: {}, + }); + }); + + it("should use exponential backoff for retry delays", async function () { + // Create multiple errors to test increasing delays + const error1 = new Error("Connection reset 1"); + error1.code = "ECONNRESET"; + error1.config = { method: "GET", url: "/test", retryCount: 0 }; + + const error2 = new Error("Connection reset 2"); + error2.code = "ECONNRESET"; + error2.config = { method: "GET", url: "/test", retryCount: 1 }; + + const error3 = new Error("Connection reset 3"); + error3.code = "ECONNRESET"; + error3.config = { method: "GET", url: "/test", retryCount: 2 }; + + // Setup axios mock for sequence of calls + jest + .spyOn(axiosInstance, "request") + .mockRejectedValueOnce(error2) + .mockRejectedValueOnce(error3) + .mockResolvedValueOnce({ + status: 200, + data: "success", + headers: {}, + }); + + // Mock setTimeout to capture delay values + const setTimeoutSpy = jest.spyOn(global, "setTimeout"); + + // Start the retry process + const retryPromise = errorHandler(error1); + + // Fast-forward enough time for all retries + clock.tick(10000); + + // Wait for the promise chain to complete + await retryPromise; + + // Extract the delays from the setTimeout calls + const delays = setTimeoutSpy.mock.calls.map((call) => call[1]); + + // Verify exponential backoff pattern (with some flexibility due to jitter) + expect(delays[0]).toBeLessThanOrEqual(200); // ~100 * 2^1 with jitter + expect(delays[1]).toBeLessThanOrEqual(400); // ~100 * 2^2 with jitter + expect(delays[2]).toBeLessThanOrEqual(800); // ~100 * 2^3 with jitter + + // Verify increasing delays + expect(delays[0]).toBeLessThan(delays[1]); + expect(delays[1]).toBeLessThan(delays[2]); + }); + + it("should respect maximum retry limit", async function () { + // Create a client with custom max retries + const customClient = new RequestClient({ + autoRetry: true, + maxRetries: 2, + }); + + // Extract error handler + const customErrorHandler = + customClient.axios.interceptors.response.use.mock.calls[0][1]; + + // Create an error that's already at the retry limit + const finalRetryError = new Error("Final retry"); + finalRetryError.code = "ECONNRESET"; + finalRetryError.config = { + method: "GET", + url: "/test", + retryCount: 2, // Already at the max retry limit + }; + + try { + await customErrorHandler(finalRetryError); + fail("Should have rejected"); + } catch (error) { + // Should reject with the original error after max retries + expect(error).toBe(finalRetryError); + } + }); + + it("should respect max retry delay", async function () { + // Create client with low max retry delay + const customClient = new RequestClient({ + autoRetry: true, + maxRetryDelay: 150, // Very low max delay + maxRetries: 5, + }); + + // 
Extract error handler + const customErrorHandler = + customClient.axios.interceptors.response.use.mock.calls[0][1]; + + // Create an error for a high retry count (which would normally have a long delay) + const highRetryError = new Error("High retry count"); + highRetryError.code = "ECONNRESET"; + highRetryError.config = { + method: "GET", + url: "/test", + retryCount: 4, // Would normally be 100 * 2^4 = 1600ms + }; + + // Mock setTimeout to capture delay value + const setTimeoutSpy = jest.spyOn(global, "setTimeout"); + + // Setup axios mock to resolve on retry + jest.spyOn(customClient.axios, "request").mockResolvedValueOnce({ + status: 200, + data: "success", + headers: {}, + }); + + // Start the retry process + const retryPromise = customErrorHandler(highRetryError); + + // Fast-forward time + clock.tick(1000); + + // Wait for the promise to resolve + await retryPromise; + + // Extract the delay from the setTimeout call + const delay = setTimeoutSpy.mock.calls[0][1]; + + // Verify the delay is capped at the max retry delay + // With jitter, it will be somewhere between 0 and maxRetryDelay + expect(delay).toBeLessThanOrEqual(150); + }); + }); +}); diff --git a/spec/unit/base/RequestClient.spec.js b/spec/unit/base/RequestClient.spec.js index b16e697fa..71a1df1e7 100644 --- a/spec/unit/base/RequestClient.spec.js +++ b/spec/unit/base/RequestClient.spec.js @@ -479,3 +479,144 @@ describe("Exponential backoff and retry", function () { }); }, 10000); }); + +describe("Network error retry", function () { + let client; + + beforeEach(function () { + client = new RequestClient({ + autoRetry: true, + }); + }); + + it("should identify retryable errors correctly", function () { + // Test the isRetryableError function logic by creating error objects + const retryableErrors = [ + { code: "ECONNRESET" }, + { code: "ETIMEDOUT" }, + { code: "ECONNABORTED" }, + ]; + + const nonRetryableErrors = [ + { code: "ENOTFOUND" }, + { code: "ECONNREFUSED" }, + { message: "Some other error" }, + null, + ]; + + // Verify we have the expected number of each type + expect(retryableErrors.length).toEqual(3); + expect(nonRetryableErrors.length).toEqual(4); + + // Verify the retryable error codes are the expected ones + expect(retryableErrors.map((e) => e.code)).toEqual([ + "ECONNRESET", + "ETIMEDOUT", + "ECONNABORTED", + ]); + }); + + it("should enable network error retry when autoRetry is true", function () { + const clientWithRetry = new RequestClient({ + autoRetry: true, + }); + + const clientWithoutRetry = new RequestClient({ + autoRetry: false, + }); + + // Verify the clients are configured correctly + expect(clientWithRetry.autoRetry).toBe(true); + expect(clientWithoutRetry.autoRetry).toBe(false); + }); + + it("should configure max retries for network errors", function () { + const defaultClient = new RequestClient({ + autoRetry: true, + }); + + const customClient = new RequestClient({ + autoRetry: true, + maxRetries: 5, + }); + + expect(defaultClient.maxRetries).toBe(3); // Default value + expect(customClient.maxRetries).toBe(5); // Custom value + }); + + it("should configure max retry delay for network errors", function () { + const defaultClient = new RequestClient({ + autoRetry: true, + }); + + const customClient = new RequestClient({ + autoRetry: true, + maxRetryDelay: 5000, + }); + + expect(defaultClient.maxRetryDelay).toBe(3000); // Default value + expect(customClient.maxRetryDelay).toBe(5000); // Custom value + }); + + it("should register error interceptor when autoRetry is enabled", function () { + const 
clientWithRetry = new RequestClient({ + autoRetry: true, + }); + + const clientWithoutRetry = new RequestClient({ + autoRetry: false, + }); + + // Verify that interceptors are registered when autoRetry is enabled + expect( + clientWithRetry.axios.interceptors.response.handlers.length + ).toBeGreaterThan(0); + expect(clientWithoutRetry.axios.interceptors.response.handlers.length).toBe( + 0 + ); + }); + + it("should handle various error object structures", function () { + // Test how the implementation would handle different error structures + const errorWithCode = { code: "ECONNRESET", message: "Connection reset" }; + const errorWithoutCode = { message: "Some error" }; + const errorWithNullCode = { code: null, message: "Error with null code" }; + const errorWithEmptyCode = { code: "", message: "Error with empty code" }; + const nullError = null; + const undefinedError = undefined; + + // These are behavioral expectations based on the isRetryableError implementation + expect(errorWithCode.code).toBe("ECONNRESET"); // Would be retryable + expect(errorWithoutCode.code).toBeUndefined(); // Would not be retryable + expect(errorWithNullCode.code).toBeNull(); // Would not be retryable + expect(errorWithEmptyCode.code).toBe(""); // Would not be retryable + expect(nullError).toBeNull(); // Would not be retryable + expect(undefinedError).toBeUndefined(); // Would not be retryable + }); + + it("should have proper default configuration values", function () { + // Test various combinations of configuration options + const defaultClient = new RequestClient(); + const autoRetryClient = new RequestClient({ autoRetry: true }); + const customConfigClient = new RequestClient({ + autoRetry: true, + maxRetries: 5, + maxRetryDelay: 5000, + }); + + // Verify default values + expect(defaultClient.autoRetry).toBe(false); + expect(defaultClient.maxRetries).toBe(3); + expect(defaultClient.maxRetryDelay).toBe(3000); + + // Verify autoRetry enabled with defaults + expect(autoRetryClient.autoRetry).toBe(true); + expect(autoRetryClient.maxRetries).toBe(3); + expect(autoRetryClient.maxRetryDelay).toBe(3000); + + // Verify custom configuration + expect(customConfigClient.autoRetry).toBe(true); + expect(customConfigClient.maxRetries).toBe(5); + expect(customConfigClient.maxRetryDelay).toBe(5000); + }); +}); diff --git a/src/base/RequestClient.ts b/src/base/RequestClient.ts index 763e21d8a..4c6b17ef1 100644 --- a/src/base/RequestClient.ts +++ b/src/base/RequestClient.ts @@ -45,6 +45,14 @@ interface ExponentialBackoffResponseHandlerOptions { maxRetries: number; } +function isRetryableError(error: any): boolean { + // Check for network errors that are typically transient + if (error.code) { + return ["ECONNRESET", "ETIMEDOUT", "ECONNABORTED"].includes(error.code); + } + return false; +} + function getExponentialBackoffResponseHandler( axios: AxiosInstance, opts: ExponentialBackoffResponseHandlerOptions @@ -76,6 +84,37 @@ function getExponentialBackoffResponseHandler( }; } +function getExponentialBackoffErrorHandler( + axios: AxiosInstance, + opts: ExponentialBackoffResponseHandlerOptions +) { + const maxIntervalMillis = opts.maxIntervalMillis; + const maxRetries = opts.maxRetries; + + return function (error: any) { + const config: BackoffAxiosRequestConfig = error.config; + + if (!isRetryableError(error) || !config) { + return Promise.reject(error); + } + + const retryCount = (config.retryCount || 0) + 1; + if (retryCount <= maxRetries) { + config.retryCount = retryCount; + const baseDelay = Math.min( + maxIntervalMillis, + 
DEFAULT_INITIAL_RETRY_INTERVAL_MILLIS * Math.pow(2, retryCount) + ); + const delay = Math.floor(baseDelay * Math.random()); // Full jitter backoff + + return new Promise((resolve: (value: Promise) => void) => { + setTimeout(() => resolve(axios(config)), delay); + }); + } + return Promise.reject(error); + }; +} + class RequestClient { defaultTimeout: number; axios: AxiosInstance; @@ -96,9 +135,9 @@ class RequestClient { * @param opts.maxTotalSockets - https.Agent maxTotalSockets option * @param opts.maxFreeSockets - https.Agent maxFreeSockets option * @param opts.scheduling - https.Agent scheduling option - * @param opts.autoRetry - Enable auto-retry requests with exponential backoff on 429 responses. Defaults to false. - * @param opts.maxRetryDelay - Max retry delay in milliseconds for 429 Too Many Request response retries. Defaults to 3000. - * @param opts.maxRetries - Max number of request retries for 429 Too Many Request responses. Defaults to 3. + * @param opts.autoRetry - Enable auto-retry requests with exponential backoff on 429 responses and network errors. Defaults to false. + * @param opts.maxRetryDelay - Max retry delay in milliseconds for 429 Too Many Request response retries and network errors. Defaults to 3000. + * @param opts.maxRetries - Max number of request retries for 429 Too Many Request responses and network errors. Defaults to 3. * @param opts.validationClient - Validation client for PKCV */ constructor(opts?: RequestClient.RequestClientOptions) { @@ -146,6 +185,10 @@ class RequestClient { getExponentialBackoffResponseHandler(this.axios, { maxIntervalMillis: this.maxRetryDelay, maxRetries: this.maxRetries, + }), + getExponentialBackoffErrorHandler(this.axios, { + maxIntervalMillis: this.maxRetryDelay, + maxRetries: this.maxRetries, }) ); } @@ -421,16 +464,17 @@ namespace RequestClient { ca?: string | Buffer; /** * Enable auto-retry with exponential backoff when receiving 429 Errors from - * the API. Disabled by default. + * the API or network errors (e.g. ECONNRESET). Disabled by default. */ autoRetry?: boolean; /** - * Maximum retry delay in milliseconds for 429 Error response retries. - * Defaults to 3000. + * Maximum retry delay in milliseconds for 429 Error response retries + * and network errors. Defaults to 3000. */ maxRetryDelay?: number; /** - * Maximum number of request retries for 429 Error responses. Defaults to 3. + * Maximum number of request retries for 429 Error responses and network + * errors. Defaults to 3. */ maxRetries?: number; /**
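For reference, a minimal sketch of enabling the behavior the README hunk above documents. The `autoRetry` and `maxRetries` option names are taken directly from that hunk; the credentials are placeholders, and the default of 3 retries is shown explicitly only for illustration:

```js
// Placeholder credentials for illustration only.
const accountSid = process.env.TWILIO_ACCOUNT_SID;
const authToken = process.env.TWILIO_AUTH_TOKEN;

// Retry on 429 responses and, with this patch, transient network errors
// (ECONNRESET, ETIMEDOUT, ECONNABORTED). Disabled unless autoRetry is true.
const client = require('twilio')(accountSid, authToken, {
  autoRetry: true,
  maxRetries: 3, // the documented default
});
```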
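The delay computation in the new `getExponentialBackoffErrorHandler` mirrors the existing 429 handler: the base delay grows exponentially with the retry count, is capped at `maxRetryDelay`, and is then multiplied by a random factor (full jitter). A standalone sketch of that arithmetic, assuming a 100 ms initial interval, which is inferred from the `~100 * 2^n` comments in the new spec; the real `DEFAULT_INITIAL_RETRY_INTERVAL_MILLIS` constant is defined outside this hunk:

```js
// Assumed base interval; see the "~100 * 2^1 with jitter" comments in the spec.
const DEFAULT_INITIAL_RETRY_INTERVAL_MILLIS = 100;

// Full-jitter backoff: pick a delay uniformly in [0, min(cap, base * 2^retryCount)).
function computeRetryDelay(retryCount, maxRetryDelay) {
  const baseDelay = Math.min(
    maxRetryDelay,
    DEFAULT_INITIAL_RETRY_INTERVAL_MILLIS * Math.pow(2, retryCount)
  );
  return Math.floor(baseDelay * Math.random());
}

// Example: third retry with the default 3000 ms cap yields a delay below 800 ms.
console.log(computeRetryDelay(3, 3000));
```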