From cca8f81a57148a9406d1dc4f0f7036b98fc6bbde Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 14 Nov 2025 14:16:55 -0700 Subject: [PATCH 01/30] Extend createServicePolicy to support live network status In a future commit we will introduce changes to `network-controller` so that it will keep track of the status of each network as requests are made. These updates to `createServicePolicy` assist with that. See the changelog for a list of changes to the `ServicePolicy` API. Besides the changes listed there, the tests for `createServicePolicy` have been refactored slightly so that it is easier to maintain in the future. --- packages/controller-utils/CHANGELOG.md | 13 + .../src/create-service-policy.test.ts | 2710 ++++++++++------- .../src/create-service-policy.ts | 103 +- packages/controller-utils/src/index.ts | 2 + 4 files changed, 1757 insertions(+), 1071 deletions(-) diff --git a/packages/controller-utils/CHANGELOG.md b/packages/controller-utils/CHANGELOG.md index 8a37dad418f..2628a42f481 100644 --- a/packages/controller-utils/CHANGELOG.md +++ b/packages/controller-utils/CHANGELOG.md @@ -7,6 +7,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Added + +- Add `getCircuitState` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) + - This can be used when working with a chain of services to know whether a service's underlying circuit is open or closed. +- Add `getLastInnerFailureReason` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) + - This can be used when working with a chain of services to obtain the last error that the circuit breaker policy captured after executing the service. +- Add `onAvailable` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) + - This can be used to listen for the initial successful execution of the service, or the first successful execution after the service becomes degraded or the circuit breaks. +- Add `reset` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) + - This can be used when working with a chain of services to reset the state of the circuit breaker policy (e.g. if a primary recovers and we want to reset the failovers). +- Export `CockatielEventEmitter` and `CockatielFailureReason` from Cockatiel ([#7164](https://github.com/MetaMask/core/pull/7164)) + - These can be used to further transform types for event emitters/listeners. 
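Taken together, the additions above might be wired up roughly as follows. This is a minimal sketch, not part of the patch: it assumes `createServicePolicy` is exported from `@metamask/controller-utils` and that `getCircuitState` returns Cockatiel's `CircuitState`; the option values, endpoint URL, and function name are placeholders for illustration only.

```ts
import { CircuitState } from 'cockatiel';
import { createServicePolicy } from '@metamask/controller-utils';

// Option values here are only for illustration.
const policy = createServicePolicy({
  maxConsecutiveFailures: 3,
  degradedThreshold: 2_000,
});

// Fires on the first successful call, and again on the first success after
// the service was degraded or the circuit broke.
policy.onAvailable(() => console.log('service is available'));
policy.onDegraded(() => console.log('service is slow or intermittently failing'));
policy.onBreak((data) => console.log('circuit opened', data));

async function fetchViaPolicy() {
  try {
    // `example.test` is a placeholder endpoint.
    return await policy.execute(() => fetch('https://example.test/status'));
  } catch (error) {
    // When chaining services, the new helpers expose why the circuit opened
    // and let a caller reset it once a primary recovers (assuming
    // `getCircuitState` returns a Cockatiel `CircuitState`).
    if (policy.getCircuitState() === CircuitState.Open) {
      console.error('last failure:', policy.getLastInnerFailureReason());
      policy.reset();
    }
    throw error;
  }
}
```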
+ ## [11.15.0] ### Added diff --git a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts index c42bea58b39..1eed4c75aff 100644 --- a/packages/controller-utils/src/create-service-policy.test.ts +++ b/packages/controller-utils/src/create-service-policy.test.ts @@ -1,4 +1,4 @@ -import { handleWhen } from 'cockatiel'; +import { CircuitState, handleWhen } from 'cockatiel'; import { useFakeTimers } from 'sinon'; import type { SinonFakeTimers } from 'sinon'; @@ -32,7 +32,7 @@ describe('createServicePolicy', () => { }); it('only calls the service once before returning', async () => { - const mockService = jest.fn(() => ({ some: 'data' })); + const mockService = jest.fn(); const policy = createServicePolicy(); await policy.execute(mockService); @@ -40,10 +40,11 @@ describe('createServicePolicy', () => { expect(mockService).toHaveBeenCalledTimes(1); }); - it('does not call the listener passed to onBreak, since the circuit never opens', async () => { - const mockService = jest.fn(() => ({ some: 'data' })); + it('does not call onBreak listeners, since the circuit never opens', async () => { + const mockService = jest.fn(); const onBreakListener = jest.fn(); const policy = createServicePolicy(); + policy.onBreak(onBreakListener); await policy.execute(mockService); @@ -51,67 +52,79 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the listener passed to onDegraded if the service execution time is below the threshold', async () => { - const mockService = jest.fn(() => ({ some: 'data' })); - const onDegradedListener = jest.fn(); - const policy = createServicePolicy(); - policy.onDegraded(onDegradedListener); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const mockService = jest.fn(); + const onDegradedListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onDegraded(onDegradedListener); + + await policy.execute(mockService); + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); - await policy.execute(mockService); + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const mockService = jest.fn(); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onAvailable(onAvailableListener); - expect(onDegradedListener).not.toHaveBeenCalled(); - }); + await policy.execute(mockService); + await policy.execute(mockService); - it('calls the listener passed to onDegraded once if the service execution time is beyond the threshold', async () => { - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - const mockService = jest.fn(() => { - return new Promise((resolve) => { - setTimeout(() => resolve({ some: 'data' }), delay); - }); + expect(onAvailableListener).toHaveBeenCalledTimes(1); }); - const onDegradedListener = jest.fn(); - const policy = createServicePolicy(); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - clock.tick(delay); - await 
promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); }); - }); - describe('using a custom degraded threshold', () => { - it('does not call the listener passed to onDegraded if the service execution time below the threshold', async () => { - const degradedThreshold = 2000; - const mockService = jest.fn(() => ({ some: 'data' })); - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ degradedThreshold }); - policy.onDegraded(onDegradedListener); + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const delay = threshold + 1; + const mockService = jest.fn(() => { + return new Promise((resolve) => { + setTimeout(() => resolve(), delay); + }); + }); + const onDegradedListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onDegraded(onDegradedListener); - await policy.execute(mockService); + const promise = policy.execute(mockService); + clock.tick(delay); + await promise; - expect(onDegradedListener).not.toHaveBeenCalled(); - }); + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); - it('calls the listener passed to onDegraded once if the service execution time beyond the threshold', async () => { - const degradedThreshold = 2000; - const delay = degradedThreshold + 1; - const mockService = jest.fn(() => { - return new Promise((resolve) => { - setTimeout(() => resolve({ some: 'data' }), delay); + it('does not call onAvailable listeners', async () => { + const delay = threshold + 1; + const mockService = jest.fn(() => { + return new Promise((resolve) => { + setTimeout(() => resolve(), delay); + }); }); - }); - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ degradedThreshold }); - policy.onDegraded(onDegradedListener); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onAvailable(onAvailableListener); - const promise = policy.execute(mockService); - clock.tick(delay); - await promise; + const promise = policy.execute(mockService); + clock.tick(delay); + await promise; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -151,7 +164,7 @@ describe('createServicePolicy', () => { expect(mockService).toHaveBeenCalledTimes(1); }); - it('does not call the listener passed to onRetry', async () => { + it('does not call onRetry listeners', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; @@ -170,7 +183,7 @@ describe('createServicePolicy', () => { expect(onRetryListener).not.toHaveBeenCalled(); }); - it('does not call the listener passed to onBreak', async () => { + it('does not call onBreak listeners', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; @@ -181,6 +194,7 @@ describe('createServicePolicy', () => { (caughtError) => caughtError.message !== 'failure', ), }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -193,7 +207,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - it('does not call the listener passed to onDegraded', async () => { + it('does not call onDegraded listeners', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; @@ -215,6 +229,29 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + + it('does not call onAvailable 
listeners', async () => { + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + retryFilterPolicy: handleWhen( + (caughtError) => caughtError.message !== 'failure', + ), + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise queue + // is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); describe('using the default retry filter policy (which retries all errors)', () => { @@ -245,7 +282,7 @@ describe('createServicePolicy', () => { expect(mockService).toHaveBeenCalledTimes(1 + DEFAULT_MAX_RETRIES); }); - it('calls the listener passed to onRetry once per retry', async () => { + it('calls onRetry listeners once per retry', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; @@ -281,13 +318,14 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('does not call the listener passed to onBreak, since the max number of consecutive failures is never reached', async () => { + it('does not call onBreak listeners, since the max number of consecutive failures is never reached', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; }); const onBreakListener = jest.fn(); const policy = createServicePolicy(); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -300,7 +338,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - it('calls the listener passed to onDegraded once, since the circuit is still closed', async () => { + it('calls onDegraded listeners once with the error, since the circuit is still closed', async () => { const error = new Error('failure'); const mockService = jest.fn(() => { throw error; @@ -317,6 +355,26 @@ describe('createServicePolicy', () => { await ignoreRejection(promise); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ error }); + }); + + it('does not call onAvailable listeners', async () => { + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); }); }); @@ -341,7 +399,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('does not call the listener passed to onBreak', async () => { + it('does not call onBreak listeners', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -351,6 +409,7 @@ describe('createServicePolicy', () => { const policy = createServicePolicy({ maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -363,7 +422,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - it('calls the listener passed to onDegraded once', async () => { + it('calls onDegraded listeners once with the error', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -383,6 +442,29 @@ describe('createServicePolicy', () => { await ignoreRejection(promise); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ error }); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); }); }); @@ -406,7 +488,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('calls the listener passed to onBreak once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -416,6 +498,7 @@ describe('createServicePolicy', () => { const policy = createServicePolicy({ maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -429,7 +512,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the listener passed to onDegraded, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -451,6 +534,28 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); + it('throws a BrokenCircuitError instead of whatever error the service produces if the service is executed again', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; const error = new Error('failure'); @@ -501,7 +606,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the listener passed to onBreak once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -511,6 +616,7 @@ describe('createServicePolicy', () => { const policy = createServicePolicy({ maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -524,7 +630,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the listener passed to onDegraded, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -545,6 +651,28 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -579,7 +707,7 @@ describe('createServicePolicy', () => { expect(mockService).toHaveBeenCalledTimes(1 + maxRetries); }); - it('calls the onRetry callback once per retry', async () => { + it('calls onRetry listeners once per retry', async () => { const maxRetries = 5; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -620,7 +748,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -628,6 +756,7 @@ describe('createServicePolicy', () => { }); const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -640,7 +769,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - it('calls the onDegraded callback once', async () => { + it('calls onDegraded listeners once with the error', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -658,6 +787,27 @@ describe('createServicePolicy', () => { await ignoreRejection(promise); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ error }); + }); + + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); }); }); @@ -679,7 +829,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -687,6 +837,7 @@ describe('createServicePolicy', () => { }); const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -700,7 +851,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the onDegraded callback, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -720,6 +871,26 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); + it('throws a BrokenCircuitError instead of whatever error the service produces if the policy is executed again', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; const error = new Error('failure'); @@ -765,7 +936,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -773,6 +944,7 @@ describe('createServicePolicy', () => { }); const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -786,7 +958,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the onDegraded callback, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; const error = new Error('failure'); const mockService = jest.fn(() => { @@ -805,6 +977,26 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); @@ -831,7 +1023,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 2; const error = new Error('failure'); @@ -843,6 +1035,7 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -855,7 +1048,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - it('calls the onDegraded callback once', async () => { + it('calls onDegraded listeners once with the error', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 2; const error = new Error('failure'); @@ -877,6 +1070,31 @@ describe('createServicePolicy', () => { await ignoreRejection(promise); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ error }); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 2; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); }); }); @@ -902,7 +1120,7 @@ describe('createServicePolicy', () => { await expect(promise).rejects.toThrow(error); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 1; const error = new Error('failure'); @@ -914,6 +1132,7 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -927,7 +1146,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the onDegraded callback, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 1; const error = new Error('failure'); @@ -951,6 +1170,30 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + it('never calls onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 1; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); + it('throws a BrokenCircuitError instead of whatever error the service produces if the policy is executed again', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 1; @@ -1005,7 +1248,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures; const error = new Error('failure'); @@ -1017,6 +1260,7 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -1030,7 +1274,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('never calls the onDegraded callback, since the circuit is open', async () => { + it('never calls onDegraded listeners, since the circuit is open', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures; const error = new Error('failure'); @@ -1053,6 +1297,30 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures; + const error = new Error('failure'); + const mockService = jest.fn(() => { + throw error; + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -1104,9 +1372,7 @@ describe('createServicePolicy', () => { } throw new Error('failure'); }; - const onBreakListener = jest.fn(); const policy = createServicePolicy(); - policy.onBreak(onBreakListener); const promise = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise queue @@ -1117,7 +1383,7 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback, since the max number of consecutive failures is never reached', async () => { + it('does not call onBreak listeners, since the max number of consecutive failures is never reached', async () => { let invocationCounter = 0; const mockService = () => { invocationCounter += 1; @@ -1128,6 +1394,7 @@ describe('createServicePolicy', () => { }; const onBreakListener = jest.fn(); const policy = createServicePolicy(); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -1140,173 +1407,20 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw new Error('failure'); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy(); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - let invocationCounter = 0; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); - } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy(); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw new Error('failure'); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - let invocationCounter = 0; - const delay = degradedThreshold + 1; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); - } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - }); - - describe('using a custom max number of consecutive failures', () => { - describe('if the initial run + retries is less than the max number of consecutive failures', () => { - it('returns what the service returns', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw new Error('failure'); - }; - const onBreakListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); - policy.onBreak(onBreakListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - - expect(await promise).toStrictEqual({ some: 'data' }); - }); - - it('does not call the onBreak callback', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw new Error('failure'); - }; - const onBreakListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); - policy.onBreak(onBreakListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onBreakListener).not.toHaveBeenCalled(); - }); - - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { let invocationCounter = 0; const mockService = () => { invocationCounter += 1; @@ -1316,9 +1430,7 @@ describe('createServicePolicy', () => { throw new Error('failure'); }; const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); + const policy = createServicePolicy(options); policy.onDegraded(onDegradedListener); const promise = policy.execute(mockService); @@ -1331,54 +1443,55 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; + it('calls onAvailable listeners once, even if the service is called more than once', async () => { let invocationCounter = 0; const mockService = () => { invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); - } - }); + if ( + invocationCounter > 0 && + invocationCounter % (DEFAULT_MAX_RETRIES + 1) === 0 + ) { + return { some: 'data' }; + } + throw new Error('failure'); }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onAvailable(onAvailableListener); - const promise = policy.execute(mockService); + const promise1 = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise // queue is enough to prevent this test from running indefinitely. 
// eslint-disable-next-line @typescript-eslint/no-floating-promises clock.runAllAsync(); - await promise; + await promise1; + const promise2 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise2; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledTimes(1); }); }); - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { let invocationCounter = 0; + const delay = threshold + 1; const mockService = () => { invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw new Error('failure'); + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); }; const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - degradedThreshold, - }); + const policy = createServicePolicy(options); policy.onDegraded(onDegradedListener); const promise = policy.execute(mockService); @@ -1388,14 +1501,12 @@ describe('createServicePolicy', () => { clock.runAllAsync(); await promise; - expect(onDegradedListener).not.toHaveBeenCalled(); + expect(onDegradedListener).toHaveBeenCalledTimes(1); }); - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; - const delay = degradedThreshold + 1; + it('does not call onAvailable listeners', async () => { let invocationCounter = 0; + const delay = DEFAULT_DEGRADED_THRESHOLD + 1; const mockService = () => { invocationCounter += 1; return new Promise((resolve, reject) => { @@ -1406,12 +1517,9 @@ describe('createServicePolicy', () => { } }); }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(options); + policy.onAvailable(onAvailableListener); const promise = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise @@ -1420,14 +1528,16 @@ describe('createServicePolicy', () => { clock.runAllAsync(); await promise; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).not.toHaveBeenCalled(); }); }); }); + }); - describe('if the initial run + retries is equal to the max number of consecutive failures', () => { + describe('using a custom max number of consecutive failures', () => { + describe('if the initial run + retries is less than the max number of consecutive failures', () => { it('returns what the service returns', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; let invocationCounter = 0; const mockService = () => { invocationCounter += 1; @@ 
-1436,11 +1546,9 @@ describe('createServicePolicy', () => { } throw new Error('failure'); }; - const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxConsecutiveFailures, }); - policy.onBreak(onBreakListener); const promise = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise @@ -1451,21 +1559,21 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + it('does not call onBreak listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; let invocationCounter = 0; - const error = new Error('failure'); const mockService = () => { invocationCounter += 1; if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { return { some: 'data' }; } - throw error; + throw new Error('failure'); }; const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -1478,125 +1586,336 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw new Error('failure'); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onDegraded(onDegradedListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).not.toHaveBeenCalled(); - }); + expect(onDegradedListener).not.toHaveBeenCalled(); + }); - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; } + throw new Error('failure'); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, + policy.onAvailable(onAvailableListener); + + const promise1 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise1; + const promise2 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise2; + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 2; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onAvailableListener).not.toHaveBeenCalled(); }); - policy.onDegraded(onDegradedListener); + }); + }); + }); + + describe('if the initial run + retries is equal to the max number of consecutive failures', () => { + it('returns what the service returns', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw new Error('failure'); + }; + const policy = createServicePolicy({ + maxConsecutiveFailures, + }); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + expect(await promise).toStrictEqual({ some: 'data' }); + }); - expect(onDegradedListener).toHaveBeenCalledTimes(1); + it('does not call onBreak listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw error; + }; + const onBreakListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, }); - }); - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + policy.onBreak(onBreakListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).not.toHaveBeenCalled(); - }); + expect(onBreakListener).not.toHaveBeenCalled(); + }); - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; - const delay = degradedThreshold + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + return { some: 'data' }; } + throw error; + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); + + const promise1 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise1; + const promise2 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise2; + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES + 1; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -1613,11 +1932,9 @@ describe('createServicePolicy', () => { } throw error; }; - const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxConsecutiveFailures, }); - policy.onBreak(onBreakListener); const promise = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise @@ -1631,7 +1948,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; let invocationCounter = 0; const error = new Error('failure'); @@ -1646,6 +1963,7 @@ describe('createServicePolicy', () => { const policy = createServicePolicy({ maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -1659,7 +1977,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('does not call the onDegraded callback', async () => { + it('does not call onDegraded listeners', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; let invocationCounter = 0; const error = new Error('failure'); @@ -1686,64 +2004,107 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); - describe(`using the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ - maxConsecutiveFailures, - }); + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await ignoreRejection(firstExecution); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - const result = await policy.execute(mockService); + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); - expect(result).toStrictEqual({ some: 'data' }); - }); + expect(onAvailableListener).not.toHaveBeenCalled(); }); - describe('using a custom circuit break duration', () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - // This has to be high enough to exceed the exponential backoff - const circuitBreakDuration = 5_000; - const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ - maxConsecutiveFailures, - circuitBreakDuration, - }); + describe('after the circuit break duration has elapsed', () => { + describe.each([ + { + desc: `using the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: {}, + }, + { + desc: 'using a custom circuit break duration', + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: { + // This has to be high enough to exceed the exponential backoff + circuitBreakDuration: 5_000, + }, + }, + ])('$desc', ({ duration, options }) => { + it('returns what the service returns', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw error; + }; + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await ignoreRejection(firstExecution); - clock.tick(circuitBreakDuration); - const result = await policy.execute(mockService); + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + const result = await policy.execute(mockService); + + expect(result).toStrictEqual({ some: 'data' }); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= DEFAULT_MAX_RETRIES + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); - expect(result).toStrictEqual({ some: 'data' }); + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + await policy.execute(mockService); + await policy.execute(mockService); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); }); }); }); @@ -1809,7 +2170,7 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; let invocationCounter = 0; const error = new Error('failure'); @@ -1822,6 +2183,7 @@ describe('createServicePolicy', () => { }; const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -1834,121 +2196,127 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ maxRetries }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); - } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ maxRetries }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; - const delay = degradedThreshold + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); + throw error; + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ ...options, maxRetries }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ ...options, maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + await policy.execute(mockService); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ ...options, maxRetries }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 2; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ ...options, maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -1976,7 +2344,7 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; let invocationCounter = 0; const error = new Error('failure'); @@ -1989,6 +2357,7 @@ describe('createServicePolicy', () => { }; const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -2001,121 +2370,127 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ maxRetries }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', () => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ maxRetries }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; - const delay = degradedThreshold + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + throw error; + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= maxRetries + 1) { + return { some: 'data' }; } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - degradedThreshold, + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + await policy.execute(mockService); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; + const delay = DEFAULT_DEGRADED_THRESHOLD + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES - 1; + const delay = DEFAULT_DEGRADED_THRESHOLD + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onAvailableListener).not.toHaveBeenCalled(); }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); }); }); }); @@ -2147,7 +2522,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; let invocationCounter = 0; const error = new Error('failure'); @@ -2160,6 +2535,7 @@ describe('createServicePolicy', () => { }; const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -2173,7 +2549,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('does not call the onDegraded callback', async () => { + it('does not call onDegraded listeners', async () => { const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; let invocationCounter = 0; const error = new Error('failure'); @@ -2198,66 +2574,99 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); - describe(`using the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ maxRetries }); + it('does not call onAvailable listeners', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries }); + policy.onAvailable(onAvailableListener); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await ignoreRejection(firstExecution); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - const result = await policy.execute(mockService); + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); - expect(result).toStrictEqual({ some: 'data' }); - }); + expect(onAvailableListener).not.toHaveBeenCalled(); }); - describe('using a custom circuit break duration', () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - // This has to be high enough to exceed the exponential backoff - const circuitBreakDuration = 50_000; - const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ - maxRetries, - circuitBreakDuration, - }); + describe('after the circuit break duration has elapsed', () => { + describe.each([ + { + desc: `the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: {}, + }, + { + desc: 'a custom circuit break duration', + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: { + // This has to be high enough to exceed the exponential backoff + circuitBreakDuration: 50_000, + }, + }, + ])('using $desc', ({ duration, options }) => { + it('returns what the service returns', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const policy = createServicePolicy({ maxRetries, ...options }); + + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + const result = await policy.execute(mockService); + + expect(result).toStrictEqual({ some: 'data' }); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxRetries = DEFAULT_MAX_CONSECUTIVE_FAILURES; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ maxRetries, ...options }); + policy.onAvailable(onAvailableListener); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await expect(firstExecution).rejects.toThrow( - new Error( - 'Execution prevented because the circuit breaker is open', - ), - ); - clock.tick(circuitBreakDuration); - const result = await policy.execute(mockService); + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + await policy.execute(mockService); + await policy.execute(mockService); - expect(result).toStrictEqual({ some: 'data' }); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); }); }); }); @@ -2277,12 +2686,10 @@ describe('createServicePolicy', () => { } throw error; }; - const onBreakListener = jest.fn(); const policy = createServicePolicy({ maxRetries, maxConsecutiveFailures, }); - policy.onBreak(onBreakListener); const promise = policy.execute(mockService); // It's safe not to await this promise; adding it to the promise @@ -2293,7 +2700,7 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 2; let invocationCounter = 0; @@ -2310,145 +2717,165 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); - policy.onBreak(onBreakListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onBreakListener).not.toHaveBeenCalled(); - }); - - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 2; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 2; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); - } - }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 2; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + policy.onBreak(onBreakListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).not.toHaveBeenCalled(); - }); + expect(onBreakListener).not.toHaveBeenCalled(); + }); - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 2; - const delay = degradedThreshold + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 2; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; } + throw error; + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 2; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); + + const promise1 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise1; + const promise2 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise2; + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 2; + const delay = DEFAULT_DEGRADED_THRESHOLD + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 2; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -2480,7 +2907,7 @@ describe('createServicePolicy', () => { expect(await promise).toStrictEqual({ some: 'data' }); }); - it('does not call the onBreak callback', async () => { + it('does not call onBreak listeners', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures - 1; let invocationCounter = 0; @@ -2497,6 +2924,7 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -2509,133 +2937,152 @@ describe('createServicePolicy', () => { expect(onBreakListener).not.toHaveBeenCalled(); }); - describe(`using the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 1; - const delay = DEFAULT_DEGRADED_THRESHOLD + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + describe.each([ + { + desc: `the default degraded threshold (${DEFAULT_DEGRADED_THRESHOLD})`, + threshold: DEFAULT_DEGRADED_THRESHOLD, + options: {}, + }, + { + desc: 'a custom degraded threshold', + threshold: 2000, + options: { degradedThreshold: 2000 }, + }, + ])('using $desc', ({ threshold, options }) => { + describe('if the service execution time is below the threshold', () => { + it('does not call onDegraded listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; } + throw error; + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - }); - }); - - describe('using a custom degraded threshold', () => { - it('does not call the onDegraded callback if the service execution time is below the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 1; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); - - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; - - expect(onDegradedListener).not.toHaveBeenCalled(); - }); - - it('calls the onDegraded callback once if the service execution time is beyond the threshold', async () => { - const degradedThreshold = 2000; - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures - 1; - const delay = degradedThreshold + 1; - let invocationCounter = 0; - const mockService = () => { - invocationCounter += 1; - return new Promise((resolve, reject) => { - if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { - setTimeout(() => resolve({ some: 'data' }), delay); - } else { - reject(new Error('failure')); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).not.toHaveBeenCalled(); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 1; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter % (maxRetries + 1) === 0) { + return { some: 'data' }; } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, }); - }; - const onDegradedListener = jest.fn(); - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - degradedThreshold, - }); - policy.onDegraded(onDegradedListener); + policy.onAvailable(onAvailableListener); + + const promise1 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise1; + const promise2 = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise2; + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('if the service execution time is beyond the threshold', () => { + it('calls onDegraded listeners once', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 1; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onDegradedListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onDegraded(onDegradedListener); + + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + }); + + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures - 1; + const delay = threshold + 1; + let invocationCounter = 0; + const mockService = () => { + invocationCounter += 1; + return new Promise((resolve, reject) => { + if (invocationCounter === DEFAULT_MAX_RETRIES + 1) { + setTimeout(() => resolve({ some: 'data' }), delay); + } else { + reject(new Error('failure')); + } + }); + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); - const promise = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await promise; + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await promise; - expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); }); }); }); @@ -2672,7 +3119,7 @@ describe('createServicePolicy', () => { ); }); - it('calls the onBreak callback once with the error', async () => { + it('calls onBreak listeners once with the error', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures; let invocationCounter = 0; @@ -2689,6 +3136,7 @@ describe('createServicePolicy', () => { maxRetries, maxConsecutiveFailures, }); + policy.onBreak(onBreakListener); const promise = policy.execute(mockService); @@ -2702,7 +3150,7 @@ describe('createServicePolicy', () => { expect(onBreakListener).toHaveBeenCalledWith({ error }); }); - it('does not call the onDegraded callback', async () => { + it('does not call onDegraded listeners', async () => { const maxConsecutiveFailures = 5; const maxRetries = maxConsecutiveFailures; let invocationCounter = 0; @@ -2731,78 +3179,220 @@ describe('createServicePolicy', () => { expect(onDegradedListener).not.toHaveBeenCalled(); }); - describe(`using the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - }); + it('does not call onAvailable listeners', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ 
+ maxRetries, + maxConsecutiveFailures, + }); + policy.onAvailable(onAvailableListener); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await ignoreRejection(firstExecution); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - const result = await policy.execute(mockService); + const promise = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(promise); - expect(result).toStrictEqual({ some: 'data' }); - }); + expect(onAvailableListener).not.toHaveBeenCalled(); }); - describe('using a custom circuit break duration', () => { - it('returns what the service returns if it is successfully called again after the circuit break duration has elapsed', async () => { - // This has to be high enough to exceed the exponential backoff - const circuitBreakDuration = 5_000; - const maxConsecutiveFailures = 5; - const maxRetries = maxConsecutiveFailures; - let invocationCounter = 0; - const error = new Error('failure'); - const mockService = () => { - invocationCounter += 1; - if (invocationCounter === maxRetries + 1) { - return { some: 'data' }; - } - throw error; - }; - const policy = createServicePolicy({ - maxRetries, - maxConsecutiveFailures, - circuitBreakDuration, - }); + describe('after the circuit break duration has elapsed', () => { + describe.each([ + { + desc: `the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: {}, + }, + { + desc: 'a custom circuit break duration', + duration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: { + // This has to be high enough to exceed the exponential backoff + circuitBreakDuration: 5_000, + }, + }, + ])('using $desc', ({ duration, options }) => { + it('returns what the service returns', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter === maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); - const firstExecution = policy.execute(mockService); - // It's safe not to await this promise; adding it to the promise - // queue is enough to prevent this test from running indefinitely. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.runAllAsync(); - await expect(firstExecution).rejects.toThrow( - new Error( - 'Execution prevented because the circuit breaker is open', - ), - ); - clock.tick(circuitBreakDuration); - const result = await policy.execute(mockService); + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. 
+ // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + const result = await policy.execute(mockService); + + expect(result).toStrictEqual({ some: 'data' }); + }); + + it('calls onAvailable listeners once, even if the service is called more than once', async () => { + const maxConsecutiveFailures = 5; + const maxRetries = maxConsecutiveFailures; + let invocationCounter = 0; + const error = new Error('failure'); + const mockService = () => { + invocationCounter += 1; + if (invocationCounter >= maxRetries + 1) { + return { some: 'data' }; + } + throw error; + }; + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + maxRetries, + maxConsecutiveFailures, + ...options, + }); + policy.onAvailable(onAvailableListener); + + const firstExecution = policy.execute(mockService); + // It's safe not to await this promise; adding it to the promise + // queue is enough to prevent this test from running indefinitely. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.runAllAsync(); + await ignoreRejection(firstExecution); + clock.tick(duration); + await policy.execute(mockService); + await policy.execute(mockService); - expect(result).toStrictEqual({ some: 'data' }); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); }); }); }); }); }); }); + + describe('reset', () => { + it('resets the state of the circuit to "closed"', async () => { + let invocationCounter = 0; + const mockService = jest.fn(() => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_CONSECUTIVE_FAILURES + 1) { + return { some: 'data' }; + } + throw new Error('failure'); + }); + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + // Retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + expect(policy.getCircuitState()).toBe(CircuitState.Open); + + policy.reset(); + + expect(policy.getCircuitState()).toBe(CircuitState.Closed); + }); + + it('allows the service to be executed successfully again if its circuit has broken', async () => { + let invocationCounter = 0; + const mockService = jest.fn(() => { + invocationCounter += 1; + if (invocationCounter === DEFAULT_MAX_CONSECUTIVE_FAILURES + 1) { + return { some: 'data' }; + } + throw new Error('failure'); + }); + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + // Retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + + policy.reset(); + + expect(await policy.execute(mockService)).toStrictEqual({ some: 'data' }); + }); + + it('calls onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes successfully again', async () => { + let invocationCounter = 0; + const mockService = jest.fn(() => { + invocationCounter += 1; + if ( + invocationCounter === 1 || + invocationCounter === DEFAULT_MAX_CONSECUTIVE_FAILURES + 2 + ) { + return { some: 'data' }; + } + throw new Error('failure'); + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + policy.onAvailable(onAvailableListener); + + // Execute the service successfully once + await 
policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + + // Execute and retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + + policy.reset(); + + await policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(2); + }); + + it('allows the service to be executed unsuccessfully again if its circuit has broken', async () => { + const mockService = jest.fn(() => { + throw new Error('failure'); + }); + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + // Retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + + policy.reset(); + + await expect(policy.execute(mockService)).rejects.toThrow('failure'); + }); + }); }); /** diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index 3860caad532..d28822c93f3 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -23,6 +23,7 @@ import type { export { BrokenCircuitError, + CockatielEventEmitter, CircuitState, ConstantBackoff, ExponentialBackoff, @@ -30,7 +31,7 @@ export { handleWhen, }; -export type { CockatielEvent }; +export type { CockatielEvent, FailureReason as CockatielFailureReason }; /** * The options for `createServicePolicy`. @@ -85,12 +86,26 @@ export type ServicePolicy = IPolicy & { * maximum consecutive failures is reached. */ circuitBreakDuration: number; + /** + * @returns The state of the underlying circuit. + */ + getCircuitState: () => CircuitState; + /** + * @returns The last failure reason that the retry policy captured (or + * `undefined` if the last execution of the service was successful). + */ + getLastInnerFailureReason: () => FailureReason | undefined; /** * If the circuit is open and ongoing requests are paused, returns the number * of milliseconds before the requests will be attempted again. If the circuit * is not open, returns null. */ getRemainingCircuitOpenDuration: () => number | null; + /** + * Resets the internal circuit breaker policy (if it is open, it will now be + * closed). + */ + reset: () => void; /** * The Cockatiel retry policy that the service policy uses internally. */ @@ -108,6 +123,12 @@ export type ServicePolicy = IPolicy & { * number of consecutive failures has been reached. */ onDegraded: CockatielEvent | void>; + /** + * A function which is called when the service succeeds for the first time, + * or when the service fails enough times to cause the circuit to break and + * then recovers. + */ + onAvailable: CockatielEvent; /** * A function which will be called by the retry policy each time the service * fails and the policy kicks off a timer to re-run the service. This is @@ -249,6 +270,8 @@ export function createServicePolicy( backoff = new ExponentialBackoff(), } = options; + let availabilityStatus: 'unknown' | 'available' = 'unknown'; + const retryPolicy = retry(retryFilterPolicy, { // Note that although the option here is called "max attempts", it's really // maximum number of *retries* (attempts past the initial attempt). 
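For orientation, a minimal usage sketch of the `ServicePolicy` members documented above; the `fetchData` service, the option values, and the logging are illustrative assumptions rather than part of this changeset:

    import { CircuitState, createServicePolicy } from '@metamask/controller-utils';

    // A stand-in for whatever service the policy wraps; not defined by this package.
    declare function fetchData(): Promise<{ some: string }>;

    async function main() {
      const policy = createServicePolicy({
        maxRetries: 3,
        maxConsecutiveFailures: 6,
      });

      policy.onAvailable(() => {
        // Fires on the first successful execution, and again on the first
        // success after the circuit has broken and the service recovers.
        console.log('service is available');
      });

      console.log(await policy.execute(fetchData));

      if (policy.getCircuitState() === CircuitState.Open) {
        // If the circuit has broken, reset() closes it again so the service
        // can be retried immediately.
        policy.reset();
      }
    }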
@@ -259,6 +282,7 @@ export function createServicePolicy( }); const onRetry = retryPolicy.onRetry.bind(retryPolicy); + const consecutiveBreaker = new ConsecutiveBreaker(maxConsecutiveFailures); const circuitBreakerPolicy = circuitBreaker(handleWhen(isServiceFailure), { // While the circuit is open, any additional invocations of the service // passed to the policy (either via automatic retries or by manually @@ -267,36 +291,67 @@ export function createServicePolicy( // service will be allowed to run again. If the service succeeds, the // circuit will close, otherwise it will remain open. halfOpenAfter: circuitBreakDuration, - breaker: new ConsecutiveBreaker(maxConsecutiveFailures), + breaker: consecutiveBreaker, }); let internalCircuitState: InternalCircuitState = getInternalCircuitState( circuitBreakerPolicy.state, ); - circuitBreakerPolicy.onStateChange((state) => { - internalCircuitState = getInternalCircuitState(state); + circuitBreakerPolicy.onStateChange((circuitState) => { + internalCircuitState = getInternalCircuitState(circuitState); }); const onBreak = circuitBreakerPolicy.onBreak.bind(circuitBreakerPolicy); const onDegradedEventEmitter = new CockatielEventEmitter | void>(); + const onDegraded = onDegradedEventEmitter.addListener; + + const onAvailableEventEmitter = new CockatielEventEmitter(); + const onAvailable = onAvailableEventEmitter.addListener; + + let lastInnerFailureReason: FailureReason | undefined; retryPolicy.onGiveUp((data) => { if (circuitBreakerPolicy.state === CircuitState.Closed) { onDegradedEventEmitter.emit(data); } }); - retryPolicy.onSuccess(({ duration }) => { - if ( - circuitBreakerPolicy.state === CircuitState.Closed && - duration > degradedThreshold - ) { - onDegradedEventEmitter.emit(); + retryPolicy.onSuccess((data) => { + lastInnerFailureReason = undefined; + + if (circuitBreakerPolicy.state === CircuitState.Closed) { + if (data.duration > degradedThreshold) { + onDegradedEventEmitter.emit(); + } else if (availabilityStatus !== 'available') { + availabilityStatus = 'available'; + onAvailableEventEmitter.emit(); + } } }); - const onDegraded = onDegradedEventEmitter.addListener; + retryPolicy.onFailure((event) => { + lastInnerFailureReason = event.reason; + }); // Every time the retry policy makes an attempt, it executes the circuit // breaker policy, which executes the service. + // + // Calling: + // + // policy.execute(() => { + // // do what the service does + // }) + // + // is equivalent to: + // + // retryPolicy.execute(() => { + // circuitBreakerPolicy.execute(() => { + // // do what the service does + // }); + // }); + // + // So if the retry policy succeeds or fails, it is because the circuit breaker + // policy succeeded or failed. And if there are any event listeners registered + // on the retry policy, by the time they are called, the state of the circuit + // breaker will have already changed. 
const policy = wrap(retryPolicy, circuitBreakerPolicy); const getRemainingCircuitOpenDuration = () => { @@ -306,14 +361,40 @@ export function createServicePolicy( return null; }; + const getCircuitState = () => { + return circuitBreakerPolicy.state; + }; + + const getLastInnerFailureReason = () => { + return lastInnerFailureReason; + }; + + const reset = () => { + // Set the state of the policy to "isolated" regardless of its current state + const { dispose } = circuitBreakerPolicy.isolate(); + // Reset the state to "closed" + dispose(); + + // Reset the counter on the breaker as well + consecutiveBreaker.success(); + + // Re-initialize the availability status so that if the service is executed + // successfully, onAvailable listeners will be called again + availabilityStatus = 'unknown'; + }; + return { ...policy, circuitBreakerPolicy, circuitBreakDuration, + getCircuitState, + getLastInnerFailureReason, getRemainingCircuitOpenDuration, + reset, retryPolicy, onBreak, onDegraded, + onAvailable, onRetry, }; } diff --git a/packages/controller-utils/src/index.ts b/packages/controller-utils/src/index.ts index f6de7c26f38..c07b7df7dfa 100644 --- a/packages/controller-utils/src/index.ts +++ b/packages/controller-utils/src/index.ts @@ -1,6 +1,7 @@ export { BrokenCircuitError, CircuitState, + CockatielEventEmitter, ConstantBackoff, DEFAULT_CIRCUIT_BREAK_DURATION, DEFAULT_DEGRADED_THRESHOLD, @@ -14,6 +15,7 @@ export { export type { CockatielEvent, CreateServicePolicyOptions, + CockatielFailureReason, ServicePolicy, } from './create-service-policy'; export { From 6a3cff1ea7fa4cb419dd416fffee3bb6439ad34f Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 14 Nov 2025 17:11:00 -0700 Subject: [PATCH 02/30] Fix tests --- packages/controller-utils/src/index.test.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/packages/controller-utils/src/index.test.ts b/packages/controller-utils/src/index.test.ts index f22633a8b08..1167bdcdfad 100644 --- a/packages/controller-utils/src/index.test.ts +++ b/packages/controller-utils/src/index.test.ts @@ -6,6 +6,7 @@ describe('@metamask/controller-utils', () => { Array [ "BrokenCircuitError", "CircuitState", + "CockatielEventEmitter", "ConstantBackoff", "DEFAULT_CIRCUIT_BREAK_DURATION", "DEFAULT_DEGRADED_THRESHOLD", From c08f3984079143fa283efe5abc6cfecfe329cc89 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 09:09:38 -0700 Subject: [PATCH 03/30] Add more tests --- .../src/create-service-policy.test.ts | 53 +++++++++++++++++++ 1 file changed, 53 insertions(+) diff --git a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts index 1eed4c75aff..7a57ab9c0f2 100644 --- a/packages/controller-utils/src/create-service-policy.test.ts +++ b/packages/controller-utils/src/create-service-policy.test.ts @@ -3293,6 +3293,59 @@ describe('createServicePolicy', () => { }); }); + describe.only('getRemainingCircuitOpenDuration', () => { + it('returns the number of milliseconds before the circuit will transition from open to half-open', async () => { + const mockService = () => { + throw new Error('failure'); + }; + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + // Retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + clock.tick(1000); + + expect(policy.getRemainingCircuitOpenDuration()).toBe( + 
DEFAULT_CIRCUIT_BREAK_DURATION - 1000, + ); + }); + + it('returns null if the circuit is closed', () => { + const policy = createServicePolicy(); + + expect(policy.getRemainingCircuitOpenDuration()).toBeNull(); + }); + }); + + describe.only('getCircuitState', () => { + it('returns the state of the circuit', async () => { + const mockService = () => { + throw new Error('failure'); + }; + const policy = createServicePolicy(); + policy.onRetry(() => { + clock.next(); + }); + + expect(policy.getCircuitState()).toBe(CircuitState.Closed); + + // Retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + expect(policy.getCircuitState()).toBe(CircuitState.Open); + + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + const promise = ignoreRejection(policy.execute(mockService)); + expect(policy.getCircuitState()).toBe(CircuitState.HalfOpen); + await promise; + expect(policy.getCircuitState()).toBe(CircuitState.Open); + }); + }); + describe('reset', () => { it('resets the state of the circuit to "closed"', async () => { let invocationCounter = 0; From 5e0e3e1683ca11000a747fa81d4d0ffab2152dc7 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 09:15:32 -0700 Subject: [PATCH 04/30] No need for getLastInnerFailureReason --- packages/controller-utils/CHANGELOG.md | 2 -- .../src/create-service-policy.test.ts | 4 ++-- .../src/create-service-policy.ts | 16 ---------------- 3 files changed, 2 insertions(+), 20 deletions(-) diff --git a/packages/controller-utils/CHANGELOG.md b/packages/controller-utils/CHANGELOG.md index 2628a42f481..f16dff306c8 100644 --- a/packages/controller-utils/CHANGELOG.md +++ b/packages/controller-utils/CHANGELOG.md @@ -11,8 +11,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Add `getCircuitState` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) - This can be used when working with a chain of services to know whether a service's underlying circuit is open or closed. -- Add `getLastInnerFailureReason` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) - - This can be used when working with a chain of services to obtain the last error that the circuit breaker policy captured after executing the service. - Add `onAvailable` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) - This can be used to listen for the initial successful execution of the service, or the first successful execution after the service becomes degraded or the circuit breaks. 
- Add `reset` method to `ServicePolicy` ([#7164](https://github.com/MetaMask/core/pull/7164)) diff --git a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts index 7a57ab9c0f2..2e7f8fa1068 100644 --- a/packages/controller-utils/src/create-service-policy.test.ts +++ b/packages/controller-utils/src/create-service-policy.test.ts @@ -3293,7 +3293,7 @@ describe('createServicePolicy', () => { }); }); - describe.only('getRemainingCircuitOpenDuration', () => { + describe('getRemainingCircuitOpenDuration', () => { it('returns the number of milliseconds before the circuit will transition from open to half-open', async () => { const mockService = () => { throw new Error('failure'); @@ -3320,7 +3320,7 @@ describe('createServicePolicy', () => { }); }); - describe.only('getCircuitState', () => { + describe('getCircuitState', () => { it('returns the state of the circuit', async () => { const mockService = () => { throw new Error('failure'); diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index d28822c93f3..6353e078600 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -90,11 +90,6 @@ export type ServicePolicy = IPolicy & { * @returns The state of the underlying circuit. */ getCircuitState: () => CircuitState; - /** - * @returns The last failure reason that the retry policy captured (or - * `undefined` if the last execution of the service was successful). - */ - getLastInnerFailureReason: () => FailureReason | undefined; /** * If the circuit is open and ongoing requests are paused, returns the number * of milliseconds before the requests will be attempted again. If the circuit @@ -309,15 +304,12 @@ export function createServicePolicy( const onAvailableEventEmitter = new CockatielEventEmitter(); const onAvailable = onAvailableEventEmitter.addListener; - let lastInnerFailureReason: FailureReason | undefined; retryPolicy.onGiveUp((data) => { if (circuitBreakerPolicy.state === CircuitState.Closed) { onDegradedEventEmitter.emit(data); } }); retryPolicy.onSuccess((data) => { - lastInnerFailureReason = undefined; - if (circuitBreakerPolicy.state === CircuitState.Closed) { if (data.duration > degradedThreshold) { onDegradedEventEmitter.emit(); @@ -327,9 +319,6 @@ export function createServicePolicy( } } }); - retryPolicy.onFailure((event) => { - lastInnerFailureReason = event.reason; - }); // Every time the retry policy makes an attempt, it executes the circuit // breaker policy, which executes the service. 
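The `onGiveUp`/`onSuccess` handlers shown just above are what decide whether a run emits `onDegraded` or `onAvailable`. As a brief sketch of how a consumer might combine these events to track a coarse status (the `status` variable and its values are illustrative, not part of this changeset):

    import { createServicePolicy } from '@metamask/controller-utils';

    type Status = 'unknown' | 'available' | 'degraded' | 'unavailable';

    const policy = createServicePolicy();
    let status: Status = 'unknown';

    // Fires on the first success, or the first success after a break and
    // recovery, so it is not emitted redundantly.
    policy.onAvailable(() => {
      status = 'available';
    });
    // Fires when retries are exhausted, or when a successful call exceeds the
    // degraded threshold.
    policy.onDegraded(() => {
      status = 'degraded';
    });
    // Fires when the circuit opens after too many consecutive failures.
    policy.onBreak(() => {
      status = 'unavailable';
    });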
@@ -365,10 +354,6 @@ export function createServicePolicy( return circuitBreakerPolicy.state; }; - const getLastInnerFailureReason = () => { - return lastInnerFailureReason; - }; - const reset = () => { // Set the state of the policy to "isolated" regardless of its current state const { dispose } = circuitBreakerPolicy.isolate(); @@ -388,7 +373,6 @@ export function createServicePolicy( circuitBreakerPolicy, circuitBreakDuration, getCircuitState, - getLastInnerFailureReason, getRemainingCircuitOpenDuration, reset, retryPolicy, From e2eba7ac336c941eef04f9804230868a0c1a78ec Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 09:41:50 -0700 Subject: [PATCH 05/30] Fix an issue with onAvailable --- .../src/create-service-policy.test.ts | 129 ++++++++++++++++-- .../src/create-service-policy.ts | 11 +- 2 files changed, 130 insertions(+), 10 deletions(-) diff --git a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts index 2e7f8fa1068..76021081563 100644 --- a/packages/controller-utils/src/create-service-policy.test.ts +++ b/packages/controller-utils/src/create-service-policy.test.ts @@ -2034,19 +2034,19 @@ describe('createServicePolicy', () => { describe('after the circuit break duration has elapsed', () => { describe.each([ { - desc: `using the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, + desc: `the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, duration: DEFAULT_CIRCUIT_BREAK_DURATION, options: {}, }, { - desc: 'using a custom circuit break duration', - duration: DEFAULT_CIRCUIT_BREAK_DURATION, + desc: 'a custom circuit break duration', + duration: 5_000, options: { // This has to be high enough to exceed the exponential backoff circuitBreakDuration: 5_000, }, }, - ])('$desc', ({ duration, options }) => { + ])('using $desc', ({ duration, options }) => { it('returns what the service returns', async () => { const maxConsecutiveFailures = DEFAULT_MAX_RETRIES; let invocationCounter = 0; @@ -2608,7 +2608,7 @@ describe('createServicePolicy', () => { }, { desc: 'a custom circuit break duration', - duration: DEFAULT_CIRCUIT_BREAK_DURATION, + duration: 5_000, options: { // This has to be high enough to exceed the exponential backoff circuitBreakDuration: 50_000, @@ -3217,7 +3217,7 @@ describe('createServicePolicy', () => { }, { desc: 'a custom circuit break duration', - duration: DEFAULT_CIRCUIT_BREAK_DURATION, + duration: 5_000, options: { // This has to be high enough to exceed the exponential backoff circuitBreakDuration: 5_000, @@ -3293,6 +3293,117 @@ describe('createServicePolicy', () => { }); }); + describe('wrapping a service that succeeds at first and then fails enough to break the circuit', () => { + describe.each([ + { + desc: `the default max number of consecutive failures (${DEFAULT_MAX_CONSECUTIVE_FAILURES})`, + maxConsecutiveFailures: DEFAULT_MAX_CONSECUTIVE_FAILURES, + optionsWithMaxConsecutiveFailures: {}, + }, + { + desc: 'a custom max number of consecutive failures', + maxConsecutiveFailures: DEFAULT_MAX_RETRIES + 1, + optionsWithMaxConsecutiveFailures: { + maxConsecutiveFailures: DEFAULT_MAX_RETRIES + 1, + }, + }, + ])( + 'using $desc', + ({ maxConsecutiveFailures, optionsWithMaxConsecutiveFailures }) => { + describe.each([ + { + desc: `the default circuit break duration (${DEFAULT_CIRCUIT_BREAK_DURATION})`, + circuitBreakDuration: DEFAULT_CIRCUIT_BREAK_DURATION, + optionsWithCircuitBreakDuration: {}, + }, + { + desc: 'a custom circuit break duration', 
+ circuitBreakDuration: DEFAULT_CIRCUIT_BREAK_DURATION, + options: { + // This has to be high enough to exceed the exponential backoff + optionsWithCircuitBreakDuration: 5_000, + }, + }, + ])( + 'using $desc', + ({ circuitBreakDuration, optionsWithCircuitBreakDuration }) => { + it('calls onAvailable listeners if the service finally succeeds', async () => { + let invocationCounter = 0; + const mockService = jest.fn(() => { + invocationCounter += 1; + if ( + invocationCounter === 1 || + invocationCounter === maxConsecutiveFailures + 2 + ) { + return { some: 'data' }; + } + throw new Error('failure'); + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + ...optionsWithMaxConsecutiveFailures, + ...optionsWithCircuitBreakDuration, + }); + policy.onRetry(() => { + clock.next(); + }); + policy.onAvailable(onAvailableListener); + + // Execute the service successfully once + await policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + + // Execute and retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + clock.tick(circuitBreakDuration); + + await policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(2); + }); + + it('does not call onAvailable listeners if the service finally fails', async () => { + let invocationCounter = 0; + const mockService = jest.fn(() => { + invocationCounter += 1; + if ( + invocationCounter === 1 || + invocationCounter === maxConsecutiveFailures + 2 + ) { + return { some: 'data' }; + } + throw new Error('failure'); + }); + const onAvailableListener = jest.fn(); + const policy = createServicePolicy({ + ...optionsWithMaxConsecutiveFailures, + ...optionsWithCircuitBreakDuration, + }); + policy.onRetry(() => { + clock.next(); + }); + policy.onAvailable(onAvailableListener); + + // Execute the service successfully once + await policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + + // Execute and retry until we break the circuit + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + await ignoreRejection(policy.execute(mockService)); + clock.tick(circuitBreakDuration); + + await policy.execute(mockService); + expect(onAvailableListener).toHaveBeenCalledTimes(2); + }); + }, + ); + }, + ); + }); + describe('getRemainingCircuitOpenDuration', () => { it('returns the number of milliseconds before the circuit will transition from open to half-open', async () => { const mockService = () => { @@ -3371,7 +3482,7 @@ describe('createServicePolicy', () => { expect(policy.getCircuitState()).toBe(CircuitState.Closed); }); - it('allows the service to be executed successfully again if its circuit has broken', async () => { + it('allows the service to be executed successfully again if its circuit has broken after resetting', async () => { let invocationCounter = 0; const mockService = jest.fn(() => { invocationCounter += 1; @@ -3394,7 +3505,7 @@ describe('createServicePolicy', () => { expect(await policy.execute(mockService)).toStrictEqual({ some: 'data' }); }); - it('calls onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes successfully again', async () => { + it('calls onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes again, successfully', async () => { let 
invocationCounter = 0; const mockService = jest.fn(() => { invocationCounter += 1; @@ -3428,7 +3539,7 @@ describe('createServicePolicy', () => { expect(onAvailableListener).toHaveBeenCalledTimes(2); }); - it('allows the service to be executed unsuccessfully again if its circuit has broken', async () => { + it('allows the service to be executed unsuccessfully again if its circuit has broken after resetting', async () => { const mockService = jest.fn(() => { throw new Error('failure'); }); diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index 6353e078600..5d024a2df08 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -143,6 +143,11 @@ type InternalCircuitState = } | { state: Exclude }; +/** + * Used to keep track of whether the onAvailable event should be fired. + */ +type AvailabilityStatus = 'unknown' | 'available' | 'unavailable'; + /** * The maximum number of times that a failing service should be re-run before * giving up. @@ -265,7 +270,7 @@ export function createServicePolicy( backoff = new ExponentialBackoff(), } = options; - let availabilityStatus: 'unknown' | 'available' = 'unknown'; + let availabilityStatus: AvailabilityStatus = 'unknown'; const retryPolicy = retry(retryFilterPolicy, { // Note that although the option here is called "max attempts", it's really @@ -295,6 +300,10 @@ export function createServicePolicy( circuitBreakerPolicy.onStateChange((circuitState) => { internalCircuitState = getInternalCircuitState(circuitState); }); + + circuitBreakerPolicy.onBreak(() => { + availabilityStatus = 'unavailable'; + }); const onBreak = circuitBreakerPolicy.onBreak.bind(circuitBreakerPolicy); const onDegradedEventEmitter = From 246b2b5e17f5bc7afb2e5f48aa82d8c68f8a6739 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 09:43:16 -0700 Subject: [PATCH 06/30] Reduce the diff --- packages/controller-utils/src/create-service-policy.ts | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index 5d024a2df08..4f532e1a0ae 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -297,8 +297,8 @@ export function createServicePolicy( let internalCircuitState: InternalCircuitState = getInternalCircuitState( circuitBreakerPolicy.state, ); - circuitBreakerPolicy.onStateChange((circuitState) => { - internalCircuitState = getInternalCircuitState(circuitState); + circuitBreakerPolicy.onStateChange((state) => { + internalCircuitState = getInternalCircuitState(state); }); circuitBreakerPolicy.onBreak(() => { @@ -318,9 +318,9 @@ export function createServicePolicy( onDegradedEventEmitter.emit(data); } }); - retryPolicy.onSuccess((data) => { + retryPolicy.onSuccess(({ duration }) => { if (circuitBreakerPolicy.state === CircuitState.Closed) { - if (data.duration > degradedThreshold) { + if (duration > degradedThreshold) { onDegradedEventEmitter.emit(); } else if (availabilityStatus !== 'available') { availabilityStatus = 'available'; From 199bb792dc957258d22ac3fd3bdaeb1961a6d2c5 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 09:56:00 -0700 Subject: [PATCH 07/30] Fix tests --- packages/controller-utils/src/create-service-policy.test.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts
index 76021081563..b72b2459fe8 100644
--- a/packages/controller-utils/src/create-service-policy.test.ts
+++ b/packages/controller-utils/src/create-service-policy.test.ts
@@ -3319,9 +3319,9 @@ describe('createServicePolicy', () => {
           {
             desc: 'a custom circuit break duration',
             circuitBreakDuration: DEFAULT_CIRCUIT_BREAK_DURATION,
-            options: {
+            optionsWithCircuitBreakDuration: {
               // This has to be high enough to exceed the exponential backoff
-              optionsWithCircuitBreakDuration: 5_000,
+              circuitBreakDuration: 5_000,
             },
           },
         ])(

From ff6d832a60ba2bef99fd5057ee7cbc774601f1e5 Mon Sep 17 00:00:00 2001
From: Elliot Winkler
Date: Mon, 17 Nov 2025 11:58:43 -0700
Subject: [PATCH 08/30] Use a quasi-enum for the availability status

---
 .../src/create-service-policy.ts | 28 ++++++++++++++-----
 1 file changed, 21 insertions(+), 7 deletions(-)

diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts
index 4f532e1a0ae..bc8bdbfddc1 100644
--- a/packages/controller-utils/src/create-service-policy.ts
+++ b/packages/controller-utils/src/create-service-policy.ts
@@ -144,9 +144,23 @@ type InternalCircuitState =
   | { state: Exclude };

 /**
- * Used to keep track of whether the onAvailable event should be fired.
+ * List of availability statuses.
+ *
+ * Used to keep track of whether the `onAvailable` event should be fired.
+ */
+const AVAILABILITY_STATUSES = {
+  Unknown: 'unknown',
+  Available: 'available',
+  Unavailable: 'unavailable',
+};
+
+/**
+ * An availability status.
+ *
+ * Used to keep track of whether the `onAvailable` event should be fired.
  */
-type AvailabilityStatus = 'unknown' | 'available' | 'unavailable';
+type AvailabilityStatus =
+  (typeof AVAILABILITY_STATUSES)[keyof typeof AVAILABILITY_STATUSES];

 /**
  * The maximum number of times that a failing service should be re-run before
  * giving up.
@@ -270,7 +284,7 @@ export function createServicePolicy(
     backoff = new ExponentialBackoff(),
   } = options;

-  let availabilityStatus: AvailabilityStatus = 'unknown';
+  let availabilityStatus: AvailabilityStatus = AVAILABILITY_STATUSES.Unknown;

   const retryPolicy = retry(retryFilterPolicy, {
     // Note that although the option here is called "max attempts", it's really
     // maximum number of *retries* (attempts past the initial attempt).
@@ -302,7 +316,7 @@ export function createServicePolicy(
   });

   circuitBreakerPolicy.onBreak(() => {
-    availabilityStatus = 'unavailable';
+    availabilityStatus = AVAILABILITY_STATUSES.Unavailable;
   });
   const onBreak = circuitBreakerPolicy.onBreak.bind(circuitBreakerPolicy);
@@ -322,8 +336,8 @@ export function createServicePolicy(
     if (circuitBreakerPolicy.state === CircuitState.Closed) {
       if (duration > degradedThreshold) {
         onDegradedEventEmitter.emit();
-      } else if (availabilityStatus !== 'available') {
-        availabilityStatus = 'available';
+      } else if (availabilityStatus !== AVAILABILITY_STATUSES.Available) {
+        availabilityStatus = AVAILABILITY_STATUSES.Available;
         onAvailableEventEmitter.emit();
       }
     }
   });
@@ -374,7 +388,7 @@ export function createServicePolicy(

     // Re-initialize the availability status so that if the service is executed
     // successfully, onAvailable listeners will be called again
-    availabilityStatus = 'unknown';
+    availabilityStatus = AVAILABILITY_STATUSES.Unknown;
   };

   return {

From fa66813151bf8a9af97564c018432378f9c9870b Mon Sep 17 00:00:00 2001
From: Elliot Winkler
Date: Mon, 17 Nov 2025 12:03:36 -0700
Subject: [PATCH 09/30] Fix test

---
 .../controller-utils/src/create-service-policy.test.ts | 9 +++------
 1 file changed, 3 insertions(+), 6 deletions(-)

diff --git a/packages/controller-utils/src/create-service-policy.test.ts b/packages/controller-utils/src/create-service-policy.test.ts
index b72b2459fe8..445ebb764cd 100644
--- a/packages/controller-utils/src/create-service-policy.test.ts
+++ b/packages/controller-utils/src/create-service-policy.test.ts
@@ -3367,10 +3367,7 @@ describe('createServicePolicy', () => {
                 let invocationCounter = 0;
                 const mockService = jest.fn(() => {
                   invocationCounter += 1;
-                  if (
-                    invocationCounter === 1 ||
-                    invocationCounter === maxConsecutiveFailures + 2
-                  ) {
+                  if (invocationCounter === 1) {
                     return { some: 'data' };
                   }
                   throw new Error('failure');
@@ -3395,8 +3392,8 @@ describe('createServicePolicy', () => {
                 await ignoreRejection(policy.execute(mockService));
                 clock.tick(circuitBreakDuration);

-                await policy.execute(mockService);
-                expect(onAvailableListener).toHaveBeenCalledTimes(2);
+                await ignoreRejection(policy.execute(mockService));
+                expect(onAvailableListener).toHaveBeenCalledTimes(1);
               });
             },
           );

From 9d090e9d6a62b68b766adf1f0e45226ced4c3543 Mon Sep 17 00:00:00 2001
From: Elliot Winkler
Date: Fri, 14 Nov 2025 14:59:55 -0700
Subject: [PATCH 10/30] Revamp NetworkController RPC endpoint events

In a future commit we will introduce changes to `network-controller` so
that it will keep track of the status of each network as requests are
made. This commit paves the way for this to happen by redefining the
existing RPC endpoint-related events that NetworkController produces.

Currently, when requests are made through the network clients that
NetworkController exposes, three events are published:

- `NetworkController:rpcEndpointDegraded` - Published when enough
  successive retriable errors are encountered while making a request to
  an RPC endpoint that the maximum number of retries is reached.
- `NetworkController:rpcEndpointUnavailable` - Published when enough
  successive errors are encountered while making a request to an RPC
  endpoint that the underlying circuit breaks.
- `NetworkController:rpcEndpointRequestRetried` - Published when a
  request is retried (mainly used for testing).

It's important to note that in the context of the RPC failover feature,
an "RPC endpoint" can actually encompass multiple URLs, so the above
events may fire for any of those URLs, not just the primary.

While these events are useful for reporting metrics on RPC endpoints,
in order to effectively be able to update the status of a network, we
need events that are less granular and are guaranteed not to fire
multiple times in a row. We also need a new event.

Now the list of events looks like this:

- `NetworkController:rpcEndpointInstanceDegraded` - The same as
  `NetworkController:rpcEndpointDegraded` before.
- `NetworkController:rpcEndpointInstanceUnavailable` - The same as
  `NetworkController:rpcEndpointUnavailable` before.
- `NetworkController:rpcEndpointInstanceRetried` - Renamed from
  `NetworkController:rpcEndpointRequestRetried`.
- `NetworkController:rpcEndpointDegraded` - Similar to
  `NetworkController:rpcEndpointInstanceDegraded`, but won't be
  published again if the RPC endpoint is already in a degraded state.
- `NetworkController:rpcEndpointUnavailable` - Published when all of
  the circuits underlying all of the URLs for an RPC endpoint have
  broken (none of the URLs are available). Won't be published again if
  the RPC endpoint is already in an unavailable state.
- `NetworkController:rpcEndpointAvailable` - A new event.
Published the first time a successful request is made to one of the URLs for an RPC endpoint, or following a degraded or unavailable status.
---
 packages/network-controller/CHANGELOG.md | 14 +
 packages/network-controller/package.json | 1 +
 .../src/NetworkController.ts | 206 +-
 ...create-auto-managed-network-client.test.ts | 23 +
 .../src/create-auto-managed-network-client.ts | 10 +-
 .../rpc-endpoint-events.test.ts | 1232 +++++++++
 .../src/create-network-client.ts | 204 +-
 packages/network-controller/src/index.ts | 5 +-
 .../src/rpc-service/rpc-service-chain.test.ts | 1841 +++++++++++--
 .../src/rpc-service/rpc-service-chain.ts | 418 ++-
 .../rpc-service/rpc-service-requestable.ts | 43 +-
 .../src/rpc-service/rpc-service.test.ts | 2318 +++++++----------
 .../src/rpc-service/rpc-service.ts | 154 +-
 .../src/rpc-service/shared.ts | 66 +-
 .../tests/NetworkController.test.ts | 18 +
 .../tests/network-client/helpers.ts | 95 +-
 .../tests/network-client/rpc-failover.ts | 189 +-
 yarn.lock | 1 +
 18 files changed, 4858 insertions(+), 1980 deletions(-)
 create mode 100644 packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts

diff --git a/packages/network-controller/CHANGELOG.md b/packages/network-controller/CHANGELOG.md
index 754630d8fde..5228a1c1e9d 100644
--- a/packages/network-controller/CHANGELOG.md
+++ b/packages/network-controller/CHANGELOG.md
@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+### Added
+
+- Add `NetworkController:rpcEndpointAvailable` messenger event ([#7166](https://github.com/MetaMask/core/pull/7166))
+  - This is a counterpart to the (new) `NetworkController:rpcEndpointUnavailable` and `NetworkController:rpcEndpointDegraded` events, but it is published when a request to an RPC endpoint URL succeeds, either initially or following a previously established degraded or unavailable status.
+
 ### Changed

 - **BREAKING:** Use `InternalProvider` instead of `SafeEventEmitterProvider` ([#6796](https://github.com/MetaMask/core/pull/6796))
@@ -19,6 +24,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   - In practice, this should happen rarely if ever.
 - **BREAKING:** Migrate `NetworkClient` to `JsonRpcEngineV2` ([#7065](https://github.com/MetaMask/core/pull/7065))
   - This ought to be unobservable, but we mark it as breaking out of an abundance of caution.
+- **BREAKING:** Split up and update payload data for `NetworkController:rpcEndpoint{Degraded,Unavailable}` ([#7166](https://github.com/MetaMask/core/pull/7166))
+  - The existing events are now called `NetworkController:rpcEndpointInstance{Degraded,Unavailable}` and retain their present behavior.
+  - `NetworkController:rpcEndpoint{Degraded,Unavailable}` do still exist, but they are now designed to represent the entire RPC endpoint and are guaranteed not to be published multiple times in a row. In particular, `NetworkController:rpcEndpointUnavailable` is published only after trying all of the designated URLs for a particular RPC endpoint and the underlying circuit for the last URL breaks, not as each primary's or failover's circuit breaks.
+  - The event payloads have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added.
+- **BREAKING:** Rename and update payload data for `NetworkController:rpcEndpointRequestRetried` ([#7166](https://github.com/MetaMask/core/pull/7166))
+  - This event is now called `NetworkController:rpcEndpointInstanceRetried`.
+  - The event payload has been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` and `attempt` have been added.
+- **BREAKING:** Update `AbstractRpcService`/`RpcServiceRequestable` to remove `{ isolated: true }` from the `onBreak` event data type ([#7166](https://github.com/MetaMask/core/pull/7166))
+  - This represented the error produced when `.isolate` is called on a Cockatiel circuit breaker policy, which we never do.
 - Bump `@metamask/controller-utils` from `^11.14.1` to `^11.15.0` ([#7003](https://github.com/MetaMask/core/pull/7003))

 ### Fixed

diff --git a/packages/network-controller/package.json b/packages/network-controller/package.json
index a0672a0c503..c1fb45a5385 100644
--- a/packages/network-controller/package.json
+++ b/packages/network-controller/package.json
@@ -78,6 +78,7 @@
     "@types/jest-when": "^2.7.3",
     "@types/lodash": "^4.14.191",
     "@types/node-fetch": "^2.6.12",
+    "cockatiel": "^3.1.2",
     "deep-freeze-strict": "^1.1.1",
     "deepmerge": "^4.2.2",
     "jest": "^27.5.1",
diff --git a/packages/network-controller/src/NetworkController.ts b/packages/network-controller/src/NetworkController.ts
index 2bb04f387ad..ba317f58e07 100644
--- a/packages/network-controller/src/NetworkController.ts
+++ b/packages/network-controller/src/NetworkController.ts
@@ -443,9 +443,29 @@ export type NetworkControllerNetworkRemovedEvent = {
 };

 /**
- * `rpcEndpointUnavailable` is published after an attempt to make a request to
- * an RPC endpoint fails too many times in a row (because of a connection error
- * or an unusable response).
+ * `NetworkController:rpcEndpointUnavailable` is published when the number of
+ * failed consecutive attempts to receive a 2xx response from the primary URL of
+ * an RPC endpoint reaches a maximum, causing further requests to be temporarily
+ * paused, and when subsequent traffic to a failover URL similarly fails.
+ *
+ * In other words, this event will not be published if a primary is deemed to be
+ * unavailable but its failover is not.
+ *
+ * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to
+ * be published, the event will not be re-published (for instance, if both a
+ * primary and failover are deemed to be unavailable, or if more than one
+ * failover is deemed to be unavailable).
+ *
+ * @param payload - The event payload.
+ * @param payload.chainId - The chain ID of the network where the RPC endpoint
+ * lives.
+ * @param payload.networkClientId - The ID of the network client representing
+ * the RPC endpoint.
+ * @param payload.primaryEndpointUrl - The primary URL of the endpoint.
+ * @param payload.endpointUrl - One of the URLs defined for the endpoint which
+ * has been deemed to be unavailable.
+ * @param payload.error - The error from the last request to one of the URLs
+ * defined for the endpoint which determined the unavailability status.
*/ export type NetworkControllerRpcEndpointUnavailableEvent = { type: 'NetworkController:rpcEndpointUnavailable'; @@ -453,15 +473,79 @@ export type NetworkControllerRpcEndpointUnavailableEvent = { { chainId: Hex; endpointUrl: string; - failoverEndpointUrl?: string; error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; /** - * `rpcEndpointDegraded` is published after a request to an RPC endpoint - * responds successfully but takes too long. + * `NetworkController:rpcEndpointInstanceUnavailable` is published when the + * number of failed consecutive attempts to receive a 2xx response from *any* of + * the designated URLs of an RPC endpoint reaches a maximum. + * + * This event will still be published if a primary is deemed to be unavailable, + * even its failover is available. + * + * Additionally, even if this was the last `NetworkController:rpcEndpoint*` event + * to be published, the event may be re-published (for instance, if both a + * primary and failover are deemed to be unavailable, or if more than one + * failover is deemed to be unavailable). + * + * @param payload - The event payload. + * @param payload.chainId - The chain ID of the network where the RPC endpoint + * lives. + * @param payload.networkClientId - The ID of the network client representing + * the RPC endpoint. + * @param payload.primaryEndpointUrl - The primary URL of the endpoint. + * @param payload.endpointUrl - One of the URLs defined for the endpoint which + * has been deemed to be unavailable. + * @param payload.error - The error from the last request to the `endpointUrl` + * which determined the unavailability status. + */ +export type NetworkControllerRpcEndpointInstanceUnavailableEvent = { + type: 'NetworkController:rpcEndpointInstanceUnavailable'; + payload: [ + { + chainId: Hex; + endpointUrl: string; + error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * `NetworkController:rpcEndpointDegraded` is published in the following two + * cases: + * + * 1. When an attempt to receive a 2xx response from any of the designated URLs + * for an RPC endpoint is unsuccessful, and all subsequent automatic retries + * lead to the same result. + * 2. When a 2xx response is received from any of the endpoint URLs, but the + * request takes longer than a set number of seconds to complete. + * + * Note that this event will be published even if there are local connectivity + * issues which prevent requests from being initiated. This is intentional. + * + * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to + * be published, the event will not be re-published (for instance: a failover is + * activated and successive attempts to the failover fail, then the primary + * comes back online, but it is slow). + * + * @param payload - The event payload. + * @param payload.chainId - The chain ID of the network where the RPC endpoint + * lives. + * @param payload.networkClientId - The ID of the network client representing + * the RPC endpoint. + * @param payload.primaryEndpointUrl - The primary URL of the endpoint. + * @param payload.endpointUrl - One of the URLs defined for the endpoint which + * has been deemed to be degraded. + * @param payload.error - The error from the last request to the `endpointUrl` + * which determined the degraded status (or `undefined` if the request was + * merely slow). 
*/ export type NetworkControllerRpcEndpointDegradedEvent = { type: 'NetworkController:rpcEndpointDegraded'; @@ -470,20 +554,113 @@ export type NetworkControllerRpcEndpointDegradedEvent = { chainId: Hex; endpointUrl: string; error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; /** - * `rpcEndpointRequestRetried` is published after a request to an RPC endpoint - * is retried following a connection error or an unusable response. + * + * `NetworkController:rpcEndpointInstanceDegraded` is published in the following + * two cases: + * + * 1. When an attempt to receive a 2xx response from *any* of the designated + * URLs for an RPC endpoint is unsuccessful, and all subsequent automatic + * retries lead to the same result. + * 2. When a 2xx response is received from any of the endpoint URLs, but the + * request takes longer than a set number of seconds to complete. + * + * Note that this event will be published even if there are local connectivity + * issues which prevent requests from being initiated. This is intentional. + * + * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to + * be published, the event may be re-published (for instance: a failover is + * activated and successive attempts to the failover fail, then the primary + * comes back online, but it is slow). + * + * @param payload - The event payload. + * @param payload.chainId - The chain ID of the network where the RPC endpoint + * lives. + * @param payload.networkClientId - The ID of the network client representing + * the RPC endpoint. + * @param payload.primaryEndpointUrl - The primary URL of the endpoint. + * @param payload.endpointUrl - One of the URLs defined for the endpoint which + * has been deemed to be degraded. + * @param payload.error - The error from the last request to the `endpointUrl` + * which determined the degraded status (or `undefined` if the request was + * merely slow). */ -export type NetworkControllerRpcEndpointRequestRetriedEvent = { - type: 'NetworkController:rpcEndpointRequestRetried'; +export type NetworkControllerRpcEndpointInstanceDegradedEvent = { + type: 'NetworkController:rpcEndpointInstanceDegraded'; payload: [ { + chainId: Hex; endpointUrl: string; + error: unknown; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * `NetworkController:rpcEndpointAvailable` is published in either of the + * following two cases: + * + * 1. The first time that a 2xx request is made to any of the designated URLs of + * an RPC endpoint. + * 2. When requests to any of the URLs previously failed (placing the endpoint + * in a degraded or unavailable status), but are now succeeding again. + * + * @param payload - The event payload. + * @param payload.chainId - The chain ID of the network where the RPC endpoint + * lives. + * @param payload.networkClientId - The ID of the network client representing + * the RPC endpoint. + * @param payload.primaryEndpointUrl - The primary URL of the RPC endpoint. + * @param payload.endpointUrl - The specific URL that returned a successful + * response. + */ +export type NetworkControllerRpcEndpointAvailableEvent = { + type: 'NetworkController:rpcEndpointAvailable'; + payload: [ + { + chainId: Hex; + endpointUrl: string; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; + }, + ]; +}; + +/** + * `NetworkController:rpcEndpointInstanceRetried` is published before a + * request to any of the designated URLs of an RPC endpoint is retried. 
+ * + * This is mainly useful for tests. + * + * @param payload - The event payload. + * @param payload.chainId - The chain ID of the network where the RPC endpoint + * lives. + * @param payload.networkClientId - The ID of the network client representing + * the RPC endpoint. + * @param payload.primaryEndpointUrl - The primary URL of the RPC endpoint. + * @param payload.endpointUrl - The URL defined for the endpoint that is being + * retried. + * @param payload.attempt - The current attempt counter for the endpoint URL + * (starting from 0). + * @see {@link RpcService} for the list of retriable errors. + */ +export type NetworkControllerRpcEndpointInstanceRetriedEvent = { + type: 'NetworkController:rpcEndpointInstanceRetried'; + payload: [ + { attempt: number; + chainId: Hex; + endpointUrl: string; + networkClientId: NetworkClientId; + primaryEndpointUrl: string; }, ]; }; @@ -497,8 +674,11 @@ export type NetworkControllerEvents = | NetworkControllerNetworkAddedEvent | NetworkControllerNetworkRemovedEvent | NetworkControllerRpcEndpointUnavailableEvent + | NetworkControllerRpcEndpointInstanceUnavailableEvent | NetworkControllerRpcEndpointDegradedEvent - | NetworkControllerRpcEndpointRequestRetriedEvent; + | NetworkControllerRpcEndpointInstanceDegradedEvent + | NetworkControllerRpcEndpointAvailableEvent + | NetworkControllerRpcEndpointInstanceRetriedEvent; /** * All events that {@link NetworkController} calls internally. @@ -2800,6 +2980,7 @@ export class NetworkController extends BaseController< autoManagedNetworkClientRegistry[NetworkClientType.Infura][ addedRpcEndpoint.networkClientId ] = createAutoManagedNetworkClient({ + networkClientId: addedRpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Infura, chainId: networkFields.chainId, @@ -2818,6 +2999,7 @@ export class NetworkController extends BaseController< autoManagedNetworkClientRegistry[NetworkClientType.Custom][ addedRpcEndpoint.networkClientId ] = createAutoManagedNetworkClient({ + networkClientId: addedRpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Custom, chainId: networkFields.chainId, @@ -2980,6 +3162,7 @@ export class NetworkController extends BaseController< return [ rpcEndpoint.networkClientId, createAutoManagedNetworkClient({ + networkClientId: rpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Infura, network: infuraNetworkName, @@ -2999,6 +3182,7 @@ export class NetworkController extends BaseController< return [ rpcEndpoint.networkClientId, createAutoManagedNetworkClient({ + networkClientId: rpcEndpoint.networkClientId, networkClientConfiguration: { type: NetworkClientType.Custom, chainId: networkConfiguration.chainId, diff --git a/packages/network-controller/src/create-auto-managed-network-client.test.ts b/packages/network-controller/src/create-auto-managed-network-client.test.ts index 3b49ccda1f5..c30208ce167 100644 --- a/packages/network-controller/src/create-auto-managed-network-client.test.ts +++ b/packages/network-controller/src/create-auto-managed-network-client.test.ts @@ -35,6 +35,7 @@ describe('createAutoManagedNetworkClient', () => { describe(`given configuration for a ${networkClientConfiguration.type} network client`, () => { it('allows the network client configuration to be accessed', () => { const { configuration } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -51,6 +52,7 @@ 
describe('createAutoManagedNetworkClient', () => { // If unexpected requests occurred, then Nock would throw expect(() => { createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -64,6 +66,7 @@ describe('createAutoManagedNetworkClient', () => { it('returns a provider proxy that has the same interface as a provider', () => { const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -97,6 +100,7 @@ describe('createAutoManagedNetworkClient', () => { }); const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -145,6 +149,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const { provider } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -166,6 +171,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenCalledTimes(1); expect(createNetworkClientMock).toHaveBeenCalledWith({ + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -204,6 +210,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -227,6 +234,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -234,6 +242,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: false, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -272,6 +281,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -295,6 +305,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -302,6 +313,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: true, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -313,6 +325,7 @@ describe('createAutoManagedNetworkClient', () => { it('returns a block tracker proxy that has the same interface as a block tracker', () => { const { blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -372,6 +385,7 @@ describe('createAutoManagedNetworkClient', () => { }); const { 
blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, @@ -441,6 +455,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const { blockTracker } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -458,6 +473,7 @@ describe('createAutoManagedNetworkClient', () => { await blockTracker.checkForLatestBlock(); expect(createNetworkClientMock).toHaveBeenCalledTimes(1); expect(createNetworkClientMock).toHaveBeenCalledWith({ + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -496,6 +512,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -513,6 +530,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -520,6 +538,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: false, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -558,6 +577,7 @@ describe('createAutoManagedNetworkClient', () => { const messenger = buildNetworkControllerMessenger(); const autoManagedNetworkClient = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -575,6 +595,7 @@ describe('createAutoManagedNetworkClient', () => { }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(1, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -582,6 +603,7 @@ describe('createAutoManagedNetworkClient', () => { isRpcFailoverEnabled: true, }); expect(createNetworkClientMock).toHaveBeenNthCalledWith(2, { + id: 'some-network-client-id', configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, @@ -608,6 +630,7 @@ describe('createAutoManagedNetworkClient', () => { ], }); const { blockTracker, destroy } = createAutoManagedNetworkClient({ + networkClientId: 'some-network-client-id', networkClientConfiguration, getRpcServiceOptions: () => ({ fetch, diff --git a/packages/network-controller/src/create-auto-managed-network-client.ts b/packages/network-controller/src/create-auto-managed-network-client.ts index 5ab700a1737..3bdded89d83 100644 --- a/packages/network-controller/src/create-auto-managed-network-client.ts +++ b/packages/network-controller/src/create-auto-managed-network-client.ts @@ -3,7 +3,10 @@ import type { Logger } from 'loglevel'; import type { NetworkClient } from './create-network-client'; import { createNetworkClient } from './create-network-client'; -import type { NetworkControllerMessenger } from './NetworkController'; +import type { + NetworkClientId, + NetworkControllerMessenger, +} from './NetworkController'; import type { RpcServiceOptions } from './rpc-service/rpc-service'; import type { 
BlockTracker, @@ -65,6 +68,8 @@ const UNINITIALIZED_TARGET = { __UNINITIALIZED__: true }; * then cached for subsequent usages. * * @param args - The arguments. + * @param args.networkClientId - The ID that will be assigned to the new network + * client in the registry. * @param args.networkClientConfiguration - The configuration object that will be * used to instantiate the network client when it is needed. * @param args.getRpcServiceOptions - Factory for constructing RPC service @@ -81,6 +86,7 @@ const UNINITIALIZED_TARGET = { __UNINITIALIZED__: true }; export function createAutoManagedNetworkClient< Configuration extends NetworkClientConfiguration, >({ + networkClientId, networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions = () => ({}), @@ -88,6 +94,7 @@ export function createAutoManagedNetworkClient< isRpcFailoverEnabled: givenIsRpcFailoverEnabled, logger, }: { + networkClientId: NetworkClientId; networkClientConfiguration: Configuration; getRpcServiceOptions: ( rpcEndpointUrl: string, @@ -104,6 +111,7 @@ export function createAutoManagedNetworkClient< const ensureNetworkClientCreated = (): NetworkClient => { networkClient ??= createNetworkClient({ + id: networkClientId, configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts new file mode 100644 index 00000000000..cc8d8455ba9 --- /dev/null +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -0,0 +1,1232 @@ +import { + ConstantBackoff, + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; +import { errorCodes } from '@metamask/rpc-errors'; + +import { buildRootMessenger } from '../../tests/helpers'; +import { + withMockedCommunications, + withNetworkClient, +} from '../../tests/network-client/helpers'; +import { DEFAULT_MAX_CONSECUTIVE_FAILURES } from '../rpc-service/rpc-service'; +import { NetworkClientType } from '../types'; + +describe('createNetworkClient - RPC endpoint events', () => { + for (const networkClientType of Object.values(NetworkClientType)) { + describe(`${networkClientType}`, () => { + const blockNumber = '0x100'; + const backoffDuration = 100; + + it('publishes the NetworkController:rpcEndpointUnavailable event only when the max number of consecutive request failures is reached for all of the provided endpoint URLs', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedUnavailableError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
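+                    // (Each endpoint below is mocked to fail
+                    // DEFAULT_MAX_CONSECUTIVE_FAILURES times in a row —
+                    // the number of consecutive failures at which that
+                    // endpoint's circuit opens.)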
+ primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointUnavailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointUnavailable', + rpcEndpointUnavailableEventHandler, + ); + + await withNetworkClient( + { + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + providerType: networkClientType, + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // breaking the circuit; then hit the failover and exceed + // the max of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the failover and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the failover and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointUnavailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointUnavailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: failoverEndpointUrl, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointInstanceUnavailable event each time the max number of consecutive request failures is reached for any of the provided endpoint URLs', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedUnavailableError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointInstanceUnavailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceUnavailable', + rpcEndpointInstanceUnavailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // breaking the circuit; then hit the failover and exceed + // the max of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the failover and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the failover and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointInstanceUnavailableEventHandler, + ).toHaveBeenCalledTimes(2); + expect( + rpcEndpointInstanceUnavailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceUnavailableEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: failoverEndpointUrl, + error: expectedUnavailableError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the max number of retries is continually reached in making requests to the primary endpoint URL', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ comms.mockNextBlockTrackerRequest({ + blockNumber, + }); + comms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 1, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the time to complete a request to the primary endpoint URL is continually too long', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
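+                // (The mocked RPC response below advances the fake clock
+                // past DEFAULT_DEGRADED_THRESHOLD before resolving, which
+                // is how a slow-but-successful endpoint is simulated here.)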
+ comms.mockNextBlockTrackerRequest({ + blockNumber, + }); + comms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + times: 2, + }); + + await makeRpcCall(request); + await makeRpcCall(request); + await makeRpcCall(request); + + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 1, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('does not publish the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint URL', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + times: 5, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
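+                    // (The service policy above uses a ConstantBackoff, so
+                    // each retry is scheduled exactly `backoffDuration` ms
+                    // out and a single tick lands on it.)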
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover and exceed the max + // number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('does not publish the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint URL is too long', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + const messenger = buildRootMessenger(); + const rpcEndpointDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover + await makeRpcCall(request); + + expect( + rpcEndpointDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointInstanceDegraded event each time the max number of retries is reached in making requests to the primary endpoint URL', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockNextBlockTrackerRequest({ + blockNumber, + }); + comms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + + const messenger = buildRootMessenger(); + const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceDegraded', + rpcEndpointInstanceDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledTimes(2); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointInstanceDegraded event when the time to complete a request to the primary endpoint URL is continually too long', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + const messenger = buildRootMessenger(); + const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceDegraded', + rpcEndpointInstanceDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getBlockTrackerOptions: () => ({ + pollingInterval: 10000, + }), + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ blockTracker, makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockNextBlockTrackerRequest({ + blockNumber: '0x1', + }); + // We mock another block tracker request so we can clear the + // cache. 
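+                // (Advancing to a new block number invalidates the client's
+                // cached result for the first call, so the second
+                // `eth_gasPrice` request below actually reaches the
+                // endpoint again.)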
+ comms.mockNextBlockTrackerRequest({ + blockNumber: '0x2', + }); + comms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + comms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + await makeRpcCall(request); + // Force another block to clear the cache on the previous + // request + await blockTracker.checkForLatestBlock(); + await makeRpcCall(request); + + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledTimes(2); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointInstanceDegraded event again if the max number of retries is reached in making requests to a failover endpoint URL', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + const messenger = buildRootMessenger(); + const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceDegraded', + rpcEndpointInstanceDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + times: 5, + response: { + httpStatus: 503, + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover and exceed the max + // number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledTimes(3); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(1, { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(2, { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(3, { + chainId, + endpointUrl: failoverEndpointUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointInstanceDegraded event again when the time to complete a request to a failover endpoint URL is too long', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + const messenger = buildRootMessenger(); + const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceDegraded', + rpcEndpointInstanceDegradedEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: 'ok', + }; + }, + }); + + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. 
+ clock.tick(backoffDuration); + }, + ); + + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the primary and exceed the max number of retries, + // break the circuit; hit the failover + await makeRpcCall(request); + + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenCalledTimes(3); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(1, { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(2, { + chainId, + endpointUrl: rpcUrl, + error: expectedDegradedError, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + expect( + rpcEndpointInstanceDegradedEventHandler, + ).toHaveBeenNthCalledWith(3, { + chainId, + endpointUrl: failoverEndpointUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to the RPC endpoint is made', async () => { + const request = { + method: 'eth_gasPrice', + params: [], + }; + + await withMockedCommunications( + { providerType: networkClientType }, + async (comms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. + comms.mockNextBlockTrackerRequest({ + blockNumber, + }); + comms.mockRpcCall({ + request, + response: { + result: 'ok', + }, + }); + + const messenger = buildRootMessenger(); + const networkAvailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointAvailable', + networkAvailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, chainId, rpcUrl }) => { + await makeRpcCall(request); + + expect(networkAvailableEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }); + + it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a failover is made', async () => { + const failoverEndpointUrl = 'https://failover.endpoint/'; + const request = { + method: 'eth_gasPrice', + params: [], + }; + const expectedError = createResourceUnavailableError(503); + + await withMockedCommunications( + { providerType: networkClientType }, + async (primaryComms) => { + await withMockedCommunications( + { + providerType: 'custom', + customRpcUrl: failoverEndpointUrl, + }, + async (failoverComms) => { + // The first time a block-cacheable request is made, the + // latest block number is retrieved through the block + // tracker first. 
+ primaryComms.mockNextBlockTrackerRequest({ + blockNumber, + }); + primaryComms.mockRpcCall({ + request, + times: DEFAULT_MAX_CONSECUTIVE_FAILURES, + response: { + httpStatus: 503, + }, + }); + failoverComms.mockRpcCall({ + request, + response: { + result: 'ok', + }, + }); + + const messenger = buildRootMessenger(); + const networkAvailableEventHandler = jest.fn(); + messenger.subscribe( + 'NetworkController:rpcEndpointAvailable', + networkAvailableEventHandler, + ); + + await withNetworkClient( + { + providerType: networkClientType, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + isRpcFailoverEnabled: true, + failoverRpcUrls: [failoverEndpointUrl], + messenger, + getRpcServiceOptions: () => ({ + fetch, + btoa, + policyOptions: { + backoff: new ConstantBackoff(backoffDuration), + }, + }), + }, + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { + messenger.subscribe( + 'NetworkController:rpcEndpointInstanceRetried', + () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + clock.tick(backoffDuration); + }, + ); + + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries + await expect(makeRpcCall(request)).rejects.toThrow( + expectedError, + ); + // Hit the endpoint and exceed the max number of retries, + // breaking the circuit; hit the failover + await makeRpcCall(request); + + expect(networkAvailableEventHandler).toHaveBeenCalledWith({ + chainId, + endpointUrl: rpcUrl, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); + }, + ); + }, + ); + }, + ); + }); + }); + } +}); + +/** + * Creates a "resource unavailable" RPC error for testing. + * + * @param httpStatus - The HTTP status that the error represents. + * @returns The RPC error. + */ +function createResourceUnavailableError(httpStatus: number) { + return expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: 'RPC endpoint not found or unavailable.', + data: { + httpStatus, + }, + }); +} diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index 8ae4565b767..045e182d14b 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -26,7 +26,10 @@ import type { import type { Hex, Json, JsonRpcRequest } from '@metamask/utils'; import type { Logger } from 'loglevel'; -import type { NetworkControllerMessenger } from './NetworkController'; +import type { + NetworkClientId, + NetworkControllerMessenger, +} from './NetworkController'; import type { RpcServiceOptions } from './rpc-service/rpc-service'; import { RpcServiceChain } from './rpc-service/rpc-service-chain'; import type { @@ -59,6 +62,8 @@ type RpcApiMiddleware = JsonRpcMiddleware< * Create a JSON RPC network client for a specific network. * * @param args - The arguments. + * @param args.id - The ID that will be assigned to the new network client in + * the registry. * @param args.configuration - The network configuration. * @param args.getRpcServiceOptions - Factory for constructing RPC service * options. See {@link NetworkControllerOptions.getRpcServiceOptions}. @@ -74,6 +79,7 @@ type RpcApiMiddleware = JsonRpcMiddleware< * @returns The network client. 
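 * @example
 * // A minimal usage sketch; the option values below are hypothetical and
 * // assume a `networkClientConfiguration` and `messenger` are in scope.
 * const { provider, blockTracker, destroy } = createNetworkClient({
 *   id: 'AAAA-AAAA-AAAA-AAAA',
 *   configuration: networkClientConfiguration,
 *   getRpcServiceOptions: () => ({ fetch, btoa }),
 *   getBlockTrackerOptions: () => ({}),
 *   messenger,
 *   isRpcFailoverEnabled: false,
 * });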
*/ export function createNetworkClient({ + id, configuration, getRpcServiceOptions, getBlockTrackerOptions, @@ -81,6 +87,7 @@ export function createNetworkClient({ isRpcFailoverEnabled, logger, }: { + id: NetworkClientId; configuration: NetworkClientConfiguration; getRpcServiceOptions: ( rpcEndpointUrl: string, @@ -96,50 +103,14 @@ export function createNetworkClient({ configuration.type === NetworkClientType.Infura ? `https://${configuration.network}.infura.io/v3/${configuration.infuraProjectId}` : configuration.rpcUrl; - const availableEndpointUrls = isRpcFailoverEnabled - ? [primaryEndpointUrl, ...(configuration.failoverRpcUrls ?? [])] - : [primaryEndpointUrl]; - const rpcServiceChain = new RpcServiceChain( - availableEndpointUrls.map((endpointUrl) => ({ - ...getRpcServiceOptions(endpointUrl), - endpointUrl, - logger, - })), - ); - rpcServiceChain.onBreak(({ endpointUrl, failoverEndpointUrl, ...rest }) => { - let error: unknown; - if ('error' in rest) { - error = rest.error; - } else if ('value' in rest) { - error = rest.value; - } - - messenger.publish('NetworkController:rpcEndpointUnavailable', { - chainId: configuration.chainId, - endpointUrl, - failoverEndpointUrl, - error, - }); - }); - rpcServiceChain.onDegraded(({ endpointUrl, ...rest }) => { - let error: unknown; - if ('error' in rest) { - error = rest.error; - } else if ('value' in rest) { - error = rest.value; - } - - messenger.publish('NetworkController:rpcEndpointDegraded', { - chainId: configuration.chainId, - endpointUrl, - error, - }); - }); - rpcServiceChain.onRetry(({ endpointUrl, attempt }) => { - messenger.publish('NetworkController:rpcEndpointRequestRetried', { - endpointUrl, - attempt, - }); + const rpcServiceChain = createRpcServiceChain({ + id, + primaryEndpointUrl, + configuration, + getRpcServiceOptions, + messenger, + isRpcFailoverEnabled, + logger, }); let rpcApiMiddleware: RpcApiMiddleware; @@ -194,6 +165,149 @@ export function createNetworkClient({ return { configuration, provider, blockTracker, destroy }; } +/** + * Creates an RPC service chain, which represents the primary endpoint URL for + * the network as well as its failover URLs. + * + * @param args - The arguments. + * @param args.id - The ID that will be assigned to the new network client in + * the registry. + * @param args.primaryEndpointUrl - The primary endpoint URL. + * @param args.configuration - The network configuration. + * @param args.getRpcServiceOptions - Factory for constructing RPC service + * options. See {@link NetworkControllerOptions.getRpcServiceOptions}. + * @param args.messenger - The network controller messenger. + * @param args.isRpcFailoverEnabled - Whether or not requests sent to the + * primary RPC endpoint for this network should be automatically diverted to + * provided failover endpoints if the primary is unavailable. This effectively + * causes the `failoverRpcUrls` property of the network client configuration + * to be honored or ignored. + * @param args.logger - A `loglevel` logger. + * @returns The RPC service chain. + */ +function createRpcServiceChain({ + id, + primaryEndpointUrl, + configuration, + getRpcServiceOptions, + messenger, + isRpcFailoverEnabled, + logger, +}: { + id: NetworkClientId; + primaryEndpointUrl: string; + configuration: NetworkClientConfiguration; + getRpcServiceOptions: ( + rpcEndpointUrl: string, + ) => Omit; + messenger: NetworkControllerMessenger; + isRpcFailoverEnabled: boolean; + logger?: Logger; +}) { + const availableEndpointUrls: [string, ...string[]] = isRpcFailoverEnabled + ? 
[primaryEndpointUrl, ...(configuration.failoverRpcUrls ?? [])] + : [primaryEndpointUrl]; + const buildRpcServiceConfiguration = (endpointUrl: string) => ({ + ...getRpcServiceOptions(endpointUrl), + endpointUrl, + logger, + }); + + const getError = (value: object) => { + if ('error' in value) { + return value.error; + } else if ('value' in value) { + return value.value; + } + return undefined; + }; + + const rpcServiceChain = new RpcServiceChain([ + buildRpcServiceConfiguration(availableEndpointUrls[0]), + ...availableEndpointUrls.slice(1).map(buildRpcServiceConfiguration), + ]); + + rpcServiceChain.onBreak(({ endpointUrl, ...rest }) => { + const error = getError(rest); + + if (error === undefined) { + // This error shouldn't happen in practice because we never call `.isolate` + // on the circuit breaker policy, but we need to appease TypeScript. + throw new Error('Could not make request to endpoint.'); + } + + messenger.publish('NetworkController:rpcEndpointUnavailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + error, + }); + }); + + rpcServiceChain.onServiceBreak( + ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointInstanceUnavailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onDegraded( + ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointDegraded', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onServiceDegraded( + ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + const error = getError(rest); + messenger.publish('NetworkController:rpcEndpointInstanceDegraded', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + error, + }); + }, + ); + + rpcServiceChain.onAvailable(({ primaryEndpointUrl: _, endpointUrl }) => { + messenger.publish('NetworkController:rpcEndpointAvailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + }); + }); + + rpcServiceChain.onServiceRetry( + ({ primaryEndpointUrl: _, endpointUrl, attempt }) => { + messenger.publish('NetworkController:rpcEndpointInstanceRetried', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl, + endpointUrl, + attempt, + }); + }, + ); + + return rpcServiceChain; +} + /** * Create the block tracker for the network. 
* diff --git a/packages/network-controller/src/index.ts b/packages/network-controller/src/index.ts index 96d93fb02d9..b4a0de293ea 100644 --- a/packages/network-controller/src/index.ts +++ b/packages/network-controller/src/index.ts @@ -37,8 +37,11 @@ export type { NetworkControllerMessenger, NetworkControllerOptions, NetworkControllerRpcEndpointUnavailableEvent, + NetworkControllerRpcEndpointInstanceUnavailableEvent, NetworkControllerRpcEndpointDegradedEvent, - NetworkControllerRpcEndpointRequestRetriedEvent, + NetworkControllerRpcEndpointInstanceDegradedEvent, + NetworkControllerRpcEndpointAvailableEvent, + NetworkControllerRpcEndpointInstanceRetriedEvent, } from './NetworkController'; export { getDefaultNetworkControllerState, diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts index 3a2c31bfd55..5f867671a96 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.test.ts @@ -1,18 +1,16 @@ +import { + DEFAULT_CIRCUIT_BREAK_DURATION, + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; import { errorCodes } from '@metamask/rpc-errors'; import nock from 'nock'; import { useFakeTimers } from 'sinon'; import type { SinonFakeTimers } from 'sinon'; +import { DEFAULT_MAX_CONSECUTIVE_FAILURES } from './rpc-service'; import { RpcServiceChain } from './rpc-service-chain'; -const RESOURCE_UNAVAILABLE_ERROR = expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: 'RPC endpoint not found or unavailable.', - data: { - httpStatus: 503, - }, -}); - describe('RpcServiceChain', () => { let clock: SinonFakeTimers; @@ -24,7 +22,7 @@ describe('RpcServiceChain', () => { clock.restore(); }); - describe('onRetry', () => { + describe('onServiceRetry', () => { it('returns a listener which can be disposed', () => { const rpcServiceChain = new RpcServiceChain([ { @@ -34,10 +32,10 @@ describe('RpcServiceChain', () => { }, ]); - const onRetryListener = rpcServiceChain.onRetry(() => { + const onServiceRetryListener = rpcServiceChain.onServiceRetry(() => { // do whatever }); - expect(onRetryListener.dispose()).toBeUndefined(); + expect(onServiceRetryListener.dispose()).toBeUndefined(); }); }); @@ -58,6 +56,23 @@ describe('RpcServiceChain', () => { }); }); + describe('onServiceBreak', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onServiceBreakListener = rpcServiceChain.onServiceBreak(() => { + // do whatever + }); + expect(onServiceBreakListener.dispose()).toBeUndefined(); + }); + }); + describe('onDegraded', () => { it('returns a listener which can be disposed', () => { const rpcServiceChain = new RpcServiceChain([ @@ -75,9 +90,45 @@ describe('RpcServiceChain', () => { }); }); + describe('onServiceDegraded', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onServiceDegradedListener = rpcServiceChain.onServiceDegraded( + () => { + // do whatever + }, + ); + expect(onServiceDegradedListener.dispose()).toBeUndefined(); + }); + }); + + describe('onAvailable', () => { + it('returns a listener which can be disposed', () => { + const rpcServiceChain = new RpcServiceChain([ + { + 
fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }, + ]); + + const onAvailableListener = rpcServiceChain.onAvailable(() => { + // do whatever + }); + expect(onAvailableListener.dispose()).toBeUndefined(); + }); + }); + describe('request', () => { it('returns what the first RPC service in the chain returns, if it succeeds', async () => { - nock('https://first.chain') + nock('https://first.endpoint') .post('/', { id: 1, jsonrpc: '2.0', @@ -94,12 +145,12 @@ describe('RpcServiceChain', () => { { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: 'https://second.endpoint', fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -127,30 +178,24 @@ describe('RpcServiceChain', () => { }); }); - it('uses the other RPC services in the chain as failovers', async () => { - nock('https://first.chain') - .post( - '/', - { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }, - { - reqheaders: {}, - }, - ) - .times(15) + it('returns what a failover service returns, if the primary is unavailable and the failover is not', async () => { + nock('https://first.endpoint') + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock('https://second.endpoint') .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); nock('https://third.chain') .post('/', { @@ -164,22 +209,17 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: 'https://second.endpoint', }, { fetch, @@ -187,11 +227,8 @@ describe('RpcServiceChain', () => { endpointUrl: 'https://third.chain', }, ]); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); const jsonRpcRequest = { @@ -202,22 +239,22 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. 
@@ -233,7 +270,7 @@ describe('RpcServiceChain', () => { }); it("allows each RPC service's fetch options to be configured separately, yet passes the fetch options given to request to all of them", async () => { - const firstEndpointScope = nock('https://first.chain', { + const firstEndpointScope = nock('https://first.endpoint', { reqheaders: { 'X-Fizz': 'Buzz', }, @@ -244,11 +281,10 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - const secondEndpointScope = nock('https://second.chain', { + const secondEndpointScope = nock('https://second.endpoint', { reqheaders: { - 'X-Foo': 'Bar', 'X-Fizz': 'Buzz', }, }) @@ -258,11 +294,10 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); const thirdEndpointScope = nock('https://third.chain', { reqheaders: { - 'X-Foo': 'Bar', 'X-Fizz': 'Buzz', }, }) @@ -277,17 +312,17 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: 'https://first.endpoint', }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: 'https://second.endpoint', fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -303,11 +338,8 @@ describe('RpcServiceChain', () => { }, }, ]); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); const jsonRpcRequest = { @@ -324,22 +356,22 @@ describe('RpcServiceChain', () => { // Retry the first endpoint until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Retry the first endpoint again, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect( rpcServiceChain.request(jsonRpcRequest, fetchOptions), - ).rejects.toThrow(RESOURCE_UNAVAILABLE_ERROR); + ).rejects.toThrow(expectedError); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. 
// The circuit will break on the last time, and the third endpoint will @@ -351,26 +383,79 @@ describe('RpcServiceChain', () => { expect(thirdEndpointScope.isDone()).toBe(true); }); - it('calls onRetry each time an RPC service in the chain retries its request', async () => { - nock('https://first.chain') + it("throws a custom error if a request is attempted while a service's circuit is open", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Attempt the endpoint again. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + 'RPC endpoint returned too many errors', + ); + }); + + it('calls onServiceRetry each time an RPC service in the chain retries its request', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + const tertiaryEndpointUrl = 'https://third.chain'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(secondaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://third.chain') + nock(tertiaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', @@ -382,17 +467,18 @@ describe('RpcServiceChain', () => { jsonrpc: '2.0', result: 'ok', }); - + const expectedError = createResourceUnavailableError(503); + const expectedRetryError = new HttpError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://second.chain', + endpointUrl: secondaryEndpointUrl, fetchOptions: { headers: { 'X-Foo': 'Bar', @@ -402,19 +488,13 @@ describe('RpcServiceChain', () => { { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: tertiaryEndpointUrl, }, ]); - const onRetryListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onServiceRetryListener = jest.fn(() => { + clock.next(); }); - rpcServiceChain.onRetry(onRetryListener); + rpcServiceChain.onServiceRetry(onServiceRetryListener); const jsonRpcRequest = { id: 1, @@ -424,22 +504,22 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. @@ -447,85 +527,119 @@ describe('RpcServiceChain', () => { // be hit. This is finally a success. await rpcServiceChain.request(jsonRpcRequest); - const onRetryListenerCallCountsByEndpointUrl = - onRetryListener.mock.calls.reduce( - (memo, call) => { - const { endpointUrl } = call[0]; - memo[endpointUrl] = (memo[endpointUrl] ?? 0) + 1; - return memo; - }, - {} as Record, - ); - - expect(onRetryListenerCallCountsByEndpointUrl).toStrictEqual({ - 'https://first.chain/': 12, - 'https://second.chain/': 12, - }); + for (let attempt = 0; attempt < 24; attempt++) { + expect(onServiceRetryListener).toHaveBeenNthCalledWith(attempt + 1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: + attempt >= 12 + ? `${secondaryEndpointUrl}/` + : `${primaryEndpointUrl}/`, + attempt: (attempt % 4) + 1, + delay: expect.any(Number), + error: expectedRetryError, + }); + } }); - it('calls onBreak each time the underlying circuit for each RPC service in the chain breaks', async () => { - nock('https://first.chain') + it('does not call onBreak if the primary service circuit breaks and the request to its failover fails but its circuit has not broken yet', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(secondaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) - .reply(503); - nock('https://third.chain') + .reply(500); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. 
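+      // (The chain-level onBreak listener registered above should stay
+      // silent throughout: it only fires once every service's circuit has
+      // opened, and the single 500 response from the failover is not
+      // enough to open its circuit.)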
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit (unsuccessfully). + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(500), + ); + + expect(onBreakListener).not.toHaveBeenCalled(); + }); + + it("calls onBreak when all of the RPC services' circuits have broken", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .reply(200, { + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { id: 1, jsonrpc: '2.0', - result: 'ok', - }); - + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', - }, - { - fetch, - btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: secondaryEndpointUrl, }, ]); - const onBreakListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); rpcServiceChain.onBreak(onBreakListener); @@ -537,64 +651,152 @@ describe('RpcServiceChain', () => { }; // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint again, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Retry the first endpoint for a third time, until max retries is hit. // The circuit will break on the last time, and the second endpoint will // be retried, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the // second endpoint, until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. - // The circuit will break on the last time, and the third endpoint will - // be hit. This is finally a success. 
- await rpcServiceChain.request(jsonRpcRequest); - - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenNthCalledWith( - 1, - expect.objectContaining({ - endpointUrl: 'https://first.chain/', - }), - ); - expect(onBreakListener).toHaveBeenNthCalledWith( - 2, - expect.objectContaining({ - endpointUrl: 'https://second.chain/', - }), + // second endpoint, until max retries is hit. The circuit will break on + // the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, ); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); }); - it('calls onDegraded each time an RPC service in the chain gives up before the circuit breaks or responds successfully but slowly', async () => { - nock('https://first.chain') + it("calls onBreak again if all services' circuits break, the primary service responds successfully, and all services' circuits break again", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://second.chain') + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .times(15) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) .reply(503); - nock('https://third.chain') + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(30) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until its circuit breaks, then retry the + // second endpoint until *its* circuit breaks. + for (let i = 0; i < 5; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + // Wait until the circuit break duration passes, try the first endpoint + // and see that it succeeds. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + // Do it again: retry the first endpoint until its circuit breaks, then + // retry the second endpoint until *its* circuit breaks. 
+ for (let i = 0; i < 5; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + + expect(onBreakListener).toHaveBeenCalledTimes(2); + expect(onBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it("calls onBreak again if all services' circuits break, the primary service responds successfully but slowly, and all circuits break again", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) .post('/', { id: 1, jsonrpc: '2.0', @@ -602,47 +804,49 @@ describe('RpcServiceChain', () => { params: [], }) .reply(200, () => { - clock.tick(6000); + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); return { id: 1, jsonrpc: '2.0', result: '0x1', }; }); - + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(30) + .reply(503); + const expectedError = createResourceUnavailableError(503); const rpcServiceChain = new RpcServiceChain([ { fetch, btoa, - endpointUrl: 'https://first.chain', - }, - { - fetch, - btoa, - endpointUrl: 'https://second.chain', - fetchOptions: { - headers: { - 'X-Foo': 'Bar', - }, - }, + endpointUrl: primaryEndpointUrl, }, { fetch, btoa, - endpointUrl: 'https://third.chain', + endpointUrl: secondaryEndpointUrl, }, ]); - const onDegradedListener = jest.fn< - ReturnType[0]>, - Parameters[0]> - >(); - rpcServiceChain.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + const onBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); }); - rpcServiceChain.onDegraded(onDegradedListener); + rpcServiceChain.onBreak(onBreakListener); const jsonRpcRequest = { id: 1, @@ -650,46 +854,1243 @@ describe('RpcServiceChain', () => { method: 'eth_chainId', params: [], }; - // Retry the first endpoint until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, - ); - // Retry the first endpoint again, until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, - ); - // Retry the first endpoint for a third time, until max retries is hit. - // The circuit will break on the last time, and the second endpoint will - // be retried, until max retries is hit. - await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, - ); - // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. 
+ // Retry the first endpoint until its circuit breaks, then retry the + // second endpoint until *its* circuit breaks. + for (let i = 0; i < 5; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + // Wait until the circuit break duration passes, try the first endpoint + // and see that it succeeds. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + // Do it again: retry the first endpoint until its circuit breaks, then + // retry the second endpoint until *its* circuit breaks. + for (let i = 0; i < 5; i++) { + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + } + + expect(onBreakListener).toHaveBeenCalledTimes(2); + expect(onBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it('calls onServiceBreak each time the circuit of an RPC service in the chain breaks', async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + const tertiaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(tertiaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: tertiaryEndpointUrl, + }, + ]); + const onServiceBreakListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceBreak(onServiceBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the third endpoint will + // be hit, until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the third endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceBreakListener).toHaveBeenCalledTimes(3); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + expect(onServiceBreakListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${tertiaryEndpointUrl}/`, + error: new Error("Fetch failed with status '503'"), + }); + }); + + it("calls onDegraded only once even if a service's maximum number of retries is reached multiple times", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it('calls onDegraded only once even if the time to complete a request via a service is continually slow', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(2) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls onDegraded only once even if a service runs out of retries and then responds successfully but slowly, or vice versa', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Try the endpoint again, and see that it succeeds. + await rpcServiceChain.request(jsonRpcRequest); + // Retry the endpoint again until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("does not call onDegraded again when the primary service's circuit breaks and its failover responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, albeit slowly. + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("calls onDegraded again when a service's underlying circuit breaks, and then after waiting, the service responds successfully but slowly", async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Wait until the circuit break duration passes, try the endpoint again, + // and see that it succeeds, but slowly. + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onDegraded again when a failover service's underlying circuit breaks, and then after waiting, the primary responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + // Hit the first endpoint again, and see that it succeeds, but slowly + await rpcServiceChain.request(jsonRpcRequest); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + }); + }); + + it('calls onServiceDegraded each time a service continually runs out of retries (but before its circuit breaks)', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(2); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it('calls onServiceDegraded each time a service continually responds slowly', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(2) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(2); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('calls onServiceDegraded each time a service runs out of retries and then responds successfully but slowly, or vice versa', async () => { + const endpointUrl = 'https://some.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(5) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Try the endpoint again, and see that it succeeds. + await rpcServiceChain.request(jsonRpcRequest); + // Retry the endpoint again until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + }); + + it("calls onServiceDegraded again when the primary service's circuit breaks and its failover responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onBreakListener = jest.fn(); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onBreak(onBreakListener); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( - RESOURCE_UNAVAILABLE_ERROR, + expectedError, ); - // Try the first endpoint, see that the circuit is broken, and retry the - // second endpoint, until max retries is hit. - // The circuit will break on the last time, and the third endpoint will - // be hit. This is finally a success. + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, albeit slowly. await rpcServiceChain.request(jsonRpcRequest); - const onDegradedListenerCallCountsByEndpointUrl = - onDegradedListener.mock.calls.reduce( - (memo: Record, call) => { - const { endpointUrl } = call[0]; - memo[endpointUrl] = (memo[endpointUrl] ?? 
0) + 1; - return memo; - }, - {}, - ); + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + }); + }); + + it("calls onServiceDegraded again when a service's underlying circuit breaks, and then after waiting, the service responds successfully but slowly", async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Wait until the circuit break duration passes, try the endpoint again, + // and see that it succeeds, but slowly. 
+ clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(3); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onServiceDegraded again when a failover service's underlying circuit breaks, and then after waiting, the primary responds successfully but slowly", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + const expectedError = createResourceUnavailableError(503); + const expectedDegradedError = new HttpError(503); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onServiceDegradedListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onServiceDegraded(onServiceDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + // Retry the second endpoint for a third time, until max retries is hit. + // The circuit will break on the last time. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + // Hit the first endpoint again, and see that it succeeds, but slowly + await rpcServiceChain.request(jsonRpcRequest); + + expect(onServiceDegradedListener).toHaveBeenCalledTimes(5); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(2, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(3, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(4, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + error: expectedDegradedError, + }); + expect(onServiceDegradedListener).toHaveBeenNthCalledWith(5, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${primaryEndpointUrl}/`, + }); + }); + + it('calls onAvailable only once, even if a service continually responds successfully', async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(3) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it("calls onAvailable once, after the primary service's circuit has broken, the request to the failover succeeds", async () => { + const primaryEndpointUrl = 'https://first.endpoint'; + const secondaryEndpointUrl = 'https://second.endpoint'; + nock(primaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(DEFAULT_MAX_CONSECUTIVE_FAILURES) + .reply(503); + nock(secondaryEndpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl: primaryEndpointUrl, + }, + { + fetch, + btoa, + endpointUrl: secondaryEndpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Retry the first endpoint until max retries is hit. 
+ await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint again, until max retries is hit. + await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow( + createResourceUnavailableError(503), + ); + // Retry the first endpoint for a third time, until max retries is hit. + // The circuit will break on the last time, and the second endpoint will + // be hit. + await rpcServiceChain.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenNthCalledWith(1, { + primaryEndpointUrl: `${primaryEndpointUrl}/`, + endpointUrl: `${secondaryEndpointUrl}/`, + }); + }); + + it('calls onAvailable when a service becomes degraded by responding slowly, and then recovers', async () => { + const endpointUrl = 'https://first.endpoint'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); + const rpcServiceChain = new RpcServiceChain([ + { + fetch, + btoa, + endpointUrl, + }, + ]); + const onAvailableListener = jest.fn(); + rpcServiceChain.onServiceRetry(() => { + clock.next(); + }); + rpcServiceChain.onAvailable(onAvailableListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await rpcServiceChain.request(jsonRpcRequest); + await rpcServiceChain.request(jsonRpcRequest); - expect(onDegradedListenerCallCountsByEndpointUrl).toStrictEqual({ - 'https://first.chain/': 2, - 'https://second.chain/': 2, - 'https://third.chain/': 1, + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + primaryEndpointUrl: `${endpointUrl}/`, + endpointUrl: `${endpointUrl}/`, }); }); }); }); + +/** + * Creates a "resource unavailable" RPC error for testing. + * + * @param httpStatus - The HTTP status that the error represents. + * @returns The RPC error. 
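+ * @example
+ * A sketch of how this matcher is meant to be used with the assertions above:
+ * ```ts
+ * await expect(rpcServiceChain.request(jsonRpcRequest)).rejects.toThrow(
+ *   createResourceUnavailableError(503),
+ * );
+ * ```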
+ */
+function createResourceUnavailableError(httpStatus: number) {
+  return expect.objectContaining({
+    code: errorCodes.rpc.resourceUnavailable,
+    message: 'RPC endpoint not found or unavailable.',
+    data: {
+      httpStatus,
+    },
+  });
+}
diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts
index 1a1204f64cb..0617ebd9c3f 100644
--- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts
+++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts
@@ -1,3 +1,7 @@
+import {
+  CircuitState,
+  CockatielEventEmitter,
+} from '@metamask/controller-utils';
 import type {
   Json,
   JsonRpcParams,
@@ -7,18 +11,80 @@ import type {
 
 import { RpcService } from './rpc-service';
 import type { RpcServiceOptions } from './rpc-service';
-import type { RpcServiceRequestable } from './rpc-service-requestable';
-import type { FetchOptions } from './shared';
+import type {
+  CockatielEventToEventEmitterWithData,
+  CockatielEventToEventListenerWithData,
+  ExtendCockatielEventData,
+  ExtractCockatielEventData,
+  FetchOptions,
+} from './shared';
+import { projectLogger, createModuleLogger } from '../logger';
+
+const log = createModuleLogger(projectLogger, 'RpcServiceChain');
+
+/**
+ * Possible states of the RPC service chain.
+ */
+const STATES = {
+  Initial: 'initial',
+  Available: 'available',
+  Degraded: 'degraded',
+  Unavailable: 'unavailable',
+} as const;
+
+type RpcServiceConfiguration = Omit<RpcServiceOptions, 'failoverService'>;
 
 /**
- * This class constructs a chain of RpcService objects which represent a
- * particular network. The first object in the chain is intended to be the
- * primary way of reaching the network and the remaining objects are used as
- * failovers.
+ * The state of the RPC service chain.
  */
-export class RpcServiceChain implements RpcServiceRequestable {
+type State = (typeof STATES)[keyof typeof STATES];
+
+/**
+ * This class constructs and manages requests to a chain of RpcService objects
+ * which represent an RPC endpoint on a particular network. The first service in
+ * the chain is intended to be the primary way of hitting the endpoint and the
+ * remaining services are used as failovers.
+ */
+export class RpcServiceChain {
+  /**
+   * The event emitter for the `onAvailable` event.
+   */
+  readonly #onAvailableEventEmitter: CockatielEventToEventEmitterWithData<
+    RpcService['onAvailable'],
+    { primaryEndpointUrl: string }
+  >;
+
+  /**
+   * The event emitter for the `onBreak` event.
+   */
+  readonly #onBreakEventEmitter: CockatielEventToEventEmitterWithData<
+    RpcService['onBreak'],
+    { primaryEndpointUrl: string }
+  >;
+
+  /**
+   * The event emitter for the `onDegraded` event.
+   */
+  readonly #onDegradedEventEmitter: CockatielEventToEventEmitterWithData<
+    RpcService['onDegraded'],
+    { primaryEndpointUrl: string }
+  >;
+
+  /**
+   * The first RPC service that requests will be sent to.
+   */
+  readonly #primaryService: RpcService;
+
+  /**
+   * The RPC services in the chain.
+   */
   readonly #services: RpcService[];
 
+  /**
+   * The state of the RPC service chain.
+   */
+  #state: State;
+
   /**
    * Constructs a new RpcServiceChain object.
    *
    * @param rpcServiceConfigurations - The options for the RPC services that
    * you want to construct. Each object in this array is the same as
    * {@link RpcServiceOptions}.
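+   * @example
+   * A minimal sketch of building a chain and making a request through it (the
+   * endpoint URLs here are hypothetical, and `fetch`/`btoa` come from the host
+   * environment):
+   * ```ts
+   * const rpcServiceChain = new RpcServiceChain([
+   *   { fetch, btoa, endpointUrl: 'https://primary.example' },
+   *   { fetch, btoa, endpointUrl: 'https://failover.example' },
+   * ]);
+   * const response = await rpcServiceChain.request({
+   *   id: 1,
+   *   jsonrpc: '2.0',
+   *   method: 'eth_chainId',
+   *   params: [],
+   * });
+   * ```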
*/ constructor( - rpcServiceConfigurations: Omit[], + rpcServiceConfigurations: [ + RpcServiceConfiguration, + ...RpcServiceConfiguration[], + ], ) { - this.#services = this.#buildRpcServiceChain(rpcServiceConfigurations); + this.#services = rpcServiceConfigurations.map( + (rpcServiceConfiguration) => new RpcService(rpcServiceConfiguration), + ); + this.#primaryService = this.#services[0]; + + this.#state = STATES.Initial; + this.#onBreakEventEmitter = new CockatielEventEmitter< + ExtendCockatielEventData< + ExtractCockatielEventData, + { primaryEndpointUrl: string } + > + >(); + + this.#onDegradedEventEmitter = new CockatielEventEmitter(); + for (const service of this.#services) { + service.onDegraded((data) => { + if (this.#state !== STATES.Degraded) { + log('Updating state to "degraded"', data); + this.#state = STATES.Degraded; + this.#onDegradedEventEmitter.emit({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + } + }); + } + + this.#onAvailableEventEmitter = new CockatielEventEmitter(); + for (const service of this.#services) { + service.onAvailable((data) => { + if (this.#state !== STATES.Available) { + log('Updating state to "available"', data); + this.#state = STATES.Available; + this.#onAvailableEventEmitter.emit({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + } + }); + } } /** - * Listens for when any of the RPC services retry a request. + * Calls the provided callback when any of the RPC services is retried. * - * @param listener - The callback to be called when the retry occurs. - * @returns What {@link RpcService.onRetry} returns. + * This is mainly useful for tests. + * + * @param listener - The callback to be called. + * @returns An object with a `dispose` method which can be used to unregister + * the event listener. */ - onRetry(listener: Parameters[0]) { + onServiceRetry( + listener: CockatielEventToEventListenerWithData< + RpcService['onRetry'], + { primaryEndpointUrl: string } + >, + ) { const disposables = this.#services.map((service) => - service.onRetry(listener), + service.onRetry((data) => { + listener({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + }), ); return { @@ -51,15 +172,51 @@ export class RpcServiceChain implements RpcServiceRequestable { } /** - * Listens for when any of the RPC services retry the request too many times - * in a row. + * Calls the provided callback only when the maximum number of failed + * consecutive attempts to receive a 2xx response has been reached for all + * RPC services in the chain, and all services' underlying circuits have + * broken. + * + * The callback will not be called if a service's circuit breaks but its + * failover does not. Use `onServiceBreak` if you'd like a lower level of + * granularity. + * + * @param listener - The callback to be called. + * @returns An object with a `dispose` method which can be used to unregister + * the callback. + */ + onBreak( + listener: CockatielEventToEventListenerWithData< + RpcService['onBreak'], + { primaryEndpointUrl: string } + >, + ) { + return this.#onBreakEventEmitter.addListener(listener); + } + + /** + * Calls the provided callback each time when, for *any* of the RPC services + * in this chain, the maximum number of failed consecutive attempts to receive + * a 2xx response has been reached and the underlying circuit has broken. A + * more granular version of `onBreak`. * - * @param listener - The callback to be called when the retry occurs. 
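+   * @example
+   * A sketch of per-service monitoring (the handler body is illustrative):
+   * ```ts
+   * rpcServiceChain.onServiceBreak(({ primaryEndpointUrl, endpointUrl, error }) => {
+   *   console.warn(`Circuit for ${endpointUrl} has broken`, error);
+   * });
+   * ```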
-   * @returns What {@link RpcService.onBreak} returns.
+   * @param listener - The callback to be called.
+   * @returns An object with a `dispose` method which can be used to unregister
+   * the callback.
    */
-  onBreak(listener: Parameters<RpcService['onBreak']>[0]) {
+  onServiceBreak(
+    listener: CockatielEventToEventListenerWithData<
+      RpcService['onBreak'],
+      { primaryEndpointUrl: string }
+    >,
+  ) {
     const disposables = this.#services.map((service) =>
-      service.onBreak(listener),
+      service.onBreak((data) => {
+        listener({
+          ...data,
+          primaryEndpointUrl: this.#primaryService.endpointUrl.toString(),
+        });
+      }),
     );
 
     return {
@@ -70,14 +227,70 @@ export class RpcServiceChain implements RpcServiceRequestable {
   }
 
   /**
-   * Listens for when any of the RPC services send a slow request.
+   * Calls the provided callback if no requests have been initiated yet, or if
+   * all requests to RPC services in this chain have responded successfully in a
+   * timely fashion, and then one of the following two conditions applies:
+   *
+   * 1. When a retriable error is encountered making a request to an RPC
+   *    service, and the request is retried until a set maximum is reached.
+   * 2. When an RPC service responds successfully, but the request takes longer
+   *    than a set number of seconds to complete.
+   *
+   * Note that the callback will be called even if there are local connectivity
+   * issues which prevent requests from being initiated. This is intentional.
+   *
+   * Also note that this callback will only be called if the RPC service chain
+   * as a whole is in a "degraded" state, and will then only be called once
+   * (e.g., it will not be called if a failover service falls into a degraded
+   * state, then the primary comes back online, but it is slow). Use
+   * `onServiceDegraded` if you'd like a lower level of granularity.
+   *
+   * @param listener - The callback to be called.
+   * @returns An object with a `dispose` method which can be used to unregister
+   * the callback.
+   */
+  onDegraded(
+    listener: CockatielEventToEventListenerWithData<
+      RpcService['onDegraded'],
+      { primaryEndpointUrl: string }
+    >,
+  ) {
+    return this.#onDegradedEventEmitter.addListener(listener);
+  }
+
+  /**
+   * Calls the provided callback each time one of the following two conditions
+   * applies:
+   *
+   * 1. When a retriable error is encountered making a request to an RPC
+   *    service, and the request is retried until a set maximum is reached.
+   * 2. When an RPC service responds successfully, but the request takes longer
+   *    than a set number of seconds to complete.
+   *
+   * Note that the callback will be called even if there are local connectivity
+   * issues which prevent requests from being initiated. This is intentional.
    *
-   * @param listener - The callback to be called when the retry occurs.
-   * @returns What {@link RpcService.onRetry} returns.
+   * This is a more granular version of `onDegraded`. The callback will be
+   * called for each slow request to an RPC service. It may also be called again
+   * if a failover service falls into a degraded state, then the primary comes
+   * back online, but it is slow.
+   *
+   * @param listener - The callback to be called.
+   * @returns An object with a `dispose` method which can be used to unregister
+   * the callback.
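+   * @example
+   * A sketch of logging each degradation event (the handler body is
+   * illustrative; `error` is only present when the degradation came from
+   * exhausted retries rather than a slow response):
+   * ```ts
+   * rpcServiceChain.onServiceDegraded(({ endpointUrl, error }) => {
+   *   console.warn(`Service at ${endpointUrl} is degraded`, error);
+   * });
+   * ```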
*/ - onDegraded(listener: Parameters[0]) { + onServiceDegraded( + listener: CockatielEventToEventListenerWithData< + RpcService['onDegraded'], + { primaryEndpointUrl: string } + >, + ) { const disposables = this.#services.map((service) => - service.onDegraded(listener), + service.onDegraded((data) => { + listener({ + ...data, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + }); + }), ); return { @@ -88,8 +301,33 @@ export class RpcServiceChain implements RpcServiceRequestable { } /** - * Makes a request to the first RPC service in the chain. If this service is - * down, then the request is forwarded to the next service in the chain, etc. + * Calls the provided callback in one of the following two conditions: + * + * 1. The first time that a 2xx request is made to any of the RPC services in + * this chain. + * 2. When requests to any the failover RPC services in this chain were + * failing such that they were degraded or their underyling circuits broke, + * but the first request to the primary succeeds again. + * + * Note this callback will only be called if the RPC service chain as a whole + * is in an "available" state. + * + * @param listener - The callback to be called. + * @returns An object with a `dispose` method which can be used to unregister + * the callback. + */ + onAvailable( + listener: CockatielEventToEventListenerWithData< + RpcService['onAvailable'], + { primaryEndpointUrl: string } + >, + ) { + return this.#onAvailableEventEmitter.addListener(listener); + } + + /** + * Uses the RPC services in the chain to make a request, using each service + * after the first as a fallback to the previous one as necessary. * * This overload is specifically designed for `eth_getBlockByNumber`, which * can return a `result` of `null` despite an expected `Result` being @@ -111,8 +349,8 @@ export class RpcServiceChain implements RpcServiceRequestable { ): Promise | JsonRpcResponse>; /** - * Makes a request to the first RPC service in the chain. If this service is - * down, then the request is forwarded to the next service in the chain, etc. + * Uses the RPC services in the chain to make a request, using each service + * after the first as a fallback to the previous one as necessary. * * This overload is designed for all RPC methods except for * `eth_getBlockByNumber`, which are expected to return a `result` of the @@ -137,31 +375,103 @@ export class RpcServiceChain implements RpcServiceRequestable { jsonRpcRequest: JsonRpcRequest, fetchOptions: FetchOptions = {}, ): Promise> { - return this.#services[0].request(jsonRpcRequest, fetchOptions); - } + // Start with the primary (first) service and switch to failovers as the + // need arises. This is a bit confusing, so keep reading for more on how + // this works. - /** - * Constructs the chain of RPC services. The second RPC service is - * configured as the failover for the first, the third service is - * configured as the failover for the second, etc. - * - * @param rpcServiceConfigurations - The options for the RPC services that - * you want to construct. Each object in this array is the same as - * {@link RpcServiceOptions}. - * @returns The constructed chain of RPC services. - */ - #buildRpcServiceChain( - rpcServiceConfigurations: Omit[], - ): RpcService[] { - return [...rpcServiceConfigurations] - .reverse() - .reduce((workingServices: RpcService[], serviceConfiguration, index) => { - const failoverService = index > 0 ? 
workingServices[0] : undefined; - const service = new RpcService({ - ...serviceConfiguration, - failoverService, - }); - return [service, ...workingServices]; - }, []); + let availableServiceIndex: number | undefined; + let response: JsonRpcResponse | undefined; + + for (const [i, service] of this.#services.entries()) { + log(`Trying service #${i + 1}...`); + const previousCircuitState = service.getCircuitState(); + + try { + // Try making the request through the service. + response = await service.request( + jsonRpcRequest, + fetchOptions, + ); + log('Service successfully received request.'); + availableServiceIndex = i; + break; + } catch (error) { + // Oops, that didn't work. + // Capture this error so that we can handle it later. + + const lastFailureReason = service.getLastInnerFailureReason(); + const isCircuitOpen = service.getCircuitState() === CircuitState.Open; + + log('Service failed!', error, lastFailureReason); + log( + 'Circuit state', + service.getCircuitState(), + 'Previous circuit state', + previousCircuitState, + 'state', + this.#state, + ); + + if (isCircuitOpen) { + if (i < this.#services.length - 1) { + log( + "This service's circuit is open. Proceeding to next service...", + ); + continue; + } + + if ( + previousCircuitState !== CircuitState.Open && + this.#state !== STATES.Unavailable && + lastFailureReason !== undefined + ) { + // If the service's circuit just broke and it's the last one in the + // chain, then trigger the onBreak event. (But if for some reason we + // have already done this, then don't do it.) + log( + 'This service\'s circuit just opened and it is the last service. Updating state to "unavailable" and triggering onBreak.', + ); + this.#state = STATES.Unavailable; + this.#onBreakEventEmitter.emit({ + ...lastFailureReason, + primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), + endpointUrl: service.endpointUrl.toString(), + }); + } + } + + // The service failed, and we throw whatever the error is. The calling + // code can try again if it so desires. + log( + `${isCircuitOpen ? "This service's circuit is open, but for some reason it wasn't handled above. " : "This service's circuit is closed. "}Re-throwing error.`, + ); + throw error; + } + } + + if (response) { + // If one of the services returned a successful response, assume that we + // won't need to hit any of the failover services following it and reset + // all of the policies of the following services. In particularly this + // means that if any of the failover services' circuits was open when + // requests were diverted back to the available service, that circuit will + // now be reset so that if we start hitting it again we don't get a + // "circuit broken" error. + if (availableServiceIndex !== undefined) { + for (const [i, service] of [...this.#services.entries()].slice( + availableServiceIndex + 1, + )) { + log(`Resetting policy for service #${i + 1}.`); + service.resetPolicy(); + } + } + + return response; + } + + // The only way we can end up here is if there are no services to loop over. + // That is not possible due to the types on the constructor, but TypeScript + // doesn't know this, so we have to appease it. 
+ throw new Error('Nothing to return'); } } diff --git a/packages/network-controller/src/rpc-service/rpc-service-requestable.ts b/packages/network-controller/src/rpc-service/rpc-service-requestable.ts index 20cbbb1c972..4f4e913c0ec 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-requestable.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-requestable.ts @@ -6,7 +6,13 @@ import type { JsonRpcResponse, } from '@metamask/utils'; -import type { AddToCockatielEventData, FetchOptions } from './shared'; +import type { + CockatielEventToEventListenerWithData, + ExcludeCockatielEventData, + ExtendCockatielEventData, + ExtractCockatielEventData, + FetchOptions, +} from './shared'; /** * The interface for a service class responsible for making a request to a @@ -22,8 +28,8 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onRetry( - listener: AddToCockatielEventData< - Parameters[0], + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onRetry'], { endpointUrl: string } >, ): ReturnType; @@ -37,10 +43,15 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onBreak( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, + listener: ( + data: ExcludeCockatielEventData< + ExtendCockatielEventData< + ExtractCockatielEventData, + { endpointUrl: string } + >, + 'isolated' + >, + ) => void, ): ReturnType; /** @@ -52,12 +63,26 @@ export type RpcServiceRequestable = { * @see {@link createServicePolicy} */ onDegraded( - listener: AddToCockatielEventData< - Parameters[0], + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onDegraded'], { endpointUrl: string } >, ): ReturnType; + /** + * Listens for when the policy underlying this RPC service is available. + * + * @param listener - The callback to be called when the request is available. + * @returns What {@link ServicePolicy.onDegraded} returns. + * @see {@link createServicePolicy} + */ + onAvailable( + listener: CockatielEventToEventListenerWithData< + ServicePolicy['onAvailable'], + { endpointUrl: string } + >, + ): ReturnType; + /** * Makes a request to the target. 
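 *
 * An illustrative call (a hedged sketch; the `service` variable is
 * hypothetical, and the request/options shapes mirror those used in this
 * package's tests):
 *
 *   const response = await service.request(
 *     { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [] },
 *     { headers: { 'X-Foo': 'bar' } },
 *   );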
*/ diff --git a/packages/network-controller/src/rpc-service/rpc-service.test.ts b/packages/network-controller/src/rpc-service/rpc-service.test.ts index 6faaa7799f3..43b19edae24 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.test.ts @@ -1,14 +1,20 @@ -import { HttpError } from '@metamask/controller-utils'; +import { + DEFAULT_DEGRADED_THRESHOLD, + HttpError, +} from '@metamask/controller-utils'; import { errorCodes } from '@metamask/rpc-errors'; +import { CircuitState } from 'cockatiel'; import deepFreeze from 'deep-freeze-strict'; import nock from 'nock'; import { FetchError } from 'node-fetch'; import { useFakeTimers } from 'sinon'; import type { SinonFakeTimers } from 'sinon'; -import type { AbstractRpcService } from './abstract-rpc-service'; -import { CUSTOM_RPC_ERRORS, RpcService } from './rpc-service'; -import { DEFAULT_CIRCUIT_BREAK_DURATION } from '../../../controller-utils/src/create-service-policy'; +import { + CUSTOM_RPC_ERRORS, + DEFAULT_MAX_RETRIES, + RpcService, +} from './rpc-service'; describe('RpcService', () => { let clock: SinonFakeTimers; @@ -61,7 +67,7 @@ describe('RpcService', () => { message: 'terminated', }, ])( - `if making the request throws the $message error`, + `if making the request throws the "$message" error`, ({ constructorName, message }) => { let error; switch (constructorName) { @@ -83,7 +89,7 @@ describe('RpcService', () => { ); describe.each(['ETIMEDOUT', 'ECONNRESET'])( - 'if making the request throws a %s error', + 'if making the request throws a "%s" error', (errorCode) => { const error = new Error('timed out'); // @ts-expect-error `code` does not exist on the Error type, but is @@ -99,210 +105,42 @@ describe('RpcService', () => { ); describe('if the endpoint URL was not mocked via Nock', () => { - it('re-throws the error without retrying the request', async () => { - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow('Nock: Disallowed net connect'); - }); - - it('does not forward the request to a failover service if given one', async () => { - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + expectedError: 'Nock: Disallowed net connect', }); }); describe('if the endpoint URL was mocked via Nock, but not the RPC method', () => { - it('re-throws the error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const 
service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow('Nock: No match for request'); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_incorrectMethod', - params: [], - }) - .reply(500); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl }) => { + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_incorrectMethod', + params: [], + }) + .reply(500); + }, + rpcMethod: 'eth_chainId', + expectedError: 'Nock: No match for request', }); }); describe('if making the request throws an unknown error', () => { - it('re-throws the error without retrying the request', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow(error); - expect(mockFetch).toHaveBeenCalledTimes(1); - }); - - it('does not forward the request to a failover service if given one', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const error = new Error('oops'); - const mockFetch = jest.fn(() => { - throw error; - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + testsForNonRetriableErrors({ + createService: ({ endpointUrl, expectedError }) => { + return new RpcService({ + fetch: () => { + // This 
error could be anything. + // eslint-disable-next-line @typescript-eslint/only-throw-error + throw expectedError; + }, + btoa, + endpointUrl, + }); + }, + expectedError: new Error('oops'), }); }); @@ -325,443 +163,166 @@ describe('RpcService', () => { ); describe('if the endpoint has a 401 response', () => { - it('throws an unauthorized error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(401); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: CUSTOM_RPC_ERRORS.unauthorized, - message: 'Unauthorized.', - data: { - httpStatus: 401, - }, - }), - ); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(401); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(429); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }); - - describe.each([402, 404, 500, 501, 505, 506, 507, 508, 510, 511])( - 'if the endpoint has a %d response', - (httpStatus) => { - it('throws a resource unavailable error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); + .reply(401); + }, + expectedError: expect.objectContaining({ + code: CUSTOM_RPC_ERRORS.unauthorized, + message: 'Unauthorized.', + data: { + httpStatus: 401, + }, + }), + }); + }); - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_unknownMethod', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: 'RPC endpoint not found or unavailable.', - data: { - httpStatus, - }, - }), - ); + describe.each([402, 404, 500, 501, 505, 506, 507, 508, 510, 511])( + 'if the endpoint has a %d response', + (httpStatus) => { + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: rpcMethod, + params: [], + }) + .reply(httpStatus); + }, 
+ expectedError: expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: 'RPC endpoint not found or unavailable.', + data: { + httpStatus, + }, + }), }); + }, + ); + + describe('if the endpoint has a 429 response', () => { + const httpStatus = 429; - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); + }, + expectedError: expect.objectContaining({ + code: errorCodes.rpc.limitExceeded, + message: 'Request is being rate limited.', + data: { + httpStatus, + }, + }), + }); + }); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_unknownMethod', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); + describe('when the endpoint has a 4xx response that is not 401, 402, 404, or 429', () => { + const httpStatus = 422; - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; + testsForNonRetriableErrors({ + beforeCreateService: ({ endpointUrl, rpcMethod }) => { nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_unknownMethod', + method: rpcMethod, params: [], }) .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); + }, + expectedError: expect.objectContaining({ + code: CUSTOM_RPC_ERRORS.httpClientError, + message: 'RPC endpoint returned HTTP client error.', + data: { + httpStatus, + }, + }), + }); + }); - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_unknownMethod', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); + describe.each([ + 'invalid JSON', + '{"foo": "ba', + '

<html><p>Clearly an HTML response</p></html>
', + ])( + 'if the endpoint consistently responds with invalid JSON %o', + (responseBody) => { + testsForRetriableResponses({ + getClock: () => clock, + httpStatus: 200, + responseBody, + expectedError: expect.objectContaining({ + code: -32700, + message: 'RPC endpoint did not return JSON.', + }), + expectedOnBreakError: expect.objectContaining({ + message: expect.stringContaining('invalid json'), + }), }); }, ); - describe('if the endpoint has a 429 response', () => { - it('throws a rate-limiting error without retrying the request', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ + it('removes non-JSON-RPC-compliant properties from the request body before sending it to the endpoint', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: '0x1', }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.limitExceeded, - message: 'Request is being rate limited.', - data: { - httpStatus, - }, - }), - ); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, }); - it('does not forward the request to a failover service if given one', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); + // @ts-expect-error Intentionally passing bad input. 
+ const response = await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + some: 'extra', + properties: 'here', }); - it('does not call onBreak', async () => { - const httpStatus = 429; - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); + expect(response).toStrictEqual({ + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); + }); - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }); - - describe('when the endpoint has a 4xx response that is not 401, 402, 404, or 429', () => { - const httpStatus = 422; - - it('throws a generic HTTP client error without retrying the request', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await expect(promise).rejects.toThrow( - expect.objectContaining({ - code: CUSTOM_RPC_ERRORS.httpClientError, - message: 'RPC endpoint returned HTTP client error.', - data: { - httpStatus, - }, - }), - ); - }); - - it('does not forward the request to a failover service if given one', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - expect(failoverService.request).not.toHaveBeenCalled(); - }); - - it('does not call onBreak', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(httpStatus); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onBreak(onBreakListener); - - const promise = service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - await ignoreRejection(promise); - expect(onBreakListener).not.toHaveBeenCalled(); - }); - }); - - describe.each([ - 'invalid JSON', - '{"foo": "ba', - '

<html><p>Clearly an HTML response</p></html>
', - ])( - 'if the endpoint consistently responds with invalid JSON %o', - (responseBody) => { - testsForRetriableResponses({ - getClock: () => clock, - httpStatus: 200, - responseBody, - expectedError: expect.objectContaining({ - code: -32700, - message: 'RPC endpoint did not return JSON.', - }), - expectedOnBreakError: expect.objectContaining({ - message: expect.stringContaining('invalid json'), - }), - }); - }, - ); - - it('removes non-JSON-RPC-compliant properties from the request body before sending it to the endpoint', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - // @ts-expect-error Intentionally passing bad input. - const response = await service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - some: 'extra', - properties: 'here', - }); - - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - result: '0x1', - }); - }); - - it('extracts a username and password from the URL to the Authorization header', async () => { - const scope = nock('https://rpc.example.chain', { - reqheaders: { - Authorization: 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=', - }, - }) - .post('/', { + it('extracts a username and password from the URL to the Authorization header', async () => { + const scope = nock('https://rpc.example.chain', { + reqheaders: { + Authorization: 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=', + }, + }) + .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', @@ -1018,7 +579,7 @@ describe('RpcService', () => { params: [], }) .reply(200, () => { - clock.tick(6000); + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); return { id: 1, jsonrpc: '2.0', @@ -1041,90 +602,137 @@ describe('RpcService', () => { }); expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); }); - }); -}); -/** - * Some tests involve a rejected promise that is not necessarily the focus of - * the test. In these cases we don't want to ignore the error in case the - * promise _isn't_ rejected, but we don't want to highlight the assertion, - * either. - * - * @param promiseOrFn - A promise that rejects, or a function that returns a - * promise that rejects. - */ -async function ignoreRejection( - promiseOrFn: Promise | (() => T | Promise), -) { - await expect(promiseOrFn).rejects.toThrow(expect.any(Error)); -} + it('calls the onAvailable callback the first time a successful request occurs', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onAvailable(onAvailableListener); -/** - * These are tests that exercise logic for cases in which the request cannot be - * made because the `fetch` calls throws a specific error. - * - * @param args - The arguments - * @param args.getClock - A function that returns the Sinon clock, set in - * `beforeEach`. - * @param args.producedError - The error produced when `fetch` is called. - * @param args.expectedError - The error that a call to the service's `request` - * method is expected to produce. 
- */ -function testsForRetriableFetchErrors({ - getClock, - producedError, - expectedError, -}: { - getClock: () => SinonFakeTimers; - producedError: Error; - expectedError: string | jest.Constructable | RegExp | Error; -}) { - describe('if there is no failover service provided', () => { - it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, }); + }); + + it('calls the onAvailable callback if the endpoint takes more than 5 seconds to respond and then speeds up again', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + endpointUrl, }); + service.onAvailable(onAvailableListener); - const jsonRpcRequest = { + await service.request({ id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); - }); + }); + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); - it('still re-throws the error even after the circuit breaks', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, }); + }); + }); + + describe('reset', () => { + it('resets the state of the circuit to "closed"', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, - endpointUrl: 'https://rpc.example.chain', + endpointUrl, }); service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + clock.next(); }); const jsonRpcRequest = { @@ -1133,33 +741,49 @@ function testsForRetriableFetchErrors({ method: 'eth_chainId', params: [], }; + // Get through the first two rounds of retries await ignoreRejection(service.request(jsonRpcRequest)); await ignoreRejection(service.request(jsonRpcRequest)); // The last retry breaks the circuit - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); + await ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.Open); + + service.resetPolicy(); + + expect(service.getCircuitState()).toBe(CircuitState.Closed); }); - it('calls the onBreak callback once after the circuit breaks', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); + it('allows making a successful request to the service if its circuit has broken', async () => { const endpointUrl = 'https://rpc.example.chain'; - const onBreakListener = jest.fn(); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, endpointUrl, }); service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + clock.next(); }); - service.onBreak(onBreakListener); const jsonRpcRequest = { id: 1, @@ -1167,37 +791,66 @@ function testsForRetriableFetchErrors({ method: 'eth_chainId', params: [], }; + // Get through the first two rounds of retries await ignoreRejection(service.request(jsonRpcRequest)); await ignoreRejection(service.request(jsonRpcRequest)); // The last retry breaks the circuit await ignoreRejection(service.request(jsonRpcRequest)); - expect(onBreakListener).toHaveBeenCalledTimes(1); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedError, - endpointUrl: `${endpointUrl}/`, + service.resetPolicy(); + + expect(await service.request(jsonRpcRequest)).toStrictEqual({ + id: 1, + jsonrpc: '2.0', + result: 'ok', }); }); - it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); + it('calls onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes successfully again', async () => { const endpointUrl = 'https://rpc.example.chain'; - const logger = { warn: jest.fn() }; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const onAvailableListener = jest.fn(); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, endpointUrl, - logger, }); 
service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + clock.next(); }); + service.onAvailable(onAvailableListener); const jsonRpcRequest = { id: 1, @@ -1205,38 +858,50 @@ function testsForRetriableFetchErrors({ method: 'eth_chainId', params: [], }; + + // Make a successful requst + await service.request(jsonRpcRequest); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + + // Get through the first two rounds of retries await ignoreRejection(service.request(jsonRpcRequest)); await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit await ignoreRejection(service.request(jsonRpcRequest)); - clock.tick(60000); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: - 'RPC endpoint returned too many errors, retrying in 29 minutes. Consider using a different RPC endpoint.', - }), - ); + service.resetPolicy(); + + // Make another successful requst + await service.request(jsonRpcRequest); + expect(onAvailableListener).toHaveBeenCalledTimes(2); }); - it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); + it('allows making an unsuccessful request to the service if its circuit has broken', async () => { const endpointUrl = 'https://rpc.example.chain'; - const logger = { warn: jest.fn() }; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, endpointUrl, - logger, }); service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + clock.next(); }); const jsonRpcRequest = { @@ -1245,38 +910,48 @@ function testsForRetriableFetchErrors({ method: 'eth_chainId', params: [], }; + // Get through the first two rounds of retries await ignoreRejection(service.request(jsonRpcRequest)); await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit await ignoreRejection(service.request(jsonRpcRequest)); - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), + service.resetPolicy(); + + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + 'RPC endpoint not found or unavailable', ); }); - }); - describe('if a failover service is provided', () => { - it('still retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const failoverService = buildMockRpcService(); + it('does not call onBreak listeners', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); + const onBreakListener = jest.fn(); const service = new RpcService({ - fetch: mockFetch, + fetch, btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, + endpointUrl, }); service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); + clock.next(); }); + service.onBreak(onBreakListener); const jsonRpcRequest = { id: 1, @@ -1284,304 +959,425 @@ function testsForRetriableFetchErrors({ method: 'eth_chainId', params: [], }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); + + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + expect(onBreakListener).toHaveBeenCalledTimes(1); + + service.resetPolicy(); + expect(onBreakListener).toHaveBeenCalledTimes(1); }); + }); +}); - it('forwards the request to the failover service in addition to the primary endpoint while the circuit is broken, stopping when the primary endpoint recovers', async () => { - const clock = getClock(); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - let invocationCounter = 0; - const mockFetch = jest.fn(async () => { - invocationCounter += 1; - if (invocationCounter === 17) { - return new Response( - JSON.stringify({ - id: jsonRpcRequest.id, - jsonrpc: jsonRpcRequest.jsonrpc, - result: 'ok', - }), - ); - } - throw producedError; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - fetchOptions: { - headers: { - 'X-Foo': 'bar', - }, - }, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); +/** + * Some tests involve a rejected promise that is not necessarily the focus of + * the test. In these cases we don't want to ignore the error in case the + * promise _isn't_ rejected, but we don't want to highlight the assertion, + * either. + * + * @param promiseOrFn - A promise that rejects, or a function that returns a + * promise that rejects. + */ +async function ignoreRejection( + promiseOrFn: Promise | (() => T | Promise), +) { + await expect(promiseOrFn).rejects.toThrow(expect.any(Error)); +} - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(5); +/** + * These are tests that exercise logic for cases in which the request cannot be + * made because some kind of error is thrown, and the request is not retried. + * + * @param args - The arguments. + * @param args.beforeCreateService - A function that is run before the service + * is created. + * @param args.createService - A function that is run to create the service. + * @param args.endpointUrl - The URL that is hit. + * @param args.rpcMethod - The RPC method that is used. (Defaults to + * `eth_chainId`). + * @param args.expectedError - The error that a call to the service's `request` + * method is expected to produce. 
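 *
 * @example
 * A sketch of how this helper is invoked inside a `describe` block, mirroring
 * the 401 case above (the nock setup shown here is illustrative):
 *
 *   describe('if the endpoint has a 401 response', () => {
 *     testsForNonRetriableErrors({
 *       beforeCreateService: ({ endpointUrl, rpcMethod }) => {
 *         nock(endpointUrl)
 *           .post('/', { id: 1, jsonrpc: '2.0', method: rpcMethod, params: [] })
 *           .reply(401);
 *       },
 *       expectedError: expect.objectContaining({
 *         code: CUSTOM_RPC_ERRORS.unauthorized,
 *         message: 'Unauthorized.',
 *         data: { httpStatus: 401 },
 *       }),
 *     });
 *   });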
+ */ +function testsForNonRetriableErrors({ + beforeCreateService = () => { + // do nothing + }, + createService = (args) => { + return new RpcService({ fetch, btoa, endpointUrl: args.endpointUrl }); + }, + endpointUrl = 'https://rpc.example.chain', + rpcMethod = `eth_chainId`, + expectedError, +}: { + beforeCreateService?: (args: { + endpointUrl: string; + rpcMethod: string; + }) => void; + createService?: (args: { + endpointUrl: string; + expectedError: string | RegExp | Error | jest.Constructable | undefined; + }) => RpcService; + endpointUrl?: string; + rpcMethod?: string; + expectedError: string | RegExp | Error | jest.Constructable | undefined; +}) { + /* eslint-disable jest/require-top-level-describe */ - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(mockFetch).toHaveBeenCalledTimes(10); + it('re-throws the error without retrying the request', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const service = createService({ endpointUrl, expectedError }); - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - expect(mockFetch).toHaveBeenCalledTimes(15); - expect(failoverService.request).toHaveBeenCalledTimes(1); - expect(failoverService.request).toHaveBeenNthCalledWith( - 1, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + const promise = service.request({ + id: 1, + jsonrpc: '2.0', + method: rpcMethod, + params: [], + }); - await service.request(jsonRpcRequest); - // The circuit is broken, so the `fetch` is not attempted - expect(mockFetch).toHaveBeenCalledTimes(15); - expect(failoverService.request).toHaveBeenCalledTimes(2); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + await expect(promise).rejects.toThrow(expectedError); + }); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - await service.request(jsonRpcRequest); - expect(mockFetch).toHaveBeenCalledTimes(16); - // The circuit breaks again - expect(failoverService.request).toHaveBeenCalledTimes(3); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); + it('does not call onRetry', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onRetryListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onRetry(onRetryListener); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // Finally the request succeeds - const response = await service.request(jsonRpcRequest); - expect(response).toStrictEqual({ + await ignoreRejection( + service.request({ id: 1, jsonrpc: '2.0', - result: 'ok', - }); - expect(mockFetch).toHaveBeenCalledTimes(17); - expect(failoverService.request).toHaveBeenCalledTimes(3); - }); + method: rpcMethod, + params: [], + }), + ); + expect(onRetryListener).not.toHaveBeenCalled(); + }); - it('still calls onBreak each time the circuit breaks from the perspective of the primary endpoint', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 
'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const failoverService = buildMockRpcService({ - endpointUrl: new URL(failoverEndpointUrl), - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + it('does not call onBreak', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onBreakListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onBreak(onBreakListener); - const jsonRpcRequest = { + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // The circuit breaks again - await service.request(jsonRpcRequest); + }), + ); + expect(onBreakListener).not.toHaveBeenCalled(); + }); - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedError, - endpointUrl: `${endpointUrl}/`, - failoverEndpointUrl: `${failoverEndpointUrl}/`, - }); - }); + it('does not call onDegraded', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onDegradedListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onDegraded(onDegradedListener); - it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const logger = { warn: jest.fn() }; - const failoverService = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: failoverEndpointUrl, - logger, - }); - failoverService.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + await ignoreRejection( + service.request({ + id: 1, + jsonrpc: '2.0', + method: rpcMethod, + params: [], + }), + ); + expect(onDegradedListener).not.toHaveBeenCalled(); + }); - const jsonRpcRequest = { + it('does not call onAvailable', async () => { + beforeCreateService({ endpointUrl, rpcMethod }); + const onAvailableListener = jest.fn(); + const service = createService({ endpointUrl, expectedError }); + service.onAvailable(onAvailableListener); + + await ignoreRejection( + service.request({ id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', + jsonrpc: '2.0', + method: rpcMethod, params: [], - }; - // Get through the first two rounds of retries on the primary - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit and sends the request to the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - // Get through the first two rounds of retries on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - - // The last retry breaks the circuit on the failover - clock.tick(60000); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expect.objectContaining({ - code: errorCodes.rpc.resourceUnavailable, - message: - 'RPC endpoint returned too many errors, retrying in 29 minutes. Consider using a different RPC endpoint.', - }), - ); - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), - ); - }); + }), + ); + expect(onAvailableListener).not.toHaveBeenCalled(); + }); - it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { - const clock = getClock(); - const mockFetch = jest.fn(() => { - throw producedError; - }); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const logger = { warn: jest.fn() }; - const failoverService = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl: failoverEndpointUrl, - logger, - }); - failoverService.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch: mockFetch, - btoa, - endpointUrl, - failoverService, - logger, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + /* eslint-enable jest/require-top-level-describe */ +} - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - // Get through the first two rounds of retries on the primary - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit and sends the request to the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - // Get through the first two rounds of retries on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - - // The last retry breaks the circuit on the failover - await ignoreRejection(() => service.request(jsonRpcRequest)); - expect(logger.warn).toHaveBeenCalledWith( - expect.objectContaining({ - message: 'Execution prevented because the circuit breaker is open', - }), - ); +/** + * These are tests that exercise logic for cases in which the request cannot be + * made because the `fetch` calls throws a specific error. + * + * @param args - The arguments + * @param args.getClock - A function that returns the Sinon clock, set in + * `beforeEach`. + * @param args.producedError - The error produced when `fetch` is called. + * @param args.expectedError - The error that a call to the service's `request` + * method is expected to produce. + */ +function testsForRetriableFetchErrors({ + getClock, + producedError, + expectedError, +}: { + getClock: () => SinonFakeTimers; + producedError: Error; + expectedError: string | jest.Constructable | RegExp | Error; +}) { + // This function is designed to be used inside of a describe, so this won't be + // a problem in practice. 
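  // For example, a caller might invoke this helper roughly as follows (an
  // illustrative sketch; the error values are placeholders):
  //
  //   describe('if making the request throws a connection error', () => {
  //     const error = new Error('timed out');
  //     testsForRetriableFetchErrors({
  //       getClock: () => clock,
  //       producedError: error,
  //       expectedError: error.message,
  //     });
  //   });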
+ /* eslint-disable jest/require-top-level-describe */ + + it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + expect(mockFetch).toHaveBeenCalledTimes(5); + }); + + it('calls the onDegraded callback once for each retry round', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const onDegradedListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + service.onDegraded(onDegradedListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onDegradedListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + error: expectedError, + }); + }); + + it('still re-throws the error even after the circuit breaks', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + }); + + it('calls the onBreak callback once after the circuit breaks', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, }); + service.onRetry(() => { + clock.next(); + }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + error: expectedError, + endpointUrl: `${endpointUrl}/`, + }); + }); + + it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 
'https://rpc.example.chain'; + const logger = { warn: jest.fn() }; + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + // Advance a minute to test that the message updates dynamically as time passes + clock.tick(60000); + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: + 'RPC endpoint returned too many errors, retrying in 29 minutes. Consider using a different RPC endpoint.', + }), + ); }); + + it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const mockFetch = jest.fn(() => { + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const logger = { warn: jest.fn() }; + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(logger.warn).toHaveBeenCalledWith( + expect.objectContaining({ + message: 'Execution prevented because the circuit breaker is open', + }), + ); + }); + + it('calls the onAvailable callback if the endpoint becomes degraded via errors and then recovers', async () => { + const clock = getClock(); + let invocationIndex = -1; + const mockFetch = jest.fn(async () => { + invocationIndex += 1; + if (invocationIndex === DEFAULT_MAX_RETRIES + 1) { + return new Response( + JSON.stringify({ + id: 1, + jsonrpc: '2.0', + result: { some: 'data' }, + }), + ); + } + throw producedError; + }); + const endpointUrl = 'https://rpc.example.chain'; + const onAvailableListener = jest.fn(); + const service = new RpcService({ + fetch: mockFetch, + btoa, + endpointUrl, + }); + service.onAvailable(onAvailableListener); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Cause the retry policy to give up + await ignoreRejection(service.request(jsonRpcRequest)); + await service.request(jsonRpcRequest); + + expect(onAvailableListener).toHaveBeenCalledTimes(1); + }); + + /* eslint-enable jest/require-top-level-describe */ } /** @@ -1613,363 +1409,203 @@ function testsForRetriableResponses({ }) { // This function is designed to be used inside of a describe, so this won't be // a problem in practice. 
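  // For example, the invalid-JSON cases above invoke this helper roughly like
  // so (illustrative; values mirror the describe.each block earlier in this
  // file):
  //
  //   testsForRetriableResponses({
  //     getClock: () => clock,
  //     httpStatus: 200,
  //     responseBody: 'invalid JSON',
  //     expectedError: expect.objectContaining({
  //       code: -32700,
  //       message: 'RPC endpoint did not return JSON.',
  //     }),
  //     expectedOnBreakError: expect.objectContaining({
  //       message: expect.stringContaining('invalid json'),
  //     }),
  //   });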
- /* eslint-disable jest/no-identical-title */ + /* eslint-disable jest/require-top-level-describe,jest/no-identical-title */ - describe('if there is no failover service provided', () => { - it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const scope = nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(5) - .reply(httpStatus, responseBody); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - const jsonRpcRequest = { + it('retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { + const clock = getClock(); + const scope = nock('https://rpc.example.chain') + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(scope.isDone()).toBe(true); + }) + .times(5) + .reply(httpStatus, responseBody); + const service = new RpcService({ + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); }); - it('still re-throws the error even after the circuit breaks', async () => { - const clock = getClock(); - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(httpStatus, responseBody); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + expect(scope.isDone()).toBe(true); + }); - const jsonRpcRequest = { + it('still re-throws the error even after the circuit breaks', async () => { + const clock = getClock(); + nock('https://rpc.example.chain') + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); + }) + .times(15) + .reply(httpStatus, responseBody); + const service = new RpcService({ + fetch, + btoa, + endpointUrl: 'https://rpc.example.chain', + }); + service.onRetry(() => { + clock.next(); }); - it('calls the onBreak callback once after the circuit breaks', async () => { - const clock = getClock(); - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(httpStatus, responseBody); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expectedError, + ); + }); - const jsonRpcRequest = { + it('calls the onBreak callback once after the circuit breaks', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - expect(onBreakListener).toHaveBeenCalledTimes(1); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedOnBreakError, - endpointUrl: `${endpointUrl}/`, - }); + }) + .times(15) + .reply(httpStatus, responseBody); + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onBreakListener).toHaveBeenCalledWith({ + error: expectedOnBreakError, + endpointUrl: `${endpointUrl}/`, }); }); - describe('if a 
failover service is provided', () => { - it('still retries a constantly failing request up to 4 more times before re-throwing the error, if `request` is only called once', async () => { - const clock = getClock(); - const scope = nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(5) - .reply(httpStatus, responseBody); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - const jsonRpcRequest = { + it('throws an error that includes the number of minutes until the circuit is re-closed if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(scope.isDone()).toBe(true); + }) + .times(15) + .reply(httpStatus, responseBody); + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, }); - - it('forwards the request to the failover service in addition to the primary endpoint while the circuit is broken, stopping when the primary endpoint recovers', async () => { - const clock = getClock(); - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - let invocationCounter = 0; - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(17) - .reply(() => { - invocationCounter += 1; - if (invocationCounter === 17) { - return [ - 200, - JSON.stringify({ - id: jsonRpcRequest.id, - jsonrpc: jsonRpcRequest.jsonrpc, - result: 'ok', - }), - ]; - } - return [httpStatus, responseBody]; - }); - const failoverService = buildMockRpcService(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl: 'https://rpc.example.chain', - fetchOptions: { - headers: { - 'X-Foo': 'bar', - }, - }, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. 
- // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(invocationCounter).toBe(5); - - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - expectedError, - ); - expect(invocationCounter).toBe(10); - - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - expect(invocationCounter).toBe(15); - expect(failoverService.request).toHaveBeenCalledTimes(1); - expect(failoverService.request).toHaveBeenNthCalledWith( - 1, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - await service.request(jsonRpcRequest); - // The circuit is broken, so the `fetch` is not attempted - expect(invocationCounter).toBe(15); - expect(failoverService.request).toHaveBeenCalledTimes(2); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - await service.request(jsonRpcRequest); - expect(invocationCounter).toBe(16); - // The circuit breaks again - expect(failoverService.request).toHaveBeenCalledTimes(3); - expect(failoverService.request).toHaveBeenNthCalledWith( - 2, - jsonRpcRequest, - { - headers: { - Accept: 'application/json', - 'Content-Type': 'application/json', - 'X-Foo': 'bar', - }, - method: 'POST', - body: JSON.stringify(jsonRpcRequest), - }, - ); - - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // Finally the request succeeds - const response = await service.request(jsonRpcRequest); - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - result: 'ok', - }); - expect(invocationCounter).toBe(17); - expect(failoverService.request).toHaveBeenCalledTimes(3); + service.onRetry(() => { + clock.next(); }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + // Advance a minute to test that the message updates dynamically as time passes + clock.tick(60000); + await expect(service.request(jsonRpcRequest)).rejects.toThrow( + expect.objectContaining({ + code: errorCodes.rpc.resourceUnavailable, + message: + 'RPC endpoint returned too many errors, retrying in 29 minutes. 
Consider using a different RPC endpoint.', + }), + ); + }); - it('still calls onBreak each time the circuit breaks from the perspective of the primary endpoint', async () => { - const clock = getClock(); - nock('https://rpc.example.chain') - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(16) - .reply(httpStatus, responseBody); - const endpointUrl = 'https://rpc.example.chain'; - const failoverEndpointUrl = 'https://failover.endpoint'; - const failoverService = buildMockRpcService({ - endpointUrl: new URL(failoverEndpointUrl), - }); - const onBreakListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - failoverService, - }); - service.onRetry(() => { - // We don't need to await this promise; adding it to the promise - // queue is enough to continue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.nextAsync(); - }); - service.onBreak(onBreakListener); - - const jsonRpcRequest = { + it('logs the original CircuitBreakError if a request is attempted while the circuit is open', async () => { + const clock = getClock(); + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - await ignoreRejection(() => service.request(jsonRpcRequest)); - await ignoreRejection(() => service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await service.request(jsonRpcRequest); - clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); - // The circuit breaks again - await service.request(jsonRpcRequest); - - expect(onBreakListener).toHaveBeenCalledTimes(2); - expect(onBreakListener).toHaveBeenCalledWith({ - error: expectedOnBreakError, - endpointUrl: `${endpointUrl}/`, - failoverEndpointUrl: `${failoverEndpointUrl}/`, - }); + }) + .times(15) + .reply(httpStatus, responseBody); + const logger = { warn: jest.fn() }; + const onBreakListener = jest.fn(); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + logger, + }); + service.onRetry(() => { + clock.next(); }); + service.onBreak(onBreakListener); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(logger.warn).toHaveBeenCalledWith( + expect.objectContaining({ + message: 'Execution prevented because the circuit breaker is open', + }), + ); }); - /* eslint-enable jest/no-identical-title */ -} - -/** - * Constructs a fake RPC service for use as a failover in tests. - * - * @param overrides - The overrides. - * @returns The fake failover service. 
- */ -function buildMockRpcService( - overrides?: Partial, -): AbstractRpcService { - return { - endpointUrl: new URL('https://test.example'), - request: jest.fn(), - onRetry: jest.fn(), - onBreak: jest.fn(), - onDegraded: jest.fn(), - ...overrides, - }; + /* eslint-enable jest/require-top-level-describe,jest/no-identical-title */ } diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index 7021e8167cd..9b270ab11b5 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -4,7 +4,6 @@ import type { } from '@metamask/controller-utils'; import { BrokenCircuitError, - CircuitState, HttpError, createServicePolicy, handleWhen, @@ -23,7 +22,8 @@ import deepmerge from 'deepmerge'; import type { Logger } from 'loglevel'; import type { AbstractRpcService } from './abstract-rpc-service'; -import type { AddToCockatielEventData, FetchOptions } from './shared'; +import type { FetchOptions } from './shared'; +import { projectLogger, createModuleLogger } from '../logger'; /** * Options for the RpcService constructor. @@ -38,11 +38,6 @@ export type RpcServiceOptions = { * The URL of the RPC endpoint to hit. */ endpointUrl: URL | string; - /** - * An RPC service that represents a failover endpoint which will be invoked - * while the circuit for _this_ service is open. - */ - failoverService?: AbstractRpcService; /** * A function that can be used to make an HTTP request. If your JavaScript * environment supports `fetch` natively, you'll probably want to pass that; @@ -65,6 +60,8 @@ export type RpcServiceOptions = { policyOptions?: Omit; }; +const log = createModuleLogger(projectLogger, 'RpcService'); + /** * The maximum number of times that a failing service should be re-run before * giving up. @@ -238,25 +235,21 @@ function stripCredentialsFromUrl(url: URL): URL { */ export class RpcService implements AbstractRpcService { /** - * The function used to make an HTTP request. + * The URL of the RPC endpoint. */ - readonly #fetch: typeof fetch; + readonly endpointUrl: URL; /** - * The URL of the RPC endpoint. + * The function used to make an HTTP request. */ - readonly endpointUrl: URL; + readonly #fetch: typeof fetch; /** * A common set of options that the request options will extend. */ readonly #fetchOptions: FetchOptions; - /** - * An RPC service that represents a failover endpoint which will be invoked - * while the circuit for _this_ service is open. - */ - readonly #failoverService: RpcServiceOptions['failoverService']; + #lastError: unknown; /** * A `loglevel` logger. @@ -277,7 +270,6 @@ export class RpcService implements AbstractRpcService { const { btoa: givenBtoa, endpointUrl, - failoverService, fetch: givenFetch, logger, fetchOptions = {}, @@ -292,10 +284,9 @@ export class RpcService implements AbstractRpcService { givenBtoa, ); this.endpointUrl = stripCredentialsFromUrl(normalizedUrl); - this.#failoverService = failoverService; this.#logger = logger; - const policy = createServicePolicy({ + this.#policy = createServicePolicy({ maxRetries: DEFAULT_MAX_RETRIES, maxConsecutiveFailures: DEFAULT_MAX_CONSECUTIVE_FAILURES, ...policyOptions, @@ -315,7 +306,34 @@ export class RpcService implements AbstractRpcService { ); }), }); - this.#policy = policy; + } + + /** + * Resets the underlying composite Cockatiel policy. 
+ * + * This is useful in a collection of RpcServices where some act as failovers + * for others where you effectively want to invalidate the failovers when the + * primary recovers. + */ + resetPolicy() { + this.#policy.reset(); + } + + /** + * @returns The state of the underlying circuit. + */ + getCircuitState() { + return this.#policy.getCircuitState(); + } + + /** + * @returns The last failure reason that the retry policy captured (or + * `undefined` if the last execution of the service was successful). + */ + getLastInnerFailureReason(): { error: unknown } | undefined { + return this.#lastError === undefined + ? undefined + : { error: this.#lastError }; } /** @@ -325,12 +343,7 @@ export class RpcService implements AbstractRpcService { * @returns What {@link ServicePolicy.onRetry} returns. * @see {@link createServicePolicy} */ - onRetry( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, - ) { + onRetry(listener: Parameters[0]) { return this.#policy.onRetry((data) => { listener({ ...data, endpointUrl: this.endpointUrl.toString() }); }); @@ -338,26 +351,17 @@ export class RpcService implements AbstractRpcService { /** * Listens for when the RPC service retries the request too many times in a - * row. + * row, causing the underlying circuit to break. * * @param listener - The callback to be called when the circuit is broken. * @returns What {@link ServicePolicy.onBreak} returns. * @see {@link createServicePolicy} */ - onBreak( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string; failoverEndpointUrl?: string } - >, - ) { + onBreak(listener: Parameters[0]) { return this.#policy.onBreak((data) => { - listener({ - ...data, - endpointUrl: this.endpointUrl.toString(), - failoverEndpointUrl: this.#failoverService - ? this.#failoverService.endpointUrl.toString() - : undefined, - }); + if (!('isolated' in data)) { + listener({ ...data, endpointUrl: this.endpointUrl.toString() }); + } }); } @@ -369,21 +373,27 @@ export class RpcService implements AbstractRpcService { * @returns What {@link ServicePolicy.onDegraded} returns. * @see {@link createServicePolicy} */ - onDegraded( - listener: AddToCockatielEventData< - Parameters[0], - { endpointUrl: string } - >, - ) { + onDegraded(listener: Parameters[0]) { return this.#policy.onDegraded((data) => { listener({ ...(data ?? {}), endpointUrl: this.endpointUrl.toString() }); }); } /** - * Makes a request to the RPC endpoint. If the circuit is open because this - * request has failed too many times, the request is forwarded to a failover - * service (if provided). + * Listens for when the policy underlying this RPC service is available. + * + * @param listener - The callback to be called when the request is available. + * @returns What {@link ServicePolicy.onAvailable} returns. + * @see {@link createServicePolicy} + */ + onAvailable(listener: Parameters[0]) { + return this.#policy.onAvailable(() => { + listener({ endpointUrl: this.endpointUrl.toString() }); + }); + } + + /** + * Makes a request to the RPC endpoint. * * This overload is specifically designed for `eth_getBlockByNumber`, which * can return a `result` of `null` despite an expected `Result` being @@ -405,9 +415,7 @@ export class RpcService implements AbstractRpcService { ): Promise | JsonRpcResponse>; /** - * Makes a request to the RPC endpoint. If the circuit is open because this - * request has failed too many times, the request is forwarded to a failover - * service (if provided). + * Makes a request to the RPC endpoint. 
* * This overload is designed for all RPC methods except for * `eth_getBlockByNumber`, which are expected to return a `result` of the @@ -437,21 +445,7 @@ export class RpcService implements AbstractRpcService { jsonRpcRequest, fetchOptions, ); - - try { - return await this.#processRequest(completeFetchOptions); - } catch (error) { - if ( - this.#policy.circuitBreakerPolicy.state === CircuitState.Open && - this.#failoverService !== undefined - ) { - return await this.#failoverService.request( - jsonRpcRequest, - completeFetchOptions, - ); - } - throw error; - } + return await this.#executeAndProcessRequest(completeFetchOptions); } /** @@ -528,19 +522,41 @@ export class RpcService implements AbstractRpcService { * @throws A generic HTTP client JSON-RPC error (code -32050) for any other 4xx HTTP status codes. * @throws A "parse" JSON-RPC error (code -32700) if the response is not valid JSON. */ - async #processRequest( + async #executeAndProcessRequest( fetchOptions: FetchOptions, ): Promise | JsonRpcResponse> { let response: Response | undefined; try { - return await this.#policy.execute(async () => { + log( + `[RpcService: ${this.endpointUrl}] Circuit state`, + this.#policy.getCircuitState(), + ); + return await this.#policy.execute(async (data) => { + log( + 'REQUEST INITIATED:', + this.endpointUrl.toString(), + '::', + fetchOptions, + // @ts-expect-error This property _is_ here, the type of ServicePolicy + // is just wrong. + `(attempt ${data.attempt + 1})`, + ); response = await this.#fetch(this.endpointUrl, fetchOptions); if (!response.ok) { throw new HttpError(response.status); } + log( + 'REQUEST SUCCESSFUL:', + this.endpointUrl.toString(), + response.status, + ); return await response.json(); }); } catch (error) { + log('REQUEST ERROR:', this.endpointUrl.toString(), error); + + this.#lastError = error; + if (error instanceof HttpError) { const status = error.httpStatus; if (status === 401) { diff --git a/packages/network-controller/src/rpc-service/shared.ts b/packages/network-controller/src/rpc-service/shared.ts index e33ae6129ad..668fe3fc570 100644 --- a/packages/network-controller/src/rpc-service/shared.ts +++ b/packages/network-controller/src/rpc-service/shared.ts @@ -1,13 +1,69 @@ +import type { + CockatielEvent, + CockatielEventEmitter, +} from '@metamask/controller-utils'; + /** * Equivalent to the built-in `FetchOptions` type, but renamed for clarity. */ export type FetchOptions = RequestInit; /** - * Extends an event listener that Cockatiel uses so that when it is called, more - * data can be supplied in the event object. + * Converts a Cockatiel event type to an event emitter type. */ -export type AddToCockatielEventData = - EventListener extends (data: infer Data) => void - ? (data: Data extends void ? AdditionalData : Data & AdditionalData) => void +export type CockatielEventToEventEmitter = + Event extends CockatielEvent + ? CockatielEventEmitter : never; + +/** + * Obtains the event data type from a Cockatiel event or event listener type. + */ +export type ExtractCockatielEventData = + CockatielEventOrEventListener extends CockatielEvent + ? Data + : CockatielEventOrEventListener extends (data: infer Data) => void + ? Data + : never; + +/** + * Extends the data that a Cockatiel event listener is called with additional + * data. + */ +export type ExtendCockatielEventData = + OriginalData extends void ? AdditionalData : OriginalData & AdditionalData; + +/** + * Removes keys from the data that a Cockatiel event listner is called with. 
+ */ +export type ExcludeCockatielEventData< + OriginalData, + Keys extends PropertyKey, +> = OriginalData extends void ? void : Omit; + +/** + * Converts a Cockatiel event type to an event listener type, but adding the + * requested data. + */ +export type CockatielEventToEventListenerWithData = ( + data: ExtendCockatielEventData, Data>, +) => void; + +/** + * Converts a Cockatiel event type to an event listener type, but removing the + * requested keys from the data. + */ +export type CockatielEventToEventListenerWithoutData< + Event, + Keys extends PropertyKey, +> = ( + data: ExcludeCockatielEventData, Keys>, +) => void; + +/** + * Converts a Cockatiel event listener type to an event emitter type. + */ +export type CockatielEventToEventEmitterWithData = + CockatielEventEmitter< + ExtendCockatielEventData, Data> + >; diff --git a/packages/network-controller/tests/NetworkController.test.ts b/packages/network-controller/tests/NetworkController.test.ts index 2e85aaddaad..6383310fdca 100644 --- a/packages/network-controller/tests/NetworkController.test.ts +++ b/packages/network-controller/tests/NetworkController.test.ts @@ -4637,6 +4637,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 2, { + networkClientId: infuraNetworkType, networkClientConfiguration: { infuraProjectId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -4654,6 +4655,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -4670,6 +4672,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://third.failover.endpoint'], @@ -6047,6 +6050,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: infuraNetworkType, networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://failover.endpoint'], @@ -6278,6 +6282,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: 'AAAA-AAAA-AAAA-AAAA', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -6293,6 +6298,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(4, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -7265,6 +7271,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://failover.endpoint'], @@ -8135,6 +8142,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 3, { + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -8151,6 +8159,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', 
networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://second.failover.endpoint'], @@ -9136,6 +9145,7 @@ describe('NetworkController', () => { }); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'BBBB-BBBB-BBBB-BBBB', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://failover.endpoint'], @@ -10292,6 +10302,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -10307,6 +10318,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(5, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: infuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -11008,6 +11020,7 @@ describe('NetworkController', () => { ); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -11021,6 +11034,7 @@ describe('NetworkController', () => { isRpcFailoverEnabled: true, }); expect(createAutoManagedNetworkClientSpy).toHaveBeenCalledWith({ + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: '0x1337', failoverRpcUrls: ['https://second.failover.endpoint'], @@ -11737,6 +11751,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(6, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: anotherInfuraChainId, failoverRpcUrls: ['https://first.failover.endpoint'], @@ -11752,6 +11767,7 @@ describe('NetworkController', () => { expect( createAutoManagedNetworkClientSpy, ).toHaveBeenNthCalledWith(7, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: anotherInfuraChainId, failoverRpcUrls: ['https://second.failover.endpoint'], @@ -12434,6 +12450,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 4, { + networkClientId: 'CCCC-CCCC-CCCC-CCCC', networkClientConfiguration: { chainId: '0x2448', failoverRpcUrls: ['https://first.failover.endpoint'], @@ -12450,6 +12467,7 @@ describe('NetworkController', () => { expect(createAutoManagedNetworkClientSpy).toHaveBeenNthCalledWith( 5, { + networkClientId: 'DDDD-DDDD-DDDD-DDDD', networkClientConfiguration: { chainId: '0x2448', failoverRpcUrls: ['https://second.failover.endpoint'], diff --git a/packages/network-controller/tests/network-client/helpers.ts b/packages/network-controller/tests/network-client/helpers.ts index 4f0dcb128ea..f53900dc99d 100644 --- a/packages/network-controller/tests/network-client/helpers.ts +++ b/packages/network-controller/tests/network-client/helpers.ts @@ -3,13 +3,16 @@ import type { InfuraNetworkType } from '@metamask/controller-utils'; import { BUILT_IN_NETWORKS } from '@metamask/controller-utils'; import type { BlockTracker } from '@metamask/eth-block-tracker'; import EthQuery from '@metamask/eth-query'; -import type { Hex } from '@metamask/utils'; +import type { Hex, JsonRpcRequest } from '@metamask/utils'; import nock, { isDone as nockIsDone } from 'nock'; import type { Scope as NockScope } from 'nock'; import { useFakeTimers } from 'sinon'; import { createNetworkClient } from '../../src/create-network-client'; 
-import type { NetworkControllerOptions } from '../../src/NetworkController'; +import type { + NetworkClientId, + NetworkControllerOptions, +} from '../../src/NetworkController'; import type { NetworkClientConfiguration, Provider } from '../../src/types'; import { NetworkClientType } from '../../src/types'; import type { RootMessenger } from '../helpers'; @@ -85,7 +88,10 @@ type Response = { result?: any; httpStatus?: number; }; -export type MockResponse = { body: JSONRPCResponse | string } | Response; +export type MockResponse = + | { body: JSONRPCResponse | string } + | Response + | (() => Response | Promise); type CurriedMockRpcCallOptions = { request: MockRequest; // The response data. @@ -147,22 +153,12 @@ function mockRpcCall({ // eth-query always passes `params`, so even if we don't supply this property, // for consistency with makeRpcCall, assume that the `body` contains it const { method, params = [], ...rest } = request; - let httpStatus = 200; - let completeResponse: JSONRPCResponse | string = { id: 2, jsonrpc: '2.0' }; - if (response !== undefined) { - if ('body' in response) { - completeResponse = response.body; - } else { - if (response.error) { - completeResponse.error = response.error; - } else { - completeResponse.result = response.result; - } - if (response.httpStatus) { - httpStatus = response.httpStatus; - } - } - } + const httpStatus = + (typeof response === 'object' && + 'httpStatus' in response && + response.httpStatus) || + 200; + /* @ts-expect-error The types for Nock do not include `basePath` in the interface for Nock.Scope. */ const url = nockScope.basePath.includes('infura.io') ? `/v3/${MOCK_INFURA_PROJECT_ID}` @@ -196,26 +192,45 @@ function mockRpcCall({ if (error !== undefined) { return nockRequest.replyWithError(error); - } else if (completeResponse !== undefined) { - // TODO: Replace `any` with type - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return nockRequest.reply(httpStatus, (_, requestBody: any) => { - if (typeof completeResponse === 'string') { - return completeResponse; - } - - if (response !== undefined && !('body' in response)) { - if (response.id === undefined) { - completeResponse.id = requestBody.id; - } else { - completeResponse.id = response.id; - } - } - debug('Nock returning Response', completeResponse); - return completeResponse; - }); } - return nockRequest; + + return nockRequest.reply(async (_uri, requestBody) => { + const jsonRpcRequest = requestBody as JsonRpcRequest; + let resolvedResponse: Response | string | JSONRPCResponse | undefined; + if (typeof response === 'function') { + resolvedResponse = await response(); + } else if (response !== undefined && 'body' in response) { + resolvedResponse = response.body; + } else { + resolvedResponse = response; + } + + if ( + typeof resolvedResponse === 'string' || + resolvedResponse === undefined + ) { + return [httpStatus, resolvedResponse]; + } + + // It is very dumb that we have to do all of these shenanigans. + // Perhaps we can simplify this in the future. 
+ + const { + id: jsonRpcId = jsonRpcRequest.id, + jsonrpc: jsonRpcVersion = jsonRpcRequest.jsonrpc, + result: jsonRpcResult, + error: jsonRpcError, + } = resolvedResponse; + + const completeResponse = { + id: jsonRpcId, + jsonrpc: jsonRpcVersion, + result: jsonRpcResult, + error: jsonRpcError, + }; + debug('Nock returning Response', completeResponse); + return [httpStatus, completeResponse]; + }); } type MockBlockTrackerRequestOptions = { @@ -316,6 +331,7 @@ export type MockOptions = { getBlockTrackerOptions?: NetworkControllerOptions['getBlockTrackerOptions']; expectedHeaders?: Record; messenger?: RootMessenger; + networkClientId?: NetworkClientId; isRpcFailoverEnabled?: boolean; }; @@ -474,6 +490,7 @@ export async function waitForPromiseToBeFulfilledAfterRunningAllTimers( * @param options.getRpcServiceOptions - RPC service options factory. * @param options.getBlockTrackerOptions - Block tracker options factory. * @param options.messenger - The root messenger to use in tests. + * @param options.networkClientId - The ID of the new network client. * @param options.isRpcFailoverEnabled - Whether or not the RPC failover * functionality is enabled. * @param fn - A function which will be called with an object that allows @@ -491,6 +508,7 @@ export async function withNetworkClient( getRpcServiceOptions = () => ({ fetch, btoa }), getBlockTrackerOptions = () => ({}), messenger = buildRootMessenger(), + networkClientId = 'some-network-client-id', isRpcFailoverEnabled = false, }: MockOptions, // TODO: Replace `any` with type @@ -540,6 +558,7 @@ export async function withNetworkClient( : `https://${infuraNetwork}.infura.io/v3/${MOCK_INFURA_PROJECT_ID}`; const networkClient = createNetworkClient({ + id: networkClientId, configuration: networkClientConfiguration, getRpcServiceOptions, getBlockTrackerOptions, diff --git a/packages/network-controller/tests/network-client/rpc-failover.ts b/packages/network-controller/tests/network-client/rpc-failover.ts index f214c939cb3..a08562c2e0c 100644 --- a/packages/network-controller/tests/network-client/rpc-failover.ts +++ b/packages/network-controller/tests/network-client/rpc-failover.ts @@ -26,7 +26,6 @@ export function testsForRpcFailoverBehavior({ failure, isRetriableFailure, getExpectedError, - getExpectedBreakError = getExpectedError, }: { providerType: ProviderType; requestToCall: MockRequest; @@ -96,7 +95,7 @@ export function testsForRpcFailoverBehavior({ }, async ({ makeRpcCall, clock }) => { messenger.subscribe( - 'NetworkController:rpcEndpointRequestRetried', + 'NetworkController:rpcEndpointInstanceRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -120,188 +119,6 @@ export function testsForRpcFailoverBehavior({ }); }); - it('publishes the NetworkController:rpcEndpointUnavailable event when the failover occurs', async () => { - const failoverEndpointUrl = 'https://failover.endpoint/'; - - await withMockedCommunications({ providerType }, async (primaryComms) => { - await withMockedCommunications( - { - providerType: 'custom', - customRpcUrl: failoverEndpointUrl, - }, - async (failoverComms) => { - const request = requestToCall; - const requestToMock = getRequestToMock(request, blockNumber); - const additionalMockRpcCallOptions = - failure instanceof Error || typeof failure === 'string' - ? { error: failure } - : { response: failure }; - - // The first time a block-cacheable request is made, the - // latest block number is retrieved through the block - // tracker first. 
- primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); - primaryComms.mockRpcCall({ - request: requestToMock, - times: maxConsecutiveFailures, - ...additionalMockRpcCallOptions, - }); - failoverComms.mockRpcCall({ - request: requestToMock, - response: { - result: 'ok', - }, - }); - - const messenger = buildRootMessenger(); - const rpcEndpointUnavailableEventHandler = jest.fn(); - messenger.subscribe( - 'NetworkController:rpcEndpointUnavailable', - rpcEndpointUnavailableEventHandler, - ); - - await withNetworkClient( - { - providerType, - isRpcFailoverEnabled: true, - failoverRpcUrls: [failoverEndpointUrl], - messenger, - getRpcServiceOptions: () => ({ - fetch, - btoa, - policyOptions: { - backoff: new ConstantBackoff(backoffDuration), - }, - }), - }, - async ({ makeRpcCall, clock, chainId, rpcUrl }) => { - messenger.subscribe( - 'NetworkController:rpcEndpointRequestRetried', - () => { - // Ensure that we advance to the next RPC request - // retry, not the next block tracker request. - // We also don't need to await this, it just needs to - // be added to the promise queue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.tickAsync(backoffDuration); - }, - ); - - for (let i = 0; i < numRequestsToMake - 1; i++) { - await ignoreRejection(makeRpcCall(request)); - } - await makeRpcCall(request); - - expect(rpcEndpointUnavailableEventHandler).toHaveBeenCalledWith( - { - chainId, - endpointUrl: rpcUrl, - failoverEndpointUrl, - error: getExpectedBreakError(rpcUrl), - }, - ); - }, - ); - }, - ); - }); - }); - - it('publishes the NetworkController:rpcEndpointUnavailable event when the failover becomes unavailable', async () => { - const failoverEndpointUrl = 'https://failover.endpoint/'; - - await withMockedCommunications({ providerType }, async (primaryComms) => { - await withMockedCommunications( - { - providerType: 'custom', - customRpcUrl: failoverEndpointUrl, - }, - async (failoverComms) => { - const request = requestToCall; - const requestToMock = getRequestToMock(request, blockNumber); - const additionalMockRpcCallOptions = - failure instanceof Error || typeof failure === 'string' - ? { error: failure } - : { response: failure }; - - // The first time a block-cacheable request is made, the - // latest block number is retrieved through the block - // tracker first. - primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); - primaryComms.mockRpcCall({ - request: requestToMock, - times: maxConsecutiveFailures, - ...additionalMockRpcCallOptions, - }); - failoverComms.mockRpcCall({ - request: requestToMock, - times: maxConsecutiveFailures, - ...additionalMockRpcCallOptions, - }); - // Block tracker requests on the primary will fail over - failoverComms.mockNextBlockTrackerRequest({ - blockNumber, - }); - - const messenger = buildRootMessenger(); - const rpcEndpointUnavailableEventHandler = jest.fn(); - messenger.subscribe( - 'NetworkController:rpcEndpointUnavailable', - rpcEndpointUnavailableEventHandler, - ); - - await withNetworkClient( - { - providerType, - isRpcFailoverEnabled: true, - failoverRpcUrls: [failoverEndpointUrl], - messenger, - getRpcServiceOptions: () => ({ - fetch, - btoa, - policyOptions: { - backoff: new ConstantBackoff(backoffDuration), - }, - }), - }, - async ({ makeRpcCall, clock, chainId }) => { - messenger.subscribe( - 'NetworkController:rpcEndpointRequestRetried', - () => { - // Ensure that we advance to the next RPC request - // retry, not the next block tracker request. 
- // We also don't need to await this, it just needs to - // be added to the promise queue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.tickAsync(backoffDuration); - }, - ); - - for (let i = 0; i < maxConsecutiveFailures - 1; i++) { - await ignoreRejection(makeRpcCall(request)); - } - for (let i = 0; i < maxConsecutiveFailures; i++) { - await ignoreRejection(makeRpcCall(request)); - } - - expect( - rpcEndpointUnavailableEventHandler, - ).toHaveBeenNthCalledWith(2, { - chainId, - endpointUrl: failoverEndpointUrl, - error: getExpectedBreakError(failoverEndpointUrl), - }); - }, - ); - }, - ); - }); - }); - it('allows RPC service options to be customized', async () => { const customMaxConsecutiveFailures = 6; const customMaxRetries = 2; @@ -390,7 +207,7 @@ export function testsForRpcFailoverBehavior({ }, async ({ makeRpcCall, clock }) => { messenger.subscribe( - 'NetworkController:rpcEndpointRequestRetried', + 'NetworkController:rpcEndpointInstanceRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -453,7 +270,7 @@ export function testsForRpcFailoverBehavior({ }, async ({ makeRpcCall, clock, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointRequestRetried', + 'NetworkController:rpcEndpointInstanceRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. diff --git a/yarn.lock b/yarn.lock index c1077e11cdf..bcf7f92be6e 100644 --- a/yarn.lock +++ b/yarn.lock @@ -4253,6 +4253,7 @@ __metadata: "@types/lodash": "npm:^4.14.191" "@types/node-fetch": "npm:^2.6.12" async-mutex: "npm:^0.5.0" + cockatiel: "npm:^3.1.2" deep-freeze-strict: "npm:^1.1.1" deepmerge: "npm:^4.2.2" fast-deep-equal: "npm:^3.1.3" From 0da865b90de28c55c883cccdb4e0cca97963241d Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 12:59:19 -0700 Subject: [PATCH 11/30] Remove this comment --- packages/network-controller/tests/network-client/helpers.ts | 3 --- 1 file changed, 3 deletions(-) diff --git a/packages/network-controller/tests/network-client/helpers.ts b/packages/network-controller/tests/network-client/helpers.ts index f53900dc99d..2740677a52e 100644 --- a/packages/network-controller/tests/network-client/helpers.ts +++ b/packages/network-controller/tests/network-client/helpers.ts @@ -212,9 +212,6 @@ function mockRpcCall({ return [httpStatus, resolvedResponse]; } - // It is very dumb that we have to do all of these shenanigans. - // Perhaps we can simplify this in the future. - const { id: jsonRpcId = jsonRpcRequest.id, jsonrpc: jsonRpcVersion = jsonRpcRequest.jsonrpc, From b3909afaf3b1b03a6f83ddc5f9c9ae1afeb833aa Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 14:02:56 -0700 Subject: [PATCH 12/30] Add 'degraded' status --- .../controller-utils/src/create-service-policy.ts | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index bc8bdbfddc1..8707c2a22c8 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -144,20 +144,19 @@ type InternalCircuitState = | { state: Exclude }; /** - * List of avalability statuses. + * Availability statuses that the service can be in. * * Used to keep track of whether the `onAvailable` event should be fired. 
*/ const AVAILABILITY_STATUSES = { - Unknown: 'unknown', Available: 'available', + Degraded: 'degraded', Unavailable: 'unavailable', -}; + Unknown: 'unknown', +} as const; /** - * An availability status. - * - * Used to keep track of whether the `onAvailable` event should be fired. + * An availability status that the service can be in. */ type AvailabilityStatus = (typeof AVAILABILITY_STATUSES)[keyof typeof AVAILABILITY_STATUSES]; @@ -329,12 +328,14 @@ export function createServicePolicy( retryPolicy.onGiveUp((data) => { if (circuitBreakerPolicy.state === CircuitState.Closed) { + availabilityStatus = AVAILABILITY_STATUSES.Degraded; onDegradedEventEmitter.emit(data); } }); retryPolicy.onSuccess(({ duration }) => { if (circuitBreakerPolicy.state === CircuitState.Closed) { if (duration > degradedThreshold) { + availabilityStatus = AVAILABILITY_STATUSES.Degraded; onDegradedEventEmitter.emit(); } else if (availabilityStatus !== AVAILABILITY_STATUSES.Available) { availabilityStatus = AVAILABILITY_STATUSES.Available; From 6b628d7f2f4c35d084053abff6097217f3051a2c Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 14:21:25 -0700 Subject: [PATCH 13/30] Use similar terminology as in createServicePolicy --- .../src/create-service-policy.ts | 2 +- .../src/rpc-service/rpc-service-chain.ts | 40 +++++++++---------- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index bc8bdbfddc1..adda36c0361 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -144,7 +144,7 @@ type InternalCircuitState = | { state: Exclude }; /** - * List of avalability statuses. + * Statuses that the RPC service chain can be in. * * Used to keep track of whether the `onAvailable` event should be fired. */ diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index 0617ebd9c3f..5aa03f937d8 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ -23,21 +23,21 @@ import { projectLogger, createModuleLogger } from '../logger'; const log = createModuleLogger(projectLogger, 'RpcServiceChain'); /** - * Possible states of the RPC service chain. + * Statuses that the RPC service chain can be in. */ -const STATES = { - Initial: 'initial', +const STATUSES = { Available: 'available', Degraded: 'degraded', + Unknown: 'unknown', Unavailable: 'unavailable', } as const; -type RpcServiceConfiguration = Omit; - /** - * The state of the service. + * Statuses that the RPC service chain can be in. */ -type State = (typeof STATES)[keyof typeof STATES]; +type Status = (typeof STATUSES)[keyof typeof STATUSES]; + +type RpcServiceConfiguration = Omit; /** * This class constructs and manages requests to a chain of RpcService objects @@ -81,9 +81,9 @@ export class RpcServiceChain { readonly #services: RpcService[]; /** - * The state of the RPC service chain. + * The status of the RPC service chain. */ - #state: State; + #status: Status; /** * Constructs a new RpcServiceChain object. 
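To make the availability-status bookkeeping in this patch easier to follow, here is a simplified sketch of the transitions encoded by the `onGiveUp`/`onSuccess` handlers above. It is an illustration, not the real implementation: the `nextStatus` helper and the outcome labels are invented for this sketch, and it ignores the open-circuit path that the actual code also handles.

    type AvailabilityStatus = 'available' | 'degraded' | 'unavailable' | 'unknown';

    // How a single execution outcome maps onto the next status and the event
    // that fires, while the circuit is closed.
    function nextStatus(
      previous: AvailabilityStatus,
      outcome: 'gave-up' | 'slow-success' | 'fast-success',
    ): { status: AvailabilityStatus; event?: 'onDegraded' | 'onAvailable' } {
      if (outcome === 'gave-up' || outcome === 'slow-success') {
        // Retries were exhausted, or the call succeeded but took too long.
        return { status: 'degraded', event: 'onDegraded' };
      }
      // A fast success only emits onAvailable when the service was not already
      // known to be available.
      return previous === 'available'
        ? { status: 'available' }
        : { status: 'available', event: 'onAvailable' };
    }
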
@@ -103,7 +103,7 @@ export class RpcServiceChain { ); this.#primaryService = this.#services[0]; - this.#state = STATES.Initial; + this.#status = STATUSES.Unknown; this.#onBreakEventEmitter = new CockatielEventEmitter< ExtendCockatielEventData< ExtractCockatielEventData, @@ -114,9 +114,9 @@ export class RpcServiceChain { this.#onDegradedEventEmitter = new CockatielEventEmitter(); for (const service of this.#services) { service.onDegraded((data) => { - if (this.#state !== STATES.Degraded) { - log('Updating state to "degraded"', data); - this.#state = STATES.Degraded; + if (this.#status !== STATUSES.Degraded) { + log('Updating status to "degraded"', data); + this.#status = STATUSES.Degraded; this.#onDegradedEventEmitter.emit({ ...data, primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), @@ -128,9 +128,9 @@ export class RpcServiceChain { this.#onAvailableEventEmitter = new CockatielEventEmitter(); for (const service of this.#services) { service.onAvailable((data) => { - if (this.#state !== STATES.Available) { - log('Updating state to "available"', data); - this.#state = STATES.Available; + if (this.#status !== STATUSES.Available) { + log('Updating status to "available"', data); + this.#status = STATUSES.Available; this.#onAvailableEventEmitter.emit({ ...data, primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), @@ -409,7 +409,7 @@ export class RpcServiceChain { 'Previous circuit state', previousCircuitState, 'state', - this.#state, + this.#status, ); if (isCircuitOpen) { @@ -422,16 +422,16 @@ export class RpcServiceChain { if ( previousCircuitState !== CircuitState.Open && - this.#state !== STATES.Unavailable && + this.#status !== STATUSES.Unavailable && lastFailureReason !== undefined ) { // If the service's circuit just broke and it's the last one in the // chain, then trigger the onBreak event. (But if for some reason we // have already done this, then don't do it.) log( - 'This service\'s circuit just opened and it is the last service. Updating state to "unavailable" and triggering onBreak.', + 'This service\'s circuit just opened and it is the last service. Updating status to "unavailable" and triggering onBreak.', ); - this.#state = STATES.Unavailable; + this.#status = STATUSES.Unavailable; this.#onBreakEventEmitter.emit({ ...lastFailureReason, primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), From 2d38446c963530f23b325a31ce01675f01f17644 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 14:22:33 -0700 Subject: [PATCH 14/30] Adjust createServicePolicy as well --- packages/controller-utils/src/create-service-policy.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index 8707c2a22c8..cd28da0669e 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -156,7 +156,9 @@ const AVAILABILITY_STATUSES = { } as const; /** - * An availability status that the service can be in. + * Availability statuses that the service can be in. + * + * Used to keep track of whether the `onAvailable` event should be fired. 
*/ type AvailabilityStatus = (typeof AVAILABILITY_STATUSES)[keyof typeof AVAILABILITY_STATUSES]; From 3d8da80f9e3da06517d44b15ad197115b6a01dd2 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 14:22:33 -0700 Subject: [PATCH 15/30] Adjust createServicePolicy as well --- packages/controller-utils/src/create-service-policy.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/packages/controller-utils/src/create-service-policy.ts b/packages/controller-utils/src/create-service-policy.ts index 8707c2a22c8..cd28da0669e 100644 --- a/packages/controller-utils/src/create-service-policy.ts +++ b/packages/controller-utils/src/create-service-policy.ts @@ -156,7 +156,9 @@ const AVAILABILITY_STATUSES = { } as const; /** - * An availability status that the service can be in. + * Availability statuses that the service can be in. + * + * Used to keep track of whether the `onAvailable` event should be fired. */ type AvailabilityStatus = (typeof AVAILABILITY_STATUSES)[keyof typeof AVAILABILITY_STATUSES]; From f67839ab7cbc0aedef355b035f464484203083f9 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Mon, 17 Nov 2025 23:37:54 -0700 Subject: [PATCH 16/30] Update some of the terminology --- packages/network-controller/CHANGELOG.md | 14 +++++------ .../rpc-endpoint-events.test.ts | 24 +++++++++---------- .../src/rpc-service/rpc-service-chain.ts | 6 ++--- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/packages/network-controller/CHANGELOG.md b/packages/network-controller/CHANGELOG.md index 5228a1c1e9d..d5118cb904a 100644 --- a/packages/network-controller/CHANGELOG.md +++ b/packages/network-controller/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added - Add `NetworkController:rpcEndpointAvailable` messenger event ([#7166](https://github.com/MetaMask/core/pull/7166)) - - These are counterparts to the (new) `NetworkController:rpcEndpointUnavailable` and `NetworkController:rpcEndpointDegraded` events, but are published when a request to an RPC endpoint URL is made either initially or following a previously established degraded or unavailable status. + - These are counterparts to the (new) `NetworkController:rpcEndpointUnavailable` and `NetworkController:rpcEndpointDegraded` events, but is published when a successful network client request is made either initially or following a previously established degraded or unavailable status. ### Changed @@ -24,13 +24,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - In practice, this should happen rarely if ever. - **BREAKING:** Migrate `NetworkClient` to `JsonRpcEngineV2` ([#7065](https://github.com/MetaMask/core/pull/7065)) - This ought to be unobservable, but we mark it as breaking out of an abundance of caution. -- **BREAKING:** Split up and update payload data for `NetworkController:rpcEndpoint{Degraded,Unavailable}` ([#7166](https://github.com/MetaMask/core/pull/7166)) - - The existing events are now called `NetworkController:rpcEndpointInstance{Degraded,Unavailable}` and retain their present behavior. - - `NetworkController:rpcEndpointInstance{Degraded,Unavailable}` do still exist, but they are now designed to represent the entire RPC endpoint and are guaranteed to not be published multiple times in a row. 
In particular, `NetworkController:rpcEndpointUnavailable` is published only after trying all of the designated URLs for a particular RPC endpoint and the underlying circuit for the last URL breaks, not as each primary's or failover's circuit breaks. - - The event payloads have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added. +- **BREAKING:** Split up and update payload data for `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` ([#7166](https://github.com/MetaMask/core/pull/7166)) + - The existing events are now called `NetworkController:rpcEndpointInstanceDegraded` and `NetworkController:rpcEndpointInstanceUnavailable` and retain their present behavior. + - `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` do still exist, but they are now designed to represent a network client rather than an RPC endpoint and are guaranteed to not be published multiple times in a row. In particular, `NetworkController:rpcEndpointUnavailable` is published only after trying all of the defined endpoints for a network client and the underlying circuit for the last endpoint breaks, not as each primary's or failover's circuit breaks. + - The event payloads have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added to the payload. - **BREAKING:** Rename and update payload data for `NetworkController:rpcEndpointRequestRetried` ([#7166](https://github.com/MetaMask/core/pull/7166)) - - This event is now called `NetworkController:rpcEndpointInstanceRequestRetried` - - The event payload has been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` and `attempt` have been added. + - This event is now called `NetworkController:rpcEndpointInstanceRequestRetried`. + - The event payload has been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` and `attempt` have been added to the payload. - **BREAKING:** Update `AbstractRpcService`/`RpcServiceRequestable` to remove `{ isolated: true }` from the `onBreak` event data type ([#7166](https://github.com/MetaMask/core/pull/7166)) - This represented the error produced when `.isolate` is called on a Cockatiel circuit breaker policy, which we never do. 
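As a rough illustration of the events described in these changelog entries, a consumer might subscribe to them as shown below. The payload fields destructured here (`networkClientId`, `primaryEndpointUrl`, `endpointUrl`) are the ones named in the entries above; `messenger` is assumed to be a messenger with access to the NetworkController events, as in the tests elsewhere in this patch, and the handler bodies are placeholders.

    messenger.subscribe(
      'NetworkController:rpcEndpointUnavailable',
      ({ networkClientId, primaryEndpointUrl, endpointUrl }) => {
        // Every endpoint for this network client has failed; the circuit for
        // the last one tried (endpointUrl) has just broken.
      },
    );

    messenger.subscribe(
      'NetworkController:rpcEndpointDegraded',
      ({ networkClientId, primaryEndpointUrl, endpointUrl }) => {
        // Requests for this network client are succeeding, but slowly or only
        // after retries/failovers.
      },
    );
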
- Bump `@metamask/controller-utils` from `^11.14.1` to `^11.15.0` ([#7003](https://github.com/MetaMask/core/pull/7003)) diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts index cc8d8455ba9..af8ef2a68f5 100644 --- a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -19,7 +19,7 @@ describe('createNetworkClient - RPC endpoint events', () => { const blockNumber = '0x100'; const backoffDuration = 100; - it('publishes the NetworkController:rpcEndpointUnavailable event only when the max number of consecutive request failures is reached for all of the provided endpoint URLs', async () => { + it('publishes the NetworkController:rpcEndpointUnavailable event only when the max number of consecutive request failures is reached for all of the endpoints in a group of endpoints', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -134,7 +134,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceUnavailable event each time the max number of consecutive request failures is reached for any of the provided endpoint URLs', async () => { + it('publishes the NetworkController:rpcEndpointInstanceUnavailable event each time the max number of consecutive request failures is reached for any of the endpoints in a group of endpoints', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -258,7 +258,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the max number of retries is continually reached in making requests to the primary endpoint URL', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the max number of retries is continually reached in making requests to a primary endpoint', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -343,7 +343,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the time to complete a request to the primary endpoint URL is continually too long', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the time to complete a request to a primary endpoint is continually too long', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -410,7 +410,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('does not publish the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint URL', async () => { + it('does not publish the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -516,7 +516,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('does not publish the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint 
URL is too long', async () => { + it('does not publish the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -621,7 +621,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event each time the max number of retries is reached in making requests to the primary endpoint URL', async () => { + it('publishes the NetworkController:rpcEndpointInstanceDegraded event each time the max number of retries is reached in making requests to a primary endpoint', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -717,7 +717,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event when the time to complete a request to the primary endpoint URL is continually too long', async () => { + it('publishes the NetworkController:rpcEndpointInstanceDegraded event when the time to complete a request to a primary endpoint is continually too long', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -813,7 +813,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event again if the max number of retries is reached in making requests to a failover endpoint URL', async () => { + it('publishes the NetworkController:rpcEndpointInstanceDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -937,7 +937,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event again when the time to complete a request to a failover endpoint URL is too long', async () => { + it('publishes the NetworkController:rpcEndpointInstanceDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -1060,7 +1060,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to the RPC endpoint is made', async () => { + it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a (primary) RPC endpoint is made', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -1117,7 +1117,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a failover is made', async () => { + it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a failover endpoint is made', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index 5aa03f937d8..e9067c4ea4d 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ 
b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ -41,9 +41,9 @@ type RpcServiceConfiguration = Omit; /** * This class constructs and manages requests to a chain of RpcService objects - * which represent an RPC endpoint on a particular network. The first service in - * the chain is intended to be the primary way of hitting the endpoint and the - * remaining services are used as failovers. + * which represent RPC endpoints with which to access a particular network. The + * first service in the chain is intended to be the primary way of hitting the + * network and the remaining services are used as failovers. */ export class RpcServiceChain { /** From 110cb0b42fa3be5156dc5b66a72ad3ea3539ec77 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Tue, 18 Nov 2025 00:07:34 -0700 Subject: [PATCH 17/30] Update more of the terminology --- packages/network-controller/CHANGELOG.md | 12 +- .../src/NetworkController.ts | 207 +++++++++--------- .../rpc-endpoint-events.test.ts | 188 ++++++++-------- .../src/create-network-client.ts | 12 +- packages/network-controller/src/index.ts | 10 +- .../tests/network-client/rpc-failover.ts | 23 +- 6 files changed, 223 insertions(+), 229 deletions(-) diff --git a/packages/network-controller/CHANGELOG.md b/packages/network-controller/CHANGELOG.md index d5118cb904a..0bccbd18b94 100644 --- a/packages/network-controller/CHANGELOG.md +++ b/packages/network-controller/CHANGELOG.md @@ -9,8 +9,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Added -- Add `NetworkController:rpcEndpointAvailable` messenger event ([#7166](https://github.com/MetaMask/core/pull/7166)) - - These are counterparts to the (new) `NetworkController:rpcEndpointUnavailable` and `NetworkController:rpcEndpointDegraded` events, but is published when a successful network client request is made either initially or following a previously established degraded or unavailable status. +- Add `NetworkController:rpcEndpointChainAvailable` messenger event ([#7166](https://github.com/MetaMask/core/pull/7166)) + - This is a counterpart to the (new) `NetworkController:rpcEndpointChainUnavailable` and `NetworkController:rpcEndpointChainDegraded` events, but is published when a successful request to an endpoint within a chain of endpoints is made either initially or following a previously established degraded or unavailable status. ### Changed @@ -25,11 +25,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - **BREAKING:** Migrate `NetworkClient` to `JsonRpcEngineV2` ([#7065](https://github.com/MetaMask/core/pull/7065)) - This ought to be unobservable, but we mark it as breaking out of an abundance of caution. - **BREAKING:** Split up and update payload data for `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` ([#7166](https://github.com/MetaMask/core/pull/7166)) - - The existing events are now called `NetworkController:rpcEndpointInstanceDegraded` and `NetworkController:rpcEndpointInstanceUnavailable` and retain their present behavior. - - `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` do still exist, but they are now designed to represent a network client rather than an RPC endpoint and are guaranteed to not be published multiple times in a row. 
In particular, `NetworkController:rpcEndpointUnavailable` is published only after trying all of the defined endpoints for a network client and the underlying circuit for the last endpoint breaks, not as each primary's or failover's circuit breaks. - - The event payloads have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added to the payload. + - `NetworkController:rpcEndpointDegraded` and `NetworkController:rpcEndpointUnavailable` still exist and retain the same behavior as before. + - New events are `NetworkController:rpcEndpointChainDegraded` and `NetworkController:rpcEndpointChainUnavailable`, and are designed to represent an entire chain of endpoints. They are also guaranteed to not be published multiple times in a row. In particular, `NetworkController:rpcEndpointChainUnavailable` is published only after trying all of the endpoints for a chain and when the underlying circuit for the last endpoint breaks, not as each primary's or failover's circuit breaks. + - The event payloads for all events have been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` has been added to the payload. - **BREAKING:** Rename and update payload data for `NetworkController:rpcEndpointRequestRetried` ([#7166](https://github.com/MetaMask/core/pull/7166)) - - This event is now called `NetworkController:rpcEndpointInstanceRequestRetried`. + - This event is now called `NetworkController:rpcEndpointRetried`. - The event payload has been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` and `attempt` have been added to the payload. - **BREAKING:** Update `AbstractRpcService`/`RpcServiceRequestable` to remove `{ isolated: true }` from the `onBreak` event data type ([#7166](https://github.com/MetaMask/core/pull/7166)) - This represented the error produced when `.isolate` is called on a Cockatiel circuit breaker policy, which we never do. diff --git a/packages/network-controller/src/NetworkController.ts b/packages/network-controller/src/NetworkController.ts index ba317f58e07..5cbc80b3bf7 100644 --- a/packages/network-controller/src/NetworkController.ts +++ b/packages/network-controller/src/NetworkController.ts @@ -443,32 +443,33 @@ export type NetworkControllerNetworkRemovedEvent = { }; /** - * `NetworkController:rpcEndpointUnavailable` is published when the number of - * failed consecutive attempts to receive a 2xx response from the primary URL of - * an RPC endpoint reaches a maximum, causing further requests to be temporarily - * paused, and when subsequent traffic to a failover URL similarly fails. + * `NetworkController:rpcEndpointChainUnavailable` is published when the number + * of failed consecutive attempts to receive a 2xx response from the primary + * endpoint of a chain of endpoints reaches a maximum, causing further requests + * to be temporarily paused, and when subsequent traffic to a failover endpoint + * similarly fails. * * In other words, this event will not published if a primary is deemed to be * unavailable but its failover is not. 
* - * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to - * be published, the event will not be re-published (for instance, if both a - * primary and failover are deemed to be unavailable, or if more than one + * Additionally, if this was the last `NetworkController:rpcEndpointChain*` + * event to be published, the event will not be re-published (for instance, if + * both a primary and failover are deemed to be unavailable, or if more than one * failover is deemed to be unavailable). * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. - * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. - * @param payload.primaryEndpointUrl - The primary URL of the endpoint. - * @param payload.endpointUrl - One of the URLs defined for the endpoint which - * has been deemed to be unavailable. - * @param payload.error - The error from the last request to one of the URLs - * defined for the endpoint which determined the unavailability status. + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which has been deemed to be unavailable. + * @param payload.error - The error from the last request to `endpointUrl` which + * determined the unavailability status. + * @param payload.networkClientId - The ID of the client representing the target + * network. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. */ -export type NetworkControllerRpcEndpointUnavailableEvent = { - type: 'NetworkController:rpcEndpointUnavailable'; +export type NetworkControllerRpcEndpointChainUnavailableEvent = { + type: 'NetworkController:rpcEndpointChainUnavailable'; payload: [ { chainId: Hex; @@ -481,31 +482,31 @@ export type NetworkControllerRpcEndpointUnavailableEvent = { }; /** - * `NetworkController:rpcEndpointInstanceUnavailable` is published when the - * number of failed consecutive attempts to receive a 2xx response from *any* of - * the designated URLs of an RPC endpoint reaches a maximum. + * `NetworkController:rpcEndpointUnvailable` is published when the number of + * failed consecutive attempts to receive a 2xx response from *any* of the + * RPC endpoints within a chain of endpoints reaches a maximum. * * This event will still be published if a primary is deemed to be unavailable, * even its failover is available. * - * Additionally, even if this was the last `NetworkController:rpcEndpoint*` event - * to be published, the event may be re-published (for instance, if both a + * Additionally, even if this was the last `NetworkController:rpcEndpoint*` + * event to be published, the event may be re-published (for instance, if both a * primary and failover are deemed to be unavailable, or if more than one * failover is deemed to be unavailable). * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which has been deemed to be unavailable. + * @param payload.error - The error from the last request to `endpointUrl` which + * determined the unavailability status. * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. 
- * @param payload.primaryEndpointUrl - The primary URL of the endpoint. - * @param payload.endpointUrl - One of the URLs defined for the endpoint which - * has been deemed to be unavailable. - * @param payload.error - The error from the last request to the `endpointUrl` - * which determined the unavailability status. + * the chain of endpoints. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. */ -export type NetworkControllerRpcEndpointInstanceUnavailableEvent = { - type: 'NetworkController:rpcEndpointInstanceUnavailable'; +export type NetworkControllerRpcEndpointUnvailableEvent = { + type: 'NetworkController:rpcEndpointUnvailable'; payload: [ { chainId: Hex; @@ -518,37 +519,37 @@ export type NetworkControllerRpcEndpointInstanceUnavailableEvent = { }; /** - * `NetworkController:rpcEndpointDegraded` is published in the following two - * cases: + * `NetworkController:rpcEndpointChainDegraded` is published in the following + * two cases: * - * 1. When an attempt to receive a 2xx response from any of the designated URLs - * for an RPC endpoint is unsuccessful, and all subsequent automatic retries - * lead to the same result. - * 2. When a 2xx response is received from any of the endpoint URLs, but the - * request takes longer than a set number of seconds to complete. + * 1. When an attempt to receive a 2xx response from any of the endpoints + * within a chain of endpoints is unsuccessful, and all subsequent automatic + * retries lead to the same result. + * 2. When a 2xx response is received from any of the endpoints, but the request + * takes longer than a set number of seconds to complete. * * Note that this event will be published even if there are local connectivity * issues which prevent requests from being initiated. This is intentional. * - * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to - * be published, the event will not be re-published (for instance: a failover is - * activated and successive attempts to the failover fail, then the primary - * comes back online, but it is slow). + * Additionally, if this was the last `NetworkController:rpcEndpointChain*` + * event to be published, the event will not be re-published (for instance: a + * failover is activated and successive attempts to the failover fail, then the + * primary comes back online, but it is slow). * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. - * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. - * @param payload.primaryEndpointUrl - The primary URL of the endpoint. - * @param payload.endpointUrl - One of the URLs defined for the endpoint which - * has been deemed to be degraded. - * @param payload.error - The error from the last request to the `endpointUrl` - * which determined the degraded status (or `undefined` if the request was - * merely slow). + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which has been deemed to be degraded. + * @param payload.error - The error from the last request to `endpointUrl` which + * determined the degraded status (or `undefined` if the request was merely + * slow). + * @param payload.networkClientId - The ID of the client representing the target + * network. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. 
*/ -export type NetworkControllerRpcEndpointDegradedEvent = { - type: 'NetworkController:rpcEndpointDegraded'; +export type NetworkControllerRpcEndpointChainDegradedEvent = { + type: 'NetworkController:rpcEndpointChainDegraded'; payload: [ { chainId: Hex; @@ -562,14 +563,14 @@ export type NetworkControllerRpcEndpointDegradedEvent = { /** * - * `NetworkController:rpcEndpointInstanceDegraded` is published in the following + * `NetworkController:rpcEndpointDegraded` is published in the following * two cases: * - * 1. When an attempt to receive a 2xx response from *any* of the designated - * URLs for an RPC endpoint is unsuccessful, and all subsequent automatic + * 1. When an attempt to receive a 2xx response from any of the endpoints + * within a chain of endpoints is unsuccessful, and all subsequent automatic * retries lead to the same result. - * 2. When a 2xx response is received from any of the endpoint URLs, but the - * request takes longer than a set number of seconds to complete. + * 2. When a 2xx response is received from any of the endpoints, but the request + * takes longer than a set number of seconds to complete. * * Note that this event will be published even if there are local connectivity * issues which prevent requests from being initiated. This is intentional. @@ -580,19 +581,19 @@ export type NetworkControllerRpcEndpointDegradedEvent = { * comes back online, but it is slow). * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. - * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. - * @param payload.primaryEndpointUrl - The primary URL of the endpoint. - * @param payload.endpointUrl - One of the URLs defined for the endpoint which - * has been deemed to be degraded. - * @param payload.error - The error from the last request to the `endpointUrl` - * which determined the degraded status (or `undefined` if the request was - * merely slow). + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which has been deemed to be degraded. + * @param payload.error - The error from the last request to `endpointUrl` which + * determined the degraded status (or `undefined` if the request was merely + * slow). + * @param payload.networkClientId - The ID of the client representing the target + * network. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. */ -export type NetworkControllerRpcEndpointInstanceDegradedEvent = { - type: 'NetworkController:rpcEndpointInstanceDegraded'; +export type NetworkControllerRpcEndpointDegradedEvent = { + type: 'NetworkController:rpcEndpointDegraded'; payload: [ { chainId: Hex; @@ -605,25 +606,25 @@ export type NetworkControllerRpcEndpointInstanceDegradedEvent = { }; /** - * `NetworkController:rpcEndpointAvailable` is published in either of the + * `NetworkController:rpcEndpointChainAvailable` is published in either of the * following two cases: * - * 1. The first time that a 2xx request is made to any of the designated URLs of - * an RPC endpoint. - * 2. When requests to any of the URLs previously failed (placing the endpoint - * in a degraded or unavailable status), but are now succeeding again. + * 1. The first time that a 2xx request is made to any of the endpoints within + * a chain of endpoints. + * 2. 
When requests to any of the endpoints previously failed (placing the + * endpoint in a degraded or unavailable status), but are now succeeding again. * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which has been deemed to be available. * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. - * @param payload.primaryEndpointUrl - The primary URL of the RPC endpoint. - * @param payload.endpointUrl - The specific URL that returned a successful - * response. + * the chain of endpoints. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. */ -export type NetworkControllerRpcEndpointAvailableEvent = { - type: 'NetworkController:rpcEndpointAvailable'; +export type NetworkControllerRpcEndpointChainAvailableEvent = { + type: 'NetworkController:rpcEndpointChainAvailable'; payload: [ { chainId: Hex; @@ -635,25 +636,25 @@ export type NetworkControllerRpcEndpointAvailableEvent = { }; /** - * `NetworkController:rpcEndpointInstanceRetried` is published before a - * request to any of the designated URLs of an RPC endpoint is retried. + * `NetworkController:rpcEndpointRetried` is published before a request to any + * of the endpoints within a chain of endpoints is retried. * * This is mainly useful for tests. * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the network where the RPC endpoint - * lives. - * @param payload.networkClientId - The ID of the network client representing - * the RPC endpoint. - * @param payload.primaryEndpointUrl - The primary URL of the RPC endpoint. - * @param payload.endpointUrl - The URL defined for the endpoint that is being - * retried. - * @param payload.attempt - The current attempt counter for the endpoint URL + * @param payload.attempt - The current attempt counter for the endpoint * (starting from 0). + * @param payload.chainId - The chain ID of the target network. + * @param payload.endpointUrl - The URL of the endpoint among the chain of + * endpoints which is being retried. + * @param payload.networkClientId - The ID of the network client representing + * the chain of endpoints. + * @param payload.primaryEndpointUrl - The URL of the primary for the chain of + * endpoints. * @see {@link RpcService} for the list of retriable errors. 
*/ -export type NetworkControllerRpcEndpointInstanceRetriedEvent = { - type: 'NetworkController:rpcEndpointInstanceRetried'; +export type NetworkControllerRpcEndpointRetriedEvent = { + type: 'NetworkController:rpcEndpointRetried'; payload: [ { attempt: number; @@ -673,12 +674,12 @@ export type NetworkControllerEvents = | NetworkControllerInfuraIsUnblockedEvent | NetworkControllerNetworkAddedEvent | NetworkControllerNetworkRemovedEvent - | NetworkControllerRpcEndpointUnavailableEvent - | NetworkControllerRpcEndpointInstanceUnavailableEvent + | NetworkControllerRpcEndpointChainUnavailableEvent + | NetworkControllerRpcEndpointUnvailableEvent + | NetworkControllerRpcEndpointChainDegradedEvent | NetworkControllerRpcEndpointDegradedEvent - | NetworkControllerRpcEndpointInstanceDegradedEvent - | NetworkControllerRpcEndpointAvailableEvent - | NetworkControllerRpcEndpointInstanceRetriedEvent; + | NetworkControllerRpcEndpointChainAvailableEvent + | NetworkControllerRpcEndpointRetriedEvent; /** * All events that {@link NetworkController} calls internally. diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts index af8ef2a68f5..3d7501ccc47 100644 --- a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -19,7 +19,7 @@ describe('createNetworkClient - RPC endpoint events', () => { const blockNumber = '0x100'; const backoffDuration = 100; - it('publishes the NetworkController:rpcEndpointUnavailable event only when the max number of consecutive request failures is reached for all of the endpoints in a group of endpoints', async () => { + it('publishes the NetworkController:rpcEndpointChainUnavailable event only when the max number of consecutive request failures is reached for all of the endpoints in a chain of endpoints', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -59,10 +59,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const rpcEndpointUnavailableEventHandler = jest.fn(); + const rpcEndpointChainUnavailableEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointUnavailable', - rpcEndpointUnavailableEventHandler, + 'NetworkController:rpcEndpointChainUnavailable', + rpcEndpointChainUnavailableEventHandler, ); await withNetworkClient( @@ -82,7 +82,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async ({ makeRpcCall, clock, chainId, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -115,10 +115,10 @@ describe('createNetworkClient - RPC endpoint events', () => { ); expect( - rpcEndpointUnavailableEventHandler, + rpcEndpointChainUnavailableEventHandler, ).toHaveBeenCalledTimes(1); expect( - rpcEndpointUnavailableEventHandler, + rpcEndpointChainUnavailableEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: failoverEndpointUrl, @@ -134,7 +134,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceUnavailable event each time the max number of consecutive request failures is reached for any of the endpoints in a group of endpoints', async () => { + it('publishes the NetworkController:rpcEndpointUnvailable event each time the max number of consecutive request failures is reached for any of the endpoints in a chain of endpoints', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -174,10 +174,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const rpcEndpointInstanceUnavailableEventHandler = jest.fn(); + const rpcEndpointUnvailableEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceUnavailable', - rpcEndpointInstanceUnavailableEventHandler, + 'NetworkController:rpcEndpointUnvailable', + rpcEndpointUnvailableEventHandler, ); await withNetworkClient( @@ -197,7 +197,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async ({ makeRpcCall, clock, chainId, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -230,10 +230,10 @@ describe('createNetworkClient - RPC endpoint events', () => { ); expect( - rpcEndpointInstanceUnavailableEventHandler, + rpcEndpointUnvailableEventHandler, ).toHaveBeenCalledTimes(2); expect( - rpcEndpointInstanceUnavailableEventHandler, + rpcEndpointUnvailableEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, @@ -242,7 +242,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointInstanceUnavailableEventHandler, + rpcEndpointUnvailableEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: failoverEndpointUrl, @@ -258,7 +258,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the max number of retries is continually reached in making requests to a primary endpoint', async () => { + it('publishes the NetworkController:rpcEndpointChainDegraded event only once, even if the max number of retries is continually reached in making requests to a primary endpoint', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -284,10 +284,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const rpcEndpointDegradedEventHandler = jest.fn(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointDegraded', - rpcEndpointDegradedEventHandler, + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, ); await withNetworkClient( @@ -305,7 +305,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async ({ makeRpcCall, clock, chainId, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -327,10 +327,12 @@ describe('createNetworkClient - RPC endpoint events', () => { expectedError, ); - expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( - 1, - ); - expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: expectedDegradedError, @@ -343,7 +345,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointDegraded event only once, even if the time to complete a request to a primary endpoint is continually too long', async () => { + it('publishes the NetworkController:rpcEndpointChainDegraded event only once, even if the time to complete a request to a primary endpoint is continually too long', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -353,10 +355,10 @@ describe('createNetworkClient - RPC endpoint events', () => { { providerType: networkClientType }, async (comms) => { const messenger = buildRootMessenger(); - const rpcEndpointDegradedEventHandler = jest.fn(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointDegraded', - rpcEndpointDegradedEventHandler, + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, ); await withNetworkClient( @@ -394,10 +396,12 @@ describe('createNetworkClient - RPC endpoint events', () => { await makeRpcCall(request); await makeRpcCall(request); - expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( - 1, - ); - expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainDegradedEventHandler, + ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: undefined, @@ -410,7 +414,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('does not publish the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { + it('does not publish the NetworkController:rpcEndpointChainDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -429,10 +433,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async (failoverComms) => { const messenger = buildRootMessenger(); - const rpcEndpointDegradedEventHandler = jest.fn(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointDegraded', - rpcEndpointDegradedEventHandler, + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, ); await withNetworkClient( @@ -473,7 +477,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -497,10 +501,10 @@ describe('createNetworkClient - RPC endpoint events', () => { ); expect( - rpcEndpointDegradedEventHandler, + rpcEndpointChainDegradedEventHandler, ).toHaveBeenCalledTimes(1); expect( - rpcEndpointDegradedEventHandler, + rpcEndpointChainDegradedEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, @@ -516,7 +520,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('does not publish the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { + it('does not publish the NetworkController:rpcEndpointChainDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -535,10 +539,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async (failoverComms) => { const messenger = buildRootMessenger(); - const rpcEndpointDegradedEventHandler = jest.fn(); + const rpcEndpointChainDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointDegraded', - rpcEndpointDegradedEventHandler, + 'NetworkController:rpcEndpointChainDegraded', + rpcEndpointChainDegradedEventHandler, ); await withNetworkClient( @@ -581,7 +585,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -602,10 +606,10 @@ describe('createNetworkClient - RPC endpoint events', () => { await makeRpcCall(request); expect( - rpcEndpointDegradedEventHandler, + rpcEndpointChainDegradedEventHandler, ).toHaveBeenCalledTimes(1); expect( - rpcEndpointDegradedEventHandler, + rpcEndpointChainDegradedEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, @@ -621,7 +625,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event each time the max number of retries is reached in making requests to a primary endpoint', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event each time the max number of retries is reached in making requests to a primary endpoint', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -647,10 +651,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + const rpcEndpointDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceDegraded', - rpcEndpointInstanceDegradedEventHandler, + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, ); await withNetworkClient( @@ -668,7 +672,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async ({ makeRpcCall, clock, chainId, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -690,21 +694,17 @@ describe('createNetworkClient - RPC endpoint events', () => { expectedError, ); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledTimes(2); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledWith({ + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 2, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: expectedDegradedError, networkClientId: 'AAAA-AAAA-AAAA-AAAA', primaryEndpointUrl: rpcUrl, }); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledWith({ + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: expectedDegradedError, @@ -717,7 +717,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event when the time to complete a request to a primary endpoint is continually too long', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event when the time to complete a request to a primary endpoint is continually too long', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -727,10 +727,10 @@ describe('createNetworkClient - RPC endpoint events', () => { { providerType: networkClientType }, async (comms) => { const messenger = buildRootMessenger(); - const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + const rpcEndpointDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceDegraded', - rpcEndpointInstanceDegradedEventHandler, + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, ); await withNetworkClient( @@ -786,21 +786,17 @@ describe('createNetworkClient - RPC endpoint events', () => { await blockTracker.checkForLatestBlock(); await makeRpcCall(request); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledTimes(2); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledWith({ + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( + 2, + ); + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: undefined, networkClientId: 'AAAA-AAAA-AAAA-AAAA', primaryEndpointUrl: rpcUrl, }); - expect( - rpcEndpointInstanceDegradedEventHandler, - ).toHaveBeenCalledWith({ + expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, error: undefined, @@ -813,7 +809,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event again if the max number of retries is reached in making requests to a failover endpoint', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -832,10 +828,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async (failoverComms) => { const messenger = buildRootMessenger(); - const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + const rpcEndpointDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceDegraded', - rpcEndpointInstanceDegradedEventHandler, + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, ); await withNetworkClient( @@ -876,7 +872,7 @@ 
describe('createNetworkClient - RPC endpoint events', () => { }); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -900,10 +896,10 @@ describe('createNetworkClient - RPC endpoint events', () => { ); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenCalledTimes(3); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(1, { chainId, endpointUrl: rpcUrl, @@ -912,7 +908,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(2, { chainId, endpointUrl: rpcUrl, @@ -921,7 +917,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(3, { chainId, endpointUrl: failoverEndpointUrl, @@ -937,7 +933,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointInstanceDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { + it('publishes the NetworkController:rpcEndpointDegraded event again when the time to complete a request to a failover endpoint is too long', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -956,10 +952,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async (failoverComms) => { const messenger = buildRootMessenger(); - const rpcEndpointInstanceDegradedEventHandler = jest.fn(); + const rpcEndpointDegradedEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceDegraded', - rpcEndpointInstanceDegradedEventHandler, + 'NetworkController:rpcEndpointDegraded', + rpcEndpointDegradedEventHandler, ); await withNetworkClient( @@ -1002,7 +998,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }); messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
@@ -1023,10 +1019,10 @@ describe('createNetworkClient - RPC endpoint events', () => { await makeRpcCall(request); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenCalledTimes(3); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(1, { chainId, endpointUrl: rpcUrl, @@ -1035,7 +1031,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(2, { chainId, endpointUrl: rpcUrl, @@ -1044,7 +1040,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointInstanceDegradedEventHandler, + rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(3, { chainId, endpointUrl: failoverEndpointUrl, @@ -1060,7 +1056,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a (primary) RPC endpoint is made', async () => { + it('publishes the NetworkController:rpcEndpointChainAvailable event the first time a successful request to a (primary) RPC endpoint is made', async () => { const request = { method: 'eth_gasPrice', params: [], @@ -1085,7 +1081,7 @@ describe('createNetworkClient - RPC endpoint events', () => { const messenger = buildRootMessenger(); const networkAvailableEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointAvailable', + 'NetworkController:rpcEndpointChainAvailable', networkAvailableEventHandler, ); @@ -1117,7 +1113,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointAvailable event the first time a successful request to a failover endpoint is made', async () => { + it('publishes the NetworkController:rpcEndpointChainAvailable event the first time a successful request to a failover endpoint is made', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -1157,7 +1153,7 @@ describe('createNetworkClient - RPC endpoint events', () => { const messenger = buildRootMessenger(); const networkAvailableEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointAvailable', + 'NetworkController:rpcEndpointChainAvailable', networkAvailableEventHandler, ); @@ -1178,7 +1174,7 @@ describe('createNetworkClient - RPC endpoint events', () => { }, async ({ makeRpcCall, clock, chainId, rpcUrl }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. 
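For illustration only (not part of this patch series): a minimal sketch of how a consumer might subscribe to the renamed chain-level events. It assumes `messenger` is a messenger that carries `NetworkControllerEvents` (the tests above obtain one via the `buildRootMessenger()` helper); the event names and payload fields are taken from the event types introduced in this patch.

// Hypothetical consumer code; only event names and payload fields documented
// in this patch are used here.
messenger.subscribe(
  'NetworkController:rpcEndpointChainUnavailable',
  ({ chainId, networkClientId, primaryEndpointUrl, endpointUrl, error }) => {
    // Every endpoint in the chain has been exhausted, so warn once.
    console.warn(
      `Network ${chainId} (client ${networkClientId}) is unavailable; ` +
        `last failure was for ${endpointUrl} (primary: ${primaryEndpointUrl})`,
      error,
    );
  },
);
messenger.subscribe(
  'NetworkController:rpcEndpointChainAvailable',
  ({ chainId, networkClientId }) => {
    // A request within the chain succeeded (initially or after a recovery).
    console.log(`Network ${chainId} (client ${networkClientId}) is available`);
  },
);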
diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index 045e182d14b..ddcf4201412 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -236,7 +236,7 @@ function createRpcServiceChain({ throw new Error('Could not make request to endpoint.'); } - messenger.publish('NetworkController:rpcEndpointUnavailable', { + messenger.publish('NetworkController:rpcEndpointChainUnavailable', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, @@ -248,7 +248,7 @@ function createRpcServiceChain({ rpcServiceChain.onServiceBreak( ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { const error = getError(rest); - messenger.publish('NetworkController:rpcEndpointInstanceUnavailable', { + messenger.publish('NetworkController:rpcEndpointUnvailable', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, @@ -261,7 +261,7 @@ function createRpcServiceChain({ rpcServiceChain.onDegraded( ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { const error = getError(rest); - messenger.publish('NetworkController:rpcEndpointDegraded', { + messenger.publish('NetworkController:rpcEndpointChainDegraded', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, @@ -274,7 +274,7 @@ function createRpcServiceChain({ rpcServiceChain.onServiceDegraded( ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { const error = getError(rest); - messenger.publish('NetworkController:rpcEndpointInstanceDegraded', { + messenger.publish('NetworkController:rpcEndpointDegraded', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, @@ -285,7 +285,7 @@ function createRpcServiceChain({ ); rpcServiceChain.onAvailable(({ primaryEndpointUrl: _, endpointUrl }) => { - messenger.publish('NetworkController:rpcEndpointAvailable', { + messenger.publish('NetworkController:rpcEndpointChainAvailable', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, @@ -295,7 +295,7 @@ function createRpcServiceChain({ rpcServiceChain.onServiceRetry( ({ primaryEndpointUrl: _, endpointUrl, attempt }) => { - messenger.publish('NetworkController:rpcEndpointInstanceRetried', { + messenger.publish('NetworkController:rpcEndpointRetried', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, diff --git a/packages/network-controller/src/index.ts b/packages/network-controller/src/index.ts index b4a0de293ea..82d5f04e41d 100644 --- a/packages/network-controller/src/index.ts +++ b/packages/network-controller/src/index.ts @@ -36,12 +36,12 @@ export type { NetworkControllerActions, NetworkControllerMessenger, NetworkControllerOptions, - NetworkControllerRpcEndpointUnavailableEvent, - NetworkControllerRpcEndpointInstanceUnavailableEvent, + NetworkControllerRpcEndpointChainUnavailableEvent, + NetworkControllerRpcEndpointUnvailableEvent, + NetworkControllerRpcEndpointChainDegradedEvent, NetworkControllerRpcEndpointDegradedEvent, - NetworkControllerRpcEndpointInstanceDegradedEvent, - NetworkControllerRpcEndpointAvailableEvent, - NetworkControllerRpcEndpointInstanceRetriedEvent, + NetworkControllerRpcEndpointChainAvailableEvent, + NetworkControllerRpcEndpointRetriedEvent, } from './NetworkController'; export { getDefaultNetworkControllerState, diff --git a/packages/network-controller/tests/network-client/rpc-failover.ts b/packages/network-controller/tests/network-client/rpc-failover.ts index 
a08562c2e0c..da09d22b9ae 100644 --- a/packages/network-controller/tests/network-client/rpc-failover.ts +++ b/packages/network-controller/tests/network-client/rpc-failover.ts @@ -95,7 +95,7 @@ export function testsForRpcFailoverBehavior({ }, async ({ makeRpcCall, clock }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -207,7 +207,7 @@ export function testsForRpcFailoverBehavior({ }, async ({ makeRpcCall, clock }) => { messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', + 'NetworkController:rpcEndpointRetried', () => { // Ensure that we advance to the next RPC request // retry, not the next block tracker request. @@ -269,17 +269,14 @@ export function testsForRpcFailoverBehavior({ }), }, async ({ makeRpcCall, clock, rpcUrl }) => { - messenger.subscribe( - 'NetworkController:rpcEndpointInstanceRetried', - () => { - // Ensure that we advance to the next RPC request - // retry, not the next block tracker request. - // We also don't need to await this, it just needs to - // be added to the promise queue. - // eslint-disable-next-line @typescript-eslint/no-floating-promises - clock.tickAsync(backoffDuration); - }, - ); + messenger.subscribe('NetworkController:rpcEndpointRetried', () => { + // Ensure that we advance to the next RPC request + // retry, not the next block tracker request. + // We also don't need to await this, it just needs to + // be added to the promise queue. + // eslint-disable-next-line @typescript-eslint/no-floating-promises + clock.tickAsync(backoffDuration); + }); for (let i = 0; i < numRequestsToMake - 1; i++) { await ignoreRejection(makeRpcCall(request)); From b16597acd3d63eba40fe9c03d7551e93dab82c5e Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Tue, 18 Nov 2025 00:18:18 -0700 Subject: [PATCH 18/30] RpcEndpointUnvailable -> RpcEndpointUnavailable --- .../network-controller/src/NetworkController.ts | 8 ++++---- .../rpc-endpoint-events.test.ts | 14 +++++++------- .../src/create-network-client.ts | 2 +- packages/network-controller/src/index.ts | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/packages/network-controller/src/NetworkController.ts b/packages/network-controller/src/NetworkController.ts index 5cbc80b3bf7..906660ec034 100644 --- a/packages/network-controller/src/NetworkController.ts +++ b/packages/network-controller/src/NetworkController.ts @@ -482,7 +482,7 @@ export type NetworkControllerRpcEndpointChainUnavailableEvent = { }; /** - * `NetworkController:rpcEndpointUnvailable` is published when the number of + * `NetworkController:rpcEndpointUnavailable` is published when the number of * failed consecutive attempts to receive a 2xx response from *any* of the * RPC endpoints within a chain of endpoints reaches a maximum. * @@ -505,8 +505,8 @@ export type NetworkControllerRpcEndpointChainUnavailableEvent = { * @param payload.primaryEndpointUrl - The URL of the primary for the chain of * endpoints. 
*/ -export type NetworkControllerRpcEndpointUnvailableEvent = { - type: 'NetworkController:rpcEndpointUnvailable'; +export type NetworkControllerRpcEndpointUnavailableEvent = { + type: 'NetworkController:rpcEndpointUnavailable'; payload: [ { chainId: Hex; @@ -675,7 +675,7 @@ export type NetworkControllerEvents = | NetworkControllerNetworkAddedEvent | NetworkControllerNetworkRemovedEvent | NetworkControllerRpcEndpointChainUnavailableEvent - | NetworkControllerRpcEndpointUnvailableEvent + | NetworkControllerRpcEndpointUnavailableEvent | NetworkControllerRpcEndpointChainDegradedEvent | NetworkControllerRpcEndpointDegradedEvent | NetworkControllerRpcEndpointChainAvailableEvent diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts index 3d7501ccc47..3ef340a94a4 100644 --- a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -134,7 +134,7 @@ describe('createNetworkClient - RPC endpoint events', () => { ); }); - it('publishes the NetworkController:rpcEndpointUnvailable event each time the max number of consecutive request failures is reached for any of the endpoints in a chain of endpoints', async () => { + it('publishes the NetworkController:rpcEndpointUnavailable event each time the max number of consecutive request failures is reached for any of the endpoints in a chain of endpoints', async () => { const failoverEndpointUrl = 'https://failover.endpoint/'; const request = { method: 'eth_gasPrice', @@ -174,10 +174,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const rpcEndpointUnvailableEventHandler = jest.fn(); + const rpcEndpointUnavailableEventHandler = jest.fn(); messenger.subscribe( - 'NetworkController:rpcEndpointUnvailable', - rpcEndpointUnvailableEventHandler, + 'NetworkController:rpcEndpointUnavailable', + rpcEndpointUnavailableEventHandler, ); await withNetworkClient( @@ -230,10 +230,10 @@ describe('createNetworkClient - RPC endpoint events', () => { ); expect( - rpcEndpointUnvailableEventHandler, + rpcEndpointUnavailableEventHandler, ).toHaveBeenCalledTimes(2); expect( - rpcEndpointUnvailableEventHandler, + rpcEndpointUnavailableEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, @@ -242,7 +242,7 @@ describe('createNetworkClient - RPC endpoint events', () => { primaryEndpointUrl: rpcUrl, }); expect( - rpcEndpointUnvailableEventHandler, + rpcEndpointUnavailableEventHandler, ).toHaveBeenCalledWith({ chainId, endpointUrl: failoverEndpointUrl, diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index ddcf4201412..9f8b93e99ec 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -248,7 +248,7 @@ function createRpcServiceChain({ rpcServiceChain.onServiceBreak( ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { const error = getError(rest); - messenger.publish('NetworkController:rpcEndpointUnvailable', { + messenger.publish('NetworkController:rpcEndpointUnavailable', { chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, diff --git a/packages/network-controller/src/index.ts b/packages/network-controller/src/index.ts index 82d5f04e41d..98153162fe7 100644 --- 
a/packages/network-controller/src/index.ts +++ b/packages/network-controller/src/index.ts @@ -37,7 +37,7 @@ export type { NetworkControllerMessenger, NetworkControllerOptions, NetworkControllerRpcEndpointChainUnavailableEvent, - NetworkControllerRpcEndpointUnvailableEvent, + NetworkControllerRpcEndpointUnavailableEvent, NetworkControllerRpcEndpointChainDegradedEvent, NetworkControllerRpcEndpointDegradedEvent, NetworkControllerRpcEndpointChainAvailableEvent, From cb498e37ce7bc671cad857d65fe261b4442b6dce Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Wed, 19 Nov 2025 13:04:32 -0700 Subject: [PATCH 19/30] Address Cursor comment --- .../src/rpc-service/rpc-service.test.ts | 857 ++++++++++-------- .../src/rpc-service/rpc-service.ts | 4 +- 2 files changed, 489 insertions(+), 372 deletions(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service.test.ts b/packages/network-controller/src/rpc-service/rpc-service.test.ts index 43b19edae24..f8f35df9a16 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.test.ts @@ -1,4 +1,5 @@ import { + DEFAULT_CIRCUIT_BREAK_DURATION, DEFAULT_DEGRADED_THRESHOLD, HttpError, } from '@metamask/controller-utils'; @@ -27,6 +28,390 @@ describe('RpcService', () => { clock.restore(); }); + describe('resetPolicy', () => { + it('resets the state of the circuit to "closed"', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.Open); + + service.resetPolicy(); + + expect(service.getCircuitState()).toBe(CircuitState.Closed); + }); + + it('allows making a successful request to the service if its circuit has broken', async () => { + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .times(15) + .reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + // Get through the first two rounds of retries + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + // The last retry breaks the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + + service.resetPolicy(); + + expect(await service.request(jsonRpcRequest)).toStrictEqual({ + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + }); + + it('calls 
onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes successfully again', async () => {
+      const endpointUrl = 'https://rpc.example.chain';
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .reply(200, {
+          id: 1,
+          jsonrpc: '2.0',
+          result: 'ok',
+        });
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .times(15)
+        .reply(503);
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .reply(200, {
+          id: 1,
+          jsonrpc: '2.0',
+          result: 'ok',
+        });
+      const onAvailableListener = jest.fn();
+      const service = new RpcService({
+        fetch,
+        btoa,
+        endpointUrl,
+      });
+      service.onRetry(() => {
+        clock.next();
+      });
+      service.onAvailable(onAvailableListener);
+
+      const jsonRpcRequest = {
+        id: 1,
+        jsonrpc: '2.0' as const,
+        method: 'eth_chainId',
+        params: [],
+      };
+
+      // Make a successful request
+      await service.request(jsonRpcRequest);
+      expect(onAvailableListener).toHaveBeenCalledTimes(1);
+
+      // Get through the first two rounds of retries
+      await ignoreRejection(service.request(jsonRpcRequest));
+      await ignoreRejection(service.request(jsonRpcRequest));
+      // The last retry breaks the circuit
+      await ignoreRejection(service.request(jsonRpcRequest));
+
+      service.resetPolicy();
+
+      // Make another successful request
+      await service.request(jsonRpcRequest);
+      expect(onAvailableListener).toHaveBeenCalledTimes(2);
+    });
+
+    it('allows making an unsuccessful request to the service if its circuit has broken', async () => {
+      const endpointUrl = 'https://rpc.example.chain';
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .times(15)
+        .reply(503);
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .reply(500);
+      const service = new RpcService({
+        fetch,
+        btoa,
+        endpointUrl,
+      });
+      service.onRetry(() => {
+        clock.next();
+      });
+
+      const jsonRpcRequest = {
+        id: 1,
+        jsonrpc: '2.0' as const,
+        method: 'eth_chainId',
+        params: [],
+      };
+      // Get through the first two rounds of retries
+      await ignoreRejection(service.request(jsonRpcRequest));
+      await ignoreRejection(service.request(jsonRpcRequest));
+      // The last retry breaks the circuit
+      await ignoreRejection(service.request(jsonRpcRequest));
+
+      service.resetPolicy();
+
+      await expect(service.request(jsonRpcRequest)).rejects.toThrow(
+        'RPC endpoint not found or unavailable',
+      );
+    });
+
+    it('does not call onBreak listeners', async () => {
+      const endpointUrl = 'https://rpc.example.chain';
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .times(15)
+        .reply(503);
+      nock(endpointUrl)
+        .post('/', {
+          id: 1,
+          jsonrpc: '2.0',
+          method: 'eth_chainId',
+          params: [],
+        })
+        .reply(500);
+      const onBreakListener = jest.fn();
+      const service = new RpcService({
+        fetch,
+        btoa,
+        endpointUrl,
+      });
+      service.onRetry(() => {
+        clock.next();
+      });
+      service.onBreak(onBreakListener);
+
+      const jsonRpcRequest = {
+        id: 1,
+        jsonrpc: '2.0' as const,
+        method: 'eth_chainId',
+        params: [],
+      };
+
+      // Get through the first two rounds of retries
+      await ignoreRejection(service.request(jsonRpcRequest));
+      await ignoreRejection(service.request(jsonRpcRequest));
+      // The last retry breaks the circuit
+      await ignoreRejection(service.request(jsonRpcRequest));
+
expect(onBreakListener).toHaveBeenCalledTimes(1); + + service.resetPolicy(); + expect(onBreakListener).toHaveBeenCalledTimes(1); + }); + }); + + describe('getCircuitState', () => { + it('returns the state of the underlying circuit', async () => { + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + const endpointUrl = 'https://rpc.example.chain'; + nock(endpointUrl).post('/', jsonRpcRequest).times(15).reply(503); + nock(endpointUrl) + .post('/', { + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }) + .reply(500); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + expect(service.getCircuitState()).toBe(CircuitState.Closed); + + // Retry until we break the circuit + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + await ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.Open); + + clock.tick(DEFAULT_CIRCUIT_BREAK_DURATION); + const promise = ignoreRejection(service.request(jsonRpcRequest)); + expect(service.getCircuitState()).toBe(CircuitState.HalfOpen); + await promise; + expect(service.getCircuitState()).toBe(CircuitState.Open); + }); + }); + + describe('getLastInnerFailureReason', () => { + it('returns undefined if no requests have occurred yet', () => { + const endpointUrl = 'https://rpc.example.chain'; + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + + expect(service.getLastInnerFailureReason()).toBeUndefined(); + }); + + it('returns the last failure reason that the underlying policy captured', async () => { + const endpointUrl = 'https://rpc.example.chain'; + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + nock(endpointUrl).post('/', jsonRpcRequest).reply(500); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + await ignoreRejection(service.request(jsonRpcRequest)); + + expect(service.getLastInnerFailureReason()).toStrictEqual({ + error: new HttpError(500), + }); + }); + + it('returns undefined if the service failed, then succeeded', async () => { + const endpointUrl = 'https://rpc.example.chain'; + const jsonRpcRequest = { + id: 1, + jsonrpc: '2.0' as const, + method: 'eth_chainId', + params: [], + }; + nock(endpointUrl) + .post('/', jsonRpcRequest) + .reply(500) + .post('/', jsonRpcRequest) + .reply(200, { + id: 1, + jsonrpc: '2.0', + result: 'ok', + }); + const service = new RpcService({ + fetch, + btoa, + endpointUrl, + }); + service.onRetry(() => { + clock.next(); + }); + + await ignoreRejection(service.request(jsonRpcRequest)); + await service.request(jsonRpcRequest); + + expect(service.getLastInnerFailureReason()).toBeUndefined(); + }); + }); + describe('request', () => { // NOTE: Keep this list synced with CONNECTION_ERRORS describe.each([ @@ -483,287 +868,54 @@ describe('RpcService', () => { nonce: '0x378da40ff335b070', gasLimit: '0x47e7c4', gasUsed: '0x37993', - timestamp: '0x5835c54d', - transactions: [ - '0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc', - '0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d', - ], - baseFeePerGas: '0x7', - }, - }); - }); - - it('handles deeply frozen JSON-RPC requests', async () => { - const endpointUrl = 
'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_blockNumber', - params: [], - }) - .reply(200, { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const response = await service.request( - deepFreeze({ - id: 1, - jsonrpc: '2.0', - method: 'eth_blockNumber', - params: [], - }), - ); - - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - result: '0x1', - }); - }); - - it('does not throw if the endpoint returns an unsuccessful JSON-RPC response', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, { - id: 1, - jsonrpc: '2.0', - error: { - code: -32000, - message: 'oops', - }, - }); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - const response = await service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - - expect(response).toStrictEqual({ - id: 1, - jsonrpc: '2.0', - error: { - code: -32000, - message: 'oops', - }, - }); - }); - - it('calls the onDegraded callback if the endpoint takes more than 5 seconds to respond', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, () => { - clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); - return { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }; - }); - const onDegradedListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onDegraded(onDegradedListener); - - await service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - - expect(onDegradedListener).toHaveBeenCalledTimes(1); - expect(onDegradedListener).toHaveBeenCalledWith({ - endpointUrl: `${endpointUrl}/`, - }); - }); - - it('calls the onAvailable callback the first time a successful request occurs', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, () => { - return { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }; - }); - const onAvailableListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onAvailable(onAvailableListener); - - await service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - - expect(onAvailableListener).toHaveBeenCalledTimes(1); - expect(onAvailableListener).toHaveBeenCalledWith({ - endpointUrl: `${endpointUrl}/`, - }); - }); - - it('calls the onAvailable callback if the endpoint takes more than 5 seconds to respond and then speeds up again', async () => { - const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, () => { - clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); - return { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }; - }) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, () => { - return { - id: 1, - jsonrpc: '2.0', - result: '0x1', - }; - }); - const onAvailableListener = jest.fn(); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onAvailable(onAvailableListener); - - await service.request({ - id: 1, - jsonrpc: 
'2.0', - method: 'eth_chainId', - params: [], - }); - await service.request({ - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }); - - expect(onAvailableListener).toHaveBeenCalledTimes(1); - expect(onAvailableListener).toHaveBeenCalledWith({ - endpointUrl: `${endpointUrl}/`, + timestamp: '0x5835c54d', + transactions: [ + '0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc', + '0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d', + ], + baseFeePerGas: '0x7', + }, }); }); - }); - describe('reset', () => { - it('resets the state of the circuit to "closed"', async () => { + it('handles deeply frozen JSON-RPC requests', async () => { const endpointUrl = 'https://rpc.example.chain'; nock(endpointUrl) .post('/', { id: 1, jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(503); - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', + method: 'eth_blockNumber', params: [], }) .reply(200, { id: 1, jsonrpc: '2.0', - result: 'ok', + result: '0x1', }); const service = new RpcService({ fetch, btoa, endpointUrl, }); - service.onRetry(() => { - clock.next(); - }); - - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - // Get through the first two rounds of retries - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - expect(service.getCircuitState()).toBe(CircuitState.Open); - service.resetPolicy(); + const response = await service.request( + deepFreeze({ + id: 1, + jsonrpc: '2.0', + method: 'eth_blockNumber', + params: [], + }), + ); - expect(service.getCircuitState()).toBe(CircuitState.Closed); + expect(response).toStrictEqual({ + id: 1, + jsonrpc: '2.0', + result: '0x1', + }); }); - it('allows making a successful request to the service if its circuit has broken', async () => { + it('does not throw if the endpoint returns an unsuccessful JSON-RPC response', async () => { const endpointUrl = 'https://rpc.example.chain'; - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(503); nock(endpointUrl) .post('/', { id: 1, @@ -774,39 +926,35 @@ describe('RpcService', () => { .reply(200, { id: 1, jsonrpc: '2.0', - result: 'ok', + error: { + code: -32000, + message: 'oops', + }, }); const service = new RpcService({ fetch, btoa, endpointUrl, }); - service.onRetry(() => { - clock.next(); - }); - const jsonRpcRequest = { + const response = await service.request({ id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - // Get through the first two rounds of retries - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - service.resetPolicy(); + }); - expect(await service.request(jsonRpcRequest)).toStrictEqual({ + expect(response).toStrictEqual({ id: 1, jsonrpc: '2.0', - result: 'ok', + error: { + code: -32000, + message: 'oops', + }, }); }); - it('calls onAvailable listeners if the service was executed successfully, its circuit broke, it was reset, and executes successfully again', async () => { + it('calls the onDegraded callback if the endpoint takes more than 5 seconds to respond', async () => { const endpointUrl = 
'https://rpc.example.chain'; nock(endpointUrl) .post('/', { @@ -815,68 +963,36 @@ describe('RpcService', () => { method: 'eth_chainId', params: [], }) - .reply(200, { - id: 1, - jsonrpc: '2.0', - result: 'ok', - }); - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .times(15) - .reply(503); - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(200, { - id: 1, - jsonrpc: '2.0', - result: 'ok', + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; }); - const onAvailableListener = jest.fn(); + const onDegradedListener = jest.fn(); const service = new RpcService({ fetch, btoa, endpointUrl, }); - service.onRetry(() => { - clock.next(); - }); - service.onAvailable(onAvailableListener); + service.onDegraded(onDegradedListener); - const jsonRpcRequest = { + await service.request({ id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - - // Make a successful requst - await service.request(jsonRpcRequest); - expect(onAvailableListener).toHaveBeenCalledTimes(1); - - // Get through the first two rounds of retries - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - service.resetPolicy(); + }); - // Make another successful requst - await service.request(jsonRpcRequest); - expect(onAvailableListener).toHaveBeenCalledTimes(2); + expect(onDegradedListener).toHaveBeenCalledTimes(1); + expect(onDegradedListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); }); - it('allows making an unsuccessful request to the service if its circuit has broken', async () => { + it('calls the onAvailable callback the first time a successful request occurs', async () => { const endpointUrl = 'https://rpc.example.chain'; nock(endpointUrl) .post('/', { @@ -885,45 +1001,35 @@ describe('RpcService', () => { method: 'eth_chainId', params: [], }) - .times(15) - .reply(503); - nock(endpointUrl) - .post('/', { - id: 1, - jsonrpc: '2.0', - method: 'eth_chainId', - params: [], - }) - .reply(500); + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); const service = new RpcService({ fetch, btoa, endpointUrl, }); - service.onRetry(() => { - clock.next(); - }); + service.onAvailable(onAvailableListener); - const jsonRpcRequest = { + await service.request({ id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - // Get through the first two rounds of retries - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - - service.resetPolicy(); + }); - await expect(service.request(jsonRpcRequest)).rejects.toThrow( - 'RPC endpoint not found or unavailable', - ); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); }); - it('does not call onBreak listeners', async () => { + it('calls the onAvailable callback if the endpoint takes more than 5 seconds to respond and then speeds up again', async () => { const endpointUrl = 'https://rpc.example.chain'; nock(endpointUrl) .post('/', { 
@@ -932,43 +1038,52 @@ describe('RpcService', () => { method: 'eth_chainId', params: [], }) - .times(15) - .reply(503); - nock(endpointUrl) + .reply(200, () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }) .post('/', { id: 1, jsonrpc: '2.0', method: 'eth_chainId', params: [], }) - .reply(500); - const onBreakListener = jest.fn(); + .reply(200, () => { + return { + id: 1, + jsonrpc: '2.0', + result: '0x1', + }; + }); + const onAvailableListener = jest.fn(); const service = new RpcService({ fetch, btoa, endpointUrl, }); - service.onRetry(() => { - clock.next(); - }); - service.onBreak(onBreakListener); + service.onAvailable(onAvailableListener); - const jsonRpcRequest = { + await service.request({ id: 1, - jsonrpc: '2.0' as const, + jsonrpc: '2.0', method: 'eth_chainId', params: [], - }; - - // Get through the first two rounds of retries - await ignoreRejection(service.request(jsonRpcRequest)); - await ignoreRejection(service.request(jsonRpcRequest)); - // The last retry breaks the circuit - await ignoreRejection(service.request(jsonRpcRequest)); - expect(onBreakListener).toHaveBeenCalledTimes(1); + }); + await service.request({ + id: 1, + jsonrpc: '2.0', + method: 'eth_chainId', + params: [], + }); - service.resetPolicy(); - expect(onBreakListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledTimes(1); + expect(onAvailableListener).toHaveBeenCalledWith({ + endpointUrl: `${endpointUrl}/`, + }); }); }); }); diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index 9b270ab11b5..d7bee41de15 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -531,7 +531,7 @@ export class RpcService implements AbstractRpcService { `[RpcService: ${this.endpointUrl}] Circuit state`, this.#policy.getCircuitState(), ); - return await this.#policy.execute(async (data) => { + const jsonDecodedResponse = await this.#policy.execute(async (data) => { log( 'REQUEST INITIATED:', this.endpointUrl.toString(), @@ -552,6 +552,8 @@ export class RpcService implements AbstractRpcService { ); return await response.json(); }); + this.#lastError = undefined; + return jsonDecodedResponse; } catch (error) { log('REQUEST ERROR:', this.endpointUrl.toString(), error); From cdd849165657b69fc420686ce0b25cdaa471241a Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Wed, 19 Nov 2025 13:59:09 -0700 Subject: [PATCH 20/30] Make the RPC endpoint event tests more realistic, and address Cursor feedback --- .../rpc-endpoint-events.test.ts | 195 ++++++++++++------ 1 file changed, 129 insertions(+), 66 deletions(-) diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts index 3ef340a94a4..711c9d38cf8 100644 --- a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -40,18 +40,21 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. 
- primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); failoverComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, @@ -155,18 +158,21 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); failoverComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, @@ -272,11 +278,11 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - comms.mockNextBlockTrackerRequest({ - blockNumber, - }); comms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, @@ -378,8 +384,17 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - comms.mockNextBlockTrackerRequest({ - blockNumber, + comms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, }); comms.mockRpcCall({ request, @@ -392,7 +407,6 @@ describe('createNetworkClient - RPC endpoint events', () => { times: 2, }); - await makeRpcCall(request); await makeRpcCall(request); await makeRpcCall(request); @@ -458,18 +472,21 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); failoverComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: 5, response: { httpStatus: 503, @@ -564,16 +581,28 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. 
- primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); failoverComms.mockRpcCall({ request, response: () => { @@ -639,11 +668,11 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - comms.mockNextBlockTrackerRequest({ - blockNumber, - }); comms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, @@ -749,24 +778,19 @@ describe('createNetworkClient - RPC endpoint events', () => { }, }), }, - async ({ blockTracker, makeRpcCall, clock, chainId, rpcUrl }) => { + async ({ makeRpcCall, clock, chainId, rpcUrl }) => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - comms.mockNextBlockTrackerRequest({ - blockNumber: '0x1', - }); - // We mock another block tracker request so we can clear the - // cache. - comms.mockNextBlockTrackerRequest({ - blockNumber: '0x2', - }); comms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, response: () => { clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); return { - result: 'ok', + result: '0x1', }; }, }); @@ -780,10 +804,6 @@ describe('createNetworkClient - RPC endpoint events', () => { }, }); - await makeRpcCall(request); - // Force another block to clear the cache on the previous - // request - await blockTracker.checkForLatestBlock(); await makeRpcCall(request); expect(rpcEndpointDegradedEventHandler).toHaveBeenCalledTimes( @@ -853,18 +873,21 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. - primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); failoverComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: 5, response: { httpStatus: 503, @@ -977,16 +1000,28 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. 
- primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: () => { + clock.tick(DEFAULT_DEGRADED_THRESHOLD + 1); + return { + result: '0x1', + }; + }, + }); failoverComms.mockRpcCall({ request, response: () => { @@ -1020,7 +1055,7 @@ describe('createNetworkClient - RPC endpoint events', () => { expect( rpcEndpointDegradedEventHandler, - ).toHaveBeenCalledTimes(3); + ).toHaveBeenCalledTimes(4); expect( rpcEndpointDegradedEventHandler, ).toHaveBeenNthCalledWith(1, { @@ -1048,6 +1083,15 @@ describe('createNetworkClient - RPC endpoint events', () => { networkClientId: 'AAAA-AAAA-AAAA-AAAA', primaryEndpointUrl: rpcUrl, }); + expect( + rpcEndpointDegradedEventHandler, + ).toHaveBeenNthCalledWith(4, { + chainId, + endpointUrl: failoverEndpointUrl, + error: undefined, + networkClientId: 'AAAA-AAAA-AAAA-AAAA', + primaryEndpointUrl: rpcUrl, + }); }, ); }, @@ -1079,10 +1123,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const networkAvailableEventHandler = jest.fn(); + const rpcEndpointChainAvailableEventHandler = jest.fn(); messenger.subscribe( 'NetworkController:rpcEndpointChainAvailable', - networkAvailableEventHandler, + rpcEndpointChainAvailableEventHandler, ); await withNetworkClient( @@ -1101,7 +1145,12 @@ describe('createNetworkClient - RPC endpoint events', () => { async ({ makeRpcCall, chainId, rpcUrl }) => { await makeRpcCall(request); - expect(networkAvailableEventHandler).toHaveBeenCalledWith({ + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledWith({ chainId, endpointUrl: rpcUrl, networkClientId: 'AAAA-AAAA-AAAA-AAAA', @@ -1133,16 +1182,25 @@ describe('createNetworkClient - RPC endpoint events', () => { // The first time a block-cacheable request is made, the // latest block number is retrieved through the block // tracker first. 
- primaryComms.mockNextBlockTrackerRequest({ - blockNumber, - }); primaryComms.mockRpcCall({ - request, + request: { + method: 'eth_blockNumber', + params: [], + }, times: DEFAULT_MAX_CONSECUTIVE_FAILURES, response: { httpStatus: 503, }, }); + failoverComms.mockRpcCall({ + request: { + method: 'eth_blockNumber', + params: [], + }, + response: { + result: '0x1', + }, + }); failoverComms.mockRpcCall({ request, response: { @@ -1151,10 +1209,10 @@ describe('createNetworkClient - RPC endpoint events', () => { }); const messenger = buildRootMessenger(); - const networkAvailableEventHandler = jest.fn(); + const rpcEndpointChainAvailableEventHandler = jest.fn(); messenger.subscribe( 'NetworkController:rpcEndpointChainAvailable', - networkAvailableEventHandler, + rpcEndpointChainAvailableEventHandler, ); await withNetworkClient( @@ -1194,9 +1252,14 @@ describe('createNetworkClient - RPC endpoint events', () => { // breaking the circuit; hit the failover await makeRpcCall(request); - expect(networkAvailableEventHandler).toHaveBeenCalledWith({ + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledTimes(1); + expect( + rpcEndpointChainAvailableEventHandler, + ).toHaveBeenCalledWith({ chainId, - endpointUrl: rpcUrl, + endpointUrl: failoverEndpointUrl, networkClientId: 'AAAA-AAAA-AAAA-AAAA', primaryEndpointUrl: rpcUrl, }); From 73017d115d0f55732c5b4239f92506279fd5e0b4 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Thu, 20 Nov 2025 14:23:41 -0700 Subject: [PATCH 21/30] Reword JSDoc for events, remove endpointUrl from chainUnavailable event --- .../src/NetworkController.ts | 168 +++++++----------- .../rpc-endpoint-events.test.ts | 1 - .../src/create-network-client.ts | 1 - 3 files changed, 64 insertions(+), 106 deletions(-) diff --git a/packages/network-controller/src/NetworkController.ts b/packages/network-controller/src/NetworkController.ts index 906660ec034..0b721924031 100644 --- a/packages/network-controller/src/NetworkController.ts +++ b/packages/network-controller/src/NetworkController.ts @@ -443,37 +443,25 @@ export type NetworkControllerNetworkRemovedEvent = { }; /** - * `NetworkController:rpcEndpointChainUnavailable` is published when the number - * of failed consecutive attempts to receive a 2xx response from the primary - * endpoint of a chain of endpoints reaches a maximum, causing further requests - * to be temporarily paused, and when subsequent traffic to a failover endpoint - * similarly fails. + * `NetworkController:rpcEndpointChainUnavailable` is published when, after + * trying all endpoints in an endpoint chain, the last failover reaches a + * maximum number of consecutive 5xx responses, breaking the underlying circuit. * - * In other words, this event will not published if a primary is deemed to be - * unavailable but its failover is not. - * - * Additionally, if this was the last `NetworkController:rpcEndpointChain*` - * event to be published, the event will not be re-published (for instance, if - * both a primary and failover are deemed to be unavailable, or if more than one - * failover is deemed to be unavailable). + * In other words, this event will not be published if a failover is available, + * even if the primary is not. * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the target network. - * @param payload.endpointUrl - The URL of the endpoint among the chain of - * endpoints which has been deemed to be unavailable. 
- * @param payload.error - The error from the last request to `endpointUrl` which
- * determined the unavailability status.
- * @param payload.networkClientId - The ID of the client representing the target
- * network.
- * @param payload.primaryEndpointUrl - The URL of the primary for the chain of
- * endpoints.
+ * @param payload.chainId - The target network's chain ID.
+ * @param payload.error - The last error produced by the last failover in the
+ * endpoint chain.
+ * @param payload.networkClientId - The target network's client ID.
+ * @param payload.primaryEndpointUrl - The endpoint chain's primary URL.
  */
 export type NetworkControllerRpcEndpointChainUnavailableEvent = {
   type: 'NetworkController:rpcEndpointChainUnavailable';
   payload: [
     {
       chainId: Hex;
-      endpointUrl: string;
       error: unknown;
       networkClientId: NetworkClientId;
       primaryEndpointUrl: string;
@@ -482,28 +470,21 @@ export type NetworkControllerRpcEndpointChainUnavailableEvent = {
 };

 /**
- * `NetworkController:rpcEndpointUnavailable` is published when the number of
- * failed consecutive attempts to receive a 2xx response from *any* of the
- * RPC endpoints within a chain of endpoints reaches a maximum.
- *
- * This event will still be published if a primary is deemed to be unavailable,
- * even its failover is available.
+ * `NetworkController:rpcEndpointUnavailable` is published when any
+ * endpoint in an endpoint chain reaches a maximum number of consecutive 5xx
+ * responses, breaking the underlying circuit.
  *
- * Additionally, even if this was the last `NetworkController:rpcEndpoint*`
- * event to be published, the event may be re-published (for instance, if both a
- * primary and failover are deemed to be unavailable, or if more than one
- * failover is deemed to be unavailable).
+ * In other words, this event will be published if a primary is not available,
+ * even if a failover is.
  *
  * @param payload - The event payload.
- * @param payload.chainId - The chain ID of the target network.
- * @param payload.endpointUrl - The URL of the endpoint among the chain of
- * endpoints which has been deemed to be unavailable.
- * @param payload.error - The error from the last request to `endpointUrl` which
- * determined the unavailability status.
- * @param payload.networkClientId - The ID of the network client representing
- * the chain of endpoints.
- * @param payload.primaryEndpointUrl - The URL of the primary for the chain of
- * endpoints.
+ * @param payload.chainId - The target network's chain ID.
+ * @param payload.endpointUrl - The URL of the endpoint which reached the
+ * maximum number of consecutive 5xx responses. You can compare this to
+ * `primaryEndpointUrl` to know whether it was a failover or a primary.
+ * @param payload.error - The last error produced by the endpoint.
+ * @param payload.networkClientId - The target network's client ID.
+ * @param payload.primaryEndpointUrl - The endpoint chain's primary URL.
  */
 export type NetworkControllerRpcEndpointUnavailableEvent = {
   type: 'NetworkController:rpcEndpointUnavailable';
@@ -519,34 +500,26 @@ export type NetworkControllerRpcEndpointUnavailableEvent = {
 };

 /**
- * `NetworkController:rpcEndpointChainDegraded` is published in the following
- * two cases:
+ * `NetworkController:rpcEndpointChainDegraded` is published for any of the
+ * endpoints in an endpoint chain when one of the following two conditions holds
+ * (and the chain is not already in a degraded state):
  *
- * 1.
When an attempt to receive a 2xx response from any of the endpoints - * within a chain of endpoints is unsuccessful, and all subsequent automatic - * retries lead to the same result. - * 2. When a 2xx response is received from any of the endpoints, but the request - * takes longer than a set number of seconds to complete. + * 1. A successful (2xx) request, even after being retried, cannot be made to + * the endpoint. + * 2. A successful (2xx) request can be made to the endpoint, but it takes + * longer than expected to complete. * * Note that this event will be published even if there are local connectivity * issues which prevent requests from being initiated. This is intentional. * - * Additionally, if this was the last `NetworkController:rpcEndpointChain*` - * event to be published, the event will not be re-published (for instance: a - * failover is activated and successive attempts to the failover fail, then the - * primary comes back online, but it is slow). - * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the target network. - * @param payload.endpointUrl - The URL of the endpoint among the chain of - * endpoints which has been deemed to be degraded. - * @param payload.error - The error from the last request to `endpointUrl` which - * determined the degraded status (or `undefined` if the request was merely - * slow). - * @param payload.networkClientId - The ID of the client representing the target - * network. - * @param payload.primaryEndpointUrl - The URL of the primary for the chain of - * endpoints. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint for which requests + * failed or were slow to complete. + * @param payload.error - The last error produced by the endpoint (or + * `undefined` if the request was slow). + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. */ export type NetworkControllerRpcEndpointChainDegradedEvent = { type: 'NetworkController:rpcEndpointChainDegraded'; @@ -563,34 +536,26 @@ export type NetworkControllerRpcEndpointChainDegradedEvent = { /** * - * `NetworkController:rpcEndpointDegraded` is published in the following - * two cases: + * `NetworkController:rpcEndpointDegraded` is published for any of the endpoints + * in an endpoint chain when: * - * 1. When an attempt to receive a 2xx response from any of the endpoints - * within a chain of endpoints is unsuccessful, and all subsequent automatic - * retries lead to the same result. - * 2. When a 2xx response is received from any of the endpoints, but the request - * takes longer than a set number of seconds to complete. + * 1. A successful (2xx) request, even after being retried, cannot be made to + * the endpoint. + * 2. A successful (2xx) request can be made to the endpoint, but it takes + * longer than expected to complete. * * Note that this event will be published even if there are local connectivity * issues which prevent requests from being initiated. This is intentional. * - * Additionally, if this was the last `NetworkController:rpcEndpoint*` event to - * be published, the event may be re-published (for instance: a failover is - * activated and successive attempts to the failover fail, then the primary - * comes back online, but it is slow). - * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the target network. 
- * @param payload.endpointUrl - The URL of the endpoint among the chain of - * endpoints which has been deemed to be degraded. - * @param payload.error - The error from the last request to `endpointUrl` which - * determined the degraded status (or `undefined` if the request was merely - * slow). - * @param payload.networkClientId - The ID of the client representing the target - * network. - * @param payload.primaryEndpointUrl - The URL of the primary for the chain of - * endpoints. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint for which requests + * failed or were slow to complete. You can compare this to `primaryEndpointUrl` + * to know whether it was a failover or a primary. + * @param payload.error - The last error produced by the endpoint (or + * `undefined` if the request was slow). + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. */ export type NetworkControllerRpcEndpointDegradedEvent = { type: 'NetworkController:rpcEndpointDegraded'; @@ -606,22 +571,20 @@ export type NetworkControllerRpcEndpointDegradedEvent = { }; /** - * `NetworkController:rpcEndpointChainAvailable` is published in either of the - * following two cases: + * `NetworkController:rpcEndpointChainAvailable` is published in one of two + * cases: * - * 1. The first time that a 2xx request is made to any of the endpoints within - * a chain of endpoints. + * 1. The first time that a 2xx request is made to any of the endpoints in an + * endpoint chain. * 2. When requests to any of the endpoints previously failed (placing the * endpoint in a degraded or unavailable status), but are now succeeding again. * * @param payload - The event payload. - * @param payload.chainId - The chain ID of the target network. - * @param payload.endpointUrl - The URL of the endpoint among the chain of - * endpoints which has been deemed to be available. - * @param payload.networkClientId - The ID of the network client representing - * the chain of endpoints. - * @param payload.primaryEndpointUrl - The URL of the primary for the chain of - * endpoints. + * @param payload.chainId - The target network's chain ID. + * @param payload.endpointUrl - The URL of the endpoint which meets either of + * the above conditions. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. */ export type NetworkControllerRpcEndpointChainAvailableEvent = { type: 'NetworkController:rpcEndpointChainAvailable'; @@ -637,20 +600,17 @@ export type NetworkControllerRpcEndpointChainAvailableEvent = { /** * `NetworkController:rpcEndpointRetried` is published before a request to any - * of the endpoints within a chain of endpoints is retried. + * endpoint in an endpoint chain is retried. * * This is mainly useful for tests. * * @param payload - The event payload. * @param payload.attempt - The current attempt counter for the endpoint * (starting from 0). - * @param payload.chainId - The chain ID of the target network. - * @param payload.endpointUrl - The URL of the endpoint among the chain of - * endpoints which is being retried. - * @param payload.networkClientId - The ID of the network client representing - * the chain of endpoints. - * @param payload.primaryEndpointUrl - The URL of the primary for the chain of - * endpoints. + * @param payload.chainId - The target network's chain ID. 
+ * @param payload.endpointUrl - The URL of the endpoint being retried. + * @param payload.networkClientId - The target network's client ID. + * @param payload.primaryEndpointUrl - The endpoint chain's primary URL. * @see {@link RpcService} for the list of retriable errors. */ export type NetworkControllerRpcEndpointRetriedEvent = { diff --git a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts index 711c9d38cf8..32f8903f8ea 100644 --- a/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts +++ b/packages/network-controller/src/create-network-client-tests/rpc-endpoint-events.test.ts @@ -124,7 +124,6 @@ describe('createNetworkClient - RPC endpoint events', () => { rpcEndpointChainUnavailableEventHandler, ).toHaveBeenCalledWith({ chainId, - endpointUrl: failoverEndpointUrl, error: expectedUnavailableError, networkClientId: 'AAAA-AAAA-AAAA-AAAA', primaryEndpointUrl: rpcUrl, diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index 9f8b93e99ec..83254b7bd3d 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -240,7 +240,6 @@ function createRpcServiceChain({ chainId: configuration.chainId, networkClientId: id, primaryEndpointUrl, - endpointUrl, error, }); }); From 026ce00f36c9a694f6af081578b140e850457f11 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Thu, 20 Nov 2025 14:50:24 -0700 Subject: [PATCH 22/30] Use the primaryEndpointUrl from the Cockatiel event in the messenger event --- .../src/create-network-client.ts | 84 ++++++++++++------- 1 file changed, 54 insertions(+), 30 deletions(-) diff --git a/packages/network-controller/src/create-network-client.ts b/packages/network-controller/src/create-network-client.ts index 83254b7bd3d..412e3dd4154 100644 --- a/packages/network-controller/src/create-network-client.ts +++ b/packages/network-controller/src/create-network-client.ts @@ -227,30 +227,40 @@ function createRpcServiceChain({ ...availableEndpointUrls.slice(1).map(buildRpcServiceConfiguration), ]); - rpcServiceChain.onBreak(({ endpointUrl, ...rest }) => { - const error = getError(rest); + rpcServiceChain.onBreak( + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { + const error = getError(rest); - if (error === undefined) { - // This error shouldn't happen in practice because we never call `.isolate` - // on the circuit breaker policy, but we need to appease TypeScript. - throw new Error('Could not make request to endpoint.'); - } + if (error === undefined) { + // This error shouldn't happen in practice because we never call `.isolate` + // on the circuit breaker policy, but we need to appease TypeScript. 
+ throw new Error('Could not make request to endpoint.'); + } - messenger.publish('NetworkController:rpcEndpointChainUnavailable', { - chainId: configuration.chainId, - networkClientId: id, - primaryEndpointUrl, - error, - }); - }); + messenger.publish('NetworkController:rpcEndpointChainUnavailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + error, + }); + }, + ); rpcServiceChain.onServiceBreak( - ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { const error = getError(rest); messenger.publish('NetworkController:rpcEndpointUnavailable', { chainId: configuration.chainId, networkClientId: id, - primaryEndpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, endpointUrl, error, }); @@ -258,12 +268,16 @@ function createRpcServiceChain({ ); rpcServiceChain.onDegraded( - ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { const error = getError(rest); messenger.publish('NetworkController:rpcEndpointChainDegraded', { chainId: configuration.chainId, networkClientId: id, - primaryEndpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, endpointUrl, error, }); @@ -271,33 +285,43 @@ function createRpcServiceChain({ ); rpcServiceChain.onServiceDegraded( - ({ primaryEndpointUrl: _, endpointUrl, ...rest }) => { + ({ + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + ...rest + }) => { const error = getError(rest); messenger.publish('NetworkController:rpcEndpointDegraded', { chainId: configuration.chainId, networkClientId: id, - primaryEndpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, endpointUrl, error, }); }, ); - rpcServiceChain.onAvailable(({ primaryEndpointUrl: _, endpointUrl }) => { - messenger.publish('NetworkController:rpcEndpointChainAvailable', { - chainId: configuration.chainId, - networkClientId: id, - primaryEndpointUrl, - endpointUrl, - }); - }); + rpcServiceChain.onAvailable( + ({ endpointUrl, primaryEndpointUrl: primaryEndpointUrlFromEvent }) => { + messenger.publish('NetworkController:rpcEndpointChainAvailable', { + chainId: configuration.chainId, + networkClientId: id, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + endpointUrl, + }); + }, + ); rpcServiceChain.onServiceRetry( - ({ primaryEndpointUrl: _, endpointUrl, attempt }) => { + ({ + attempt, + endpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, + }) => { messenger.publish('NetworkController:rpcEndpointRetried', { chainId: configuration.chainId, networkClientId: id, - primaryEndpointUrl, + primaryEndpointUrl: primaryEndpointUrlFromEvent, endpointUrl, attempt, }); From 74d9d0f172dde4f817f82a54a878390e0527ed96 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 12:33:47 -0700 Subject: [PATCH 23/30] Remove RpcService.getLastInnerFailureReason, use lastError directly --- .../src/rpc-service/rpc-service-chain.ts | 16 ++--- .../src/rpc-service/rpc-service.test.ts | 70 ------------------- .../src/rpc-service/rpc-service.ts | 23 +++--- 3 files changed, 13 insertions(+), 96 deletions(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index 7fb81e3a2ad..4f8b17f9482 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ 
-401,18 +401,10 @@ export class RpcServiceChain { // Oops, that didn't work. // Capture this error so that we can handle it later. - const lastFailureReason = service.getLastInnerFailureReason(); + const { lastError } = service; const isCircuitOpen = service.getCircuitState() === CircuitState.Open; - log('Service failed!', error, lastFailureReason); - log( - 'Circuit state', - service.getCircuitState(), - 'Previous circuit state', - previousCircuitState, - 'state', - this.#status, - ); + log('Service failed! error =', error, 'lastError = ', lastError); if (isCircuitOpen) { if (i < this.#services.length - 1) { @@ -425,7 +417,7 @@ export class RpcServiceChain { if ( previousCircuitState !== CircuitState.Open && this.#status !== STATUSES.Unavailable && - lastFailureReason !== undefined + lastError !== undefined ) { // If the service's circuit just broke and it's the last one in the // chain, then trigger the onBreak event. (But if for some reason we @@ -435,7 +427,7 @@ export class RpcServiceChain { ); this.#status = STATUSES.Unavailable; this.#onBreakEventEmitter.emit({ - ...lastFailureReason, + error: lastError, primaryEndpointUrl: this.#primaryService.endpointUrl.toString(), endpointUrl: service.endpointUrl.toString(), }); diff --git a/packages/network-controller/src/rpc-service/rpc-service.test.ts b/packages/network-controller/src/rpc-service/rpc-service.test.ts index f8f35df9a16..18a96e4d070 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.test.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.test.ts @@ -342,76 +342,6 @@ describe('RpcService', () => { }); }); - describe('getLastInnerFailureReason', () => { - it('returns undefined if no requests have occurred yet', () => { - const endpointUrl = 'https://rpc.example.chain'; - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - - expect(service.getLastInnerFailureReason()).toBeUndefined(); - }); - - it('returns the last failure reason that the underlying policy captured', async () => { - const endpointUrl = 'https://rpc.example.chain'; - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - nock(endpointUrl).post('/', jsonRpcRequest).reply(500); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onRetry(() => { - clock.next(); - }); - - await ignoreRejection(service.request(jsonRpcRequest)); - - expect(service.getLastInnerFailureReason()).toStrictEqual({ - error: new HttpError(500), - }); - }); - - it('returns undefined if the service failed, then succeeded', async () => { - const endpointUrl = 'https://rpc.example.chain'; - const jsonRpcRequest = { - id: 1, - jsonrpc: '2.0' as const, - method: 'eth_chainId', - params: [], - }; - nock(endpointUrl) - .post('/', jsonRpcRequest) - .reply(500) - .post('/', jsonRpcRequest) - .reply(200, { - id: 1, - jsonrpc: '2.0', - result: 'ok', - }); - const service = new RpcService({ - fetch, - btoa, - endpointUrl, - }); - service.onRetry(() => { - clock.next(); - }); - - await ignoreRejection(service.request(jsonRpcRequest)); - await service.request(jsonRpcRequest); - - expect(service.getLastInnerFailureReason()).toBeUndefined(); - }); - }); - describe('request', () => { // NOTE: Keep this list synced with CONNECTION_ERRORS describe.each([ diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index d7bee41de15..7a8ed7d5bfe 100644 --- 
a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -239,6 +239,12 @@ export class RpcService implements AbstractRpcService { */ readonly endpointUrl: URL; + /** + * The last error that the retry policy captured (or `undefined` if the last + * execution of the service was successful). + */ + lastError: Error | undefined; + /** * The function used to make an HTTP request. */ @@ -249,8 +255,6 @@ export class RpcService implements AbstractRpcService { */ readonly #fetchOptions: FetchOptions; - #lastError: unknown; - /** * A `loglevel` logger. */ @@ -326,16 +330,6 @@ export class RpcService implements AbstractRpcService { return this.#policy.getCircuitState(); } - /** - * @returns The last failure reason that the retry policy captured (or - * `undefined` if the last execution of the service was successful). - */ - getLastInnerFailureReason(): { error: unknown } | undefined { - return this.#lastError === undefined - ? undefined - : { error: this.#lastError }; - } - /** * Listens for when the RPC service retries the request. * @@ -552,12 +546,13 @@ export class RpcService implements AbstractRpcService { ); return await response.json(); }); - this.#lastError = undefined; + this.lastError = undefined; return jsonDecodedResponse; } catch (error) { log('REQUEST ERROR:', this.endpointUrl.toString(), error); - this.#lastError = error; + this.lastError = + error instanceof Error ? error : new Error(getErrorMessage(error)); if (error instanceof HttpError) { const status = error.httpStatus; From bbbfa9cd62871061b415badb9e48c757ac00d504 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 12:50:23 -0700 Subject: [PATCH 24/30] Remove redundant tag in log --- packages/network-controller/src/rpc-service/rpc-service.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index 7a8ed7d5bfe..10361e4b2c0 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -522,7 +522,7 @@ export class RpcService implements AbstractRpcService { let response: Response | undefined; try { log( - `[RpcService: ${this.endpointUrl}] Circuit state`, + `[${this.endpointUrl}] Circuit state`, this.#policy.getCircuitState(), ); const jsonDecodedResponse = await this.#policy.execute(async (data) => { From 4f9d84b8a24d18adc9e73d337a9b9ee56b15ebce Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 12:50:50 -0700 Subject: [PATCH 25/30] data -> context --- .../src/rpc-service/rpc-service.ts | 44 ++++++++++--------- 1 file changed, 23 insertions(+), 21 deletions(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts index 10361e4b2c0..97429c2ac02 100644 --- a/packages/network-controller/src/rpc-service/rpc-service.ts +++ b/packages/network-controller/src/rpc-service/rpc-service.ts @@ -525,27 +525,29 @@ export class RpcService implements AbstractRpcService { `[${this.endpointUrl}] Circuit state`, this.#policy.getCircuitState(), ); - const jsonDecodedResponse = await this.#policy.execute(async (data) => { - log( - 'REQUEST INITIATED:', - this.endpointUrl.toString(), - '::', - fetchOptions, - // @ts-expect-error This property _is_ here, the type of ServicePolicy - // is just wrong. 
- `(attempt ${data.attempt + 1})`, - ); - response = await this.#fetch(this.endpointUrl, fetchOptions); - if (!response.ok) { - throw new HttpError(response.status); - } - log( - 'REQUEST SUCCESSFUL:', - this.endpointUrl.toString(), - response.status, - ); - return await response.json(); - }); + const jsonDecodedResponse = await this.#policy.execute( + async (context) => { + log( + 'REQUEST INITIATED:', + this.endpointUrl.toString(), + '::', + fetchOptions, + // @ts-expect-error This property _is_ here, the type of + // ServicePolicy is just wrong. + `(attempt ${context.attempt + 1})`, + ); + response = await this.#fetch(this.endpointUrl, fetchOptions); + if (!response.ok) { + throw new HttpError(response.status); + } + log( + 'REQUEST SUCCESSFUL:', + this.endpointUrl.toString(), + response.status, + ); + return await response.json(); + }, + ); this.lastError = undefined; return jsonDecodedResponse; } catch (error) { From 18620242152defd1e6cd532ad8f41e6998e29809 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 12:53:49 -0700 Subject: [PATCH 26/30] Remove unused utility type --- packages/network-controller/src/rpc-service/shared.ts | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/packages/network-controller/src/rpc-service/shared.ts b/packages/network-controller/src/rpc-service/shared.ts index 668fe3fc570..c66cb1082c8 100644 --- a/packages/network-controller/src/rpc-service/shared.ts +++ b/packages/network-controller/src/rpc-service/shared.ts @@ -49,17 +49,6 @@ export type CockatielEventToEventListenerWithData = ( data: ExtendCockatielEventData, Data>, ) => void; -/** - * Converts a Cockatiel event type to an event listener type, but removing the - * requested keys from the data. - */ -export type CockatielEventToEventListenerWithoutData< - Event, - Keys extends PropertyKey, -> = ( - data: ExcludeCockatielEventData, Keys>, -) => void; - /** * Converts a Cockatiel event listener type to an event emitter type. */ From 2f29875e922e763607b1aac980a69c65c50be223 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 13:13:06 -0700 Subject: [PATCH 27/30] Tweak log statement --- .../network-controller/src/rpc-service/rpc-service-chain.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index 4f8b17f9482..cbc85b7d31f 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ -437,7 +437,7 @@ export class RpcServiceChain { // The service failed, and we throw whatever the error is. The calling // code can try again if it so desires. log( - `${isCircuitOpen ? "This service's circuit is open, but for some reason it wasn't handled above. " : "This service's circuit is closed. "}Re-throwing error.`, + `${isCircuitOpen ? '' : "This service's circuit is closed. 
"}Re-throwing error.`, ); throw error; } From 827696fced3d40b29dc158cf24bbfdd5e1a64d68 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 13:19:54 -0700 Subject: [PATCH 28/30] Tweak comment --- .../src/rpc-service/rpc-service-chain.ts | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index cbc85b7d31f..5c8f442b58b 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ -20,7 +20,8 @@ import type { } from './shared'; import { projectLogger, createModuleLogger } from '../logger'; -const log = createModuleLogger(projectLogger, 'RpcServiceChain'); +// const log = createModuleLogger(projectLogger, 'RpcServiceChain'); +const log = console.log.bind(console); /** * Statuses that the RPC service chain can be in. @@ -444,13 +445,12 @@ export class RpcServiceChain { } if (response) { - // If one of the services returned a successful response, assume that we - // won't need to hit any of the failover services following it and reset - // all of the policies of the following services. In particularly this - // means that if any of the failover services' circuits was open when - // requests were diverted back to the available service, that circuit will - // now be reset so that if we start hitting it again we don't get a - // "circuit broken" error. + // If one of the services is available, reset all of the circuits of the + // following services. If we didn't do this and the service became + // unavailable in the future, and any of the failovers' circuits were + // open (due to previous failures), we would receive a "circuit broken" + // error when we attempted to divert traffic to the failovers again. + // if (availableServiceIndex !== undefined) { for (const [i, service] of [...this.#services.entries()].slice( availableServiceIndex + 1, From 72de1d66f2ee7a392695866cc9729b97562248c5 Mon Sep 17 00:00:00 2001 From: Elliot Winkler Date: Fri, 21 Nov 2025 13:21:21 -0700 Subject: [PATCH 29/30] Revert log change --- .../network-controller/src/rpc-service/rpc-service-chain.ts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/packages/network-controller/src/rpc-service/rpc-service-chain.ts b/packages/network-controller/src/rpc-service/rpc-service-chain.ts index 5c8f442b58b..42a190f133b 100644 --- a/packages/network-controller/src/rpc-service/rpc-service-chain.ts +++ b/packages/network-controller/src/rpc-service/rpc-service-chain.ts @@ -20,8 +20,7 @@ import type { } from './shared'; import { projectLogger, createModuleLogger } from '../logger'; -// const log = createModuleLogger(projectLogger, 'RpcServiceChain'); -const log = console.log.bind(console); +const log = createModuleLogger(projectLogger, 'RpcServiceChain'); /** * Statuses that the RPC service chain can be in. 
From 0f55384192283a290e98d99653e8a32d165669ee Mon Sep 17 00:00:00 2001
From: Elliot Winkler
Date: Fri, 21 Nov 2025 13:33:04 -0700
Subject: [PATCH 30/30] Add comment for why we are ignoring 'isolated'

---
 packages/network-controller/CHANGELOG.md              |  2 +-
 .../network-controller/src/rpc-service/rpc-service.ts | 11 +++++++++++
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/packages/network-controller/CHANGELOG.md b/packages/network-controller/CHANGELOG.md
index 0642b1c89ba..7003d78b084 100644
--- a/packages/network-controller/CHANGELOG.md
+++ b/packages/network-controller/CHANGELOG.md
@@ -22,7 +22,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   - This event is now called `NetworkController:rpcEndpointRetried`.
   - The event payload has been changed as well: `failoverEndpointUrl` has been renamed to `endpointUrl`, and `endpointUrl` has been renamed to `primaryEndpointUrl`. In addition, `networkClientId` and `attempt` have been added to the payload.
 - **BREAKING:** Update `AbstractRpcService`/`RpcServiceRequestable` to remove `{ isolated: true }` from the `onBreak` event data type ([#7166](https://github.com/MetaMask/core/pull/7166))
-  - This represented the error produced when `.isolate` is called on a Cockatiel circuit breaker policy, which we never do.
+  - This represented the error produced when `isolate` is called on a Cockatiel circuit breaker policy, which we don't do (at least, not in the way it is designed to be called).
 - Move peer dependencies for controller and service packages to direct dependencies ([#7209](https://github.com/MetaMask/core/pull/7209))
   - The dependencies moved are:
     - `@metamask/error-reporting-service` (^3.0.0)
diff --git a/packages/network-controller/src/rpc-service/rpc-service.ts b/packages/network-controller/src/rpc-service/rpc-service.ts
index 97429c2ac02..b170989fc2e 100644
--- a/packages/network-controller/src/rpc-service/rpc-service.ts
+++ b/packages/network-controller/src/rpc-service/rpc-service.ts
@@ -353,6 +353,17 @@ export class RpcService implements AbstractRpcService {
    */
   onBreak(listener: Parameters[0]) {
     return this.#policy.onBreak((data) => {
+      // `{ isolated: true }` is a special object that shows up when `isolate`
+      // is called on the circuit breaker. Usually `isolate` is used to hold
+      // the circuit open, but we (ab)use this method in `createServicePolicy`
+      // to reset the circuit breaker policy. When we do this, we don't want
+      // to call `onBreak` handlers, because that would cause
+      // `NetworkController:rpcEndpointUnavailable` and
+      // `NetworkController:rpcEndpointChainUnavailable` to be published. So we
+      // have to ignore that object here. The consequence is that `isolate`
+      // doesn't function the way it is intended to, at least in the context of
+      // an RpcService. However, we are betting that we won't need to use it
+      // other than how we are already using it.
       if (!('isolated' in data)) {
         listener({ ...data, endpointUrl: this.endpointUrl.toString() });
       }
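
Below is a minimal, hypothetical sketch of the `{ isolated: true }` filtering described in the comment above. It is written against Cockatiel's public API rather than the actual `RpcService` code, and the `onRealBreak` helper and breaker configuration are assumptions made for this example:

```ts
import {
  circuitBreaker,
  handleAll,
  ConsecutiveBreaker,
  type FailureReason,
} from 'cockatiel';

// Hypothetical breaker configuration, not the one used by createServicePolicy.
const breakerPolicy = circuitBreaker(handleAll, {
  halfOpenAfter: 30_000,
  breaker: new ConsecutiveBreaker(3),
});

/**
 * Subscribes to circuit breaks, skipping the `{ isolated: true }` sentinel
 * that Cockatiel emits when `isolate()` is called, so that only genuine
 * failures reach the listener.
 */
function onRealBreak(listener: (reason: FailureReason<unknown>) => void) {
  return breakerPolicy.onBreak((data) => {
    if (!('isolated' in data)) {
      listener(data);
    }
  });
}

// Usage: only real breaks are logged; calling `breakerPolicy.isolate()`
// elsewhere will not trigger this listener.
onRealBreak((reason) => {
  console.error('Circuit broke:', reason);
});
```

The `'isolated' in data` check is the same narrowing the patch relies on: Cockatiel's `onBreak` payload is either a `FailureReason` or the sentinel object emitted when `isolate()` is called.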