fix: Retry updates #141
Changes from all commits
b94fb2a
be0edd9
6c9c88b
e8186b9
48eb18c
71a6d5d
b664e82
65813aa
7874610
src/robust_provider.rs

```diff
@@ -13,20 +13,18 @@
 use tracing::{error, info};
 
 #[derive(Error, Debug, Clone)]
-pub enum RobustProviderError {
-    #[error("RPC error: {0}")]
-    RpcError(Arc<RpcError<TransportErrorKind>>),
+pub enum Error {
     #[error("Operation timed out")]
     Timeout,
-    #[error("Retry failed after {0} tries")]
-    RetryFail(usize),
+    #[error("RPC call failed after exhausting all retry attempts: {0}")]
+    RetryFailure(Arc<RpcError<TransportErrorKind>>),
     #[error("Block not found, block number: {0}")]
     BlockNotFound(BlockNumberOrTag),
 }
 
-impl From<RpcError<TransportErrorKind>> for RobustProviderError {
+impl From<RpcError<TransportErrorKind>> for Error {
     fn from(err: RpcError<TransportErrorKind>) -> Self {
-        RobustProviderError::RpcError(Arc::new(err))
+        Error::RetryFailure(Arc::new(err))
     }
 }
```
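With the rename, callers match on `Error` instead of `RobustProviderError`, and the final transport failure travels inside `RetryFailure` via the `From` impl above (i.e. `rpc_err.into()`). A minimal caller-side sketch; the `describe` helper is hypothetical and not part of this PR:

```rust
// Hypothetical helper showing how a caller might report the new variants.
fn describe(err: &Error) -> String {
    match err {
        // The total wall-clock budget elapsed before any attempt succeeded.
        Error::Timeout => "operation timed out".to_string(),
        // All retries were exhausted; the last RpcError rides inside the variant.
        Error::RetryFailure(rpc_err) => format!("retries exhausted: {rpc_err}"),
        // The RPC succeeded but returned no block for the requested number/tag.
        Error::BlockNotFound(number) => format!("block {number} not found"),
    }
}
```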
```diff
@@ -89,17 +87,15 @@
     pub async fn get_block_by_number(
         &self,
         number: BlockNumberOrTag,
-    ) -> Result<N::BlockResponse, RobustProviderError> {
+    ) -> Result<N::BlockResponse, Error> {
         info!("eth_getBlockByNumber called");
-        let operation = async || {
-            self.provider.get_block_by_number(number).await.map_err(RobustProviderError::from)
-        };
+        let operation = async || self.provider.get_block_by_number(number).await;
         let result = self.retry_with_total_timeout(operation).await;
         if let Err(e) = &result {
             error!(error = %e, "eth_getByBlockNumber failed");
         }
 
         result?.ok_or_else(|| RobustProviderError::BlockNotFound(number))
     }
 
     /// Fetch the latest block number with retry and timeout.
```

Check failure on line 98 in src/robust_provider.rs — the annotated line is the unchanged `result?.ok_or_else(|| RobustProviderError::BlockNotFound(number))`, which still references the old type name.
```diff
@@ -108,10 +104,9 @@
     ///
     /// Returns an error if RPC call fails repeatedly even
     /// after exhausting retries or if the call times out.
-    pub async fn get_block_number(&self) -> Result<u64, RobustProviderError> {
+    pub async fn get_block_number(&self) -> Result<u64, Error> {
         info!("eth_getBlockNumber called");
-        let operation =
-            async || self.provider.get_block_number().await.map_err(RobustProviderError::from);
+        let operation = async || self.provider.get_block_number().await;
         let result = self.retry_with_total_timeout(operation).await;
         if let Err(e) = &result {
             error!(error = %e, "eth_getBlockNumber failed");
```
```diff
@@ -128,10 +123,9 @@
     pub async fn get_block_by_hash(
         &self,
         hash: alloy::primitives::BlockHash,
-    ) -> Result<Option<N::BlockResponse>, RobustProviderError> {
+    ) -> Result<Option<N::BlockResponse>, Error> {
         info!("eth_getBlockByHash called");
-        let operation =
-            async || self.provider.get_block_by_hash(hash).await.map_err(RobustProviderError::from);
+        let operation = async || self.provider.get_block_by_hash(hash).await;
         let result = self.retry_with_total_timeout(operation).await;
         if let Err(e) = &result {
             error!(error = %e, "eth_getBlockByHash failed");
```
```diff
@@ -145,10 +139,9 @@
     ///
     /// Returns an error if RPC call fails repeatedly even
     /// after exhausting retries or if the call times out.
-    pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>, RobustProviderError> {
+    pub async fn get_logs(&self, filter: &Filter) -> Result<Vec<Log>, Error> {
         info!("eth_getLogs called");
-        let operation =
-            async || self.provider.get_logs(filter).await.map_err(RobustProviderError::from);
+        let operation = async || self.provider.get_logs(filter).await;
         let result = self.retry_with_total_timeout(operation).await;
         if let Err(e) = &result {
             error!(error = %e, "eth_getLogs failed");
```
```diff
@@ -162,16 +155,10 @@
     ///
     /// Returns an error if RPC call fails repeatedly even
     /// after exhausting retries or if the call times out.
-    pub async fn subscribe_blocks(
-        &self,
-    ) -> Result<Subscription<N::HeaderResponse>, RobustProviderError> {
+    pub async fn subscribe_blocks(&self) -> Result<Subscription<N::HeaderResponse>, Error> {
         info!("eth_subscribe called");
-        let provider = self.provider.clone();
-        let result = self
-            .retry_with_total_timeout(|| async {
-                provider.subscribe_blocks().await.map_err(RobustProviderError::from)
-            })
-            .await;
+        let operation = async || self.provider.subscribe_blocks().await;
+        let result = self.retry_with_total_timeout(operation).await;
         if let Err(e) = &result {
             error!(error = %e, "eth_subscribe failed");
         }
```
```diff
@@ -189,13 +176,10 @@
     /// - Returns [`RpcError<TransportErrorKind>`] with message "total operation timeout exceeded"
     ///   if the overall timeout elapses.
     /// - Propagates any [`RpcError<TransportErrorKind>`] from the underlying retries.
-    async fn retry_with_total_timeout<T, F, Fut>(
-        &self,
-        operation: F,
-    ) -> Result<T, RobustProviderError>
+    async fn retry_with_total_timeout<T, F, Fut>(&self, operation: F) -> Result<T, Error>
     where
         F: Fn() -> Fut,
-        Fut: Future<Output = Result<T, RobustProviderError>>,
+        Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
```

Collaborator (author): Reverting the change asked for in #135 (comment).

```diff
     {
         let retry_strategy = ExponentialBuilder::default()
             .with_max_times(self.max_retries)

@@ -207,9 +191,8 @@
         )
         .await
         {
-            Ok(Ok(res)) => Ok(res),
-            Ok(Err(_)) => Err(RobustProviderError::RetryFail(self.max_retries + 1)),
-            Err(_) => Err(RobustProviderError::Timeout),
+            Ok(res) => res.map_err(Error::from),
+            Err(_) => Err(Error::Timeout),
         }
     }
 }
```
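Putting the pieces together: the retried operation now yields `Result<T, RpcError<TransportErrorKind>>`, and the conversion into `Error` happens once, at the timeout boundary. Below is a simplified, self-contained sketch of that control flow, not the PR's implementation: it swaps backon's `ExponentialBuilder` for a plain fixed-delay loop, takes `max_timeout`, `max_retries`, and `retry_delay` as assumed parameters, and assumes the alloy import paths shown plus the `Error` type from the diff above.

```rust
use std::{future::Future, time::Duration};

use alloy::transports::{RpcError, TransportErrorKind};

// Simplified stand-in for `retry_with_total_timeout`; the PR uses backon's
// ExponentialBuilder, this sketch retries with a fixed delay instead.
async fn retry_with_total_timeout_sketch<T, F, Fut>(
    operation: F,
    max_timeout: Duration,
    max_retries: usize,
    retry_delay: Duration,
) -> Result<T, Error>
where
    F: Fn() -> Fut,
    Fut: Future<Output = Result<T, RpcError<TransportErrorKind>>>,
{
    // Retry until success or until the attempt budget is spent.
    let retried = async {
        let mut last_err = None;
        for _ in 0..=max_retries {
            match operation().await {
                Ok(value) => return Ok(value),
                Err(err) => {
                    last_err = Some(err);
                    tokio::time::sleep(retry_delay).await;
                }
            }
        }
        Err(last_err.expect("at least one attempt was made"))
    };

    // The total timeout bounds *all* attempts, not each one individually.
    match tokio::time::timeout(max_timeout, retried).await {
        // Success, or the last RpcError converted once via the `From` impl.
        Ok(res) => res.map_err(Error::from),
        // The overall budget elapsed first.
        Err(_) => Err(Error::Timeout),
    }
}
```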
```diff
@@ -243,7 +226,8 @@
         let result = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
-                Ok(call_count.load(Ordering::SeqCst))
+                let count = call_count.load(Ordering::SeqCst);
+                Ok(count)
             })
             .await;
```
```diff
@@ -259,12 +243,10 @@
         let result = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
-                if call_count.load(Ordering::SeqCst) < 3 {
-                    Err(RobustProviderError::RpcError(Arc::new(TransportErrorKind::custom_str(
-                        "temp error",
-                    ))))
-                } else {
-                    Ok(call_count.load(Ordering::SeqCst))
+                let count = call_count.load(Ordering::SeqCst);
+                match count {
+                    3 => Ok(count),
+                    _ => Err(TransportErrorKind::BackendGone.into()),
                 }
             })
             .await;
```
```diff
@@ -278,21 +260,19 @@
         let call_count = AtomicUsize::new(0);

-        let result = provider
+        let result: Result<(), Error> = provider
             .retry_with_total_timeout(|| async {
                 call_count.fetch_add(1, Ordering::SeqCst);
                 // permanent error
-                Err::<i32, RobustProviderError>(RobustProviderError::Timeout)
+                Err(TransportErrorKind::BackendGone.into())
```
Collaborator (author), on lines -285 to +266: Presumably …
```diff
             })
             .await;

-        let err = result.unwrap_err();
-        assert!(matches!(err, RobustProviderError::RetryFail(3)));
+        assert!(matches!(result, Err(Error::RetryFailure(_))));
         assert_eq!(call_count.load(Ordering::SeqCst), 3);
     }

     #[tokio::test]
-    async fn test_retry_with_timeout_respects_total_delay() {
+    async fn test_retry_with_timeout_respects_max_timeout() {
         let max_timeout = 50;
         let provider = test_provider(max_timeout, 10, 1);
```
```diff
@@ -303,7 +283,6 @@
             })
             .await;

-        let err = result.unwrap_err();
-        assert!(matches!(err, RobustProviderError::Timeout));
+        assert!(matches!(result, Err(Error::Timeout)));
     }
 }
```
Review comment: It should be safe to assume that the lib user already knows the max retry count, so we should only notify them that retries have failed and with what error.
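Concretely, the caller supplies the retry budget when configuring the provider, so the error only needs to carry what finally went wrong. A hypothetical caller-side report, where `configured_retries` comes from the caller's own configuration rather than from the error:

```rust
// Hypothetical reporting helper, not part of the PR.
fn report_failure(err: &Error, configured_retries: usize) {
    if let Error::RetryFailure(source) = err {
        // The retry count is caller-side knowledge; the source RpcError rides in the variant.
        eprintln!("gave up after {configured_retries} retries: {source}");
    }
}
```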