-
Notifications
You must be signed in to change notification settings - Fork 25.6k
Factor out common retrying logic #136663
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Factor out common retrying logic #136663
Changes from all commits
33c85bc
419ea29
7b61719
643883f
82f871a
c42cc7f
7e936c2
4629790
832141e
968d89b
e06d79a
972a505
c4fcaa1
3670b99
cec7b7f
2128fd1
66900b3
d775068
36ed417
6ccd458
7b2ee50
dd86c6d
eecda28
6fcef80
eb239b7
7634e66
e4b18ef
3d2c246
c88f8e2
14cff38
167b29c
ce4bd1f
0e3ab79
a49cbed
47f1834
5b23aa9
af8bc55
86aa89f
e3d54da
792e6de
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,89 @@ | ||
| /* | ||
| * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one | ||
| * or more contributor license agreements. Licensed under the "Elastic License | ||
| * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side | ||
| * Public License v 1"; you may not use this file except in compliance with, at | ||
| * your election, the "Elastic License 2.0", the "GNU Affero General Public | ||
| * License v3.0 only", or the "Server Side Public License, v 1". | ||
| */ | ||
|
|
||
| package org.elasticsearch.repositories.azure; | ||
|
|
||
| import com.azure.core.exception.HttpResponseException; | ||
|
|
||
| import org.elasticsearch.ExceptionsHelper; | ||
| import org.elasticsearch.common.blobstore.OperationPurpose; | ||
| import org.elasticsearch.common.blobstore.RetryingInputStream; | ||
| import org.elasticsearch.core.Nullable; | ||
| import org.elasticsearch.repositories.blobstore.RequestedRangeNotSatisfiedException; | ||
| import org.elasticsearch.rest.RestStatus; | ||
|
|
||
| import java.io.IOException; | ||
| import java.nio.file.NoSuchFileException; | ||
|
|
||
| public class AzureRetryingInputStream extends RetryingInputStream<String> { | ||
|
|
||
/**
 * Creates a retrying stream over the given Azure blob.
 *
 * @param azureBlobStore the store used to open the underlying stream
 * @param purpose        why this read is being performed
 * @param blob           name of the blob to read
 * @param position       offset at which reading starts
 * @param length         number of bytes to read, or {@code null} to read to the end of
 *                       the blob (encoded as the sentinel end offset {@code Long.MAX_VALUE - 1})
 * @throws IOException if the initial stream cannot be opened
 */
protected AzureRetryingInputStream(AzureBlobStore azureBlobStore, OperationPurpose purpose, String blob, long position, Long length)
    throws IOException {
    // Java requires super() to be the first statement, so the end offset is computed inline.
    super(
        new AzureBlobStoreServices(azureBlobStore, purpose, blob),
        purpose,
        position,
        length != null ? position + length : Long.MAX_VALUE - 1
    );
}
|
|
||
| private record AzureBlobStoreServices(AzureBlobStore blobStore, OperationPurpose purpose, String blob) | ||
| implements | ||
| RetryingInputStream.BlobStoreServices<String> { | ||
|
|
||
| @Override | ||
| public InputStreamAtVersion<String> getInputStreamAtVersion(@Nullable String version, long start, long end) throws IOException { | ||
| try { | ||
| final Long length = end < Long.MAX_VALUE - 1 ? end - start : null; | ||
| final AzureBlobStore.AzureInputStream inputStream = blobStore.getInputStream(purpose, blob, start, length, version); | ||
| return new InputStreamAtVersion<>(inputStream, inputStream.getETag()); | ||
| } catch (Exception e) { | ||
| if (ExceptionsHelper.unwrap(e, HttpResponseException.class) instanceof HttpResponseException httpResponseException) { | ||
| final var httpStatusCode = httpResponseException.getResponse().getStatusCode(); | ||
| if (httpStatusCode == RestStatus.NOT_FOUND.getStatus()) { | ||
| throw new NoSuchFileException("Blob [" + blob + "] not found"); | ||
| } | ||
| if (httpStatusCode == RestStatus.REQUESTED_RANGE_NOT_SATISFIED.getStatus()) { | ||
| throw new RequestedRangeNotSatisfiedException(blob, start, end == Long.MAX_VALUE - 1 ? -1 : end - start, e); | ||
| } | ||
| } | ||
| switch (e) { | ||
| case RuntimeException runtimeException -> throw runtimeException; | ||
| case IOException ioException -> throw ioException; | ||
| default -> throw new IOException("Unable to get input stream for blob [" + blob + "]", e); | ||
| } | ||
| } | ||
| } | ||
|
|
||
| @Override | ||
| public void onRetryStarted(String action) { | ||
| // No metrics for Azure | ||
| } | ||
|
|
||
| @Override | ||
| public void onRetrySucceeded(String action, long numberOfRetries) { | ||
| // No metrics for Azure | ||
| } | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. S3 has its own special metrics for these; we can probably make that consistent now, but I wonder if we want to do that in a separate PR to keep the volume down. |
||
|
|
||
| @Override | ||
| public long getMeaningfulProgressSize() { | ||
| return Math.max(1L, blobStore.getReadChunkSize() / 100L); | ||
| } | ||
|
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This value seems kind-of arbitrary. The Azure value calculates to about 320KiB, the GCS one to 160KiB, and the S3 one to 1MiB; they are all functions of various loosely-related thresholds. Perhaps it makes sense to make this a first-class setting that is consistent across the CSPs? |
||
|
|
||
| @Override | ||
| public int getMaxRetries() { | ||
| return blobStore.getMaxReadRetries(); | ||
| } | ||
|
|
||
| @Override | ||
| public String getBlobDescription() { | ||
| return blob; | ||
| } | ||
| } | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
We disable Azure client retries in these downloads so we can control them in the `RetryingInputStream`.