From 5f1fd9a2a7e3b6d6230d0377f3f8c3c6785cea13 Mon Sep 17 00:00:00 2001 From: Garrett Beatty Date: Thu, 6 Nov 2025 10:17:24 -0500 Subject: [PATCH] Add UploadWithResponseAsync api stack-info: PR: https://github.com/aws/aws-sdk-net/pull/4105, branch: GarrettBeatty/stacked/13 --- .../77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json | 11 + .../Internal/AbortMultipartUploadsCommand.cs | 2 +- .../Custom/Transfer/Internal/BaseCommand.cs | 11 +- .../Transfer/Internal/DownloadCommand.cs | 3 +- .../Internal/DownloadDirectoryCommand.cs | 2 +- .../Internal/MultipartUploadCommand.cs | 2 +- .../Transfer/Internal/OpenStreamCommand.cs | 7 +- .../Transfer/Internal/SimpleUploadCommand.cs | 2 +- .../Internal/UploadDirectoryCommand.cs | 2 +- .../AbortMultipartUploadsCommand.async.cs | 6 +- .../Internal/_async/BaseCommand.async.cs | 9 +- .../Internal/_async/DownloadCommand.async.cs | 7 +- .../_async/MultipartUploadCommand.async.cs | 10 +- .../_async/OpenStreamCommand.async.cs | 6 +- .../_async/SimpleUploadCommand.async.cs | 6 +- .../DownloadDirectoryCommand.cs | 6 +- .../UploadDirectoryCommand.cs | 6 +- .../S3/Custom/Transfer/TransferUtility.cs | 2 +- ...ferUtilityAbortMultipartUploadsResponse.cs | 35 +++ ...ransferUtilityDownloadDirectoryResponse.cs | 26 ++ .../TransferUtilityUploadDirectoryResponse.cs | 35 +++ .../Transfer/_async/TransferUtility.async.cs | 163 +++++++++++- .../_bcl+netstandard/TransferUtility.sync.cs | 148 +++++++++++ .../IntegrationTests/TransferUtilityTests.cs | 243 ++++++++++++++++++ 24 files changed, 710 insertions(+), 40 deletions(-) create mode 100644 generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs create mode 100644 sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs diff --git 
a/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json new file mode 100644 index 000000000000..be509aae4368 --- /dev/null +++ b/generator/.DevConfigs/77d980ad-8f58-4f2e-97f8-d2c8c5ba3732.json @@ -0,0 +1,11 @@ +{ + "services": [ + { + "serviceName": "S3", + "type": "minor", + "changeLogMessages": [ + "Create new UploadWithResponse API that returns response metadata information for transfer utility." + ] + } + ] +} diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs index a0313c75b6c0..9bf25dc5421d 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/AbortMultipartUploadsCommand.cs @@ -28,7 +28,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityAbortMultipartUploadRequest _request; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs index 428758fa54e6..71d2685e6fda 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/BaseCommand.cs @@ -30,13 +30,12 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + /// + /// Generic base command that returns a typed response + /// + /// Type of response returned by the command + internal abstract partial class BaseCommand where TResponse : class { - public virtual object Return - { - get { return null; } - } - internal GetObjectRequest ConvertToGetObjectRequest(BaseDownloadRequest request) { GetObjectRequest getRequest = new GetObjectRequest() diff --git 
a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs index 6359704fc0cd..f8e45d7b20fe 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { static int MAX_BACKOFF_IN_MILLISECONDS = (int)TimeSpan.FromSeconds(30).TotalMilliseconds; @@ -176,4 +176,3 @@ static ByteRange ByteRangeRemainingForDownload(string filepath) } } } - diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs index 0140554ded39..5058960d9a06 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/DownloadDirectoryCommand.cs @@ -33,7 +33,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { private readonly IAmazonS3 _s3Client; private readonly TransferUtilityDownloadDirectoryRequest _request; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs index dca8f3f076ac..9c6374502885 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/MultipartUploadCommand.cs @@ -37,7 +37,7 @@ namespace Amazon.S3.Transfer.Internal /// /// The command to manage an upload using the S3 multipart API. 
/// - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { IAmazonS3 _s3Client; long _partSize; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs index 57eab52d3f98..0fdfc64bcbae 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/OpenStreamCommand.cs @@ -29,7 +29,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityOpenStreamRequest _request; @@ -59,10 +59,5 @@ internal Stream ResponseStream { get { return this._responseStream; } } - - public override object Return - { - get { return this.ResponseStream; } - } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs index d8de23a6145b..799b36fcde28 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/SimpleUploadCommand.cs @@ -36,7 +36,7 @@ namespace Amazon.S3.Transfer.Internal /// /// This command is for doing regular PutObject requests. 
/// - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { IAmazonS3 _s3Client; TransferUtilityConfig _config; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs index e4be9b27aa74..693a9ef8325a 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/UploadDirectoryCommand.cs @@ -32,7 +32,7 @@ namespace Amazon.S3.Transfer.Internal /// This command files all the files that meets the criteria specified in the TransferUtilityUploadDirectoryRequest request /// and uploads them. /// - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { TransferUtilityUploadDirectoryRequest _request; TransferUtility _utility; diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs index 4e55afcd34e8..c4ae5bb8b9e5 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/AbortMultipartUploadsCommand.async.cs @@ -24,10 +24,10 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class AbortMultipartUploadsCommand : BaseCommand + internal partial class AbortMultipartUploadsCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { if (string.IsNullOrEmpty(this._request.BucketName)) { @@ -84,6 +84,8 @@ await asyncThrottler.WaitAsync(cancellationToken) await WhenAllOrFirstExceptionAsync(pendingTasks,cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); + + return new 
TransferUtilityAbortMultipartUploadsResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs index f9591f6d1d68..65ee8d8cb4c8 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/BaseCommand.async.cs @@ -24,9 +24,12 @@ namespace Amazon.S3.Transfer.Internal { - internal abstract partial class BaseCommand + internal abstract partial class BaseCommand where TResponse : class { - public abstract Task ExecuteAsync(CancellationToken cancellationToken); + /// + /// Executes the command and returns a typed response + /// + public abstract Task ExecuteAsync(CancellationToken cancellationToken); /// /// Waits for all of the tasks to complete or till any task fails or is canceled. @@ -80,7 +83,7 @@ await completedTask } } - protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) + protected static async Task ExecuteCommandAsync(BaseCommand command, CancellationTokenSource internalCts, SemaphoreSlim throttler) where T : class { try { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs index 3e536a4bb607..6baef9262774 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/DownloadCommand.async.cs @@ -28,9 +28,9 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadCommand : BaseCommand + internal partial class DownloadCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); GetObjectRequest getRequest = 
ConvertToGetObjectRequest(this._request); @@ -130,6 +130,9 @@ await response.WriteResponseStreamToFileAsync(this._request.FilePath, true, canc } WaitBeforeRetry(retries); } while (shouldRetry); + + // TODO map and return response + return new TransferUtilityDownloadResponse(); } private static bool HandleExceptionForHttpClient(Exception exception, int retries, int maxRetries) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs index dd747e3d83e8..3f29336f0fe0 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/MultipartUploadCommand.async.cs @@ -27,20 +27,20 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class MultipartUploadCommand : BaseCommand + internal partial class MultipartUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } Dictionary _expectedUploadParts = new Dictionary(); - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { // Fire transfer initiated event FIRST, before choosing path FireTransferInitiatedEvent(); if ( (this._fileTransporterRequest.InputStream != null && !this._fileTransporterRequest.InputStream.CanSeek) || this._fileTransporterRequest.ContentLength == -1) { - await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); + return await UploadUnseekableStreamAsync(this._fileTransporterRequest, cancellationToken).ConfigureAwait(false); } else { @@ -144,6 +144,7 @@ await localThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } catch (Exception e) { @@ -275,7 +276,7 @@ private 
void AbortMultipartUpload(string uploadId) Logger.InfoFormat("Error attempting to abort multipart for key {0}: {1}", this._fileTransporterRequest.Key, e.Message); } } - private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + private async Task UploadUnseekableStreamAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) { cancellationToken.ThrowIfCancellationRequested(); @@ -376,6 +377,7 @@ private void AbortMultipartUpload(string uploadId) var mappedResponse = ResponseMapper.MapCompleteMultipartUploadResponse(completeResponse); FireTransferCompletedEvent(mappedResponse); + return mappedResponse; } } catch (Exception ex) diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs index 192560f837ee..8c954d256fab 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/OpenStreamCommand.async.cs @@ -24,14 +24,16 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class OpenStreamCommand : BaseCommand + internal partial class OpenStreamCommand : BaseCommand { - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { var getRequest = ConstructRequest(); var response = await _s3Client.GetObjectAsync(getRequest, cancellationToken) .ConfigureAwait(continueOnCapturedContext: false); _responseStream = response.ResponseStream; + // TODO map and return response + return new TransferUtilityOpenStreamResponse(); } } } diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs index 
51680eaaba09..1d936f0bdf5c 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_async/SimpleUploadCommand.async.cs @@ -24,11 +24,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class SimpleUploadCommand : BaseCommand + internal partial class SimpleUploadCommand : BaseCommand { public SemaphoreSlim AsyncThrottler { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { try { @@ -47,6 +47,8 @@ await this.AsyncThrottler.WaitAsync(cancellationToken) var mappedResponse = ResponseMapper.MapPutObjectResponse(response); FireTransferCompletedEvent(mappedResponse); + + return mappedResponse; } catch (Exception) { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs index a160bc1504f4..be3fb4f0ae33 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/DownloadDirectoryCommand.cs @@ -25,7 +25,7 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class DownloadDirectoryCommand : BaseCommand + internal partial class DownloadDirectoryCommand : BaseCommand { TransferUtilityConfig _config; @@ -38,7 +38,7 @@ internal DownloadDirectoryCommand(IAmazonS3 s3Client, TransferUtilityDownloadDir this._config = config; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { ValidateRequest(); EnsureDirectoryExists(new DirectoryInfo(this._request.LocalDirectory)); @@ -112,6 +112,8 @@ await asyncThrottler.WaitAsync(cancellationToken) } await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) 
.ConfigureAwait(continueOnCapturedContext: false); + + return new TransferUtilityDownloadDirectoryResponse(); } finally { diff --git a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs index 75e1744d5435..10f09be9ed07 100644 --- a/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs +++ b/sdk/src/Services/S3/Custom/Transfer/Internal/_bcl+netstandard/UploadDirectoryCommand.cs @@ -23,11 +23,11 @@ namespace Amazon.S3.Transfer.Internal { - internal partial class UploadDirectoryCommand : BaseCommand + internal partial class UploadDirectoryCommand : BaseCommand { public bool UploadFilesConcurrently { get; set; } - public override async Task ExecuteAsync(CancellationToken cancellationToken) + public override async Task ExecuteAsync(CancellationToken cancellationToken) { string prefix = GetKeyPrefix(); @@ -87,6 +87,8 @@ await WhenAllOrFirstExceptionAsync(pendingTasks, cancellationToken) if (asyncThrottler != null) asyncThrottler.Dispose(); } + + return new TransferUtilityUploadDirectoryResponse(); } private Task GetFiles(string path, string searchPattern, SearchOption searchOption, CancellationToken cancellationToken) diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs index 472a5933ba28..f4dde2c232e2 100644 --- a/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtility.cs @@ -386,7 +386,7 @@ private static TransferUtilityUploadRequest ConstructUploadRequest(Stream stream }; } - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) + internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request) { validate(request); diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs 
b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs new file mode 100644 index 000000000000..6c63c4b0a75b --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityAbortMultipartUploadsResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility abort multipart uploads operations. + /// Contains response metadata from abort multipart uploads operations. + /// + public class TransferUtilityAbortMultipartUploadsResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs new file mode 100644 index 000000000000..6df0c1c5a619 --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityDownloadDirectoryResponse.cs @@ -0,0 +1,26 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. This file is distributed + * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language governing + * permissions and limitations under the License. + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Contains the details returned from a Transfer Utility download directory operation. + /// + public class TransferUtilityDownloadDirectoryResponse + { + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs new file mode 100644 index 000000000000..94f32558d1fb --- /dev/null +++ b/sdk/src/Services/S3/Custom/Transfer/TransferUtilityUploadDirectoryResponse.cs @@ -0,0 +1,35 @@ +/******************************************************************************* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"). You may not use + * this file except in compliance with the License. A copy of the License is located at + * + * http://aws.amazon.com/apache2.0 + * + * or in the "license" file accompanying this file. + * This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR + * CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ * ***************************************************************************** + * __ _ _ ___ + * ( )( \/\/ )/ __) + * /__\ \ / \__ \ + * (_)(_) \/\/ (___/ + * + * AWS SDK for .NET + * API Version: 2006-03-01 + * + */ + +using Amazon.Runtime; + +namespace Amazon.S3.Transfer +{ + /// + /// Response object for Transfer Utility upload directory operations. + /// Contains response metadata from upload directory operations. + /// + public class TransferUtilityUploadDirectoryResponse + { + // Empty placeholder class - properties will be added in future iterations + } +} diff --git a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs index 35205ad93f3a..92307954b039 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_async/TransferUtility.async.cs @@ -217,6 +217,167 @@ public partial class TransferUtility : ITransferUtility await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); } } + + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. 
In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(string filePath, string bucketName, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(string filePath, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(filePath, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. 
+ /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The buffer for the multipart upload is controlled by S3Constants.MinPartSize + /// and the default value is 5 megabytes. You can also adjust the read buffer size(i.e.how many bytes to read before writing to the part buffer) + /// via the BufferSize property on the ClientConfig.The default value for this is 8192 bytes. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. + public async Task UploadWithResponseAsync(Stream stream, string bucketName, string key, CancellationToken cancellationToken = default(CancellationToken)) + { + var request = ConstructUploadRequest(stream, bucketName, key); + return await UploadWithResponseAsync(request, cancellationToken).ConfigureAwait(false); + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. 
+ /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploadsAsync() to abort the incomplete multipart uploads. + /// + /// + /// For nonseekable streams or streams with an unknown length, TransferUtility will use multipart upload and buffer up to a part size in memory + /// until the final part is reached and complete the upload. The part size buffer for the multipart upload is controlled by the partSize + /// specified on the TransferUtilityUploadRequest, and if none is specified it defaults to S3Constants.MinPartSize (5 megabytes). + /// You can also adjust the read buffer size (i.e. how many bytes to read before adding it to the + /// part buffer) via the BufferSize property on the ClientConfig. The default value for this is 8192 bytes. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// + /// A cancellation token that can be used by other objects or threads to receive notice of cancellation. + /// + /// The task object representing the asynchronous operation with upload response metadata. 
+ public async Task<TransferUtilityUploadResponse> UploadWithResponseAsync(TransferUtilityUploadRequest request, CancellationToken cancellationToken = default(CancellationToken)) + { + using(CreateSpan(nameof(UploadWithResponseAsync), null, Amazon.Runtime.Telemetry.Tracing.SpanKind.CLIENT)) + { + CheckForBlockedArn(request.BucketName, "Upload"); + var command = GetUploadCommand(request, null); + return await command.ExecuteAsync(cancellationToken).ConfigureAwait(false); + } + } #endregion #region AbortMultipartUploads @@ -346,7 +507,7 @@ public partial class TransferUtility : ITransferUtility #endregion - internal BaseCommand GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) + internal BaseCommand<TransferUtilityUploadResponse> GetUploadCommand(TransferUtilityUploadRequest request, SemaphoreSlim asyncThrottler) { validate(request); if (IsMultipartUpload(request)) diff --git a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs index f1ff62ce820d..c4d99745e3da 100644 --- a/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs +++ b/sdk/src/Services/S3/Custom/Transfer/_bcl+netstandard/TransferUtility.sync.cs @@ -285,6 +285,154 @@ public void Upload(TransferUtilityUploadRequest request) } } + /// + /// Uploads the specified file and returns response metadata. + /// The object key is derived from the file's name. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. 
+ /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName) + { + try + { + return UploadWithResponseAsync(filePath, bucketName).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the specified file and returns response metadata. + /// Multiple threads are used to read the file and perform multiple uploads in parallel. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The file path of the file to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the file to. + /// + /// + /// The key under which the Amazon S3 object is stored. 
+ /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(string filePath, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(filePath, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the contents of the specified stream and returns response metadata. + /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// The stream to read to obtain the content to upload. + /// + /// + /// The target Amazon S3 bucket, that is, the name of the bucket to upload the stream to. + /// + /// + /// The key under which the Amazon S3 object is stored. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(Stream stream, string bucketName, string key) + { + try + { + return UploadWithResponseAsync(stream, bucketName, key).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + + /// + /// Uploads the file or stream specified by the request and returns response metadata. + /// To track the progress of the upload, + /// add an event listener to the request's UploadProgressEvent. 
+ /// For large uploads, the file will be divided and uploaded in parts using + /// Amazon S3's multipart API. The parts will be reassembled as one object in + /// Amazon S3. + /// + /// + /// + /// If you are uploading large files, TransferUtility will use multipart upload to fulfill the request. + /// If a multipart upload is interrupted, TransferUtility will attempt to abort the multipart upload. + /// Under certain circumstances (network outage, power failure, etc.), TransferUtility will not be able + /// to abort the multipart upload. In this case, in order to stop getting charged for the storage of uploaded parts, + /// you should manually invoke TransferUtility.AbortMultipartUploads() to abort the incomplete multipart uploads. + /// + /// + /// + /// Contains all the parameters required to upload to Amazon S3. + /// + /// The upload response metadata. + public TransferUtilityUploadResponse UploadWithResponse(TransferUtilityUploadRequest request) + { + try + { + return UploadWithResponseAsync(request).Result; + } + catch (AggregateException e) + { + ExceptionDispatchInfo.Capture(e.InnerException).Throw(); + return null; + } + } + #endregion #region OpenStream diff --git a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs index 6aaa7ae3a5e3..967f07d4884f 100644 --- a/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs +++ b/sdk/test/Services/S3/IntegrationTests/TransferUtilityTests.cs @@ -13,6 +13,7 @@ using Amazon.Util; using System.Net.Mime; using System.Runtime.InteropServices.ComTypes; +using System.Threading.Tasks; namespace AWSSDK_DotNet.IntegrationTests.Tests.S3 { @@ -1433,6 +1434,248 @@ public void TestMultipartUploadWithSetContentTypeNotOverwritten() Assert.IsTrue(metadata.Headers.ContentType.Equals(MediaTypeNames.Text.Plain)); } + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncSmallFileTest() + { + var fileName = 
UtilityMethods.GenerateName(@"UploadWithResponseTest\SmallFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 1 * MEG_SIZE; // Small file for single-part upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For small files, we expect single-part upload behavior - ETag should be MD5 format (no quotes or dashes) + // ETag format varies, so we just ensure it's a valid non-empty string + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncLargeFileTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\LargeFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 20 * MEG_SIZE; // Large file for multipart upload + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { 
+ BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // For multipart uploads, ETag format is different (contains dashes) + // We just validate it's a valid string for now + Console.WriteLine($"ETag (multipart): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually uploaded by checking metadata + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded file size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncStreamTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\StreamFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 5 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + using (var fileStream = File.OpenRead(path)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + InputStream = fileStream, + Key = fileName, + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields that should always be present + 
Assert.IsNotNull(response.ETag, "ETag should not be null"); + Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + Console.WriteLine($"ETag (stream): {response.ETag}"); + Console.WriteLine($"VersionId: {response.VersionId}"); + + // Validate file was actually streamed and uploaded correctly + var metadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + }); + Assert.AreEqual(fileSize, metadata.ContentLength, "Uploaded stream size should match original"); + Assert.AreEqual(response.ETag, metadata.ETag, "ETag from response should match object metadata"); + + // Validate content by downloading and comparing + var downloadPath = path + ".download"; + await transferUtility.DownloadAsync(new TransferUtilityDownloadRequest + { + BucketName = bucketName, + Key = fileName, + FilePath = downloadPath + }); + UtilityMethods.CompareFiles(path, downloadPath); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncWithChecksumTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\ChecksumFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 2 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + // Calculate checksum for the file + var fileBytes = File.ReadAllBytes(path); + var precalculatedChecksum = CryptoUtilFactory.CryptoInstance.ComputeCRC32Hash(fileBytes); + + using (var transferUtility = new TransferUtility(Client)) + { + var request = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName, + ContentType = octetStreamContentType, + ChecksumCRC32 = precalculatedChecksum + }; + + var response = await transferUtility.UploadWithResponseAsync(request); + + // Validate response object is not null + Assert.IsNotNull(response, "Response should not be null"); + + // Validate essential response fields + Assert.IsNotNull(response.ETag, "ETag should not be null"); + 
Assert.IsTrue(response.ETag.Length > 0, "ETag should not be empty"); + + // Validate checksum fields if they should be present + // Note: Checksum fields in response may not always be set depending on S3 behavior + Console.WriteLine($"ETag: {response.ETag}"); + Console.WriteLine($"ChecksumCRC32: {response.ChecksumCRC32}"); + Console.WriteLine($"ChecksumType: {response.ChecksumType}"); + } + } + + [TestMethod] + [TestCategory("S3")] + public async Task UploadWithResponseAsyncCompareWithLegacyUploadTest() + { + var fileName = UtilityMethods.GenerateName(@"UploadWithResponseTest\CompareFile"); + var path = Path.Combine(BasePath, fileName); + var fileSize = 8 * MEG_SIZE; + UtilityMethods.GenerateFile(path, fileSize); + + using (var transferUtility = new TransferUtility(Client)) + { + // Test the new UploadWithResponseAsync method + var responseRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-with-response", + ContentType = octetStreamContentType + }; + + var response = await transferUtility.UploadWithResponseAsync(responseRequest); + + // Test the legacy Upload method for comparison + var legacyRequest = new TransferUtilityUploadRequest + { + BucketName = bucketName, + FilePath = path, + Key = fileName + "-legacy", + ContentType = octetStreamContentType + }; + + await transferUtility.UploadAsync(legacyRequest); + + // Validate that both uploads resulted in the same file being uploaded + var responseMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-with-response" + }); + + var legacyMetadata = await Client.GetObjectMetadataAsync(new GetObjectMetadataRequest + { + BucketName = bucketName, + Key = fileName + "-legacy" + }); + + // Both should have the same file size and content type + Assert.AreEqual(responseMetadata.ContentLength, legacyMetadata.ContentLength, "File sizes should match"); + 
Assert.AreEqual(responseMetadata.Headers.ContentType, legacyMetadata.Headers.ContentType, "Content types should match"); + + // Validate the response contains the expected ETag + Assert.IsNotNull(response.ETag, "Response ETag should not be null"); + Assert.AreEqual(response.ETag, responseMetadata.ETag, "Response ETag should match metadata ETag"); + + Console.WriteLine($"UploadWithResponseAsync ETag: {response.ETag}"); + Console.WriteLine($"Legacy upload ETag: {legacyMetadata.ETag}"); + Console.WriteLine($"File size: {fileSize}, Response metadata size: {responseMetadata.ContentLength}"); + } + } + #if ASYNC_AWAIT [TestMethod]