[storage-blob-preview] az storage blob upload/set-tier/copy start: Support Cold tier (#5958)

* `az storage blob upload/set-tier/copy start`: Support Cold tier

* quote

* update version
evelyn-ys committed Mar 14, 2023
1 parent 60929b2 commit 0a22a2d
Showing 362 changed files with 32,934 additions and 191,515 deletions.
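The headline change gives `--tier` a new `Cold` value for `az storage blob upload`, `az storage blob set-tier`, and `az storage blob copy start`. As a rough sketch of what the CLI ultimately forwards to the blob data plane, the following hedged example uses the azure-storage-blob SDK directly; the account URL, credential, and file name are placeholders, and it assumes an SDK version whose StandardBlobTier enum already includes COLD.

    # Illustrative sketch only: upload a block blob straight into the Cold tier.
    # <account>, the credential and data.bin are placeholders.
    from azure.storage.blob import BlobServiceClient, StandardBlobTier

    service = BlobServiceClient(
        account_url="https://<account>.blob.core.windows.net",
        credential="<account-key-or-sas-token>",
    )
    blob = service.get_blob_client(container="mycontainer", blob="data.bin")
    with open("data.bin", "rb") as stream:
        blob.upload_blob(stream, overwrite=True,
                         standard_blob_tier=StandardBlobTier.COLD)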
4 changes: 4 additions & 0 deletions src/storage-blob-preview/HISTORY.rst
@@ -2,6 +2,10 @@
Release History
===============
0.7.0
++++++
* `az storage blob upload/set-tier/copy start`: Support `Cold` for `--tier`

0.6.2
++++++
* `az storage blob filter`: Add `--container-name` to support filter blobs in specific container
22 changes: 15 additions & 7 deletions src/storage-blob-preview/azext_storage_blob_preview/__init__.py
@@ -14,7 +14,7 @@
class StorageCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
from azure.cli.core.commands import CliCommandType
register_resource_type('latest', CUSTOM_DATA_STORAGE_BLOB, '2021-04-10')
register_resource_type('latest', CUSTOM_DATA_STORAGE_BLOB, '2021-12-02')
storage_custom = CliCommandType(operations_tmpl='azure.cli.command_modules.storage.custom#{}')
super(StorageCommandsLoader, self).__init__(cli_ctx=cli_ctx,
resource_type=CUSTOM_DATA_STORAGE_BLOB,
@@ -62,7 +62,9 @@ def register_content_settings_argument(self, settings_class, update, arg_group=N
self.extra('content_disposition', default=None, arg_group=arg_group,
help='Conveys additional information about how to process the response payload, and can also be '
'used to attach additional metadata.')
self.extra('content_cache_control', default=None, help='The cache control string.', arg_group=arg_group)
self.extra('content_cache_control', options_list=['--content-cache-control', '--content-cache'],
default=None, help='The cache control string.',
arg_group=arg_group)
self.extra('content_md5', default=None, help='The content\'s MD5 hash.', arg_group=arg_group)
if update:
self.extra('clear_content_settings', help='If this flag is set, then if any one or more of the '
@@ -113,16 +115,16 @@ def register_precondition_options(self, prefix=''):
help="Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').",
type=get_datetime_type(False))
self.extra('{}if_unmodified_since'.format(prefix), arg_group='Precondition',
help="Commence only if modified since supplied UTC datetime (Y-m-d'T'H:M'Z').",
help="Commence only if unmodified since supplied UTC datetime (Y-m-d'T'H:M'Z').",
type=get_datetime_type(False))
self.extra('{}if_match'.format(prefix), arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform the "
"operation only if the resource's ETag matches the value specified.")
"operation only if the resource's ETag matches the value specified.")
self.extra('{}if_none_match'.format(prefix), arg_group='Precondition',
help="An ETag value, or the wildcard character (*). Specify this header to perform "
"the operation only if the resource's ETag does not match the value specified. Specify the wildcard "
"character (*) to perform the operation only if the resource does not exist, and fail the operation "
"if it does exist.")
"the operation only if the resource's ETag does not match the value specified. Specify the wildcard "
"character (*) to perform the operation only if the resource does not exist, and fail the operation "
"if it does exist.")
self.extra('{}if_tags_match_condition'.format(prefix), arg_group='Precondition',
options_list=['--{}tags-condition'.format(prefix.replace('_', '-'))],
help='Specify a SQL where clause on blob tags to operate only on blobs with a matching value.')
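The precondition options registered above map onto the conditional-request keywords of the underlying SDK. A minimal, hypothetical sketch of the `--if-match` style guard (URL and SAS token are placeholders):

    # Overwrite the blob only if it has not changed since its properties were read.
    from azure.core import MatchConditions
    from azure.storage.blob import BlobClient

    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/mycontainer/report.csv?<sas-token>")
    props = blob.get_blob_properties()
    blob.upload_blob(b"col1,col2\n1,2\n", overwrite=True,
                     etag=props.etag,
                     match_condition=MatchConditions.IfNotModified)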
@@ -225,6 +227,12 @@ def _register_data_plane_account_arguments(self, command_name):
'present, the command will try to query the storage account key using the '
'authenticated Azure account. If a large number of storage commands are executed the '
'API quota may be hit')
command.add_argument('account_url', '--blob-endpoint',
required=False, default=None, arg_group=group_name,
help='Storage data service endpoint. Must be used in conjunction with either '
'storage account key or a SAS token. You can find each service primary endpoint '
'with `az storage account show`. '
'Environment variable: AZURE_STORAGE_SERVICE_ENDPOINT')
command.add_argument('account_key', '--account-key', required=False, default=None,
arg_group=group_name,
help='Storage account key. Must be used in conjunction with storage account name. '
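The new `--blob-endpoint` argument (environment variable AZURE_STORAGE_SERVICE_ENDPOINT) lets a command address a specific blob data-plane endpoint together with an account key or SAS token. A hedged sketch of the equivalent client construction, with placeholder values:

    # Build the service client from an explicit endpoint plus a SAS token, roughly
    # what --blob-endpoint enables. Endpoint and token below are placeholders.
    import os
    from azure.storage.blob import BlobServiceClient

    endpoint = os.environ.get("AZURE_STORAGE_SERVICE_ENDPOINT",
                              "https://<account>.blob.core.windows.net")
    service = BlobServiceClient(account_url=endpoint, credential="<sas-token>")
    for container in service.list_containers():
        print(container.name)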
@@ -51,6 +51,7 @@ def cf_blob_service(cli_ctx, kwargs):
'_blob_service_client#BlobServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_name = kwargs.pop('account_name', None)
account_url = kwargs.pop('account_url', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
@@ -63,8 +64,8 @@ def cf_blob_service(cli_ctx, kwargs):
.format(connection_string, str(err)),
recommendation='Try `az storage account show-connection-string` '
'to get a valid connection string')

account_url = get_account_url(cli_ctx, account_name=account_name, service='blob')
if not account_url:
account_url = get_account_url(cli_ctx, account_name=account_name, service='blob')
credential = account_key or sas_token or token_credential

return t_blob_service(account_url=account_url, credential=credential, **client_kwargs)
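The fallback above (use an explicitly supplied endpoint when present, otherwise derive one from the account name) can be summarised with a small hypothetical helper; the function name and default DNS suffix are illustrative, not part of the extension:

    def resolve_blob_endpoint(account_name=None, account_url=None,
                              suffix="core.windows.net"):
        # An explicit endpoint (e.g. from --blob-endpoint) always wins.
        if account_url:
            return account_url
        # Otherwise fall back to the conventional per-account endpoint.
        return "https://{}.blob.{}".format(account_name, suffix)

    assert (resolve_blob_endpoint(account_url="https://example.local:10000/devstore")
            == "https://example.local:10000/devstore")
    assert (resolve_blob_endpoint(account_name="mystorageacct")
            == "https://mystorageacct.blob.core.windows.net")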
@@ -77,6 +78,7 @@ def cf_blob_client(cli_ctx, kwargs):
# del unused kwargs
kwargs.pop('connection_string')
kwargs.pop('account_name')
kwargs.pop('account_url')
kwargs.pop('container_name')
kwargs.pop('blob_name')
return t_blob_client.from_blob_url(blob_url=kwargs.pop('blob_url'),
@@ -121,12 +123,14 @@ def cf_adls_service(cli_ctx, kwargs):
'_data_lake_service_client#DataLakeServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_key = kwargs.pop('account_key', None)
account_url = kwargs.pop('account_url', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
if connection_string:
return t_adls_service.from_connection_string(connection_string=connection_string)

account_url = get_account_url(cli_ctx, account_name=kwargs.pop('account_name', None), service='dfs')
if not account_url:
account_url = get_account_url(cli_ctx, account_name=kwargs.pop('account_name', None), service='dfs')
credential = account_key or sas_token or token_credential

if account_url and credential:
@@ -145,26 +149,3 @@ def cf_adls_directory(cli_ctx, kwargs):
def cf_adls_file(cli_ctx, kwargs):
return cf_adls_service(cli_ctx, kwargs).get_file_client(file_system=kwargs.pop('file_system_name', None),
file_path=kwargs.pop('path', None))


def cf_share_service(cli_ctx, kwargs):
t_share_service = get_sdk(cli_ctx, ResourceType.DATA_STORAGE_FILESHARE, '_share_service_client#ShareServiceClient')
connection_string = kwargs.pop('connection_string', None)
account_key = kwargs.pop('account_key', None)
token_credential = kwargs.pop('token_credential', None)
sas_token = kwargs.pop('sas_token', None)
account_name = kwargs.pop('account_name', None)
if connection_string:
return t_share_service.from_connection_string(conn_str=connection_string)

account_url = get_account_url(cli_ctx, account_name=account_name, service='file')
credential = account_key or sas_token or token_credential

if account_url and credential:
return t_share_service(account_url=account_url, credential=credential)
return None


def cf_share_client(cli_ctx, kwargs):
return cf_share_service(cli_ctx, kwargs).get_share_client(share=kwargs.pop('share_name'),
snapshot=kwargs.pop('snapshot', None))
41 changes: 26 additions & 15 deletions src/storage-blob-preview/azext_storage_blob_preview/_params.py
@@ -9,8 +9,9 @@

from ._validators import (validate_metadata, get_permission_validator, get_permission_help_string,
validate_blob_type, validate_included_datasets_v2, get_datetime_type,
add_download_progress_callback, add_upload_progress_callback,
validate_storage_data_plane_list, as_user_validator, blob_tier_validator)
add_download_progress_callback, add_progress_callback,
validate_storage_data_plane_list, as_user_validator,
blob_tier_validator, validate_blob_name_for_upload)

from .profiles import CUSTOM_DATA_STORAGE_BLOB

@@ -94,7 +95,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
help='The tier value to set the blob to. For page blob, the tier correlates to the size of the blob '
'and number of allowed IOPS. Possible values are P10, P15, P20, P30, P4, P40, P50, P6, P60, P70, P80 '
'and this is only applicable to page blobs on premium storage accounts; For block blob, possible '
'values are Archive, Cool and Hot. This is only applicable to block blobs on standard storage accounts.'
'values are Archive, Cold, Cool and Hot. This is only applicable to block blobs on standard '
'storage accounts.'
)

rehydrate_priority_type = CLIArgumentType(
@@ -146,7 +148,7 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.argument('blob_name', blob_name_type, options_list=('--destination-blob', '-b'),
help='Name of the destination blob. If the exists, it will be overwritten.')

with self.argument_context('storage blob copy start') as c:
with self.argument_context('storage blob copy start', resource_type=CUSTOM_DATA_STORAGE_BLOB) as c:
from ._validators import validate_source_url

c.register_blob_arguments()
Expand All @@ -161,22 +163,26 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.argument('if_unmodified_since', options_list=['--destination-if-unmodified-since'])
c.argument('if_tags_match_condition', options_list=['--destination-tags-condition'])

c.ignore('blob_url')
c.argument('blob_name', options_list=['--destination-blob', '-b'], required=True,
help='Name of the destination blob. If the exists, it will be overwritten.')
help='Name of the destination blob. If it exists, it will be overwritten.')
c.argument('container_name', options_list=['--destination-container', '-c'], required=True,
help='The container name.')
c.extra('destination_lease', options_list='--destination-lease-id',
help='The lease ID specified for this header must match the lease ID of the estination blob. '
'If the request does not include the lease ID or it is not valid, the operation fails with status '
'code 412 (Precondition Failed).')
'If the request does not include the lease ID or it is not valid, the operation fails with status '
'code 412 (Precondition Failed).')
c.extra('source_lease', options_list='--source-lease-id', arg_group='Copy Source',
help='Specify this to perform the Copy Blob operation only if the lease ID given matches the '
'active lease ID of the source blob.')
'active lease ID of the source blob.')
c.extra('rehydrate_priority', rehydrate_priority_type)
c.extra('requires_sync', arg_type=get_three_state_flag(),
help='Enforce that the service will not return a response until the copy is complete.')
c.extra('tier', tier_type)
c.extra('tags', tags_type)
c.extra('destination_blob_type', arg_type=get_enum_type(['Detect', 'BlockBlob', 'PageBlob', 'AppendBlob']),
help='Defines the type of blob at the destination. '
'Value of "Detect" determines the type based on source blob type.')

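Together, the new `Cold` tier value and `--destination-blob-type` feed into the SDK copy call that `az storage blob copy start` wraps. A hedged sketch of the server-side copy path (URLs and SAS tokens are placeholders; assumes the installed SDK exposes StandardBlobTier.COLD):

    from azure.storage.blob import BlobClient, StandardBlobTier

    dest = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/backup/data.bin?<sas-token>")
    dest.start_copy_from_url(
        "https://<account>.blob.core.windows.net/source/data.bin?<sas-token>",
        standard_blob_tier=StandardBlobTier.COLD)
    print(dest.get_blob_properties().copy.status)  # e.g. 'pending' or 'success'

    # Roughly what `az storage blob set-tier --tier Cold` drives for a block blob.
    dest.set_standard_blob_tier(StandardBlobTier.COLD)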
with self.argument_context('storage blob copy start-batch', arg_group='Copy Source') as c:
from ._validators import get_source_file_or_blob_service_client
@@ -420,26 +426,29 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.register_precondition_options()
c.register_content_settings_argument(t_blob_content_settings, update=False, arg_group="Content Control")

c.extra('blob_name', validator=validate_blob_name_for_upload)

c.argument('file_path', options_list=('--file', '-f'), type=file_type, completer=FilesCompleter(),
help='Path of the file to upload as the blob content.', validator=validate_upload_blob)
c.argument('data', help='The blob data to upload.', required=False, is_preview=True, min_api='2019-02-02')
c.argument('length', type=int, help='Number of bytes to read from the stream. This is optional, but should be '
'supplied for optimal performance. Cooperate with --data.', is_preview=True, min_api='2019-02-02')
'supplied for optimal performance. Cooperate with --data.', is_preview=True,
min_api='2019-02-02')
c.argument('overwrite', arg_type=get_three_state_flag(), arg_group="Additional Flags", is_preview=True,
help='Whether the blob to be uploaded should overwrite the current data. If True, blob upload '
'operation will overwrite the existing data. If set to False, the operation will fail with '
'ResourceExistsError. The exception to the above is with Append blob types: if set to False and the '
'data already exists, an error will not be raised and the data will be appended to the existing '
'blob. If set overwrite=True, then the existing append blob will be deleted, and a new one created. '
'Defaults to False.')
'operation will overwrite the existing data. If set to False, the operation will fail with '
'ResourceExistsError. The exception to the above is with Append blob types: if set to False and the '
'data already exists, an error will not be raised and the data will be appended to the existing '
'blob. If set overwrite=True, then the existing append blob will be deleted, and a new one created. '
'Defaults to False.')
c.argument('max_connections', type=int, arg_group="Additional Flags",
help='Maximum number of parallel connections to use when the blob size exceeds 64MB.')
c.extra('maxsize_condition', type=int, arg_group="Content Control",
help='The max length in bytes permitted for the append blob.')
c.argument('blob_type', options_list=('--type', '-t'), validator=validate_blob_type,
arg_type=get_enum_type(get_blob_types()), arg_group="Additional Flags")
c.argument('validate_content', action='store_true', min_api='2016-05-31', arg_group="Content Control")
c.extra('no_progress', progress_type, validator=add_upload_progress_callback, arg_group="Additional Flags")
c.extra('no_progress', progress_type, validator=add_progress_callback, arg_group="Additional Flags")
c.extra('tier', tier_type, validator=blob_tier_validator, arg_group="Additional Flags")
c.argument('encryption_scope', validator=validate_encryption_scope_client_params,
help='A predefined encryption scope used to encrypt the data on the service.',
@@ -448,6 +457,8 @@ def load_arguments(self, _): # pylint: disable=too-many-locals, too-many-statem
c.extra('tags', arg_type=tags_type, arg_group="Additional Flags")
c.argument('metadata', arg_group="Additional Flags")
c.argument('timeout', arg_group="Additional Flags")
c.extra('connection_timeout', options_list=('--socket-timeout'), type=int,
help='The socket timeout(secs), used by the service to regulate data flow.')

with self.argument_context('storage blob upload-batch') as c:
from .sdkutil import get_blob_types
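For `storage blob upload`, the new `--socket-timeout` value is handed to the SDK client as its connection (socket) timeout, alongside the existing parallelism control. A rough, hypothetical sketch with placeholder values:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/uploads/big.iso?<sas-token>",
        connection_timeout=300)  # seconds per socket operation
    with open("big.iso", "rb") as data:
        blob.upload_blob(data, overwrite=True, max_concurrency=4)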
@@ -181,6 +181,15 @@ def _transform_page_ranges(page_ranges):
return None


def transform_response_with_bytearray(response):
""" transform bytearray to string """
from msrest import Serializer
for item in response:
if response[item] and isinstance(response[item], (bytes, bytearray)):
response[item] = Serializer.serialize_bytearray(response[item])
return response


def transform_blob_list_output(result):
for i, item in enumerate(result):
if isinstance(item, dict) and 'nextMarker' in item:
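The new transform_response_with_bytearray helper base64-encodes bytes/bytearray values (such as a content MD5) so command output can render them as strings. A quick demonstration of the msrest call it relies on, assuming msrest is installed:

    from msrest import Serializer

    print(Serializer.serialize_bytearray(bytearray(b"\x01\x02\x03")))  # -> 'AQID'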