diff --git a/oss2/__init__.py b/oss2/__init__.py index 03d8c35b..131ddfce 100644 --- a/oss2/__init__.py +++ b/oss2/__init__.py @@ -23,9 +23,10 @@ from .utils import http_date, http_to_unixtime, iso8601_to_unixtime, date_to_iso8601, iso8601_to_date -from .models import BUCKET_ACL_PRIVATE, BUCKET_ACL_PUBLIC_READ, BUCKET_ACL_PUBLIC_READ_WRITE +from .models import BUCKET_ACL_PRIVATE, BUCKET_ACL_PUBLIC_READ, BUCKET_ACL_PUBLIC_READ_WRITE, SERVER_SIDE_ENCRYPTION_AES256, SERVER_SIDE_ENCRYPTION_KMS from .models import OBJECT_ACL_DEFAULT, OBJECT_ACL_PRIVATE, OBJECT_ACL_PUBLIC_READ, OBJECT_ACL_PUBLIC_READ_WRITE from .models import BUCKET_STORAGE_CLASS_STANDARD, BUCKET_STORAGE_CLASS_IA, BUCKET_STORAGE_CLASS_ARCHIVE +from .models import BUCKET_VERSIONING_ENABLE, BUCKET_VERSIONING_SUSPEND from .crypto import LocalRsaProvider, AliKMSProvider import logging diff --git a/oss2/api.py b/oss2/api.py index d696d549..bf717481 100644 --- a/oss2/api.py +++ b/oss2/api.py @@ -289,21 +289,31 @@ def __init__(self, auth, endpoint, super(Service, self).__init__(auth, endpoint, False, session, connect_timeout, app_name=app_name) - def list_buckets(self, prefix='', marker='', max_keys=100): + def list_buckets(self, prefix='', marker='', max_keys=100, params=None): """根据前缀罗列用户的Bucket。 :param str prefix: 只罗列Bucket名为该前缀的Bucket,空串表示罗列所有的Bucket :param str marker: 分页标志。首次调用传空串,后续使用返回值中的next_marker :param int max_keys: 每次调用最多返回的Bucket数目 + :param dict params: list操作参数,传入'tag-key','tag-value'对结果进行过滤 :return: 罗列的结果 :rtype: oss2.models.ListBucketsResult """ logger.debug("Start to list buckets, prefix: {0}, marker: {1}, max-keys: {2}".format(prefix, marker, max_keys)) - resp = self._do('GET', '', '', - params={'prefix': prefix, - 'marker': marker, - 'max-keys': str(max_keys)}) + + listParam = {} + listParam['prefix'] = prefix + listParam['marker'] = marker + listParam['max-keys'] = str(max_keys) + + if params is not None: + if 'tag-key' in params: + listParam['tag-key'] = params['tag-key'] + if 'tag-value' in params: + listParam['tag-value'] = params['tag-value'] + + resp = self._do('GET', '', '', params=listParam) logger.debug("List buckets done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return self._parse_result(resp, xml_utils.parse_list_buckets, ListBucketsResult) @@ -350,6 +360,12 @@ class Bucket(_Base): BUCKET_INFO = 'bucketInfo' PROCESS = 'x-oss-process' TAGGING = 'tagging' + ENCRYPTION = 'encryption' + VERSIONS = 'versions' + VERSIONING = 'versioning' + VERSIONID = 'versionId' + RESTORE = 'restore' + OBJECTMETA = 'objectMeta' def __init__(self, auth, endpoint, bucket_name, is_cname=False, @@ -805,7 +821,7 @@ def select_object_to_file(self, key, filename, sql, return result - def head_object(self, key, headers=None): + def head_object(self, key, headers=None, params=None): """获取文件元信息。 HTTP响应的头部包含了文件元信息,可以通过 `RequestResult` 的 `headers` 成员获得。 @@ -820,13 +836,18 @@ def head_object(self, key, headers=None): :param headers: HTTP头部 :type headers: 可以是dict,建议是oss2.CaseInsensitiveDict + :param params: HTTP请求参数,传入versionId,获取指定版本Object元信息 + :type params: 可以是dict,建议是oss2.CaseInsensitiveDict + :return: :class:`HeadObjectResult ` :raises: 如果Bucket不存在或者Object不存在,则抛出 :class:`NotFound ` """ logger.debug("Start to head object, bucket: {0}, key: {1}, headers: {2}".format( self.bucket_name, to_string(key), headers)) - resp = self.__do_object('HEAD', key, headers=headers) + + resp = self.__do_object('HEAD', key, headers=headers, params=params) + logger.debug("Head object done, req_id: {0}, status_code: 
{1}".format(resp.request_id, resp.status)) return HeadObjectResult(resp) @@ -866,7 +887,7 @@ def create_select_object_meta(self, key, select_meta_params=None): resp = self.__do_object('POST', key, data=body, headers=headers, params=params) return GetSelectObjectMetaResult(resp) - def get_object_meta(self, key): + def get_object_meta(self, key, params=None): """获取文件基本元信息,包括该Object的ETag、Size(文件大小)、LastModified,并不返回其内容。 HTTP响应的头部包含了文件基本元信息,可以通过 `GetObjectMetaResult` 的 `last_modified`,`content_length`,`etag` 成员获得。 @@ -878,7 +899,14 @@ def get_object_meta(self, key): :raises: 如果文件不存在,则抛出 :class:`NoSuchKey ` ;还可能抛出其他异常 """ logger.debug("Start to get object metadata, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key))) - resp = self.__do_object('GET', key, params={'objectMeta': ''}) + + if params is None: + params = dict() + + if Bucket.OBJECTMETA not in params: + params[Bucket.OBJECTMETA] = '' + + resp = self.__do_object('GET', key, params=params) logger.debug("Get object metadata done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return GetObjectMetaResult(resp) @@ -903,7 +931,7 @@ def object_exists(self, key): return True - def copy_object(self, source_bucket_name, source_key, target_key, headers=None): + def copy_object(self, source_bucket_name, source_key, target_key, headers=None, params=None): """拷贝一个文件到当前Bucket。 :param str source_bucket_name: 源Bucket名 @@ -915,8 +943,14 @@ def copy_object(self, source_bucket_name, source_key, target_key, headers=None): :return: :class:`PutObjectResult ` """ + headers = http.CaseInsensitiveDict(headers) - headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + '/' + urlquote(source_key, '') + + if params and Bucket.VERSIONID in params: + headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + \ + '/' + urlquote(source_key, '') + '?versionId=' + params[Bucket.VERSIONID] + else: + headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + '/' + urlquote(source_key, '') logger.debug( "Start to copy object, source bucket: {0}, source key: {1}, bucket: {2}, key: {3}, headers: {4}".format( @@ -941,7 +975,7 @@ def update_object_meta(self, key, headers): logger.debug("Start to update object metadata, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key))) return self.copy_object(self.bucket_name, key, key, headers=headers) - def delete_object(self, key): + def delete_object(self, key, params=None): """删除一个文件。 :param str key: 文件名 @@ -949,11 +983,11 @@ def delete_object(self, key): :return: :class:`RequestResult ` """ logger.warn("Start to delete object, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key))) - resp = self.__do_object('DELETE', key) + resp = self.__do_object('DELETE', key, params=params) logger.debug("Delete object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return RequestResult(resp) - def restore_object(self, key): + def restore_object(self, key, params=None): """restore an object 如果是第一次针对该object调用接口,返回RequestResult.status = 202; 如果已经成功调用过restore接口,且服务端仍处于解冻中,抛异常RestoreAlreadyInProgress(status=409) @@ -976,32 +1010,54 @@ def restore_object(self, key): :return: :class:`RequestResult ` """ logger.debug("Start to restore object, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key))) - resp = self.__do_object('POST', key, params={'restore': ''}) + + if params is None: + params = dict() + + if Bucket.RESTORE not in params: + params[Bucket.RESTORE] = '' + + resp = self.__do_object('POST', key, params=params) logger.debug("Restore object done, 
req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
         return RequestResult(resp)
 
-    def put_object_acl(self, key, permission):
+    def put_object_acl(self, key, permission, params=None):
         """设置文件的ACL。
 
         :param str key: 文件名
         :param str permission: 可以是oss2.OBJECT_ACL_DEFAULT、oss2.OBJECT_ACL_PRIVATE、oss2.OBJECT_ACL_PUBLIC_READ或
             oss2.OBJECT_ACL_PUBLIC_READ_WRITE。
+        :param dict params: 请求参数
 
         :return: :class:`RequestResult `
         """
         logger.debug("Start to put object acl, bucket: {0}, key: {1}, acl: {2}".format(
             self.bucket_name, to_string(key), permission))
-        resp = self.__do_object('PUT', key, params={'acl': ''}, headers={OSS_OBJECT_ACL: permission})
+
+        if params is None:
+            params = dict()
+
+        if Bucket.ACL not in params:
+            params[Bucket.ACL] = ''
+
+        resp = self.__do_object('PUT', key, params=params, headers={OSS_OBJECT_ACL: permission})
         logger.debug("Put object acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
         return RequestResult(resp)
 
-    def get_object_acl(self, key):
+    def get_object_acl(self, key, params=None):
         """获取文件的ACL。
 
         :return: :class:`GetObjectAclResult `
         """
         logger.debug("Start to get object acl, bucket: {0}, key: {1}".format(self.bucket_name, to_string(key)))
-        resp = self.__do_object('GET', key, params={'acl': ''})
+
+        if params is None:
+            params = dict()
+
+        if Bucket.ACL not in params:
+            params[Bucket.ACL] = ''
+
+        resp = self.__do_object('GET', key, params=params)
         logger.debug("Get object acl done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
         return self._parse_result(resp, xml_utils.parse_get_object_acl, GetObjectAclResult)
@@ -1017,7 +1073,9 @@ def batch_delete_objects(self, key_list):
             raise ClientError('key_list should not be empty')
 
         logger.debug("Start to delete objects, bucket: {0}, keys: {1}".format(self.bucket_name, key_list))
+
         data = xml_utils.to_batch_delete_objects_request(key_list, False)
+
         resp = self.__do_object('POST', '',
                                 data=data,
                                 params={'delete': '', 'encoding-type': 'url'},
@@ -1025,6 +1083,28 @@
         logger.debug("Delete objects done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
         return self._parse_result(resp, xml_utils.parse_batch_delete_objects, BatchDeleteObjectsResult)
 
+    def delete_object_versions(self, keylist_versions):
+        """批量删除带版本信息的文件。待删除文件列表不能为空。
+
+        :param keylist_versions: 带版本信息的文件列表,不能为空
+        :type keylist_versions: :class:`BatchDeleteObjectVersionList`
+
+        :return: :class:`BatchDeleteObjectsResult `
+        """
+        if not keylist_versions:
+            raise ClientError('keylist_versions should not be empty')
+
+        logger.debug("Start to delete object versions, bucket: {0}".format(self.bucket_name))
+
+        data = xml_utils.to_batch_delete_objects_version_request(keylist_versions, False)
+
+        resp = self.__do_object('POST', '',
+                                data=data,
+                                params={'delete': '', 'encoding-type': 'url'},
+                                headers={'Content-MD5': utils.content_md5(data)})
+        logger.debug("Delete object versions done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status))
+        return self._parse_result(resp, xml_utils.parse_batch_delete_objects, BatchDeleteObjectsResult)
+
     def init_multipart_upload(self, key, headers=None):
         """初始化分片上传。
@@ -1164,7 +1244,7 @@ def list_multipart_uploads(self,
     def upload_part_copy(self, source_bucket_name, source_key, byte_range,
                          target_key, target_upload_id, target_part_number,
-                         headers=None):
+                         headers=None, params=None):
         """分片拷贝。把一个已有文件的一部分或整体拷贝成目标文件的一个分片。
 
         :param byte_range: 指定待拷贝内容在源文件里的范围。参见 :ref:`byte_range`
@@ -1175,7 +1255,13 @@ def
upload_part_copy(self, source_bucket_name, source_key, byte_range, :return: :class:`PutObjectResult ` """ headers = http.CaseInsensitiveDict(headers) - headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + '/' + urlquote(source_key, '') + + if params and Bucket.VERSIONID in params: + headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + \ + '/' + urlquote(source_key, '') + '?versionId=' + params[Bucket.VERSIONID] + else: + headers[OSS_COPY_OBJECT_SOURCE] = '/' + source_bucket_name + '/' + urlquote(source_key, '') + range_string = _make_range_string(byte_range) if range_string: @@ -1183,15 +1269,17 @@ def upload_part_copy(self, source_bucket_name, source_key, byte_range, logger.debug("Start to upload part copy, source bucket: {0}, source key: {1}, bucket: {2}, key: {3}, range" ": {4}, upload id: {5}, part_number: {6}, headers: {7}".format(source_bucket_name, - to_string(source_key), - self.bucket_name, - to_string(target_key), - byte_range, target_upload_id, - target_part_number, headers)) + to_string(source_key),self.bucket_name,to_string(target_key), + byte_range, target_upload_id,target_part_number, headers)) + + if params is None: + params = dict() + + params['uploadId'] = target_upload_id + params['partNumber'] = str(target_part_number) + resp = self.__do_object('PUT', target_key, - params={'uploadId': target_upload_id, - 'partNumber': str(target_part_number)}, - headers=headers) + params=params,headers=headers) logger.debug("Upload part copy done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return PutObjectResult(resp) @@ -1226,16 +1314,18 @@ def put_symlink(self, target_key, symlink_key, headers=None): """ headers = headers or {} headers[OSS_SYMLINK_TARGET] = urlquote(target_key, '') + logger.debug("Start to put symlink, bucket: {0}, target_key: {1}, symlink_key: {2}, headers: {3}".format( self.bucket_name, to_string(target_key), to_string(symlink_key), headers)) resp = self.__do_object('PUT', symlink_key, headers=headers, params={Bucket.SYMLINK: ''}) logger.debug("Put symlink done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return RequestResult(resp) - def get_symlink(self, symlink_key): + def get_symlink(self, symlink_key, params=None): """获取符号连接文件的目标文件。 :param str symlink_key: 符号连接类文件 + :param dict params: 请求参数 :return: :class:`GetSymlinkResult ` @@ -1243,7 +1333,14 @@ def get_symlink(self, symlink_key): """ logger.debug( "Start to get symlink, bucket: {0}, symlink_key: {1}".format(self.bucket_name, to_string(symlink_key))) - resp = self.__do_object('GET', symlink_key, params={Bucket.SYMLINK: ''}) + + if params is None: + params = dict() + + if Bucket.SYMLINK not in params: + params[Bucket.SYMLINK] = '' + + resp = self.__do_object('GET', symlink_key, params=params) logger.debug("Get symlink done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return GetSymlinkResult(resp) @@ -1597,13 +1694,13 @@ def process_object(self, key, process): logger.debug("Process object done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return ProcessObjectResult(resp) - def put_object_tagging(self, key, tagging, headers=None): + def put_object_tagging(self, key, tagging, headers=None, params=None): """ :param str key: 上传tagging的对象名称,不能为空。 :param tagging: tag 标签内容 - :type tagging: :class:`ObjectTagging ` 对象 + :type tagging: :class:`ObjectTagging ` 对象 :return: :class:`RequestResult ` """ @@ -1613,33 +1710,196 @@ def put_object_tagging(self, key, tagging, headers=None): if headers is not None: 
headers = http.CaseInsensitiveDict(headers) - data = self.__convert_data(ObjectTagging, xml_utils.to_put_object_tagging, tagging) - resp = self.__do_object('PUT', key, data=data, params={Bucket.TAGGING: ''}, headers=headers) + if params is None: + params = dict() + + if Bucket.TAGGING not in params: + params[Bucket.TAGGING] = "" + + data = self.__convert_data(Tagging, xml_utils.to_put_tagging, tagging) + resp = self.__do_object('PUT', key, data=data, params=params, headers=headers) return RequestResult(resp) - def get_object_tagging(self, key): + def get_object_tagging(self, key, params=None): """ :param str key: 要获取tagging的对象名称 - :return: :class:`ObjectTagging ` + :param dict params: 请求参数 + :return: :class:`GetTaggingResult ` """ - logger.debug("Start to get object tagging, bucket: {0}, key: {1}".format( - self.bucket_name, to_string(key))) - resp = self.__do_object('GET', key, params={Bucket.TAGGING: ''}) + logger.debug("Start to get object tagging, bucket: {0}, key: {1} params: {2}".format( + self.bucket_name, to_string(key), str(params))) + + if params is None: + params = dict() + + if Bucket.TAGGING not in params: + params[Bucket.TAGGING] = "" + + resp = self.__do_object('GET', key, params=params) - return self._parse_result(resp, xml_utils.parse_get_object_tagging, GetObjectTaggingResult) + return self._parse_result(resp, xml_utils.parse_get_tagging, GetTaggingResult) - def delete_object_tagging(self, key): + def delete_object_tagging(self, key, params=None): """ :param str key: 要删除tagging的对象名称 :return: :class:`RequestResult ` """ logger.debug("Start to delete object tagging, bucket: {0}, key: {1}".format( self.bucket_name, to_string(key))) - resp = self.__do_object('DELETE', key, params={Bucket.TAGGING: ''}) + + if params is None: + params = dict() + + if Bucket.TAGGING not in params: + params[Bucket.TAGGING] = "" + + resp = self.__do_object('DELETE', key, params=params) + logger.debug("Delete object tagging done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) return RequestResult(resp) + + def put_bucket_encryption(self, rule): + """设置bucket加密配置。 + + :param rule: :class:` ServerSideEncryptionRule` 对象 + """ + data = self.__convert_data(ServerSideEncryptionRule, xml_utils.to_put_bucket_encryption, rule) + + logger.debug("Start to put bucket encryption, bucket: {0}, rule: {1}".format(self.bucket_name, data)) + resp = self.__do_bucket('PUT', data=data, params={Bucket.ENCRYPTION: ""}) + logger.debug("Put bucket encryption done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) + return RequestResult(resp) + + def get_bucket_encryption(self): + """获取bucket加密配置。 + + :return: :class:`GetServerSideEncryptionResult ` + + :raises: 如果没有设置Bucket encryption,则抛出 :class:`NoSuchServerSideEncryptionRule ` + """ + logger.debug("Start to get bucket encryption, bucket: {0}".format(self.bucket_name)) + resp = self.__do_bucket('GET', params={Bucket.ENCRYPTION: ''}) + logger.debug("Get bucket encryption done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) + return self._parse_result(resp, xml_utils.parse_get_bucket_encryption, GetServerSideEncryptionResult) + + def delete_bucket_encryption(self): + """删除Bucket加密配置。如果Bucket加密没有设置,也返回成功。""" + logger.debug("Start to delete bucket encryption, bucket: {0}".format(self.bucket_name)) + resp = self.__do_bucket('DELETE', params={Bucket.ENCRYPTION: ''}) + logger.debug("Delete bucket encryption done, req_id: {0}, status_code: {1}".format(resp.request_id, resp.status)) + return RequestResult(resp) + + def 
put_bucket_tagging(self, tagging, headers=None):
+        """设置bucket的tag配置。
+
+        :param tagging: tag 标签内容
+        :type tagging: :class:`Tagging ` 对象
+
+        :return: :class:`RequestResult `
+        """
+        logger.debug("Start to put bucket tagging, bucket: {0}, tagging: {1}".format(
+            self.bucket_name, tagging))
+
+        if headers is not None:
+            headers = http.CaseInsensitiveDict(headers)
+
+        data = self.__convert_data(Tagging, xml_utils.to_put_tagging, tagging)
+        resp = self.__do_bucket('PUT', data=data, params={Bucket.TAGGING: ''}, headers=headers)
+
+        return RequestResult(resp)
+
+    def get_bucket_tagging(self):
+        """获取bucket的tag配置。
+
+        :return: :class:`GetTaggingResult`
+        """
+        logger.debug("Start to get bucket tagging, bucket: {0}".format(
+            self.bucket_name))
+
+        resp = self.__do_bucket('GET', params={Bucket.TAGGING: ''})
+
+        return self._parse_result(resp, xml_utils.parse_get_tagging, GetTaggingResult)
+
+    def delete_bucket_tagging(self):
+        """删除bucket的tag配置。
+
+        :return: :class:`RequestResult `
+        """
+        logger.debug("Start to delete bucket tagging, bucket: {0}".format(
+            self.bucket_name))
+
+        resp = self.__do_bucket('DELETE', params={Bucket.TAGGING: ''})
+
+        logger.debug("Delete bucket tagging done, req_id: {0}, status_code: {1}".format(
+            resp.request_id, resp.status))
+        return RequestResult(resp)
+
+    def list_object_versions(self, prefix='', delimiter='', key_marker='',
+                             max_keys=100, versionid_marker=''):
+        """根据前缀罗列Bucket里的文件的版本。
+
+        :param str prefix: 只罗列文件名为该前缀的文件
+        :param str delimiter: 分隔符。可以用来模拟目录
+        :param str key_marker: 分页标志。首次调用传空串,后续使用返回值的next_key_marker
+        :param int max_keys: 最多返回文件的个数,文件和目录的和不能超过该值
+        :param str versionid_marker: 设定结果从key_marker对应对象的versionid_marker之后按新旧版本排序开始返回,
+            该版本不会出现在返回的结果当中
+
+        :return: :class:`ListObjectVersionsResult `
+        """
+        logger.debug(
+            "Start to list object versions, bucket: {0}, prefix: {1}, delimiter: {2}, "
+            "key_marker: {3}, versionid_marker: {4}, max-keys: {5}".format(
+                self.bucket_name, to_string(prefix), delimiter, to_string(key_marker),
+                to_string(versionid_marker), max_keys))
+
+        resp = self.__do_bucket('GET',
+                                params={'prefix': prefix,
+                                        'delimiter': delimiter,
+                                        'key-marker': key_marker,
+                                        'version-id-marker': versionid_marker,
+                                        'max-keys': str(max_keys),
+                                        'encoding-type': 'url',
+                                        Bucket.VERSIONS: ''})
+        logger.debug("List object versions done, req_id: {0}, status_code: {1}"
+                     .format(resp.request_id, resp.status))
+
+        return self._parse_result(resp, xml_utils.parse_list_object_versions, ListObjectVersionsResult)
+
+    def put_bucket_versioning(self, config, headers=None):
+        """设置bucket的多版本状态。
+
+        :param config: :class:`BucketVersioningConfig ` 对象,status可取值为
+            oss2.BUCKET_VERSIONING_ENABLE('Enabled')或oss2.BUCKET_VERSIONING_SUSPEND('Suspended')
+
+        :return: :class:`RequestResult `
+        """
+        logger.debug("Start to put bucket versioning, bucket: {0}".format(
+            self.bucket_name))
+
+        if headers is not None:
+            headers = http.CaseInsensitiveDict(headers)
+
+        data = self.__convert_data(BucketVersioningConfig, xml_utils.to_put_bucket_versioning, config)
+        resp = self.__do_bucket('PUT', data=data, params={Bucket.VERSIONING: ''}, headers=headers)
+
+        return RequestResult(resp)
+
+    def get_bucket_versioning(self):
+        """获取bucket的多版本状态。
+
+        :return: :class:`GetBucketVersioningResult`
+        """
+        logger.debug("Start to get bucket versioning, bucket: {0}".format(
+            self.bucket_name))
+
+        resp = self.__do_bucket('GET', params={Bucket.VERSIONING: ''})
+
+        return self._parse_result(resp, xml_utils.parse_get_bucket_versioning, GetBucketVersioningResult)
 
     def _get_bucket_config(self, config):
"""获得Bucket某项配置,具体哪种配置由 `config` 指定。该接口直接返回 `RequestResult` 对象。 diff --git a/oss2/auth.py b/oss2/auth.py index aaf6fe13..56c4ed01 100644 --- a/oss2/auth.py +++ b/oss2/auth.py @@ -72,7 +72,8 @@ class Auth(AuthBase): 'response-expires', 'response-content-disposition', 'cors', 'lifecycle', 'restore', 'qos', 'referer', 'stat', 'bucketInfo', 'append', 'position', 'security-token', 'live', 'comp', 'status', 'vod', 'startTime', 'endTime', 'x-oss-process', - 'symlink', 'callback', 'callback-var', 'tagging'] + 'symlink', 'callback', 'callback-var', 'tagging', 'encryption', 'versions', + 'versioning', 'versionId'] ) def _sign_request(self, req, bucket_name, key): diff --git a/oss2/exceptions.py b/oss2/exceptions.py index 4b5ec493..6c60f2d8 100644 --- a/oss2/exceptions.py +++ b/oss2/exceptions.py @@ -230,6 +230,14 @@ class AccessDenied(ServerError): status = 403 code = 'AccessDenied' +class NoSuchServerSideEncryptionRule(NotFound): + status = 404 + code = 'NoSuchServerSideEncryptionRule' + +class InvalidEncryptionAlgorithmError(ServerError): + status = 400 + code = 'InvalidEncryptionAlgorithmError' + class SelectOperationFailed(ServerError): code = 'SelectOperationFailed' def __init__(self, status, code, message): diff --git a/oss2/models.py b/oss2/models.py index 2b8d62ca..690dfc94 100644 --- a/oss2/models.py +++ b/oss2/models.py @@ -59,6 +59,10 @@ def __init__(self, resp): #: 请求ID,用于跟踪一个OSS请求。提交工单时,最后能够提供请求ID self.request_id = resp.request_id + self.versionid = _hget(self.headers, 'x-oss-version-id') + + self.delete_marker = _hget(self.headers, 'x-oss-delete-marker', bool) + class HeadObjectResult(RequestResult): def __init__(self, resp): super(HeadObjectResult, self).__init__(resp) @@ -202,6 +206,27 @@ def __init__(self, resp): #: 下次追加写的偏移 self.next_position = _hget(resp.headers, OSS_NEXT_APPEND_POSITION, int) +class BatchDeleteObjectVersion(object): + def __init__(self, key=None, versionid=None): + self.key = key or '' + self.versionid = versionid or '' + +class BatchDeleteObjectVersionList(object): + def __init__(self, object_version_list=None): + self.object_version_list = object_version_list or [] + + def append(self, object_version): + self.object_version_list.append(object_version) + + def len(self): + return len(self.object_version_list) + +class BatchDeleteObjectVersionResult(object): + def __init__(self, key, versionid=None, delete_marker=None, delete_marker_versionid=None): + self.key = key + self.versionid = versionid or '' + self.delete_marker = delete_marker or False + self.delete_marker_versionid = delete_marker_versionid or '' class BatchDeleteObjectsResult(RequestResult): def __init__(self, resp): @@ -210,6 +235,9 @@ def __init__(self, resp): #: 已经删除的文件名列表 self.deleted_keys = [] + #:已经删除的带版本信息的文件信息列表 + self.delete_versions = [] + class InitMultipartUploadResult(RequestResult): def __init__(self, resp): @@ -429,7 +457,8 @@ def __init__(self, display_name, owner_id): class BucketInfo(object): def __init__(self, name=None, owner=None, location=None, storage_class=None, intranet_endpoint=None, - extranet_endpoint=None, creation_date=None, acl=None): + extranet_endpoint=None, creation_date=None, acl=None, bucket_encryption_rule=None, + versioning_status=None): self.name = name self.owner = owner self.location = location @@ -439,6 +468,9 @@ def __init__(self, name=None, owner=None, location=None, storage_class=None, int self.creation_date = creation_date self.acl = acl + self.bucket_encryption_rule = bucket_encryption_rule + self.versioning_status = versioning_status + class 
GetBucketStatResult(RequestResult, BucketStat): def __init__(self, resp): @@ -555,7 +587,7 @@ class LifecycleRule(object): :param storage_transitions: 存储类型转换规则 :type storage_transitions: :class:`StorageTransition` :param tagging: object tagging 规则 - :type tagging: :class:`ObjectTagging` + :type tagging: :class:`Tagging` """ ENABLED = 'Enabled' @@ -887,11 +919,11 @@ def __init__(self, resp): _MAX_OBJECT_TAGGING_KEY_LENGTH=128 _MAX_OBJECT_TAGGING_VALUE_LENGTH=256 -class ObjectTagging(object): +class Tagging(object): def __init__(self, tagging_rules=None): - self.tag_set = tagging_rules or ObjectTaggingRule() + self.tag_set = tagging_rules or TaggingRule() def __str__(self): @@ -905,7 +937,7 @@ def __str__(self): return tag_str -class ObjectTaggingRule(object): +class TaggingRule(object): def __init__(self): self.tagging_rule = dict() @@ -913,13 +945,13 @@ def __init__(self): def add(self, key, value): if key is None or key == '': - raise ClientError("ObjectTagging key should not be empty") + raise ClientError("Tagging key should not be empty") if len(key) > _MAX_OBJECT_TAGGING_KEY_LENGTH: - raise ClientError("ObjectTagging key is too long") + raise ClientError("Tagging key is too long") if len(value) > _MAX_OBJECT_TAGGING_VALUE_LENGTH: - raise ClientError("ObjectTagging value is too long") + raise ClientError("Tagging value is too long") self.tagging_rule[key] = value @@ -945,8 +977,93 @@ def to_query_string(self): return query_string -class GetObjectTaggingResult(RequestResult, ObjectTagging): +class GetTaggingResult(RequestResult, Tagging): + + def __init__(self, resp): + RequestResult.__init__(self, resp) + Tagging.__init__(self) + +SERVER_SIDE_ENCRYPTION_AES256 = 'AES256' +SERVER_SIDE_ENCRYPTION_KMS = 'KMS' + +class ServerSideEncryptionRule(object): + + def __init__(self, ssealgorithm=None, kmsmasterkeyid=None): + + self.ssealgorithm = ssealgorithm + self.kmsmasterkeyid = kmsmasterkeyid + +class GetServerSideEncryptionResult(RequestResult, ServerSideEncryptionRule): def __init__(self, resp): RequestResult.__init__(self, resp) - ObjectTagging.__init__(self) + ServerSideEncryptionRule.__init__(self) + +class ListObjectVersionsResult(RequestResult): + def __init__(self, resp): + super(ListObjectVersionsResult, self).__init__(resp) + + #: True表示还有更多的文件可以罗列;False表示已经列举完毕。 + self.is_truncated = False + + #: 本次使用的分页标记符 + self.key_marker = '' + + #: 下一次罗列的分页标记符,即,可以作为 :func:`list_object_versions ` 的 `key_marker` 参数。 + self.next_key_marker = '' + + #: 本次使用的versionid分页标记符 + self.versionid_marker = '' + + #: 下一次罗列的versionid分页标记符,即,可以作为 :func:`list_object_versions ` 的 `versionid_marker` 参数。 + self.next_versionid_marker = '' + + self.name = '' + + self.owner = '' + + self.prefix = '' + + self.max_keys = '' + + self.delimiter = '' + + #: 本次罗列得到的delete marker列表。其中元素的类型为 :class:`DeleteMarkerInfo` 。 + self.delete_marker = [] + + #: 本次罗列得到的文件version列表。其中元素的类型为 :class:`ObjectVersionInfo` 。 + self.versions = [] + + self.common_prefix = [] + +class DeleteMarkerInfo(object): + def __init__(self): + self.key = '' + self.versionid = '' + self.is_latest = False + self.last_modified = '' + self.owner = Owner('', '') + +class ObjectVersionInfo(object): + def __init__(self): + self.key = '' + self.versionid = '' + self.is_latest = False + self.last_modified = '' + self.owner = Owner('', '') + self.type = '' + self.storage_class = '' + self.size = '' + self.etag = '' + +BUCKET_VERSIONING_ENABLE = 'Enabled' +BUCKET_VERSIONING_SUSPEND = 'Suspended' + +class BucketVersioningConfig(object): + def __init__(self, 
status=None): + self.status = status + +class GetBucketVersioningResult(RequestResult, BucketVersioningConfig): + def __init__(self, resp): + RequestResult.__init__(self,resp) + BucketVersioningConfig.__init__(self) diff --git a/oss2/resumable.py b/oss2/resumable.py index 4a574417..cf4d880e 100644 --- a/oss2/resumable.py +++ b/oss2/resumable.py @@ -89,7 +89,8 @@ def resumable_download(bucket, key, filename, part_size=None, progress_callback=None, num_threads=None, - store=None): + store=None, + params=None): """断点下载。 实现的方法是: @@ -120,6 +121,8 @@ def resumable_download(bucket, key, filename, :param store: 用来保存断点信息的持久存储,可以指定断点信息所在的目录。 :type store: `ResumableDownloadStore` + :param dict params: 指定下载参数,可以传入versionId下载指定版本文件 + :raises: 如果OSS文件不存在,则抛出 :class:`NotFound ` ;也有可能抛出其他因下载文件而产生的异常。 """ @@ -129,7 +132,7 @@ def resumable_download(bucket, key, filename, multiget_threshold = defaults.get(multiget_threshold, defaults.multiget_threshold) if isinstance(bucket, Bucket): - result = bucket.head_object(key) + result = bucket.head_object(key, params=params) logger.debug("The size of object to download is: {0}, multiget_threshold: {1}".format(result.content_length, multiget_threshold)) if result.content_length >= multiget_threshold: @@ -137,12 +140,13 @@ def resumable_download(bucket, key, filename, part_size=part_size, progress_callback=progress_callback, num_threads=num_threads, - store=store) + store=store, + params=params) downloader.download(result.server_crc) else: - bucket.get_object_to_file(key, filename, progress_callback=progress_callback) + bucket.get_object_to_file(key, filename, progress_callback=progress_callback, params=params) else: - bucket.get_object_to_file(key, filename, progress_callback=progress_callback) + bucket.get_object_to_file(key, filename, progress_callback=progress_callback, params=params) _MAX_MULTIGET_PART_COUNT = 100 @@ -248,7 +252,8 @@ def __init__(self, bucket, key, filename, objectInfo, part_size=None, store=None, progress_callback=None, - num_threads=None): + num_threads=None, + params=None): super(_ResumableDownloader, self).__init__(bucket, key, filename, objectInfo.size, store or ResumableDownloadStore(), progress_callback=progress_callback) @@ -261,6 +266,7 @@ def __init__(self, bucket, key, filename, objectInfo, self.__num_threads = defaults.get(num_threads, defaults.multiget_num_threads) self.__finished_parts = None self.__finished_size = None + self.__params = params # protect record self.__lock = threading.Lock() @@ -311,7 +317,7 @@ def __download_part(self, part): headers = {IF_MATCH : self.objectInfo.etag, IF_UNMODIFIED_SINCE : utils.http_date(self.objectInfo.mtime)} - result = self.bucket.get_object(self.key, byte_range=(part.start, part.end - 1), headers=headers) + result = self.bucket.get_object(self.key, byte_range=(part.start, part.end - 1), headers=headers, params=self.__params) utils.copyfileobj_and_verify(result, f, part.end - part.start, request_id=result.request_id) part.part_crc = result.client_crc diff --git a/oss2/xml_utils.py b/oss2/xml_utils.py index d723d9b7..81672482 100644 --- a/oss2/xml_utils.py +++ b/oss2/xml_utils.py @@ -30,8 +30,13 @@ AccessControlList, AbortMultipartUpload, StorageTransition, - ObjectTagging, - ObjectTaggingRule) + Tagging, + TaggingRule, + ServerSideEncryptionRule, + ListObjectVersionsResult, + ObjectVersionInfo, + DeleteMarkerInfo, + BatchDeleteObjectVersionResult) from .compat import urlunquote, to_unicode, to_string from .utils import iso8601_to_unixtime, date_to_iso8601, iso8601_to_date @@ -201,7 +206,25 @@ 
def parse_batch_delete_objects(result, body): url_encoded = _is_url_encoding(root) for deleted_node in root.findall('Deleted'): - result.deleted_keys.append(_find_object(deleted_node, 'Key', url_encoded)) + key = _find_object(deleted_node, 'Key', url_encoded) + + result.deleted_keys.append(key) + + versionid_node = deleted_node.find('VersionId') + versionid = None + if versionid_node is not None: + versionid = _find_tag(deleted_node, 'VersionId') + + delete_marker_node = deleted_node.find('DeleteMarker') + delete_marker = False + if delete_marker_node is not None: + delete_marker = _find_bool(deleted_node, 'DeleteMarker') + + marker_versionid_node = deleted_node.find('DeleteMarkerVersionId') + delete_marker_versionid = '' + if marker_versionid_node is not None: + delete_marker_versionid = _find_tag(deleted_node, 'DeleteMarkerVersionId') + result.delete_versions.append(BatchDeleteObjectVersionResult(key, versionid, delete_marker, delete_marker_versionid)) return result @@ -259,8 +282,37 @@ def parse_get_bucket_info(result, body): result.owner = Owner(_find_tag(root, 'Bucket/Owner/DisplayName'), _find_tag(root, 'Bucket/Owner/ID')) result.acl = AccessControlList(_find_tag(root, 'Bucket/AccessControlList/Grant')) + server_side_encryption = root.find("Bucket/ServerSideEncryptionRule") + + result.bucket_encryption_rule = _parse_bucket_encryption_info(server_side_encryption) + + bucket_versioning = root.find('Bucket/Versioning') + + if bucket_versioning is None or bucket_versioning.text is None: + result.versioning_status = None + else: + result.versioning_status = to_string(bucket_versioning.text) + return result +def _parse_bucket_encryption_info(node): + + rule = ServerSideEncryptionRule() + + rule.ssealgorithm = _find_tag(node,"SSEAlgorithm") + + if rule.ssealgorithm == "None": + rule.kmsmasterkeyid = None + rule.ssealgorithm = None + return rule + + kmsnode = node.find("KMSMasterKeyID") + if kmsnode is None or kmsnode.text is None: + rule.kmsmasterkeyid = '' + else: + rule.kmsmasterkeyid = to_string(kmsnode.text) + + return rule def parse_get_bucket_referer(result, body): root = ElementTree.fromstring(body) @@ -427,15 +479,15 @@ def parse_lifecycle_storage_transitions(storage_transition_nodes): def parse_lifecycle_object_taggings(lifecycle_tagging_nodes): if lifecycle_tagging_nodes is None: - return ObjectTagging() + return Tagging() - tagging_rule = ObjectTaggingRule() + tagging_rule = TaggingRule() for tag_node in lifecycle_tagging_nodes: key = _find_tag(tag_node, 'Key') value = _find_tag(tag_node, 'Value') tagging_rule.add(key, value) - return ObjectTagging(tagging_rule) + return Tagging(tagging_rule) def parse_get_bucket_lifecycle(result, body): root = ElementTree.fromstring(body) @@ -500,6 +552,22 @@ def to_batch_delete_objects_request(keys, quiet): return _node_to_string(root_node) +def to_batch_delete_objects_version_request(objectVersions, quiet): + + root_node = ElementTree.Element('Delete') + + _add_text_child(root_node, 'Quiet', str(quiet).lower()) + + objectVersionList = objectVersions.object_version_list + + for ver in objectVersionList: + object_node = ElementTree.SubElement(root_node, 'Object') + _add_text_child(object_node, 'Key', ver.key) + if ver.versionid != '': + _add_text_child(object_node, 'VersionId', ver.versionid) + + return _node_to_string(root_node) + def to_put_bucket_config(bucket_config): root = ElementTree.Element('CreateBucketConfiguration') @@ -766,7 +834,7 @@ def to_get_select_json_object_meta(json_meta_param): return _node_to_string(root) -def 
to_put_object_tagging(object_tagging): +def to_put_tagging(object_tagging): root = ElementTree.Element("Tagging") tag_set = ElementTree.SubElement(root, "TagSet") @@ -777,7 +845,7 @@ def to_put_object_tagging(object_tagging): return _node_to_string(root) -def parse_get_object_tagging(result, body): +def parse_get_tagging(result, body): root = ElementTree.fromstring(body) url_encoded = _is_url_encoding(root) tagset_node = root.find('TagSet') @@ -785,7 +853,7 @@ def parse_get_object_tagging(result, body): if tagset_node is None: return result - tagging_rules = ObjectTaggingRule() + tagging_rules = TaggingRule() for tag_node in tagset_node.findall('Tag'): key = _find_object(tag_node, 'Key', url_encoded) value = _find_object(tag_node, 'Value', url_encoded) @@ -794,3 +862,90 @@ def parse_get_object_tagging(result, body): result.tag_set = tagging_rules return result +def to_put_bucket_encryption(rule): + root = ElementTree.Element("ServerSideEncryptionRule") + apply_node = ElementTree.SubElement(root, "ApplyServerSideEncryptionByDefault") + + _add_text_child(apply_node, "SSEAlgorithm", rule.ssealgorithm) + + if rule.kmsmasterkeyid is not None: + _add_text_child(apply_node, "KMSMasterKeyID", rule.kmsmasterkeyid) + + return _node_to_string(root) + +def parse_get_bucket_encryption(result, body): + root = ElementTree.fromstring(body) + apply_node = root.find('ApplyServerSideEncryptionByDefault') + + result.ssealgorithm = _find_tag(apply_node, "SSEAlgorithm") + + kmsnode = apply_node.find('KMSMasterKeyID') + if kmsnode is None or kmsnode.text is None: + result.kmsmasterkeyid = '' + else: + result.kmsmasterkeyid = to_string(kmsnode.text) + + return result + +def parse_list_object_versions(result, body): + root = ElementTree.fromstring(body) + url_encoded = _is_url_encoding(root) + result.is_truncated = _find_bool(root, 'IsTruncated') + if result.is_truncated: + result.next_key_marker = _find_object(root, 'NextKeyMarker', url_encoded) + result.next_versionid_marker = _find_object(root, "NextVersionIdMarker", url_encoded) + + result.name = _find_tag(root, "Name") + result.prefix = _find_object(root, "Prefix", url_encoded) + result.key_marker = _find_object(root, "KeyMarker", url_encoded) + result.versionid_marker = _find_object(root, "VersionIdMarker", url_encoded) + result.max_keys = _find_int(root, "MaxKeys") + result.delimiter = _find_object(root, "Delimiter", url_encoded) + + for delete_marker in root.findall("DeleteMarker"): + deleteInfo = DeleteMarkerInfo() + deleteInfo.key = _find_object(delete_marker, "Key", url_encoded) + deleteInfo.versionid = _find_tag(delete_marker, "VersionId") + deleteInfo.is_latest = _find_bool(delete_marker, "IsLatest") + deleteInfo.last_modified = iso8601_to_unixtime(_find_tag(delete_marker, "LastModified")) + deleteInfo.owner.id = _find_tag(delete_marker, "Owner/ID") + deleteInfo.owner.display_name = _find_tag(delete_marker, "Owner/DisplayName") + result.delete_marker.append(deleteInfo) + + for version in root.findall("Version"): + versionInfo = ObjectVersionInfo() + versionInfo.key = _find_object(version, "Key", url_encoded) + versionInfo.versionid = _find_tag(version, "VersionId") + versionInfo.is_latest = _find_bool(version, "IsLatest") + versionInfo.last_modified = iso8601_to_unixtime(_find_tag(version, "LastModified")) + versionInfo.owner.id = _find_tag(version, "Owner/ID") + versionInfo.owner.display_name = _find_tag(version, "Owner/DisplayName") + versionInfo.type = _find_tag(version, "Type") + versionInfo.storage_class = _find_tag(version, "StorageClass") + 
versionInfo.size = _find_int(version, "Size") + versionInfo.etag = _find_tag(version, "ETag").strip('"') + + result.versions.append(versionInfo) + + for common_prefix in root.findall("CommonPrefixes"): + result.common_prefix.append(_find_object(common_prefix, "Prefix", url_encoded)) + + return result + +def to_put_bucket_versioning(bucket_version_config): + root = ElementTree.Element('VersioningConfiguration') + + _add_text_child(root, 'Status', str(bucket_version_config.status)) + + return _node_to_string(root) + +def parse_get_bucket_versioning(result, body): + root = ElementTree.fromstring(body) + + status_node = root.find("Status") + if status_node is None: + result.status = None + else: + result.status = _find_tag(root, "Status") + + return result diff --git a/tests/test_bucket.py b/tests/test_bucket.py index 21765e70..70b9a55d 100644 --- a/tests/test_bucket.py +++ b/tests/test_bucket.py @@ -63,7 +63,6 @@ def test_acl(self): bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) bucket.create_bucket(oss2.BUCKET_ACL_PUBLIC_READ) - bucket.create_bucket() self.retry_assert(lambda: bucket.get_bucket_acl().acl == oss2.BUCKET_ACL_PUBLIC_READ) @@ -365,7 +364,7 @@ def test_lifecycle_storage_transitions_date(self): self.bucket.delete_bucket_lifecycle() def test_lifecycle_object_tagging(self): - from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, ObjectTagging, ObjectTaggingRule + from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, Tagging, TaggingRule rule = LifecycleRule(random_string(10), 'aaaaaaaaaaa/', status=LifecycleRule.ENABLED, @@ -373,9 +372,9 @@ def test_lifecycle_object_tagging(self): rule.storage_transitions = [StorageTransition(created_before_date=datetime.date(2016, 12, 20), storage_class=oss2.BUCKET_STORAGE_CLASS_IA)] - tagging_rule = ObjectTaggingRule() + tagging_rule = TaggingRule() tagging_rule.add('test_key', 'test_value') - tagging = ObjectTagging(tagging_rule) + tagging = Tagging(tagging_rule) rule.tagging = tagging @@ -448,7 +447,7 @@ def test_lifecycle_all(self): def test_lifecycle_object_tagging_exceptions_wrong_key(self): - from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, ObjectTagging, ObjectTaggingRule + from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, Tagging, TaggingRule rule = LifecycleRule(random_string(10), '中文前缀/', status=LifecycleRule.ENABLED, @@ -456,7 +455,7 @@ def test_lifecycle_object_tagging_exceptions_wrong_key(self): rule.storage_transitions = [StorageTransition(created_before_date=datetime.date(2016, 12, 20), storage_class=oss2.BUCKET_STORAGE_CLASS_IA)] - tagging = ObjectTagging() + tagging = Tagging() tagging.tag_set.tagging_rule[129*'a'] = 'test' @@ -483,7 +482,7 @@ def test_lifecycle_object_tagging_exceptions_wrong_key(self): def test_lifecycle_object_tagging_exceptions_wrong_value(self): - from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, ObjectTagging, ObjectTaggingRule + from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, Tagging, TaggingRule rule = LifecycleRule(random_string(10), '中文前缀/', status=LifecycleRule.ENABLED, @@ -491,7 +490,7 @@ def test_lifecycle_object_tagging_exceptions_wrong_value(self): rule.storage_transitions = [StorageTransition(created_before_date=datetime.date(2016, 12, 20), storage_class=oss2.BUCKET_STORAGE_CLASS_IA)] - tagging = ObjectTagging() 
+ tagging = Tagging() tagging.tag_set.tagging_rule['test'] = 257*'a' @@ -516,7 +515,7 @@ def test_lifecycle_object_tagging_exceptions_wrong_value(self): pass def test_lifecycle_object_tagging_exceptions_too_much_rules(self): - from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, ObjectTagging, ObjectTaggingRule + from oss2.models import LifecycleExpiration, LifecycleRule, BucketLifecycle, StorageTransition, Tagging, TaggingRule rule = LifecycleRule(random_string(10), '中文前缀/', status=LifecycleRule.ENABLED, @@ -524,7 +523,7 @@ def test_lifecycle_object_tagging_exceptions_too_much_rules(self): rule.storage_transitions = [StorageTransition(created_before_date=datetime.date(2016, 12, 20), storage_class=oss2.BUCKET_STORAGE_CLASS_IA)] - tagging = ObjectTagging() + tagging = Tagging() for i in range(1, 20): key='test_key_'+str(i) value='test_value_'+str(i) @@ -610,6 +609,8 @@ def test_bucket_info(self): self.assertTrue(len(result.extranet_endpoint) > 0) self.assertTrue(len(result.owner.id) > 0) self.assertEqual(result.acl.grant, oss2.BUCKET_ACL_PRIVATE) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, None) bucket.delete_bucket() wait_meta_sync() @@ -631,6 +632,353 @@ def test_location(self): result = self.bucket.get_bucket_location() self.assertTrue(result.location) + def test_bucket_encryption_wrong(self): + + from oss2.models import ServerSideEncryptionRule + + self.assertRaises(oss2.exceptions.NoSuchServerSideEncryptionRule, self.bucket.get_bucket_encryption) + + rule = ServerSideEncryptionRule() + rule.ssealgorithm = oss2.SERVER_SIDE_ENCRYPTION_AES256 + rule.kmsmasterkeyid = "test" + + self.assertRaises(oss2.exceptions.InvalidArgument, + self.bucket.put_bucket_encryption, rule) + + rule.ssealgorithm = "random" + rule.kmsmasterkeyid = "" + self.assertRaises(oss2.exceptions.InvalidEncryptionAlgorithmError, + self.bucket.put_bucket_encryption, rule) + + rule.ssealgorithm = oss2.SERVER_SIDE_ENCRYPTION_KMS + rule.kmsmasterkeyid = "" + result = self.bucket.put_bucket_encryption(rule) + self.assertEqual(int(result.status)/100, 2) + + rule.kmsmasterkeyid = None + result = self.bucket.put_bucket_encryption(rule) + self.assertEqual(int(result.status)/100, 2) + + result = self.bucket.get_bucket_encryption() + self.assertEqual(result.ssealgorithm, oss2.SERVER_SIDE_ENCRYPTION_KMS) + self.assertEqual(result.kmsmasterkeyid, "") + + #self.bucket.put_object("test", "test") + + result = self.bucket.delete_bucket_encryption() + + rule.ssealgorithm = oss2.SERVER_SIDE_ENCRYPTION_KMS + rule.kmsmasterkeyid = "test_wrong" + + result = self.bucket.put_bucket_encryption(rule) + self.assertEqual(int(result.status)/100, 2) + + ''' + self.assertRaises(oss2.exceptions.InvalidArgument, + self.bucket.put_object, "test", "test") + ''' + + result = self.bucket.get_bucket_encryption() + self.assertEqual(result.ssealgorithm, oss2.SERVER_SIDE_ENCRYPTION_KMS) + self.assertEqual(result.kmsmasterkeyid, "test_wrong") + + result = self.bucket.delete_bucket_encryption() + + self.assertEqual(int(result.status), 204) + + def test_bucket_encryption(self): + + from oss2.models import ServerSideEncryptionRule + + rule = ServerSideEncryptionRule() + + # AES256 + rule.ssealgorithm = oss2.SERVER_SIDE_ENCRYPTION_AES256 + rule.kmsmasterkeyid = "" + + result = self.bucket.put_bucket_encryption(rule) + self.assertEqual(int(result.status)/100, 2) + + wait_meta_sync() + + result = self.bucket.get_bucket_info() + 
self.assertEqual(result.bucket_encryption_rule.ssealgorithm, 'AES256') + + result = self.bucket.put_object("test", "test") + self.assertEqual(int(result.status)/100, 2) + + result = self.bucket.get_object("test") + self.assertEqual(int(result.status)/100, 2) + + self.assertEqual("test", result.read()) + + result = self.bucket.delete_bucket_encryption() + self.assertEqual(int(result.status)/100, 2) + + # KMS + rule.ssealgorithm = oss2.SERVER_SIDE_ENCRYPTION_KMS + rule.kmsmasterkeyid = "" + + result = self.bucket.put_bucket_encryption(rule) + self.assertEqual(int(result.status)/100, 2) + + wait_meta_sync() + + result = self.bucket.get_bucket_info() + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, 'KMS') + self.assertEqual(result.bucket_encryption_rule.kmsmasterkeyid, '') + + result = self.bucket.delete_bucket_encryption() + self.assertEqual(int(result.status)/100, 2) + + def test_bucket_versioning_wrong(self): + + from oss2.models import BucketVersioningConfig + + config = BucketVersioningConfig() + + self.assertRaises(oss2.exceptions.MalformedXml, + self.bucket.put_bucket_versioning, config) + + config.status = "Disabled" + self.assertRaises(oss2.exceptions.MalformedXml, + self.bucket.put_bucket_versioning, config) + + def test_bucket_versioning(self): + + from oss2.models import BucketVersioningConfig + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + self.assertRaises(oss2.exceptions.NoSuchBucket, bucket.get_bucket_info) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + result = bucket.get_bucket_versioning() + + self.assertTrue(result.status is None) + + config = BucketVersioningConfig() + + config.status = oss2.BUCKET_VERSIONING_ENABLE + result = bucket.put_bucket_versioning(config) + self.assertEqual(int(result.status)/100, 2) + + wait_meta_sync() + + result = bucket.get_bucket_info() + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, 'Enabled') + + config.status = oss2.BUCKET_VERSIONING_SUSPEND + result = bucket.put_bucket_versioning(config) + self.assertEqual(int(result.status)/100, 2) + + bucket.delete_bucket() + + def test_list_object_versions_wrong(self): + from oss2.models import BucketVersioningConfig + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + self.assertRaises(oss2.exceptions.NoSuchBucket, bucket.get_bucket_info) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + + config.status = "Enabled" + result = bucket.put_bucket_versioning(config) + self.assertEqual(int(result.status)/100, 2) + + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + versionid1 = result.versionid + + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + versionid2 = result.versionid + + self.assertRaises(oss2.exceptions.InvalidArgument, + bucket.list_object_versions, prefix=1025*'a') + + self.assertRaises(oss2.exceptions.InvalidArgument, + bucket.list_object_versions, key_marker=1025*'a') + + self.assertRaises(oss2.exceptions.InvalidArgument, + bucket.list_object_versions, versionid_marker=1025*'a') + + self.assertRaises(oss2.exceptions.InvalidArgument, + bucket.list_object_versions, delimiter=1025*'a') + + self.assertRaises(oss2.exceptions.InvalidArgument, + bucket.list_object_versions, max_keys=1001) + + result = 
bucket.list_object_versions() + self.assertEqual(len(result.versions), 2) + self.assertEqual(result.versions[0].versionid, versionid2) + self.assertEqual(result.versions[1].versionid, versionid1) + self.assertEqual(len(result.delete_marker), 0) + + bucket.delete_object("test", {"versionId": versionid1}) + bucket.delete_object("test", {"versionId": versionid2}) + + bucket.delete_bucket() + + def test_list_object_versions_truncated(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + for i in range(0, 1024): + bucket.put_object("test", "test"+str(i)) + + loop_time = 0 + next_key_marker = '' + next_version_marker = '' + delete_versions = [] + + while True: + + result = bucket.list_object_versions(key_marker=next_key_marker, versionid_marker=next_version_marker) + self.assertTrue(len(result.versions) > 0) + self.assertTrue(len(result.delete_marker) == 0) + version_list = BatchDeleteObjectVersionList() + for item in result.versions: + version_list.append(BatchDeleteObjectVersion(item.key, item.versionid)) + delete_versions.append(version_list) + + if result.is_truncated: + next_key_marker = result.next_key_marker + next_version_marker = result.next_versionid_marker + else: + break + + loop_time += 1 + if loop_time > 12: + self.assertFalse(True, "loop too much times, break") + + for item in delete_versions: + result = bucket.delete_object_versions(item) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get a exception") + + def test_bucket_tagging(self): + + from oss2.models import Tagging, TaggingRule + + rule = TaggingRule() + self.assertRaises(oss2.exceptions.ClientError, rule.add, 129*'a', 'test') + self.assertRaises(oss2.exceptions.ClientError, rule.add, 'test', 257*'a') + self.assertRaises(oss2.exceptions.ClientError, rule.add, None, 'test') + self.assertRaises(oss2.exceptions.ClientError, rule.add, '', 'test') + self.assertRaises(KeyError, rule.delete, 'not_exist') + + tagging = Tagging() + tagging.tag_set.tagging_rule['%@abc'] = 'abc' + tagging.tag_set.tagging_rule['123++'] = '++123%' + + try: + result = self.bucket.put_bucket_tagging(tagging) + except oss2.exceptions.OssError: + self.assertFalse(True, 'should not get exception') + pass + + result = self.bucket.get_bucket_tagging() + tag_rule = result.tag_set.tagging_rule + self.assertEqual(2, len(tag_rule)) + self.assertEqual('abc', tag_rule['%@abc']) + self.assertEqual('++123%', tag_rule['123++']) + + result = self.bucket.delete_bucket_tagging() + self.assertEqual(int(result.status)/100, 2) + + def test_list_bucket_with_tagging(self): + + from oss2.models import Tagging, TaggingRule + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + service = oss2.Service(auth, OSS_ENDPOINT) + + bucket_name1 = random_string(63).lower() + bucket1 = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name1) + + bucket1.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + 
bucket_name2 = random_string(63).lower() + bucket2 = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name2) + + bucket2.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + rule = TaggingRule() + rule.add('tagging_key_test_test1', 'value1') + rule.add('tagging_key_test1', 'value1') + + tagging1 = Tagging(rule) + try: + result = bucket1.put_bucket_tagging(tagging1) + except oss2.exceptions.OssError: + self.assertFalse(True, 'should not get exception') + pass + + rule = TaggingRule() + rule.add('tagging_key2', 'value2') + + tagging2 = Tagging(rule) + try: + result = bucket2.put_bucket_tagging(tagging2) + except oss2.exceptions.OssError: + self.assertFalse(True, 'should not get exception') + pass + + params = {} + params['tag-key'] = 'tagging_key_test_test1' + params['tag-value'] = 'value1' + + result = service.list_buckets(params=params) + self.assertEqual(1, len(result.buckets)) + + result = service.list_buckets() + self.assertTrue(len(result.buckets) > 1) + + bucket1.delete_bucket() + bucket2.delete_bucket() + def test_malformed_xml(self): xml_input = '''''' self.assertRaises(oss2.exceptions.MalformedXml, self.bucket.put_bucket_lifecycle, xml_input) diff --git a/tests/test_download.py b/tests/test_download.py index d171a4c6..f00ad1c2 100644 --- a/tests/test_download.py +++ b/tests/test_download.py @@ -609,6 +609,54 @@ def test_resumable_incomplete_download(self): except: self.assertTrue(False) + def test_resumable_download_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + self.version_bucket = bucket + + content_small = random_bytes(5*1024) + result = bucket.put_object("object_small", content_small) + + version_small = result.versionid + filename_small = self.random_filename() + result = oss2.resumable_download(bucket, "object_small", filename_small) + + self.assertFileContent(filename_small, content_small) + + content_big = random_bytes(30*1024*1024) + result = bucket.put_object("object_big", content_big) + + version_big = result.versionid + filename_big = self.random_filename() + result = oss2.resumable_download(bucket, "object_big", filename_big) + + self.assertFileContent(filename_big, content_big) + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion("object_small", version_small)) + version_list.append(BatchDeleteObjectVersion("object_big", version_big)) + + result = bucket.delete_object_versions(version_list) + self.assertTrue(len(result.delete_versions) == 2) + + bucket.delete_bucket() if __name__ == '__main__': unittest.main() diff --git a/tests/test_multipart.py b/tests/test_multipart.py index b620af89..e05679d4 100644 --- a/tests/test_multipart.py +++ b/tests/test_multipart.py @@ -207,5 +207,136 @@ def test_multipart_with_object_tagging(self): result = self.bucket.delete_object_tagging(key) + def test_multipart_with_versionging(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = 
oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + + key = self.random_key() + content = random_bytes(128 * 1024) + + parts = [] + upload_id = bucket.init_multipart_upload(key).upload_id + + headers = {'Content-Md5': oss2.utils.content_md5(content)} + + result = bucket.upload_part(key, upload_id, 1, content, headers=headers) + parts.append(oss2.models.PartInfo(1, result.etag, size=len(content), part_crc=result.crc)) + self.assertTrue(result.crc is not None) + + complete_result = bucket.complete_multipart_upload(key, upload_id, parts) + + object_crc = calc_obj_crc_from_parts(parts) + self.assertTrue(complete_result.crc is not None) + self.assertEqual(object_crc, result.crc) + self.assertTrue(complete_result.versionid is not None) + + bucket.delete_object(key, params={'versionId': complete_result.versionid}) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get a exception") + + def test_upload_part_copy_with_versioning(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + src_object = self.random_key() + dst_object = self.random_key() + + content = random_bytes(200 * 1024) + content2 = random_bytes(200 * 1024) + + # 上传源文件 version1 + put_result1 = bucket.put_object(src_object, content) + self.assertTrue(put_result1.versionid is not None) + versionid1 = put_result1.versionid + + # 上传源文件 version2 + put_result2 = bucket.put_object(src_object, content2) + self.assertTrue(put_result2.versionid is not None) + versionid2 = put_result2.versionid + + # part copy到目标文件 + parts = [] + upload_id = bucket.init_multipart_upload(dst_object).upload_id + + result = bucket.upload_part_copy(bucket_name, src_object, + (0, 100 * 1024 - 1), dst_object, upload_id, 1) + parts.append(oss2.models.PartInfo(1, result.etag)) + + result = bucket.upload_part_copy(bucket_name, src_object, + (100*1024, None), dst_object, upload_id, 2, params={'versionId': versionid1}) + + parts.append(oss2.models.PartInfo(2, result.etag)) + + complete_result = bucket.complete_multipart_upload(dst_object, upload_id, parts) + + # 验证 + content_got = bucket.get_object(dst_object).read() + self.assertEqual(len(content_got), len(content)) + self.assertTrue(content_got != content) + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key=src_object, versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key=src_object, versionid=versionid2)) + 
version_list.append(BatchDeleteObjectVersion(key=dst_object, versionid=complete_result.versionid)) + + self.assertEqual(3, version_list.len()) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get an exception") + if __name__ == '__main__': unittest.main() diff --git a/tests/test_object.py b/tests/test_object.py index 1821371c..5a63d143 100644 --- a/tests/test_object.py +++ b/tests/test_object.py @@ -10,7 +10,7 @@ NotFound, NoSuchKey, Conflict, PositionNotEqualToLength, ObjectNotAppendable) from oss2.compat import is_py2, is_py33 -from oss2.models import ObjectTagging, ObjectTaggingRule +from oss2.models import Tagging, TaggingRule from oss2.headers import OSS_OBJECT_TAGGING, OSS_OBJECT_TAGGING_COPY_DIRECTIVE from oss2.compat import urlunquote, urlquote @@ -304,6 +304,15 @@ def test_get_object_iterator(self): self.assertEqual(len(content), len(content_got)) self.assertEqual(content, content_got) + result = self.bucket.get_object(key, params={'versionId': 'null'}) + content_got = b'' + + for chunk in result: + content_got += chunk + + self.assertEqual(len(content), len(content_got)) + self.assertEqual(content, content_got) + def test_query_parameter(self): key = self.random_key() content = random_bytes(1024 * 1024) @@ -366,6 +375,12 @@ def test_batch_delete_objects(self): result = self.bucket.batch_delete_objects(object_list) self.assertEqual(sorted(object_list), sorted(result.deleted_keys)) + keys = [] + for i in range(0, 5): + keys.append(result.delete_versions[i].key) + self.assertEqual(sorted(object_list), sorted(keys)) + self.assertEqual(5, len(result.deleted_keys)) + self.assertEqual(5, len(result.delete_versions)) for object in object_list: self.assertTrue(not self.bucket.object_exists(object)) @@ -897,7 +912,7 @@ def test_process_object(self): def test_object_tagging_client_error(self): - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertRaises(oss2.exceptions.ClientError, rule.add, 129*'a', 'test') self.assertRaises(oss2.exceptions.ClientError, rule.add, 'test', 257*'a') self.assertRaises(oss2.exceptions.ClientError, rule.add, None, 'test') @@ -906,7 +921,7 @@ def test_object_tagging_wrong_key(self): - tagging = ObjectTagging() + tagging = Tagging() tagging.tag_set.tagging_rule[129*'a'] = 'test' key = self.random_key('.dat') @@ -945,7 +960,7 @@ def test_object_tagging_wrong_value(self): - tagging = ObjectTagging() + tagging = Tagging() tagging.tag_set.tagging_rule['test'] = 257*'a' @@ -979,7 +994,7 @@ def test_object_tagging_wrong_rule_num(self): key = self.random_key('.dat') result = self.bucket.put_object(key, "test") - tagging = ObjectTagging(None) + tagging = Tagging(None) for i in range(0,12): key='test_'+str(i) value='test_'+str(i) @@ -1002,7 +1017,7 @@ def test_object_tagging(self): except oss2.exceptions.OssError: self.assertFalse(True, "should get exception") - rule = ObjectTaggingRule() + rule = TaggingRule() key1=128*'a' value1=256*'a' rule.add(key1, value1) @@ -1011,7 +1026,7 @@ value2='_/' rule.add(key2, value2) - tagging = ObjectTagging(rule) + tagging = Tagging(rule) result = self.bucket.put_object_tagging(key, tagging) self.assertTrue(200, result.status) @@ -1108,7 +1123,7 @@ def test_append_object_with_tagging(self): self.bucket.delete_object(key) - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertEqual('', rule.to_query_string()) 
rule.add('key1', 'value1') @@ -1161,7 +1176,7 @@ def test_append_object_with_tagging_wrong_num(self): # append object with wrong tagging kv num, but not in # first call, it will be ignored - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertEqual('', rule.to_query_string()) for i in range(0, 15): @@ -1211,7 +1226,7 @@ def test_append_object_with_tagging_wrong_num(self): # append object with wrong tagging kv num in first call, # it will be fail - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertEqual('', rule.to_query_string()) for i in range(0, 15): @@ -1235,7 +1250,7 @@ def test_put_symlink_with_tagging(self): self.bucket.put_object(key, content) - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertEqual('', rule.to_query_string()) rule.add('key1', 'value1') @@ -1267,7 +1282,7 @@ def test_put_symlink_with_tagging_with_wrong_num(self): content = 'hello' self.bucket.put_object(key, content) - rule = ObjectTaggingRule() + rule = TaggingRule() self.assertEqual('', rule.to_query_string()) for i in range(0, 15): @@ -1312,6 +1327,808 @@ def test_put_symlink_with_tagging_with_wrong_num(self): self.assertEqual(head_result.headers['x-oss-meta-key1'], 'value1') self.assertEqual(head_result.headers['x-oss-meta-key2'], 'value2') + def test_put_symlink_with_version(self): + + from oss2.models import BucketVersioningConfig + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + result = bucket.put_object("test", "test") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid = result.versionid + object_version = result.versionid + + params = dict() + params['versionId'] = result.versionid + + result = bucket.put_symlink("test", "test_link") + self.assertEqual(int(result.status)/100, 2) + + params['versionId'] = result.versionid + result = bucket.get_symlink("test_link", params=params) + self.assertEqual(int(result.status)/100, 2) + + result = bucket.delete_object("test_link") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != '') + delete_marker_versionid = result.versionid + + try: + result = bucket.get_symlink("test_link") + except oss2.exceptions.NotFound: + pass + + self.assertEqual(result.delete_marker, True) + + result = bucket.delete_object("test_link", params=params) + self.assertEqual(int(result.status)/100, 2) + + params['versionId'] = delete_marker_versionid + result = bucket.delete_object("test_link", params=params) + self.assertEqual(int(result.status)/100, 2) + + params['versionId'] = object_version + result = bucket.delete_object("test", params=params) + self.assertEqual(int(result.status)/100, 2) + + bucket.delete_bucket() + + def test_put_object_tagging_with_versioning(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import Tagging + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + 
result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + self.assertTrue(versionid1 != versionid2) + + tagging = Tagging() + + tagging.tag_set.add('k1', 'v1') + tagging.tag_set.add('+++', ':::') + + # put object tagging without version + result = bucket.put_object_tagging("test", tagging) + self.assertEqual(int(result.status)/100, 2) + + params = dict() + params['versionId'] = versionid2 + + result = bucket.get_object_tagging("test", params=params) + self.assertEqual(int(result.status)/100, 2) + + rule = result.tag_set.tagging_rule + + self.assertEqual('v1', rule['k1']) + self.assertEqual(':::', rule['+++']) + + tagging = Tagging() + + tagging.tag_set.add('k2', 'v2') + tagging.tag_set.add(':::', '+++') + + params['versionId'] = versionid1 + + # put object tagging with version + result = bucket.put_object_tagging("test", tagging, params=params) + self.assertEqual(int(result.status)/100, 2) + + result = bucket.get_object_tagging("test", params=params) + self.assertEqual(int(result.status)/100, 2) + + rule = result.tag_set.tagging_rule + + self.assertEqual('v2', rule['k2']) + self.assertEqual('+++', rule[':::']) + + result = bucket.delete_object_tagging("test", params=params) + self.assertEqual(int(result.status)/100, 2) + + params['versionId'] = versionid2 + + result = bucket.delete_object_tagging("test", params=params) + self.assertEqual(int(result.status)/100, 2) + + + result = bucket.delete_object("test") + self.assertEqual(int(result.status)/100, 2) + delete_marker_versionid = result.versionid + self.assertTrue(delete_marker_versionid is not None) + + params['versionId'] = versionid2 + + try: + result = bucket.get_object("test", params=params) + self.assertFalse(True) + except: + pass + + # delete 'DELETE' mark + bucket.delete_object("test", params={'versionId': delete_marker_versionid}) + + bucket.delete_object("test", params={'versionId': versionid1}) + bucket.delete_object("test", params={'versionId': versionid2}) + + bucket.delete_bucket() + + def test_batch_delete_same_object_multi_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put version 2 + result = bucket.put_object("test", "test2") + 
self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid2)) + + self.assertEqual(2, version_list.len()) + + result = bucket.delete_object_versions(version_list) + + self.assertTrue(len(result.delete_versions) == 2) + self.assertTrue(result.delete_versions[0].versionid == versionid1 + or result.delete_versions[0].versionid == versionid2) + self.assertTrue(result.delete_versions[1].versionid == versionid1 + or result.delete_versions[1].versionid == versionid2) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get an exception") + + def test_batch_delete_objects_multi_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + # put "foo" version 1 + result = bucket.put_object("foo", "bar") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid_foo = result.versionid + + result = bucket.list_object_versions() + self.assertTrue(result.is_truncated == False) + self.assertTrue(result.key_marker == '') + self.assertTrue(result.versionid_marker == '') + self.assertTrue(result.next_key_marker == '') + self.assertTrue(result.next_versionid_marker == '') + self.assertTrue(result.name == bucket_name) + self.assertTrue(result.prefix == '') + self.assertTrue(result.delimiter == '') + self.assertTrue(len(result.delete_marker) == 0) + self.assertTrue(len(result.versions) == 3) + self.assertTrue(result.versions[0].key == "foo") + self.assertTrue(result.versions[1].key == "test") + + # batch delete without version + key_list = [] + key_list.append("foo") + key_list.append("test") + + result = bucket.batch_delete_objects(key_list) + + self.assertTrue(len(result.delete_versions) == 2) + self.assertTrue(len(result.deleted_keys) == 2) + self.assertTrue(result.delete_versions[0].delete_marker == True) + self.assertTrue(result.delete_versions[1].delete_marker == True) + + result = bucket.list_object_versions() + self.assertTrue(result.is_truncated == False) + self.assertTrue(result.key_marker == '') + self.assertTrue(result.versionid_marker == '') + self.assertTrue(result.next_key_marker == '') + self.assertTrue(result.next_versionid_marker == '') + self.assertTrue(result.prefix == 
'') + self.assertTrue(result.delimiter == '') + self.assertTrue(len(result.delete_marker) == 2) + self.assertTrue(len(result.versions) == 3) + self.assertTrue(result.versions[0].key == "foo") + self.assertTrue(result.versions[1].key == "test") + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(result.delete_marker[0].key, result.delete_marker[0].versionid)) + version_list.append(BatchDeleteObjectVersion(result.delete_marker[1].key, result.delete_marker[1].versionid)) + version_list.append(BatchDeleteObjectVersion(result.versions[0].key, result.versions[0].versionid)) + version_list.append(BatchDeleteObjectVersion(result.versions[1].key, result.versions[1].versionid)) + version_list.append(BatchDeleteObjectVersion(result.versions[2].key, result.versions[2].versionid)) + + result = bucket.delete_object_versions(version_list) + + self.assertTrue(len(result.delete_versions) == 5) + + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get an exception") + + def test_get_object_meta_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + try: + result_exception = bucket.get_object_meta("test", params={"versionId": None}) + self.assertFalse(True, "should get an exception") + except: + pass + + try: + result_normal = bucket.get_object_meta("test", params={"versionId": ''}) + self.assertFalse(True, "should get an exception") + except: + pass + + + result1 = bucket.get_object_meta("test", params={"versionId": versionid1}) + result2 = bucket.get_object_meta("test", params={"versionId": versionid2}) + + self.assertTrue(result1.versionid == versionid1) + self.assertTrue(result2.versionid == versionid2) + self.assertTrue(result1.content_length == result2.content_length) + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid2)) + + self.assertEqual(2, version_list.len()) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get an exception") + + def test_object_acl_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, 
OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + # put "test" + result = bucket.put_object("test_no_version", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid is None) + + result = bucket.get_object_acl("test_no_version") + + bucket.delete_object("test_no_version") + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + try: + result_exception = bucket.put_object_acl("test", oss2.OBJECT_ACL_DEFAULT, + params={'versionId': 'IllegalVersion'}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.put_object_acl("test", oss2.OBJECT_ACL_DEFAULT, + params={'versionId': ''}) + self.assertFalse(True, "should get a exception") + except: + pass + + + try: + result_exception = bucket.get_object_acl("test", params={'versionId': 'IllegalVersion'}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.get_object_acl("test", params={'versionId': ''}) + self.assertFalse(True, "should get a exception") + except: + pass + + result = bucket.get_object_acl("test", params={"versionId": versionid2}) + self.assertEqual(result.acl, oss2.OBJECT_ACL_DEFAULT) + + result = bucket.put_object_acl("test", oss2.OBJECT_ACL_PUBLIC_READ, params={"versionId": versionid2}) + self.assertEqual(int(result.status)/100, 2) + + result = bucket.get_object_acl("test", params={"versionId": versionid2}) + self.assertEqual(result.acl, oss2.OBJECT_ACL_PUBLIC_READ) + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid2)) + + self.assertTrue(version_list.len(), 2) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get a exception") + + def test_head_object_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + # put "test" version 1 + result = bucket.put_object("test_no_version", "test") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid is None) + + try: + result_exception = bucket.head_object("test_no_version", params={"versionId": "IllegalVersion"}) + self.assertFalse(True, "should get a exception") + except: + pass + + bucket.delete_object("test_no_version") + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + 
wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + headers = {} + headers['x-oss-storage-class'] = oss2.BUCKET_STORAGE_CLASS_ARCHIVE + result = bucket.put_object("test", "test2", headers=headers) + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + try: + result_exception = bucket.head_object("test", params={"versionId": None}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_normal = bucket.head_object("test", params={"versionId": ''}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.head_object("test_no_version", params={"versionId": "IllegalVersion"}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.head_object("test", + params={"versionId": "CAEQJhiBgIDVmYrr1RYiIGE5ZmUxMjViZDIwYjQwY2I5ODA1YWIxNmIyNDNjYjk4"}) + self.assertFalse(True, "should get a exception") + except: + pass + + + result1 = bucket.head_object("test", params={"versionId": versionid1}) + + result2 = bucket.head_object("test", params={"versionId": versionid2}) + + result3 = bucket.head_object("test") + self.assertEqual(result2.versionid, result3.versionid) + + self.assertEqual(result1.object_type, result2.object_type) + self.assertEqual(result1.content_type, result2.content_type) + self.assertEqual(result1.content_length, result2.content_length) + self.assertTrue(result1.etag != result2.etag) + + delete_result = bucket.delete_object("test") + delete_marker_versionid = delete_result.versionid + + try: + result3 = bucket.head_object("test", params={'versionId': delete_marker_versionid}) + self.assertFalse(True, "should get a exception, but not") + except: + pass + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid2)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=delete_marker_versionid)) + + self.assertTrue(version_list.len(), 3) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get a exception") + + def test_copy_object_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + 
self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + result = bucket.put_object("test", "test2") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + try: + result_exception = bucket.copy_object(bucket_name, + "test", "test_copy_wrong", params={"versionId": None}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.copy_object(bucket_name, + "test", "test_copy_wrong", params={"versionId": ''}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + result_exception = bucket.copy_object(bucket_name, + "test", "test_copy_wrong", params={"versionId": 'NotExistVersionID'}) + self.assertFalse(True, "should get a exception") + except: + pass + + result = bucket.copy_object(bucket_name, "test", "test_copy", params={'versionId': versionid1}) + + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + copy_versionid = result.versionid + + version_list = BatchDeleteObjectVersionList() + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid1)) + version_list.append(BatchDeleteObjectVersion(key="test", versionid=versionid2)) + version_list.append(BatchDeleteObjectVersion(key="test_copy", versionid=copy_versionid)) + + self.assertTrue(version_list.len(), 3) + + result = bucket.delete_object_versions(version_list) + + try: + bucket.delete_bucket() + except: + self.assertFalse(True, "should not get a exception") + + def test_delete_object_with_version(self): + + from oss2.models import BucketVersioningConfig + from oss2.models import BatchDeleteObjectVersion + from oss2.models import BatchDeleteObjectVersionList + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket_name = random_string(63).lower() + bucket = oss2.Bucket(auth, OSS_ENDPOINT, bucket_name) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE) + + wait_meta_sync() + + # put "test" version 1 + result = bucket.put_object("test_no_version", "test") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid is None) + + try: + result_exception = bucket.head_object("test_no_version", params={"versionId": "IllegalVersion"}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + bucket.delete_object("test_no_version", params={"versionId": None}) + self.assertFalse(True, "should get a exception") + except: + pass + + try: + bucket.delete_object("test_no_version", params={"versionId": ""}) + self.assertFalse(True, "should get a exception") + except: + pass + + bucket.delete_object("test_no_version") + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + result = bucket.get_bucket_info() + + self.assertEqual(int(result.status)/100, 2) + self.assertEqual(result.bucket_encryption_rule.ssealgorithm, None) + self.assertEqual(result.versioning_status, "Enabled") + + # put "test" version 1 + result = bucket.put_object("test", "test1") + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid1 = result.versionid + + # put "test" version 2 + headers = {} + headers['x-oss-storage-class'] = oss2.BUCKET_STORAGE_CLASS_ARCHIVE + result = bucket.put_object("test", "test2", headers=headers) + self.assertEqual(int(result.status)/100, 2) + self.assertTrue(result.versionid != "") + versionid2 = result.versionid + + 
bucket.delete_object("test", params={'versionId': versionid1}) + bucket.delete_object("test", params={'versionId': versionid2}) + bucket.delete_bucket() + + def test_restore_object_with_version(self): + + from oss2.models import BucketVersioningConfig + + auth = oss2.Auth(OSS_ID, OSS_SECRET) + bucket = oss2.Bucket(auth, OSS_ENDPOINT, random_string(63).lower()) + + bucket.create_bucket(oss2.BUCKET_ACL_PRIVATE, oss2.models.BucketCreateConfig(oss2.BUCKET_STORAGE_CLASS_ARCHIVE)) + + service = oss2.Service(auth, OSS_ENDPOINT) + + config = BucketVersioningConfig() + config.status = 'Enabled' + + result = bucket.put_bucket_versioning(config) + + wait_meta_sync() + + self.retry_assert(lambda: bucket.bucket_name in (b.name for b in + service.list_buckets(prefix=bucket.bucket_name).buckets)) + + key = 'a.txt' + result = bucket.put_object(key, 'content_version1') + self.assertEqual(202, bucket.restore_object(key).status) + version1 = result.versionid + + result = bucket.put_object(key, 'content_version2') + version2 = result.versionid + + result = bucket.restore_object(key, params={'versionId': version2}) + self.assertEqual(202, result.status) + + bucket.delete_object(key, params={'versionId': version1}) + bucket.delete_object(key, params={'versionId': version2}) + bucket.delete_bucket() + + class TestSign(TestObject): """ 这个类主要是用来增加测试覆盖率,当环境变量为oss2.AUTH_VERSION_2,则重新设置为oss2.AUTH_VERSION_1再运行TestObject,反之亦然