Skip to content

Commit

Permalink
some code fix
Browse files Browse the repository at this point in the history
  • Loading branch information
wujinhu committed Nov 15, 2017
1 parent 7b4f64d commit dcca6a2
Show file tree
Hide file tree
Showing 6 changed files with 50 additions and 30 deletions.
18 changes: 9 additions & 9 deletions examples/bucket.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,31 +32,31 @@
# 创建Bucket对象,所有Object相关的接口都可以通过Bucket对象来进行
bucket = oss2.Bucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name)

# create bucket with permission and storage class
# 带权限与存储类型创建bucket
bucket.create_bucket(permission=oss2.BUCKET_ACL_PRIVATE,
input=oss2.models.BucketCreateConfig(oss2.BUCKET_STORAGE_CLASS_STANDARD))

# get bucket info
# 获取bucket相关信息
bucket_info = bucket.get_bucket_info()
print('name: ' + bucket_info.name)
print('storage class: ' + bucket_info.storage_class)
print('creation date: ' + bucket_info.creation_date)

# get bucket stat
# 查看Bucket的状态
bucket_stat = bucket.get_bucket_stat()
print('storage: ' + str(bucket_stat.storage))
print('storage: ' + str(bucket_stat.storage_size_in_bytes))
print('object count: ' + str(bucket_stat.object_count))
print('multi part upload count: ' + str(bucket_stat.multi_part_upload_count))

# set bucket lifecycle. Objects will expire 357 days after their last modified time
rule = oss2.models.LifecycleRule('lc_for_chinese_prefix', '中文前缀/', status=oss2.models.LifecycleRule.ENABLED,
# 设置bucket生命周期, 有'中文/'前缀的对象在最后修改时间之后357天失效
rule = oss2.models.LifecycleRule('lc_for_chinese_prefix', '中文/', status=oss2.models.LifecycleRule.ENABLED,
expiration=oss2.models.LifecycleExpiration(days=357))

# abort multipart upload after 356 days
# 删除相对最后修改时间356天之后的parts
rule.abort_multipart_upload = oss2.models.AbortMultipartUpload(days=356)
# transition to IA after 180 days since object's last modified time
# 对象最后修改时间超过180天后转为IA
rule.storage_transitions = [oss2.models.StorageTransition(days=180, storage_class=oss2.BUCKET_STORAGE_CLASS_IA)]
# transition to ARCHIVE after 356 days since object's last modified time
# 对象最后修改时间超过356天后转为ARCHIVE
rule.storage_transitions.append(oss2.models.StorageTransition(days=356, storage_class=oss2.BUCKET_STORAGE_CLASS_ARCHIVE))

lifecycle = oss2.models.BucketLifecycle([rule])
Expand Down
31 changes: 20 additions & 11 deletions oss2/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -595,11 +595,23 @@ def delete_object(self, key):

def restore_object(self, key):
"""restore an object
return 202 if first time invoke restore_object for this key
return 409 if restore still in progress
return 200 if restore process has finished and will extend one day for download(max 7 days)
return 404 if object does not exist
return 400 if not restore Archive object
如果是第一次针对该object调用接口,返回RequestResult.status = 202;
如果已经成功调用过restore接口,且服务端仍处于解冻中,抛异常RestoreAlreadyInProgress(status=409)
如果已经成功调用过restore接口,且服务端解冻已经完成,再次调用时返回RequestResult.status = 200,且会将object的可下载时间延长一天,最多延长7天。
如果object不存在,则抛异常NoSuchKey(status=404);
对非Archive类型的Object提交restore,则抛异常OperationNotSupported(status=400)
也可以通过调用head_object接口来获取meta信息来判断是否可以restore与restore的状态
代码示例::
>>> meta = bucket.head_object(key)
>>> if meta.resp.headers['x-oss-storage-class'] == oss2.BUCKET_STORAGE_CLASS_ARCHIVE:
>>> bucket.restore_object(key)
>>> while True:
>>> meta = bucket.head_object(key)
>>> if meta.resp.headers['x-oss-restore'] == 'ongoing-request="true"':
>>> time.sleep(5)
>>> else:
>>> break
:param str key: object name
:return: :class:`RequestResult <oss2.models.RequestResult>`
"""
Expand Down Expand Up @@ -833,10 +845,7 @@ def create_bucket(self, permission=None, input=None):
else:
headers = None

if input:
data = self.__convert_data(BucketCreateConfig, xml_utils.to_put_bucket_config, input)
else:
data = None
data = self.__convert_data(BucketCreateConfig, xml_utils.to_put_bucket_config, input)
resp = self.__do_bucket('PUT', headers=headers, data=data)
return RequestResult(resp)

Expand Down Expand Up @@ -961,15 +970,15 @@ def get_bucket_referer(self):
return self._parse_result(resp, xml_utils.parse_get_bucket_referer, GetBucketRefererResult)

def get_bucket_stat(self):
    """Get bucket statistics: current storage size in bytes, number of
    objects, and the number of in-progress multipart upload events.

    Issues ``GET ?stat`` against the bucket and parses the XML response.

    :return: :class:`GetBucketStatResult <oss2.models.GetBucketStatResult>`
    """
    resp = self.__do_bucket('GET', params={Bucket.STAT: ''})
    return self._parse_result(resp, xml_utils.parse_get_bucket_stat, GetBucketStatResult)

def get_bucket_info(self):
"""get bucket info
"""获取bucket相关信息,如创建时间,访问Endpoint,Owner与ACL等。
:return: :class:`GetBucketInfoResult <oss2.models.GetBucketInfoResult>`
"""
Expand Down
10 changes: 10 additions & 0 deletions oss2/exceptions.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,6 +102,16 @@ class InvalidRequest(ServerError):
code = 'InvalidRequest'


class OperationNotSupported(ServerError):
    # Server-side 400 error: the requested operation is not supported for the
    # target resource — e.g. restoring an object whose storage class is not
    # Archive (see restore_object in oss2/api.py).
    status = 400
    code = 'OperationNotSupported'


class RestoreAlreadyInProgress(ServerError):
    # Server-side 409 error: a restore request was already accepted for this
    # object and the server-side thaw is still in progress (see restore_object
    # in oss2/api.py).
    status = 409
    code = 'RestoreAlreadyInProgress'


class InvalidArgument(ServerError):
    # Server-side 400 error: a request parameter carried an invalid value.
    status = 400
    code = 'InvalidArgument'
Expand Down
16 changes: 8 additions & 8 deletions oss2/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -353,8 +353,8 @@ def __init__(self, storage_class):


class BucketStat(object):
    """Statistics of a bucket, as returned by ``get_bucket_stat``.

    :param storage_size_in_bytes: total storage used by the bucket, in bytes
    :param object_count: number of objects stored in the bucket
    :param multi_part_upload_count: number of in-progress multipart upload events
    """
    def __init__(self, storage_size_in_bytes, object_count, multi_part_upload_count):
        self.storage_size_in_bytes = storage_size_in_bytes
        self.object_count = object_count
        self.multi_part_upload_count = multi_part_upload_count

Expand Down Expand Up @@ -457,10 +457,10 @@ def __init__(self, days=None, date=None, created_before_date=None):


class AbortMultipartUpload(object):
"""delete parts
"""删除parts
:param days: delete parts after days since last modified
:param created_before_date: delete parts if their last modified time earlier than created_before_date
:param days: 删除相对最后修改时间days天之后的parts
:param created_before_date: 删除最后修改时间早于created_before_date的parts
"""
def __init__(self, days=None, created_before_date=None):
Expand All @@ -474,9 +474,9 @@ def __init__(self, days=None, created_before_date=None):
class StorageTransition(object):
"""transit objects
:param days: transit objects after days since last modified
:param created_before_date: transit objects if their last modified time earlier than created_before_date
:param storage_class: transit objects to storage_class
:param days: 将相对最后修改时间days天之后的Object转储
:param created_before_date: 将最后修改时间早于created_before_date的对象转储
:param storage_class: 对象转储到OSS的目标存储类型
"""
def __init__(self, days=None, created_before_date=None, storage_class=None):
if days is not None and created_before_date is not None:
Expand Down
2 changes: 1 addition & 1 deletion oss2/xml_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -224,7 +224,7 @@ def parse_get_bucket_logging(result, body):
def parse_get_bucket_stat(result, body):
root = ElementTree.fromstring(body)

result.storage = _find_int(root, 'Storage')
result.storage_size_in_bytes = _find_int(root, 'Storage')
result.object_count = _find_int(root, 'ObjectCount')
result.multi_part_upload_count = _find_int(root, 'MultipartUploadCount')

Expand Down
3 changes: 2 additions & 1 deletion tests/test_bucket.py
Original file line number Diff line number Diff line change
Expand Up @@ -235,6 +235,7 @@ def test_lifecycle_abort_multipart_upload_date(self):

self.bucket.put_bucket_lifecycle(lifecycle)

wait_meta_sync()
result = self.bucket.get_bucket_lifecycle()
self.assertEqual(1, len(result.rules))
self.assertEqual(datetime.date(2016, 12, 20), result.rules[0].abort_multipart_upload.created_before_date)
Expand Down Expand Up @@ -414,7 +415,7 @@ def test_bucket_stat(self):
result = bucket.get_bucket_stat()
self.assertEqual(1, result.object_count)
self.assertEqual(0, result.multi_part_upload_count)
self.assertEqual(7, result.storage)
self.assertEqual(7, result.storage_size_in_bytes)

bucket.delete_object(key)
bucket.delete_bucket()
Expand Down

0 comments on commit dcca6a2

Please sign in to comment.