diff --git a/.github/workflows/ci-test.yml b/.github/workflows/ci-test.yml
index cea3133a..c705019b 100644
--- a/.github/workflows/ci-test.yml
+++ b/.github/workflows/ci-test.yml
@@ -55,6 +55,7 @@ jobs:
           QINIU_ACCESS_KEY: ${{ secrets.QINIU_ACCESS_KEY }}
           QINIU_SECRET_KEY: ${{ secrets.QINIU_SECRET_KEY }}
           QINIU_TEST_BUCKET: ${{ secrets.QINIU_TEST_BUCKET }}
+          QINIU_TEST_NO_ACC_BUCKET: ${{ secrets.QINIU_TEST_NO_ACC_BUCKET }}
           QINIU_TEST_DOMAIN: ${{ secrets.QINIU_TEST_DOMAIN }}
           QINIU_UPLOAD_CALLBACK_URL: ${{secrets.QINIU_UPLOAD_CALLBACK_URL}}
           QINIU_TEST_ENV: "travis"
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 63f6ff70..c299d25b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,10 @@
 # Changelog
+## 7.14.0
+* Object storage: add backup-domain retry logic to bucket management and file upload
+* Object storage: adjust the primary and backup domains used for region queries
+* Object storage: support the bucket-level transfer acceleration domain switch
+* Object storage: callback signature verification now also accepts the Qiniu signature
+
 ## 7.13.2(2024-05-28)
 * Object storage: fix custom variables set in upload callbacks not taking effect (introduced in v7.12.0)
 
diff --git a/examples/upload.py b/examples/upload.py
index 690ea046..25aef7cc 100755
--- a/examples/upload.py
+++ b/examples/upload.py
@@ -1,7 +1,8 @@
 # -*- coding: utf-8 -*-
 # flake8: noqa
+# import hashlib
 
-from qiniu import Auth, put_file, etag, urlsafe_base64_encode
+from qiniu import Auth, put_file, urlsafe_base64_encode
 import qiniu.config
 from qiniu.compat import is_py2, is_py3
 
@@ -24,7 +25,24 @@
 # local path of the file to upload
 localfile = '/Users/jemy/Documents/qiniu.png'
 
-ret, info = put_file(token, key, localfile)
+# During upload the SDK automatically computes the file hash and passes it to the
+# server to ensure upload integrity (if the hashes differ, the server rejects the upload).
+# When the file is accessed later, however, the server may not provide an MD5, or not in the expected encoding,
+# so if you need one, set a custom MD5 (or other hash) field via the metadata feature.
+# hasher = hashlib.md5()
+# with open(localfile, 'rb') as f:
+#     for d in f:
+#         hasher.update(d)
+# object_metadata = {
+#     'x-qn-meta-md5': hasher.hexdigest()
+# }
+
+ret, info = put_file(
+    token,
+    key,
+    localfile,
+    # metadata=object_metadata
+)
 print(ret)
 print(info)
 
@@ -32,5 +50,3 @@
     assert ret['key'].encode('utf-8') == key
 elif is_py3:
     assert ret['key'] == key
-
-assert ret['hash'] == etag(localfile)
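
Note on the metadata example above: once `x-qn-meta-md5` is set at upload time, it is returned as a response header when the object is fetched. A minimal verification sketch; the domain and key below are hypothetical:

```python
import hashlib

import requests

resp = requests.get('http://your-bucket-domain.example.com/a.jpg')  # hypothetical URL
served_md5 = resp.headers.get('x-qn-meta-md5')  # custom metadata set at upload time

hasher = hashlib.md5()
hasher.update(resp.content)
# only compare when the header is actually present
assert served_md5 is None or served_md5 == hasher.hexdigest()
```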
diff --git a/examples/upload_callback.py b/examples/upload_callback.py
index d8a0a788..468120a5 100755
--- a/examples/upload_callback.py
+++ b/examples/upload_callback.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # flake8: noqa
 
-from qiniu import Auth, put_file, etag
+from qiniu import Auth, put_file
 
 access_key = '...'
 secret_key = '...'
@@ -25,4 +25,3 @@
 ret, info = put_file(token, key, localfile)
 print(info)
 assert ret['key'] == key
-assert ret['hash'] == etag(localfile)
diff --git a/examples/upload_pfops.py b/examples/upload_pfops.py
index 7cc2b9e1..d8546c3f 100755
--- a/examples/upload_pfops.py
+++ b/examples/upload_pfops.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 # flake8: noqa
-from qiniu import Auth, put_file, etag, urlsafe_base64_encode
+from qiniu import Auth, put_file, urlsafe_base64_encode
 
 access_key = '...'
 secret_key = '...'
@@ -36,4 +36,3 @@
 ret, info = put_file(token, key, localfile)
 print(info)
 assert ret['key'] == key
-assert ret['hash'] == etag(localfile)
diff --git a/examples/upload_with_qvmzone.py b/examples/upload_with_qvmzone.py
index 54f8b603..4d298f59 100644
--- a/examples/upload_with_qvmzone.py
+++ b/examples/upload_with_qvmzone.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 # flake8: noqa
 
-from qiniu import Auth, put_file, etag, urlsafe_base64_encode
+from qiniu import Auth, put_file, urlsafe_base64_encode
 import qiniu.config
 from qiniu import Zone, set_default
 
@@ -37,4 +37,3 @@
 ret, info = put_file(token, key, localfile)
 print(info)
 assert ret['key'] == key
-assert ret['hash'] == etag(localfile)
diff --git a/qiniu/__init__.py b/qiniu/__init__.py
index 4378ee9d..55acfb25 100644
--- a/qiniu/__init__.py
+++ b/qiniu/__init__.py
@@ -9,16 +9,16 @@
 
 # flake8: noqa
 
-__version__ = '7.13.2'
+__version__ = '7.14.0'
 
 from .auth import Auth, QiniuMacAuth
 
 from .config import set_default
 from .zone import Zone
-from .region import Region
+from .region import LegacyRegion as Region
 
 from .services.storage.bucket import BucketManager, build_batch_copy, build_batch_rename, build_batch_move, \
-    build_batch_stat, build_batch_delete, build_batch_restoreAr
+    build_batch_stat, build_batch_delete, build_batch_restoreAr, build_batch_restore_ar
 from .services.storage.uploader import put_data, put_file, put_stream
 from .services.storage.upload_progress_recorder import UploadProgressRecorder
 from .services.cdn.manager import CdnManager, create_timestamp_anti_leech_url, DomainManager
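
The alias keeps `from qiniu import Region` working for existing code while the new `qiniu.http.region.Region` carries the endpoint map. A quick compatibility check:

```python
from qiniu import Region
from qiniu.region import LegacyRegion

assert Region is LegacyRegion  # the old import path resolves to the legacy class
```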
diff --git a/qiniu/auth.py b/qiniu/auth.py
index b86d8bf5..bdaaef47 100644
--- a/qiniu/auth.py
+++ b/qiniu/auth.py
@@ -37,6 +37,8 @@
     str('persistentOps'),  # persistent processing operations
     str('persistentNotifyUrl'),  # URL notified with the persistent processing result
     str('persistentPipeline'),  # dedicated pipeline for persistent processing
+    str('persistentType'),  # set to `1` to enable idle-time processing; must be an int
+
     str('deleteAfterDays'),  # number of days after which the file is deleted automatically
     str('fileType'),  # storage class: 0 standard, 1 infrequent access, 2 archive, 3 deep archive, 4 archive direct-read
     str('isPrefixalScope'),  # require uploaded keys to use the given prefix
@@ -194,22 +196,56 @@ def __upload_token(self, policy):
         return self.token_with_data(data)
 
     def verify_callback(
-            self,
-            origin_authorization,
-            url,
-            body,
-            content_type='application/x-www-form-urlencoded'):
-        """回调验证
-
-        Args:
-            origin_authorization: 回调时请求Header中的Authorization字段
-            url:                  回调请求的url
-            body:                 回调请求的body
-            content_type:         回调请求body的Content-Type
-
-        Returns:
-            返回true表示验证成功,返回false表示验证失败
+        self,
+        origin_authorization,
+        url,
+        body,
+        content_type='application/x-www-form-urlencoded',
+        method='GET',
+        headers=None
+    ):
+        """
+        Qbox 回调验证
+
+        Parameters
+        ----------
+        origin_authorization: str
+            回调时请求 Header 中的 Authorization 字段
+        url: str
+            回调请求的 url
+        body: str
+            回调请求的 body
+        content_type: str
+            回调请求的 Content-Type
+        method: str
+            回调请求的 method,Qiniu 签名必须传入,默认 GET
+        headers: dict
+            回调请求的 headers,Qiniu 签名必须传入,默认为空字典
+
+        Returns
+        -------
+        bool
+            返回 True 表示验证成功,返回 False 表示验证失败
         """
+        if headers is None:
+            headers = {}
+
+        # compatibility with the Qiniu signature
+        if origin_authorization.startswith("Qiniu"):
+            qn_auth = QiniuMacAuth(
+                access_key=self.__access_key,
+                secret_key=self.__secret_key,
+                disable_qiniu_timestamp_signature=True
+            )
+            return qn_auth.verify_callback(
+                origin_authorization,
+                url=url,
+                body=body,
+                content_type=content_type,
+                method=method,
+                headers=headers
+            )
+
         token = self.token_of_request(url, body, content_type)
         authorization = 'QBox {0}'.format(token)
         return origin_authorization == authorization
@@ -326,6 +362,50 @@ def qiniu_headers(self, headers):
             '%s: %s' % (canonical_mime_header_key(key), headers.get(key)) for key in sorted(qiniu_fields)
         ])
 
+    def verify_callback(
+        self,
+        origin_authorization,
+        url,
+        body,
+        content_type='application/x-www-form-urlencoded',
+        method='GET',
+        headers=None
+    ):
+        """
+        Qiniu 回调验证
+
+        Parameters
+        ----------
+        origin_authorization: str
+            回调时请求 Header 中的 Authorization 字段
+        url: str
+            回调请求的 url
+        body: str
+            回调请求的 body
+        content_type: str
+            回调请求的 Content-Type
+        method: str
+            回调请求的 Method
+        headers: dict
+            回调请求的 headers
+
+        Returns
+        -------
+
+        """
+        if headers is None:
+            headers = {}
+        token = self.token_of_request(
+            method=method,
+            host=headers.get('Host', None),
+            url=url,
+            qheaders=self.qiniu_headers(headers),
+            content_type=content_type,
+            body=body
+        )
+        authorization = 'Qiniu {0}'.format(token)
+        return origin_authorization == authorization
+
     @staticmethod
     def __checkKey(access_key, secret_key):
         if not (access_key and secret_key):
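
Usage sketch for the extended `verify_callback`: `Auth.verify_callback` now dispatches to `QiniuMacAuth.verify_callback` when the incoming `Authorization` header starts with `Qiniu`, so one call handles both signature kinds. The values below are hypothetical placeholders for an incoming callback request:

```python
from qiniu import Auth

q = Auth('your-access-key', 'your-secret-key')

# for 'Qiniu ...' signatures, method and headers must match the callback request
ok = q.verify_callback(
    'Qiniu your-access-key:signature',  # Authorization header of the callback
    url='https://your.callback.url/qiniu',
    body='key=a.jpg&hash=FgAq...',
    content_type='application/x-www-form-urlencoded',
    method='POST',
    headers={'Host': 'your.callback.url'}
)
print(ok)  # False for these placeholder values
```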
diff --git a/qiniu/config.py b/qiniu/config.py
index cb2ac57c..338c1399 100644
--- a/qiniu/config.py
+++ b/qiniu/config.py
@@ -1,26 +1,27 @@
 # -*- coding: utf-8 -*-
-from qiniu import region
-
 RS_HOST = 'http://rs.qiniu.com'  # host for management operations
 RSF_HOST = 'http://rsf.qbox.me'  # host for listing operations
 API_HOST = 'http://api.qiniuapi.com'  # host for data processing operations
-UC_HOST = region.UC_HOST  # 获取空间信息Host
-QUERY_REGION_HOST = 'https://kodo-config.qiniuapi.com'
+QUERY_REGION_HOST = 'https://uc.qiniuapi.com'
+QUERY_REGION_BACKUP_HOSTS = [
+    'kodo-config.qiniuapi.com',
+    'uc.qbox.me'
+]
+UC_HOST = QUERY_REGION_HOST  # host for fetching bucket info
+UC_BACKUP_HOSTS = QUERY_REGION_BACKUP_HOSTS
 
 _BLOCK_SIZE = 1024 * 1024 * 4  # resumable upload block size; fixed by the API spec, not configurable
 
 _config = {
-    'default_zone': region.Region(),
+    'default_zone': None,
     'default_rs_host': RS_HOST,
     'default_rsf_host': RSF_HOST,
     'default_api_host': API_HOST,
     'default_uc_host': UC_HOST,
+    'default_uc_backup_hosts': UC_BACKUP_HOSTS,
     'default_query_region_host': QUERY_REGION_HOST,
-    'default_query_region_backup_hosts': [
-        'uc.qbox.me',
-        'api.qiniu.com'
-    ],
-    'default_backup_hosts_retry_times': 2,
+    'default_query_region_backup_hosts': QUERY_REGION_BACKUP_HOSTS,
+    'default_backup_hosts_retry_times': 3,  # only controls retries of query hosts for the legacy LegacyRegion
     'connection_timeout': 30,  # connection timeout is 30s
     'connection_retries': 3,  # 3 connection retries
     'connection_pool': 10,  # connection pool size is 10
@@ -28,18 +29,8 @@
 }
 
 _is_customized_default = {
-    'default_zone': False,
-    'default_rs_host': False,
-    'default_rsf_host': False,
-    'default_api_host': False,
-    'default_uc_host': False,
-    'default_query_region_host': False,
-    'default_query_region_backup_hosts': False,
-    'default_backup_hosts_retry_times': False,
-    'connection_timeout': False,
-    'connection_retries': False,
-    'connection_pool': False,
-    'default_upload_threshold': False
+    k: False
+    for k in _config.keys()
 }
 
 
@@ -48,6 +39,10 @@ def is_customized_default(key):
 
 
 def get_default(key):
+    if key == 'default_zone' and not _is_customized_default[key]:
+        # prevent circular import
+        from .region import LegacyRegion
+        return LegacyRegion()
     return _config[key]
 
 
@@ -56,7 +51,7 @@ def set_default(
         connection_timeout=None, default_rs_host=None, default_uc_host=None,
         default_rsf_host=None, default_api_host=None, default_upload_threshold=None,
         default_query_region_host=None, default_query_region_backup_hosts=None,
-        default_backup_hosts_retry_times=None):
+        default_backup_hosts_retry_times=None, default_uc_backup_hosts=None):
     if default_zone:
         _config['default_zone'] = default_zone
         _is_customized_default['default_zone'] = True
@@ -72,16 +67,23 @@ def set_default(
     if default_uc_host:
         _config['default_uc_host'] = default_uc_host
         _is_customized_default['default_uc_host'] = True
+        _config['default_uc_backup_hosts'] = []
+        _is_customized_default['default_uc_backup_hosts'] = True
         _config['default_query_region_host'] = default_uc_host
         _is_customized_default['default_query_region_host'] = True
         _config['default_query_region_backup_hosts'] = []
         _is_customized_default['default_query_region_backup_hosts'] = True
+    if default_uc_backup_hosts is not None:
+        _config['default_uc_backup_hosts'] = default_uc_backup_hosts
+        _is_customized_default['default_uc_backup_hosts'] = True
+        _config['default_query_region_backup_hosts'] = default_uc_backup_hosts
+        _is_customized_default['default_query_region_backup_hosts'] = True
     if default_query_region_host:
         _config['default_query_region_host'] = default_query_region_host
         _is_customized_default['default_query_region_host'] = True
         _config['default_query_region_backup_hosts'] = []
         _is_customized_default['default_query_region_backup_hosts'] = True
-    if default_query_region_backup_hosts:
+    if default_query_region_backup_hosts is not None:
         _config['default_query_region_backup_hosts'] = default_query_region_backup_hosts
         _is_customized_default['default_query_region_backup_hosts'] = True
     if default_backup_hosts_retry_times:
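
A sketch of the new `default_uc_backup_hosts` knob, following the behavior in this diff: setting `default_uc_host` clears the backup lists (so a private UC deployment is never bypassed), and a later `default_uc_backup_hosts` restores them explicitly. The host names are hypothetical:

```python
from qiniu import set_default

# a private UC host disables the built-in backups ...
set_default(default_uc_host='https://uc.example.com')

# ... and an explicit backup list re-enables retrying, for both
# bucket-info requests and region queries
set_default(default_uc_backup_hosts=['uc-bak1.example.com', 'uc-bak2.example.com'])
```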
diff --git a/qiniu/http/__init__.py b/qiniu/http/__init__.py
index 2b61e0fe..83a837a4 100644
--- a/qiniu/http/__init__.py
+++ b/qiniu/http/__init__.py
@@ -1,39 +1,15 @@
 # -*- coding: utf-8 -*-
 import logging
 import platform
-import functools
 
 import requests
-from requests.adapters import HTTPAdapter
 from requests.auth import AuthBase
 
 from qiniu import config, __version__
 import qiniu.auth
 
-from .client import HTTPClient
 from .response import ResponseInfo
-from .middleware import UserAgentMiddleware
-
-
-qn_http_client = HTTPClient(
-    middlewares=[
-        UserAgentMiddleware(__version__)
-    ]
-)
-
-
-# compatibility with some config from qiniu.config
-def _before_send(func):
-    @functools.wraps(func)
-    def wrapper(self, *args, **kwargs):
-        if _session is None:
-            _init()
-        return func(self, *args, **kwargs)
-
-    return wrapper
-
-
-qn_http_client.send_request = _before_send(qn_http_client.send_request)
+from .default_client import qn_http_client, _init_http_adapter
 
 _sys_info = '{0}; {1}'.format(platform.system(), platform.machine())
 _python_ver = platform.python_version()
@@ -61,12 +37,7 @@ def _init():
     global _session
     if _session is None:
         _session = qn_http_client.session
-
-    adapter = HTTPAdapter(
-        pool_connections=config.get_default('connection_pool'),
-        pool_maxsize=config.get_default('connection_pool'),
-        max_retries=config.get_default('connection_retries'))
-    _session.mount('http://', adapter)
+    _init_http_adapter()
 
 
 def _post(url, data, files, auth, headers=None):
diff --git a/qiniu/http/default_client.py b/qiniu/http/default_client.py
new file mode 100644
index 00000000..7d7ccc60
--- /dev/null
+++ b/qiniu/http/default_client.py
@@ -0,0 +1,37 @@
+import functools
+
+from requests.adapters import HTTPAdapter
+
+from qiniu import config, __version__
+
+from .client import HTTPClient
+from .middleware import UserAgentMiddleware
+
+qn_http_client = HTTPClient(
+    middlewares=[
+        UserAgentMiddleware(__version__)
+    ]
+)
+
+
+# compatibility with some config from qiniu.config
+def _before_send(func):
+    @functools.wraps(func)
+    def wrapper(self, *args, **kwargs):
+        _init_http_adapter()
+        return func(self, *args, **kwargs)
+
+    return wrapper
+
+
+qn_http_client.send_request = _before_send(qn_http_client.send_request)
+
+
+def _init_http_adapter():
+    # could be optimized:
+    # only re-mount when the config changes, not before every request
+    adapter = HTTPAdapter(
+        pool_connections=config.get_default('connection_pool'),
+        pool_maxsize=config.get_default('connection_pool'),
+        max_retries=config.get_default('connection_retries'))
+    qn_http_client.session.mount('http://', adapter)
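
Because `_before_send` re-mounts the adapter before every `send_request`, connection settings changed through `qiniu.config.set_default` take effect on the next request without rebuilding the client. A rough sketch, assuming `qn_http_client.get` routes through the wrapped `send_request`:

```python
from qiniu import set_default
from qiniu.http.default_client import qn_http_client

set_default(connection_retries=5, connection_pool=20)
# the next request mounts a fresh HTTPAdapter with the values above
ret, resp = qn_http_client.get('https://uc.qiniuapi.com')
```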
diff --git a/qiniu/http/endpoint.py b/qiniu/http/endpoint.py
new file mode 100644
index 00000000..307542b9
--- /dev/null
+++ b/qiniu/http/endpoint.py
@@ -0,0 +1,68 @@
+class Endpoint:
+    @staticmethod
+    def from_host(host):
+        """
+        Autodetect scheme from host string
+
+        Parameters
+        ----------
+        host: str
+
+        Returns
+        -------
+        Endpoint
+        """
+        if '://' in host:
+            scheme, host = host.split('://')
+            return Endpoint(host=host, default_scheme=scheme)
+        else:
+            return Endpoint(host=host)
+
+    def __init__(self, host, default_scheme='https'):
+        """
+        Parameters
+        ----------
+        host: str
+        default_scheme: str
+        """
+        self.host = host
+        self.default_scheme = default_scheme
+
+    def __str__(self):
+        return 'Endpoint(host:\'{0}\',default_scheme:\'{1}\')'.format(
+            self.host,
+            self.default_scheme
+        )
+
+    def __repr__(self):
+        return self.__str__()
+
+    def __eq__(self, other):
+        if not isinstance(other, Endpoint):
+            raise TypeError('Cannot compare Endpoint with {0}'.format(type(other)))
+
+        return self.host == other.host and self.default_scheme == other.default_scheme
+
+    def get_value(self, scheme=None):
+        """
+        Parameters
+        ----------
+        scheme: str
+
+        Returns
+        -------
+        str
+        """
+        scheme = scheme if scheme is not None else self.default_scheme
+        return ''.join([scheme, '://', self.host])
+
+    def clone(self):
+        """
+        Returns
+        -------
+        Endpoint
+        """
+        return Endpoint(
+            host=self.host,
+            default_scheme=self.default_scheme
+        )
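
A quick sketch of the `Endpoint` helper added here: `from_host` detects the scheme when one is present, otherwise `https` is assumed:

```python
from qiniu.http.endpoint import Endpoint

e1 = Endpoint.from_host('https://up.qiniup.com')  # scheme taken from the string
e2 = Endpoint('up.qiniup.com')                    # default_scheme is 'https'

assert e1 == e2
assert e1.get_value() == 'https://up.qiniup.com'
assert e1.get_value(scheme='http') == 'http://up.qiniup.com'
```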
diff --git a/qiniu/http/endpoints_provider.py b/qiniu/http/endpoints_provider.py
new file mode 100644
index 00000000..ccfb3b43
--- /dev/null
+++ b/qiniu/http/endpoints_provider.py
@@ -0,0 +1,13 @@
+import abc
+
+
+class EndpointsProvider:
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __iter__(self):
+        """
+        Returns
+        -------
+        list[Endpoint]
+        """
diff --git a/qiniu/http/endpoints_retry_policy.py b/qiniu/http/endpoints_retry_policy.py
new file mode 100644
index 00000000..f648a29e
--- /dev/null
+++ b/qiniu/http/endpoints_retry_policy.py
@@ -0,0 +1,56 @@
+from qiniu.retry.abc import RetryPolicy
+
+
+class EndpointsRetryPolicy(RetryPolicy):
+    def __init__(self, endpoints_provider=None, skip_init_context=False):
+        """
+        Parameters
+        ----------
+        endpoints_provider: Iterable[Endpoint]
+        skip_init_context: bool
+        """
+        self.endpoints_provider = endpoints_provider if endpoints_provider else []
+        self.skip_init_context = skip_init_context
+
+    def init_context(self, context):
+        """
+        Parameters
+        ----------
+        context: dict
+
+        Returns
+        -------
+        None
+        """
+        if self.skip_init_context:
+            return
+        context['alternative_endpoints'] = list(self.endpoints_provider)
+        if not context['alternative_endpoints']:
+            raise ValueError('There isn\'t an available endpoint')
+        context['endpoint'] = context['alternative_endpoints'].pop(0)
+
+    def should_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+
+        Returns
+        -------
+        bool
+        """
+        return len(attempt.context['alternative_endpoints']) > 0
+
+    def prepare_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+
+        Returns
+        -------
+        None
+        """
+        if not attempt.context['alternative_endpoints']:
+            raise Exception('There isn\'t an available endpoint for the next try')
+        attempt.context['endpoint'] = attempt.context['alternative_endpoints'].pop(0)
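
The policy stores the first endpoint in `context['endpoint']` and keeps the rest as alternatives; `prepare_retry` then shifts through them. A small sketch of the context handling in isolation (the backup host is hypothetical):

```python
from qiniu.http.endpoint import Endpoint
from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy

policy = EndpointsRetryPolicy(endpoints_provider=[
    Endpoint('up.qiniup.com'),
    Endpoint('up-bak.example.com')  # hypothetical backup host
])

context = {}
policy.init_context(context)
assert context['endpoint'].host == 'up.qiniup.com'
assert [e.host for e in context['alternative_endpoints']] == ['up-bak.example.com']
```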
diff --git a/qiniu/http/region.py b/qiniu/http/region.py
new file mode 100644
index 00000000..ebf4c770
--- /dev/null
+++ b/qiniu/http/region.py
@@ -0,0 +1,188 @@
+from datetime import datetime, timedelta
+
+from enum import Enum
+
+from .endpoint import Endpoint
+
+
+# Use StrEnum when the min supported python version is >= 3.11
+# to make the JSON stringification more readable,
+# or find another way to simplify it
+class ServiceName(Enum):
+    UC = 'uc'
+    UP = 'up'
+    UP_ACC = 'up_acc'
+    IO = 'io'
+    # IO_SRC = 'io_src'
+    RS = 'rs'
+    RSF = 'rsf'
+    API = 'api'
+    S3 = 's3'
+
+
+class Region:
+    @staticmethod
+    def merge(*args):
+        """
+        Parameters
+        ----------
+        args: list[list[Region]]
+
+        Returns
+        -------
+
+        """
+        if not args:
+            raise TypeError('There aren\'ta any regions to merge')
+        source, rest = args[0], args[1:]
+        target = source.clone()
+        for r in rest:
+            for sn, el in r.services.items():
+                if sn not in target.services:
+                    target.services[sn] = [e.clone() for e in el]
+                else:
+                    target_values = [e.get_value() for e in target.services[sn]]
+                    target.services[sn] += [
+                        e.clone()
+                        for e in el
+                        if e.get_value() not in target_values
+                    ]
+
+        return target
+
+    @staticmethod
+    def from_region_id(region_id, **kwargs):
+        """
+        Parameters
+        ----------
+        region_id: str
+        kwargs: dict
+            s3_region_id: str
+            ttl: int
+            create_time: datetime
+            extended_services: dict[ServiceName or str, list[Endpoint]]
+            preferred_scheme: str
+
+        Returns
+        -------
+        Region
+        """
+        # create services endpoints
+        endpoint_kwargs = {}
+        if 'preferred_scheme' in kwargs:
+            endpoint_kwargs['default_scheme'] = kwargs.get('preferred_scheme')
+
+        is_z0 = region_id == 'z0'
+        services_hosts = {
+            ServiceName.UC: ['uc.qiniuapi.com'],
+            ServiceName.UP: [
+                'upload-{0}.qiniup.com'.format(region_id),
+                'up-{0}.qiniup.com'.format(region_id)
+            ] if not is_z0 else [
+                'upload.qiniup.com',
+                'up.qiniup.com'
+            ],
+            ServiceName.IO: [
+                'iovip-{0}.qiniuio.com'.format(region_id),
+            ] if not is_z0 else [
+                'iovip.qiniuio.com',
+            ],
+            ServiceName.RS: [
+                'rs-{0}.qiniuapi.com'.format(region_id),
+            ],
+            ServiceName.RSF: [
+                'rsf-{0}.qiniuapi.com'.format(region_id),
+            ],
+            ServiceName.API: [
+                'api-{0}.qiniuapi.com'.format(region_id),
+            ],
+            ServiceName.S3: [
+                's3.{0}.qiniucs.com'.format(region_id),
+            ]
+        }
+        services = {
+            k: [
+                Endpoint(h, **endpoint_kwargs) for h in v
+            ]
+            for k, v in services_hosts.items()
+        }
+        services.update(kwargs.get('extended_services', {}))
+
+        # create region
+        region_kwargs = {
+            k: kwargs.get(k)
+            for k in [
+                's3_region_id',
+                'ttl',
+                'create_time'
+            ] if k in kwargs
+        }
+        region_kwargs['region_id'] = region_id
+        region_kwargs.setdefault('s3_region_id', region_id)
+        region_kwargs['services'] = services
+
+        return Region(**region_kwargs)
+
+    def __init__(
+        self,
+        region_id=None,
+        s3_region_id=None,
+        services=None,
+        ttl=86400,
+        create_time=None
+    ):
+        """
+        Parameters
+        ----------
+        region_id: str
+        s3_region_id: str
+        services: dict[ServiceName or str, list[Endpoint]]
+        ttl: int, default 86400
+        create_time: datetime, default datetime.now()
+        """
+        self.region_id = region_id
+        self.s3_region_id = s3_region_id if s3_region_id else region_id
+
+        self.services = services if services else {}
+        self.services.update(
+            {
+                k: []
+                for k in ServiceName
+                if
+                k not in self.services or
+                not isinstance(self.services[k], list)
+            }
+        )
+
+        self.ttl = ttl
+        self.create_time = create_time if create_time else datetime.now()
+
+    @property
+    def is_live(self):
+        """
+        Returns
+        -------
+        bool
+        """
+        if self.ttl < 0:
+            return True
+        live_time = datetime.now() - self.create_time
+        return live_time < timedelta(seconds=self.ttl)
+
+    def clone(self):
+        """
+        Returns
+        -------
+        Region
+        """
+        return Region(
+            region_id=self.region_id,
+            s3_region_id=self.s3_region_id,
+            services={
+                k: [endpoint.clone() for endpoint in self.services[k]]
+                for k in self.services
+            },
+            ttl=self.ttl,
+            create_time=self.create_time
+        )
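
`Region.from_region_id` derives the conventional public hosts from a region id (with `z0` special-cased), and `is_live` compares the region's age against its TTL. For example:

```python
from qiniu.http.region import Region, ServiceName

r = Region.from_region_id('z1')
assert [e.get_value() for e in r.services[ServiceName.UP]] == [
    'https://upload-z1.qiniup.com',
    'https://up-z1.qiniup.com'
]
assert r.is_live  # freshly created, within the default 86400 s TTL
```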
diff --git a/qiniu/http/regions_provider.py b/qiniu/http/regions_provider.py
new file mode 100644
index 00000000..8b52822c
--- /dev/null
+++ b/qiniu/http/regions_provider.py
@@ -0,0 +1,743 @@
+import abc
+import datetime
+import itertools
+from collections import namedtuple
+import logging
+import tempfile
+import os
+
+from qiniu.compat import json, b as to_bytes
+from qiniu.utils import io_md5
+
+from .endpoint import Endpoint
+from .region import Region, ServiceName
+from .default_client import qn_http_client
+from .middleware import RetryDomainsMiddleware
+
+
+class RegionsProvider:
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def __iter__(self):
+        """
+        Returns
+        -------
+        list[Region]
+        """
+
+
+class MutableRegionsProvider(RegionsProvider):
+    @abc.abstractmethod
+    def set_regions(self, regions):
+        """
+        Parameters
+        ----------
+        regions: list[Region]
+        """
+
+
+# --- serializers for QueryRegionsProvider ---
+
+def _get_region_from_query(data, **kwargs):
+    preferred_scheme = kwargs.get('preferred_scheme')
+    if not preferred_scheme:
+        preferred_scheme = 'http'
+
+    domain_path_map = {
+        k: (k.value, 'domains')
+        for k in ServiceName
+        if k not in [ServiceName.UP_ACC]
+    }
+    domain_path_map[ServiceName.UP_ACC] = ('up', 'acc_domains')
+
+    services = {
+        # sn service name, dsn data service name
+        sn: [
+            Endpoint(h, default_scheme=preferred_scheme)
+            for h in data.get(dsn, {}).get(k, [])
+        ]
+        for sn, (dsn, k) in domain_path_map.items()
+    }
+
+    return Region(
+        region_id=data.get('region'),
+        s3_region_id=data.get('s3', {}).get('region_alias', None),
+        services=services,
+        ttl=data.get('ttl', None)
+    )
+
+
+class QueryRegionsProvider(RegionsProvider):
+    def __init__(
+        self,
+        access_key,
+        bucket_name,
+        endpoints_provider,
+        preferred_scheme='http',
+        max_retry_times_per_endpoint=1,
+    ):
+        """
+        Parameters
+        ----------
+        access_key: str
+        bucket_name: str
+        endpoints_provider: Iterable[Endpoint]
+        preferred_scheme: str
+        max_retry_times_per_endpoint: int
+        """
+        self.access_key = access_key
+        self.bucket_name = bucket_name
+        self.endpoints_provider = endpoints_provider
+        self.preferred_scheme = preferred_scheme
+        self.max_retry_times_per_endpoint = max_retry_times_per_endpoint
+
+    def __iter__(self):
+        regions = self.__fetch_regions()
+        # change to `yield from` when the min supported python version is >= 3.3
+        for r in regions:
+            yield r
+
+    def __fetch_regions(self):
+        endpoints = list(self.endpoints_provider)
+        if not endpoints:
+            raise ValueError('There aren\'t any available endpoints to query regions')
+        endpoint, alternative_endpoints = endpoints[0], endpoints[1:]
+
+        url = '{0}/v4/query?ak={1}&bucket={2}'.format(endpoint.get_value(), self.access_key, self.bucket_name)
+        ret, resp = qn_http_client.get(
+            url,
+            middlewares=[
+                RetryDomainsMiddleware(
+                    backup_domains=[e.host for e in alternative_endpoints],
+                    max_retry_times=self.max_retry_times_per_endpoint
+                )
+            ]
+        )
+
+        if not resp.ok():
+            raise RuntimeError(
+                (
+                    'Query regions failed with '
+                    'HTTP Status Code {0}, '
+                    'Body {1}'
+                ).format(resp.status_code, resp.text_body)
+            )
+
+        return [
+            _get_region_from_query(d, preferred_scheme=self.preferred_scheme)
+            for d in ret.get('hosts', [])
+        ]
+
+
+# --- helpers for CachedRegionsProvider ---
+class FileAlreadyLocked(RuntimeError):
+    def __init__(self, message):
+        super(FileAlreadyLocked, self).__init__(message)
+
+
+class _FileLocker:
+    def __init__(self, origin_file):
+        self._origin_file = origin_file
+
+    def __enter__(self):
+        if os.access(self.lock_file_path, os.R_OK | os.W_OK):
+            raise FileAlreadyLocked('File {0} already locked'.format(self._origin_file))
+        with open(self.lock_file_path, 'w'):
+            pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        os.remove(self.lock_file_path)
+
+    @property
+    def lock_file_path(self):
+        """
+        Returns
+        -------
+        str
+        """
+        return self._origin_file + '.lock'
+
+
+# use dataclass instead of namedtuple when the min supported python version is >= 3.7
+CacheScope = namedtuple(
+    'CacheScope',
+    [
+        'memo_cache',
+        'persist_path',
+        'last_shrink_at',
+        'shrink_interval',
+        'should_shrink_expired_regions'
+    ]
+)
+
+
+_global_cache_scope = CacheScope(
+    memo_cache={},
+    persist_path=os.path.join(
+        tempfile.gettempdir(),
+        'qn-regions-cache.jsonl'
+    ),
+    last_shrink_at=datetime.datetime.fromtimestamp(0),
+    shrink_interval=datetime.timedelta(-1),  # useless for now
+    should_shrink_expired_regions=False
+)
+
+
+# --- serializers for CachedRegionsProvider ---
+
+_PersistedEndpoint = namedtuple(
+    'PersistedEndpoint',
+    [
+        'host',
+        'defaultScheme'
+    ]
+)
+
+
+def _persist_endpoint(endpoint):
+    """
+    Parameters
+    ----------
+    endpoint: Endpoint
+
+    Returns
+    -------
+    dict
+    """
+    return _PersistedEndpoint(
+        defaultScheme=endpoint.default_scheme,
+        host=endpoint.host
+    )._asdict()
+
+
+def _get_endpoint_from_persisted(data):
+    """
+    Parameters
+    ----------
+    data: dict
+
+    Returns
+    -------
+    Endpoint
+    """
+    persisted_endpoint = _PersistedEndpoint(**data)
+    return Endpoint(
+        persisted_endpoint.host,
+        default_scheme=persisted_endpoint.defaultScheme
+    )
+
+
+_PersistedRegion = namedtuple(
+    'PersistedRegion',
+    [
+        'regionId',
+        's3RegionId',
+        'services',
+        'ttl',
+        'createTime'
+    ]
+)
+
+
+def _persist_region(region):
+    """
+    Parameters
+    ----------
+    region: Region
+
+    Returns
+    -------
+    dict
+    """
+    return _PersistedRegion(
+        regionId=region.region_id,
+        s3RegionId=region.s3_region_id,
+        services={
+            # StrEnum is not available in python < 3.11,
+            # so stringify the key manually
+            k.value if isinstance(k, ServiceName) else k: [
+                _persist_endpoint(e)
+                for e in v
+            ]
+            for k, v in region.services.items()
+        },
+        ttl=region.ttl,
+        # use datetime.datetime.timestamp() when the min supported python version is >= 3
+        createTime=int(float(region.create_time.strftime('%s.%f')) * 1000)
+    )._asdict()
+
+
+def _get_region_from_persisted(data):
+    """
+    Parameters
+    ----------
+    data: dict
+
+    Returns
+    -------
+    Region
+    """
+    def _get_service_name(k):
+        try:
+            return ServiceName(k)
+        except ValueError:
+            return k
+
+    persisted_region = _PersistedRegion(**data)
+
+    return Region(
+        region_id=persisted_region.regionId,
+        s3_region_id=persisted_region.s3RegionId,
+        services={
+            # StrEnum is not available in python < 3.11,
+            # so parse the key manually
+            _get_service_name(k): [
+                _get_endpoint_from_persisted(d)
+                for d in v
+            ]
+            for k, v in persisted_region.services.items()
+        },
+        ttl=persisted_region.ttl,
+        create_time=datetime.datetime.fromtimestamp(persisted_region.createTime / 1000)
+    )
+
+
+def _parse_persisted_regions(persisted_data):
+    """
+    Parameters
+    ----------
+    persisted_data: str
+
+    Returns
+    -------
+    cache_key: str
+    regions: list[Region]
+    """
+    parsed_data = json.loads(persisted_data)
+    regions = [
+        _get_region_from_persisted(d)
+        for d in parsed_data.get('regions', [])
+    ]
+    return parsed_data.get('cacheKey'), regions
+
+
+def _walk_persist_cache_file(persist_path, ignore_parse_error=False):
+    """
+    Parameters
+    ----------
+    persist_path: str
+    ignore_parse_error: bool
+
+    Returns
+    -------
+    Iterable[(str, list[Region])]
+    """
+    if not os.access(persist_path, os.R_OK):
+        return
+
+    with open(persist_path, 'r') as f:
+        for line in f:
+            try:
+                cache_key, regions = _parse_persisted_regions(line)
+                yield cache_key, regions
+            except Exception as err:
+                if not ignore_parse_error:
+                    raise err
+
+
+def _merge_regions(*args):
+    """
+    merge regions by region id.
+    when ids collide, the most recently created region is kept.
+    Parameters
+    ----------
+    args: list[Region]
+
+    Returns
+    -------
+    list[Region]
+    """
+    regions_dict = {}
+
+    for r in itertools.chain(*args):
+        if r.region_id not in regions_dict:
+            regions_dict[r.region_id] = r
+        else:
+            if r.create_time > regions_dict[r.region_id].create_time:
+                regions_dict[r.region_id] = r
+
+    return regions_dict.values()
+
+
+class CachedRegionsProvider(MutableRegionsProvider):
+    def __init__(
+        self,
+        cache_key,
+        base_regions_provider,
+        **kwargs
+    ):
+        """
+        Parameters
+        ----------
+        cache_key: str
+        base_regions_provider: Iterable[Region]
+        kwargs
+            persist_path: str
+            shrink_interval: datetime.timedelta
+            should_shrink_expired_regions: bool
+        """
+        self.cache_key = cache_key
+        self.base_regions_provider = base_regions_provider
+
+        persist_path = kwargs.get('persist_path', None)
+        if persist_path is None:
+            persist_path = _global_cache_scope.persist_path
+
+        shrink_interval = kwargs.get('shrink_interval', None)
+        if shrink_interval is None:
+            shrink_interval = datetime.timedelta(days=1)
+
+        should_shrink_expired_regions = kwargs.get('should_shrink_expired_regions', None)
+        if should_shrink_expired_regions is None:
+            should_shrink_expired_regions = False
+
+        self._cache_scope = CacheScope(
+            memo_cache=_global_cache_scope.memo_cache,
+            persist_path=persist_path,
+            last_shrink_at=datetime.datetime.fromtimestamp(0),
+            shrink_interval=shrink_interval,
+            should_shrink_expired_regions=should_shrink_expired_regions,
+        )
+
+    def __iter__(self):
+        if self.__should_shrink:
+            self.__shrink_cache()
+
+        get_regions_fns = [
+            self.__get_regions_from_memo,
+            self.__get_regions_from_file,
+            self.__get_regions_from_base_provider
+        ]
+
+        regions = None
+        for get_regions in get_regions_fns:
+            regions = get_regions(fallback=regions)
+            if regions and all(r.is_live for r in regions):
+                break
+
+        # change to `yield from` when the min supported python version is >= 3.3
+        for r in regions:
+            yield r
+
+    def set_regions(self, regions):
+        """
+        Parameters
+        ----------
+        regions: list[Region]
+        """
+        self._cache_scope.memo_cache[self.cache_key] = regions
+
+        if not self._cache_scope.persist_path:
+            return
+
+        try:
+            with open(self._cache_scope.persist_path, 'a') as f:
+                f.write(json.dumps({
+                    'cacheKey': self.cache_key,
+                    'regions': [_persist_region(r) for r in regions]
+                }) + os.linesep)
+        except Exception as err:
+            logging.warning('failed to cache regions result to file: %s', err)
+
+    @property
+    def persist_path(self):
+        """
+        Returns
+        -------
+        str
+        """
+        return self._cache_scope.persist_path
+
+    @persist_path.setter
+    def persist_path(self, value):
+        """
+        Parameters
+        ----------
+        value: str
+        """
+        self._cache_scope = self._cache_scope._replace(
+            persist_path=value
+        )
+
+    @property
+    def last_shrink_at(self):
+        """
+        Returns
+        -------
+        datetime.datetime
+        """
+        # copy the datetime make sure it is read-only
+        return self._cache_scope.last_shrink_at.replace()
+
+    @property
+    def shrink_interval(self):
+        """
+        Returns
+        -------
+        datetime.timedelta
+        """
+        return self._cache_scope.shrink_interval
+
+    @shrink_interval.setter
+    def shrink_interval(self, value):
+        """
+        Parameters
+        ----------
+        value: datetime.timedelta
+        """
+        self._cache_scope = self._cache_scope._replace(
+            shrink_interval=value
+        )
+
+    @property
+    def should_shrink_expired_regions(self):
+        """
+        Returns
+        -------
+        bool
+        """
+        return self._cache_scope.should_shrink_expired_regions
+
+    @should_shrink_expired_regions.setter
+    def should_shrink_expired_regions(self, value):
+        """
+        Parameters
+        ----------
+        value: bool
+        """
+        self._cache_scope = self._cache_scope._replace(
+            should_shrink_expired_regions=value
+        )
+
+    def __get_regions_from_memo(self, fallback=None):
+        """
+        Parameters
+        ----------
+        fallback: list[Region]
+
+        Returns
+        -------
+        list[Region]
+        """
+        regions = self._cache_scope.memo_cache.get(self.cache_key)
+
+        if regions:
+            return regions
+
+        return fallback
+
+    def __get_regions_from_file(self, fallback=None):
+        """
+        Parameters
+        ----------
+        fallback: list[Region]
+
+        Returns
+        -------
+        list[Region]
+        """
+        if not self._cache_scope.persist_path:
+            return fallback
+
+        try:
+            self.__flush_file_cache_to_memo()
+        except Exception as err:
+            if fallback is not None:
+                return fallback
+            else:
+                raise err
+
+        return self.__get_regions_from_memo(fallback)
+
+    def __get_regions_from_base_provider(self, fallback=None):
+        """
+        Parameters
+        ----------
+        fallback: list[Region]
+
+        Returns
+        -------
+        list[Region]
+        """
+        try:
+            regions = list(self.base_regions_provider)
+        except Exception as err:
+            if fallback is not None:
+                return fallback
+            else:
+                raise err
+        self.set_regions(regions)
+        return regions
+
+    def __flush_file_cache_to_memo(self):
+        for cache_key, regions in _walk_persist_cache_file(
+            persist_path=self._cache_scope.persist_path
+            # ignore_parse_error=True
+        ):
+            if cache_key not in self._cache_scope.memo_cache:
+                self._cache_scope.memo_cache[cache_key] = regions
+                continue
+            memo_regions = self._cache_scope.memo_cache[cache_key]
+            self._cache_scope.memo_cache[cache_key] = _merge_regions(
+                memo_regions,
+                regions
+            )
+
+    @property
+    def __should_shrink(self):
+        """
+        Returns
+        -------
+        bool
+        """
+        return datetime.datetime.now() - self._cache_scope.last_shrink_at > self._cache_scope.shrink_interval
+
+    def __shrink_cache(self):
+        # shrink memory cache
+        if self._cache_scope.should_shrink_expired_regions:
+            kept_memo_cache = {}
+            for k, regions in self._cache_scope.memo_cache.items():
+                live_regions = [r for r in regions if r.is_live]
+                if live_regions:
+                    kept_memo_cache[k] = live_regions
+            self._cache_scope = self._cache_scope._replace(memo_cache=kept_memo_cache)
+
+        # shrink file cache
+        if not self._cache_scope.persist_path:
+            self._cache_scope = self._cache_scope._replace(
+                last_shrink_at=datetime.datetime.now()
+            )
+            return
+
+        shrink_file_path = self._cache_scope.persist_path + '.shrink'
+        try:
+            with _FileLocker(shrink_file_path):
+                # filter data
+                shrunk_cache = {}
+                for cache_key, regions in _walk_persist_cache_file(
+                    persist_path=self._cache_scope.persist_path
+                ):
+                    kept_regions = regions
+                    if self._cache_scope.should_shrink_expired_regions:
+                        kept_regions = [
+                            r for r in kept_regions if r.is_live
+                        ]
+
+                    if cache_key not in shrunk_cache:
+                        shrunk_cache[cache_key] = kept_regions
+                    else:
+                        shrunk_cache[cache_key] = _merge_regions(
+                            shrunk_cache[cache_key],
+                            kept_regions
+                        )
+
+                # write data
+                with open(shrink_file_path, 'a') as f:
+                    for cache_key, regions in shrunk_cache.items():
+                        f.write(
+                            json.dumps(
+                                {
+                                    'cacheKey': cache_key,
+                                    'regions': [_persist_region(r) for r in regions]
+                                }
+                            ) + os.linesep
+                        )
+
+                # rename file
+                os.rename(shrink_file_path, self._cache_scope.persist_path)
+        except FileAlreadyLocked:
+            pass
+        finally:
+            self._cache_scope = self._cache_scope._replace(
+                last_shrink_at=datetime.datetime.now()
+            )
+
+
+def get_default_regions_provider(
+    query_endpoints_provider,
+    access_key,
+    bucket_name,
+    accelerate_uploading=False,
+    force_query=False,
+    **kwargs
+):
+    """
+    Parameters
+    ----------
+    query_endpoints_provider: Iterable[Endpoint]
+    access_key: str
+    bucket_name: str
+    accelerate_uploading: bool
+    force_query: bool
+    kwargs
+        preferred_scheme: str
+            option of QueryRegionsProvider
+        max_retry_times_per_endpoint: int
+            option of QueryRegionsProvider
+        persist_path: str
+            option of CachedRegionsProvider
+        shrink_interval: datetime.timedelta
+            option of CachedRegionsProvider
+        should_shrink_expired_regions: bool
+            option of CachedRegionsProvider
+
+    Returns
+    -------
+    Iterable[Region]
+    """
+    query_regions_provider_opts = {
+        'access_key': access_key,
+        'bucket_name': bucket_name,
+        'endpoints_provider': query_endpoints_provider,
+    }
+    query_regions_provider_opts.update({
+        k: v
+        for k, v in kwargs.items()
+        if k in ['preferred_scheme', 'max_retry_times_per_endpoint']
+    })
+
+    query_regions_provider = QueryRegionsProvider(**query_regions_provider_opts)
+
+    if force_query:
+        return query_regions_provider
+
+    query_endpoints = list(query_endpoints_provider)
+
+    endpoints_md5 = io_md5([
+        to_bytes(e.host) for e in query_endpoints
+    ])
+    cache_key = ':'.join([
+        endpoints_md5,
+        access_key,
+        bucket_name,
+        'true' if accelerate_uploading else 'false'
+    ])
+
+    cached_regions_provider_opts = {
+        'cache_key': cache_key,
+        'base_regions_provider': query_regions_provider,
+    }
+    cached_regions_provider_opts.update({
+        k: v
+        for k, v in kwargs.items()
+        if k in [
+            'persist_path',
+            'shrink_interval',
+            'should_shrink_expired_regions'
+        ]
+    })
+
+    return CachedRegionsProvider(
+        **cached_regions_provider_opts
+    )
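
Typical wiring of the provider chain added here, with hypothetical credentials: `get_default_regions_provider` wraps a `QueryRegionsProvider` in a `CachedRegionsProvider`, so repeated iteration hits the memory cache, then the JSONL file cache, and only then the query API:

```python
from qiniu.http.endpoint import Endpoint
from qiniu.http.regions_provider import get_default_regions_provider

regions_provider = get_default_regions_provider(
    query_endpoints_provider=[Endpoint('uc.qiniuapi.com')],
    access_key='your-access-key',   # hypothetical credentials
    bucket_name='your-bucket'
)

# the first iteration queries /v4/query and persists the result;
# later iterations are served from cache while the regions are live
regions = list(regions_provider)
```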
diff --git a/qiniu/http/regions_retry_policy.py b/qiniu/http/regions_retry_policy.py
new file mode 100644
index 00000000..29b2f9a9
--- /dev/null
+++ b/qiniu/http/regions_retry_policy.py
@@ -0,0 +1,162 @@
+from qiniu.retry.abc import RetryPolicy
+
+from .region import Region, ServiceName
+
+
+class RegionsRetryPolicy(RetryPolicy):
+    def __init__(
+        self,
+        regions_provider,
+        service_names,
+        preferred_endpoints_provider=None,
+        on_change_region=None
+    ):
+        """
+        Parameters
+        ----------
+        regions_provider: Iterable[Region]
+        service_names: list[ServiceName or str]
+        preferred_endpoints_provider: Iterable[Endpoint]
+        on_change_region: Callable
+            `(context: dict) -> None`
+        """
+        self.regions_provider = regions_provider
+        self.service_names = service_names
+        if not service_names:
+            raise ValueError('Must provide at least one service name')
+        if preferred_endpoints_provider is None:
+            preferred_endpoints_provider = []
+        self.preferred_endpoints_provider = preferred_endpoints_provider
+        self.on_change_region = on_change_region
+
+    def init_context(self, context):
+        """
+        Parameters
+        ----------
+        context: dict
+        """
+        self._init_regions(context)
+        self._prepare_endpoints(context)
+
+    def should_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: Attempt
+        """
+        return (
+            len(attempt.context.get('alternative_regions', [])) > 0 or
+            len(attempt.context.get('alternative_service_names', [])) > 0
+        )
+
+    def prepare_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: Attempt
+        """
+        if attempt.context.get('alternative_service_names'):
+            # change service for next try
+            attempt.context['service_name'] = attempt.context.get('alternative_service_names').pop(0)
+        elif attempt.context.get('alternative_regions'):
+            # change region for next try
+            attempt.context['region'] = attempt.context.get('alternative_regions').pop(0)
+            if callable(self.on_change_region):
+                self.on_change_region(attempt.context)
+        else:
+            raise RuntimeError('There isn\'t an available region or service for the next try')
+        self._prepare_endpoints(attempt.context)
+
+    def _init_regions(self, context):
+        """
+        Parameters
+        ----------
+        context: dict
+        """
+        regions = list(self.regions_provider)
+        preferred_endpoints = list(self.preferred_endpoints_provider)
+        if not regions and not preferred_endpoints:
+            raise ValueError('There isn\'t an available region or preferred endpoint')
+
+        if not preferred_endpoints:
+            # regions is guaranteed non-empty by the check above
+            context['alternative_regions'] = regions
+            context['region'] = context['alternative_regions'].pop(0)
+            # shallow copy list
+            # change to `list.copy` for readability when the min supported python version is >= 3
+            context['alternative_service_names'] = self.service_names[:]
+            context['service_name'] = context['alternative_service_names'].pop(0)
+            return
+
+        # find preferred service name and region by preferred endpoints
+        preferred_region_index = -1
+        preferred_service_index = -1
+        for ri, region in enumerate(regions):
+            for si, service_name in enumerate(self.service_names):
+                if any(
+                    pe.host in [
+                        e.host for e in region.services.get(service_name, [])
+                    ]
+                    for pe in preferred_endpoints
+                ):
+                    preferred_region_index = ri
+                    preferred_service_index = si
+                    break
+
+        # initialize the order of service_names and regions
+        if preferred_region_index < 0:
+            # shallow copy list
+            # change to `list.copy` for readability when the min supported python version is >= 3
+            context['alternative_service_names'] = self.service_names[:]
+            context['service_name'] = context['alternative_service_names'].pop(0)
+
+            context['region'] = Region(
+                region_id='preferred_region',
+                services={
+                    context['service_name']: preferred_endpoints
+                }
+            )
+            context['alternative_regions'] = regions
+        else:
+            # regions is guaranteed non-empty by the check above;
+            # the preferred endpoints are in a known region, so reorder the regions and services
+            context['alternative_regions'] = regions
+            context['region'] = context['alternative_regions'].pop(preferred_region_index)
+            # shallow copy list
+            # change to `list.copy` for readability when the min supported python version is >= 3
+            context['alternative_service_names'] = self.service_names[:]
+            context['service_name'] = context['alternative_service_names'].pop(preferred_service_index)
+
+    def _prepare_endpoints(self, context):
+        """
+        Parameters
+        ----------
+        context: dict
+        """
+        # shallow copy list
+        # change to `list.copy` for readability when the min supported python version is >= 3
+        endpoints = context['region'].services.get(context['service_name'], [])[:]
+        while not endpoints:
+            if context['alternative_service_names']:
+                context['service_name'] = context['alternative_service_names'].pop(0)
+                endpoints = context['region'].services.get(context['service_name'], [])[:]
+            elif context['alternative_regions']:
+                context['region'] = context['alternative_regions'].pop(0)
+                # shallow copy list
+                # change to `list.copy` for readability when the min supported python version is >= 3
+                context['alternative_service_names'] = self.service_names[:]
+                context['service_name'] = context['alternative_service_names'].pop(0)
+                endpoints = context['region'].services.get(context['service_name'], [])[:]
+                if callable(self.on_change_region):
+                    self.on_change_region(context)
+            else:
+                raise RuntimeError(
+                    'There isn\'t an available endpoint for {0} service(s) in any available region'.format(
+                        ', '.join(
+                            sn.value if isinstance(sn, ServiceName) else sn
+                            for sn in self.service_names
+                        )
+                    )
+                )
+        context['alternative_endpoints'] = endpoints
+        context['endpoint'] = context['alternative_endpoints'].pop(0)
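
`_prepare_endpoints` is also what makes the bucket-level acceleration switch degrade gracefully: when `ServiceName.UP_ACC` is listed first but a region has no acceleration endpoints, the policy falls through to the next service name. A sketch using only what this diff defines:

```python
from qiniu.http.region import Region, ServiceName
from qiniu.http.regions_retry_policy import RegionsRetryPolicy

policy = RegionsRetryPolicy(
    regions_provider=[Region.from_region_id('z0')],
    service_names=[ServiceName.UP_ACC, ServiceName.UP]
)

context = {}
policy.init_context(context)
# z0 built by from_region_id has no UP_ACC endpoints, so UP is used instead
assert context['service_name'] == ServiceName.UP
assert context['endpoint'].host == 'upload.qiniup.com'
```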
diff --git a/qiniu/http/response.py b/qiniu/http/response.py
index 8e9fd84d..6450438d 100644
--- a/qiniu/http/response.py
+++ b/qiniu/http/response.py
@@ -20,12 +20,14 @@ def __init__(self, response, exception=None):
         self.__response = response
         self.exception = exception
         if response is None:
+            self.url = None
             self.status_code = -1
             self.text_body = None
             self.req_id = None
             self.x_log = None
             self.error = str(exception)
         else:
+            self.url = response.url
             self.status_code = response.status_code
             self.text_body = response.text
             self.req_id = response.headers.get('X-Reqid')
diff --git a/qiniu/region.py b/qiniu/region.py
index dcef4398..28770e01 100644
--- a/qiniu/region.py
+++ b/qiniu/region.py
@@ -1,31 +1,52 @@
 # -*- coding: utf-8 -*-
+import functools
 import logging
 import os
 import time
 
-from qiniu import compat
-from qiniu import utils
 
-UC_HOST = 'https://uc.qbox.me'  # 获取空间信息Host
+from .compat import json, s as str_from_bytes
+from .utils import urlsafe_base64_decode
+from .config import UC_HOST, is_customized_default, get_default
+from .http.endpoint import Endpoint as _HTTPEndpoint
+from .http.regions_provider import Region as _HTTPRegion, ServiceName, get_default_regions_provider
 
 
-class Region(object):
+def _legacy_default_get(key):
+    def decorator(func):
+        @functools.wraps(func)
+        def wrapper(self, *args, **kwargs):
+            if hasattr(self, key) and getattr(self, key):
+                return self.rs_host
+            if is_customized_default('default_' + key):
+                return get_default('default_' + key)
+            return func(self, *args, **kwargs)
+
+        return wrapper
+
+    return decorator
+
+
+class LegacyRegion(_HTTPRegion, object):
     """七牛上传区域类
     该类主要内容上传区域地址。
     """
 
     def __init__(
-            self,
-            up_host=None,
-            up_host_backup=None,
-            io_host=None,
-            host_cache=None,
-            home_dir=None,
-            scheme="http",
-            rs_host=None,
-            rsf_host=None,
-            api_host=None):
+        self,
+        up_host=None,
+        up_host_backup=None,
+        io_host=None,
+        host_cache=None,
+        home_dir=None,
+        scheme="http",
+        rs_host=None,
+        rsf_host=None,
+        api_host=None,
+        accelerate_uploading=False
+    ):
         """初始化Zone类"""
+        super(LegacyRegion, self).__init__()
         if host_cache is None:
             host_cache = {}
         self.up_host = up_host
@@ -37,6 +58,20 @@ def __init__(
         self.home_dir = home_dir
         self.host_cache = host_cache
         self.scheme = scheme
+        self.services.update({
+            k: [
+                _HTTPEndpoint.from_host(h)
+                for h in v if h
+            ]
+            for k, v in {
+                ServiceName.UP: [up_host, up_host_backup],
+                ServiceName.IO: [io_host],
+                ServiceName.RS: [rs_host],
+                ServiceName.RSF: [rsf_host],
+                ServiceName.API: [api_host]
+            }.items()
+        })
+        self.accelerate_uploading = accelerate_uploading
 
     def get_up_host_by_token(self, up_token, home_dir):
         ak, bucket = self.unmarshal_up_token(up_token)
@@ -67,12 +102,8 @@ def get_io_host(self, ak, bucket, home_dir=None):
         io_hosts = bucket_hosts['ioHosts']
         return io_hosts[0]
 
+    @_legacy_default_get('rs_host')
     def get_rs_host(self, ak, bucket, home_dir=None):
-        from .config import get_default, is_customized_default
-        if self.rs_host:
-            return self.rs_host
-        if is_customized_default('default_rs_host'):
-            return get_default('default_rs_host')
         if home_dir is None:
             home_dir = os.getcwd()
         bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir)
@@ -81,12 +112,8 @@ def get_rs_host(self, ak, bucket, home_dir=None):
         rs_hosts = bucket_hosts['rsHosts']
         return rs_hosts[0]
 
+    @_legacy_default_get('rsf_host')
     def get_rsf_host(self, ak, bucket, home_dir=None):
-        from .config import get_default, is_customized_default
-        if self.rsf_host:
-            return self.rsf_host
-        if is_customized_default('default_rsf_host'):
-            return get_default('default_rsf_host')
         if home_dir is None:
             home_dir = os.getcwd()
         bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir)
@@ -95,12 +122,8 @@ def get_rsf_host(self, ak, bucket, home_dir=None):
         rsf_hosts = bucket_hosts['rsfHosts']
         return rsf_hosts[0]
 
+    @_legacy_default_get('api_host')
     def get_api_host(self, ak, bucket, home_dir=None):
-        from .config import get_default, is_customized_default
-        if self.api_host:
-            return self.api_host
-        if is_customized_default('default_api_host'):
-            return get_default('default_api_host')
         if home_dir is None:
             home_dir = os.getcwd()
         bucket_hosts = self.get_bucket_hosts(ak, bucket, home_dir)
@@ -120,64 +143,56 @@ def get_up_host(self, ak, bucket, home_dir):
 
     def unmarshal_up_token(self, up_token):
         token = up_token.split(':')
-        if (len(token) != 3):
+        if len(token) != 3:
             raise ValueError('invalid up_token')
 
         ak = token[0]
-        policy = compat.json.loads(
-            compat.s(
-                utils.urlsafe_base64_decode(
+        policy = json.loads(
+            str_from_bytes(
+                urlsafe_base64_decode(
                     token[2])))
 
         scope = policy["scope"]
         bucket = scope
-        if (':' in scope):
+        if ':' in scope:
             bucket = scope.split(':')[0]
 
         return ak, bucket
 
-    def get_bucket_hosts(self, ak, bucket, home_dir, force=False):
-        key = self.scheme + ":" + ak + ":" + bucket
-
-        bucket_hosts = self.get_bucket_hosts_to_cache(key, home_dir)
-        if not force and len(bucket_hosts) > 0:
-            return bucket_hosts
-
-        hosts = compat.json.loads(self.bucket_hosts(ak, bucket)).get('hosts', [])
-
-        if type(hosts) is not list or len(hosts) == 0:
-            raise KeyError("Please check your BUCKET_NAME! Server hosts not correct! The hosts is %s" % hosts)
+    def get_bucket_hosts(self, ak, bucket, home_dir=None, force=False):
+        cache_persist_path = os.path.join(home_dir, 'qn-regions-cache.jsonl') if home_dir else None
+        regions = self.__get_bucket_regions(
+            ak,
+            bucket,
+            force_query=force,
+            cache_persist_path=cache_persist_path
+        )
 
-        region = hosts[0]
+        if not regions:
+            raise KeyError("Please check your BUCKET_NAME! Server hosts not correct! The hosts is empty")
 
-        default_ttl = 24 * 3600  # 1 day
-        region['ttl'] = region.get('ttl', default_ttl)
+        region = regions[0]
 
         bucket_hosts = {
-            'upHosts': [
-                '{0}://{1}'.format(self.scheme, domain)
-                for domain in region.get('up', {}).get('domains', [])
-            ],
-            'ioHosts': [
-                '{0}://{1}'.format(self.scheme, domain)
-                for domain in region.get('io', {}).get('domains', [])
-            ],
-            'rsHosts': [
-                '{0}://{1}'.format(self.scheme, domain)
-                for domain in region.get('rs', {}).get('domains', [])
-            ],
-            'rsfHosts': [
-                '{0}://{1}'.format(self.scheme, domain)
-                for domain in region.get('rsf', {}).get('domains', [])
-            ],
-            'apiHosts': [
-                '{0}://{1}'.format(self.scheme, domain)
-                for domain in region.get('api', {}).get('domains', [])
-            ],
-            'deadline': int(time.time()) + region['ttl']
+            k: [
+                e.get_value(scheme=self.scheme)
+                for e in region.services[sn]
+                if e
+            ]
+            for k, sn in {
+                'upHosts': ServiceName.UP,
+                'ioHosts': ServiceName.IO,
+                'rsHosts': ServiceName.RS,
+                'rsfHosts': ServiceName.RSF,
+                'apiHosts': ServiceName.API
+            }.items()
         }
-        home_dir = ""
-        self.set_bucket_hosts_to_cache(key, bucket_hosts, home_dir)
+
+        ttl = region.ttl if region.ttl > 0 else 24 * 3600  # 1 day
+        # use datetime.datetime.timestamp() when the min version of Python is >= 3
+        # keep the deadline in seconds to match the legacy cache format
+        create_time = int(float(region.create_time.strftime('%s.%f')))
+        bucket_hosts['deadline'] = create_time + ttl
+
         return bucket_hosts
 
     def get_bucket_hosts_to_cache(self, key, home_dir):
@@ -206,7 +221,7 @@ def host_cache_from_file(self, home_dir):
             return None
         with open(path, 'r') as f:
             try:
-                bucket_hosts = compat.json.load(f)
+                bucket_hosts = json.load(f)
                 self.host_cache = bucket_hosts
             except Exception as e:
                 logging.error(e)
@@ -219,28 +234,64 @@ def host_cache_file_path(self):
     def host_cache_to_file(self, home_dir):
         path = self.host_cache_file_path()
         with open(path, 'w') as f:
-            compat.json.dump(self.host_cache, f)
+            json.dump(self.host_cache, f)
         f.close()
 
     def bucket_hosts(self, ak, bucket):
-        from .config import get_default, is_customized_default
-        from .http import qn_http_client
-        from .http.middleware import RetryDomainsMiddleware
-        uc_host = UC_HOST
-        if is_customized_default('default_uc_host'):
-            uc_host = get_default('default_uc_host')
-        uc_backup_hosts = get_default('default_query_region_backup_hosts')
-        uc_backup_retry_times = get_default('default_backup_hosts_retry_times')
-        url = "{0}/v4/query?ak={1}&bucket={2}".format(uc_host, ak, bucket)
-
-        ret, _resp = qn_http_client.get(
-            url,
-            middlewares=[
-                RetryDomainsMiddleware(
-                    backup_domains=uc_backup_hosts,
-                    max_retry_times=uc_backup_retry_times,
-                )
+        regions = self.__get_bucket_regions(ak, bucket)
+
+        data_dict = {
+            'hosts': [
+                {
+                    k.value if isinstance(k, ServiceName) else k: {
+                        'domains': [
+                            e.host for e in v
+                        ]
+                    }
+                    for k, v in r.services.items()
+                }
+                for r in regions
             ]
-        )
-        data = compat.json.dumps(ret, separators=(',', ':'))
+        }
+        for r in data_dict['hosts']:
+            if 'up_acc' in r:
+                r.setdefault('up', {})
+                r['up'].update(acc_domains=r['up_acc'].get('domains', []))
+                del r['up_acc']
+
+        data = json.dumps(data_dict)
+
         return data
+
+    def __get_bucket_regions(
+        self,
+        access_key,
+        bucket_name,
+        force_query=False,
+        cache_persist_path=None
+    ):
+        query_region_host = UC_HOST
+        if is_customized_default('default_query_region_host'):
+            query_region_host = get_default('default_query_region_host')
+        query_region_backup_hosts = get_default('default_query_region_backup_hosts')
+        query_region_backup_retry_times = get_default('default_backup_hosts_retry_times')
+
+        regions_provider = get_default_regions_provider(
+            query_endpoints_provider=[
+                _HTTPEndpoint.from_host(h)
+                for h in [query_region_host] + query_region_backup_hosts
+                if h
+            ],
+            access_key=access_key,
+            bucket_name=bucket_name,
+            accelerate_uploading=self.accelerate_uploading,
+            force_query=force_query,
+            preferred_scheme=self.scheme,
+            persist_path=cache_persist_path,
+            max_retry_times_per_endpoint=query_region_backup_retry_times
+        )
+
+        return list(regions_provider)
+
+
+Region = LegacyRegion
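+
+
+# Example (illustrative sketch, not part of the library): LegacyRegion keeps
+# the classic Zone interface while adding the bucket-level acceleration switch
+# introduced in this release; `token`, `key` and `localfile` are placeholders.
+#
+# from qiniu import put_file, set_default
+#
+# set_default(default_zone=LegacyRegion(accelerate_uploading=True))
+# # or pass the switch per call:
+# ret, info = put_file(token, key, localfile, accelerate_uploading=True)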
diff --git a/qiniu/retry/__init__.py b/qiniu/retry/__init__.py
new file mode 100644
index 00000000..e726010f
--- /dev/null
+++ b/qiniu/retry/__init__.py
@@ -0,0 +1,7 @@
+from .attempt import Attempt
+from .retrier import Retrier
+
+__all__ = [
+    'Attempt',
+    'Retrier'
+]
diff --git a/qiniu/retry/abc/__init__.py b/qiniu/retry/abc/__init__.py
new file mode 100644
index 00000000..4f458a73
--- /dev/null
+++ b/qiniu/retry/abc/__init__.py
@@ -0,0 +1,5 @@
+from .policy import RetryPolicy
+
+__all__ = [
+    'RetryPolicy'
+]
diff --git a/qiniu/retry/abc/policy.py b/qiniu/retry/abc/policy.py
new file mode 100644
index 00000000..b5b792bf
--- /dev/null
+++ b/qiniu/retry/abc/policy.py
@@ -0,0 +1,61 @@
+import abc
+
+
+class RetryPolicy(object):
+    __metaclass__ = abc.ABCMeta
+
+    @abc.abstractmethod
+    def init_context(self, context):
+        """
+        initialize the context values this policy requires
+
+        Parameters
+        ----------
+        context: dict
+        """
+
+    @abc.abstractmethod
+    def should_retry(self, attempt):
+        """
+        if this returns True, this policy will be applied
+
+        Parameters
+        ----------
+        attempt: qiniu.retry.attempt.Attempt
+
+        Returns
+        -------
+        bool
+        """
+
+    @abc.abstractmethod
+    def prepare_retry(self, attempt):
+        """
+        apply this policy, changing the context values for the next attempt
+
+        Parameters
+        ----------
+        attempt: qiniu.retry.attempt.Attempt
+        """
+
+    def is_important(self, attempt):
+        """
+        if this returns True, this policy is checked before all others,
+        whether it should retry or not. This is useful when you want to stop retrying.
+
+        Parameters
+        ----------
+        attempt: qiniu.retry.attempt.Attempt
+
+        Returns
+        -------
+        bool
+        """
+
+    def after_retry(self, attempt, policy):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.attempt.Attempt
+        policy: RetryPolicy
+        """
diff --git a/qiniu/retry/attempt.py b/qiniu/retry/attempt.py
new file mode 100644
index 00000000..460145c6
--- /dev/null
+++ b/qiniu/retry/attempt.py
@@ -0,0 +1,18 @@
+class Attempt:
+    def __init__(self, custom_context=None):
+        """
+        Parameters
+        ----------
+        custom_context: dict or None
+        """
+        self.context = custom_context if custom_context is not None else {}
+        self.exception = None
+        self.result = None
+
+    def __enter__(self):
+        pass
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        if exc_type is not None and exc_val is not None:
+            self.exception = exc_val
+            return True  # Swallow exception.
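+
+
+"""
+Examples
+--------
+# Illustrative: the context manager swallows the exception raised in its
+# body and records it for the retry loop to inspect.
+attempt = Attempt()
+with attempt:
+    raise ValueError('boom')
+assert isinstance(attempt.exception, ValueError)
+"""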
diff --git a/qiniu/retry/retrier.py b/qiniu/retry/retrier.py
new file mode 100644
index 00000000..23ff23b6
--- /dev/null
+++ b/qiniu/retry/retrier.py
@@ -0,0 +1,183 @@
+import functools
+
+from .attempt import Attempt
+
+
+def before_retry_nothing(attempt, policy):
+    return True
+
+
+class Retrier:
+    def __init__(self, policies=None, before_retry=None):
+        """
+        Parameters
+        ----------
+        policies: list[qiniu.retry.abc.RetryPolicy]
+        before_retry: callable
+            `(attempt: Attempt, policy: qiniu.retry.abc.RetryPolicy) -> bool`
+        """
+        self.policies = policies if policies is not None else []
+        self.before_retry = before_retry if before_retry is not None else before_retry_nothing
+
+    def __iter__(self):
+        retrying = Retrying(
+            # change to `list.copy` for readability when the min version of Python is >= 3
+            policies=self.policies[:],
+            before_retry=self.before_retry
+        )
+        retrying.init_context()
+        while True:
+            attempt = Attempt(retrying.context)
+            yield attempt
+            if (
+                hasattr(attempt.exception, 'no_need_retry') and
+                attempt.exception.no_need_retry
+            ):
+                break
+            policy = retrying.get_retry_policy(attempt)
+            if not policy:
+                break
+            if not self.before_retry(attempt, policy):
+                break
+            policy.prepare_retry(attempt)
+            retrying.after_retried(attempt, policy)
+        if attempt.exception:
+            raise attempt.exception
+
+    def try_do(
+        self,
+        func,
+        *args,
+        **kwargs
+    ):
+        attempt = None
+        for attempt in self:
+            with attempt:
+                if kwargs.get('with_retry_context', False):
+                    # inject retry_context
+                    kwargs['retry_context'] = attempt.context
+                if 'with_retry_context' in kwargs:
+                    del kwargs['with_retry_context']
+
+                # store result
+                attempt.result = func(*args, **kwargs)
+
+        if attempt is None:
+            raise RuntimeError('attempt is None')
+
+        return attempt.result
+
+    def _wrap(self, with_retry_context=False):
+        def decorator(func):
+            @functools.wraps(func)
+            def wrapper(*args, **kwargs):
+                return self.try_do(
+                    func,
+                    with_retry_context=with_retry_context,
+                    *args,
+                    **kwargs
+                )
+
+            return wrapper
+
+        return decorator
+
+    def retry(self, *args, **kwargs):
+        """
+        decorator to retry
+        """
+        if len(args) == 1 and callable(args[0]):
+            return self.retry()(args[0])
+        else:
+            return self._wrap(**kwargs)
+
+
+class Retrying:
+    def __init__(self, policies, before_retry):
+        """
+        Parameters
+        ----------
+        policies: list[qiniu.retry.abc.RetryPolicy]
+        before_retry: callable
+            `(attempt: Attempt, policy: qiniu.retry.abc.RetryPolicy) -> bool`
+        """
+        self.policies = policies
+        self.before_retry = before_retry
+        self.context = {}
+
+    def init_context(self):
+        for policy in self.policies:
+            policy.init_context(self.context)
+
+    def get_retry_policy(self, attempt):
+        """
+
+        Parameters
+        ----------
+        attempt: Attempt
+
+        Returns
+        -------
+        qiniu.retry.abc.RetryPolicy
+
+        """
+        policy = None
+
+        # find important policy
+        for p in self.policies:
+            if p.is_important(attempt):
+                policy = p
+                break
+        if policy and policy.should_retry(attempt):
+            return policy
+        else:
+            policy = None
+
+        # find retry policy
+        for p in self.policies:
+            if p.should_retry(attempt):
+                policy = p
+                break
+
+        return policy
+
+    def after_retried(self, attempt, policy):
+        for p in self.policies:
+            p.after_retry(attempt, policy)
+
+
+"""
+Examples
+--------
+retrier = Retrier()
+result = None
+for attempt in retrier:
+    with attempt:
+        endpoint = attempt.context.get('endpoint')
+        result = upload(endpoint)
+        attempt.result = result
+return result
+"""
+
+"""
+Examples
+--------
+def foo():
+    print('hi')
+
+retrier = Retrier()
+retrier.try_do(foo)
+"""
+
+"""
+Examples
+--------
+retrier = Retrier()
+
+
+@retrier.retry
+def foo():
+    print('hi')
+
+foo()
+"""
diff --git a/qiniu/services/processing/pfop.py b/qiniu/services/processing/pfop.py
index fa414930..346e6277 100644
--- a/qiniu/services/processing/pfop.py
+++ b/qiniu/services/processing/pfop.py
@@ -24,17 +24,25 @@ def __init__(self, auth, bucket, pipeline=None, notify_url=None):
         self.pipeline = pipeline
         self.notify_url = notify_url
 
-    def execute(self, key, fops, force=None):
-        """执行持久化处理:
-
-        Args:
-            key:    待处理的源文件
-            fops:   处理详细操作,规格详见 https://developer.qiniu.com/dora/manual/1291/persistent-data-processing-pfop
-            force:  强制执行持久化处理开关
+    def execute(self, key, fops, force=None, persistent_type=None):
+        """
+        执行持久化处理
 
-        Returns:
-            一个dict变量,返回持久化处理的persistentId,类似{"persistentId": 5476bedf7823de4068253bae};
-            一个ResponseInfo对象
+        Parameters
+        ----------
+        key: str
+            待处理的源文件
+        fops: list[str]
+            处理详细操作,规格详见 https://developer.qiniu.com/dora/manual/1291/persistent-data-processing-pfop
+        force: int or str, optional
+            强制执行持久化处理开关
+        persistent_type: int or str, optional
+            持久化处理类型,为 '1' 时开启闲时任务
+
+        Returns
+        -------
+        ret: dict
+            持久化处理的 persistentId,类似 {"persistentId": 5476bedf7823de4068253bae};
+        resp: ResponseInfo
         """
         ops = ';'.join(fops)
         data = {'bucket': self.bucket, 'key': key, 'fops': ops}
@@ -42,8 +50,30 @@ def execute(self, key, fops, force=None):
             data['pipeline'] = self.pipeline
         if self.notify_url:
             data['notifyURL'] = self.notify_url
-        if force == 1:
-            data['force'] = 1
+        if force == 1 or force == '1':
+            data['force'] = str(force)
+        if persistent_type:
+            # raises ValueError early if persistent_type is not integer-like
+            int(persistent_type)
+            data['type'] = str(persistent_type)
 
         url = '{0}/pfop'.format(config.get_default('default_api_host'))
         return http._post_with_auth(url, data, self.auth)
+
+    def get_status(self, persistent_id):
+        """
+        获取持久化处理状态
+
+        Parameters
+        ----------
+        persistent_id: str
+
+        Returns
+        -------
+        ret: dict
+            持久化处理的状态,详见 https://developer.qiniu.com/dora/1294/persistent-processing-status-query-prefop
+        resp: ResponseInfo
+        """
+        url = '{0}/status/get/prefop'.format(config.get_default('default_api_host'))
+        data = {
+            'id': persistent_id
+        }
+        return http._get_with_auth(url, data, self.auth)
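+
+
+# Example (illustrative): submit an idle-time persistent processing task and
+# poll its status. The auth/bucket/pipeline values are placeholders, assuming
+# the class is exported as qiniu.PersistentFop.
+#
+# from qiniu import Auth, PersistentFop
+#
+# auth = Auth('access_key', 'secret_key')
+# pfop = PersistentFop(auth, 'bucket-name', pipeline='your-pipeline')
+# ret, resp = pfop.execute('video.mp4', ['avthumb/mp4'], persistent_type=1)
+# status, _resp = pfop.get_status(ret['persistentId'])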
diff --git a/qiniu/services/storage/_bucket_default_retrier.py b/qiniu/services/storage/_bucket_default_retrier.py
new file mode 100644
index 00000000..70758e30
--- /dev/null
+++ b/qiniu/services/storage/_bucket_default_retrier.py
@@ -0,0 +1,25 @@
+from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy
+from qiniu.http.regions_retry_policy import RegionsRetryPolicy
+from qiniu.retry import Retrier
+
+
+def get_default_retrier(
+    regions_provider,
+    service_names,
+    preferred_endpoints_provider=None,
+):
+    if not service_names:
+        raise ValueError('service_names should not be empty')
+
+    retry_policies = [
+        EndpointsRetryPolicy(
+            skip_init_context=True
+        ),
+        RegionsRetryPolicy(
+            regions_provider=regions_provider,
+            service_names=service_names,
+            preferred_endpoints_provider=preferred_endpoints_provider
+        )
+    ]
+
+    return Retrier(retry_policies)
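+
+
+# Example (illustrative sketch): drive an RS request through the retrier; the
+# policies rotate endpoints within a region before switching regions.
+# `regions` and `do_request` are placeholders.
+#
+# from qiniu.http.region import ServiceName
+#
+# retrier = get_default_retrier(regions_provider=regions, service_names=[ServiceName.RS])
+# for attempt in retrier:
+#     with attempt:
+#         host = attempt.context.get('endpoint').get_value(scheme='https')
+#         attempt.result = do_request(host)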
diff --git a/qiniu/services/storage/bucket.py b/qiniu/services/storage/bucket.py
index 8edb00a3..5e21b6c4 100644
--- a/qiniu/services/storage/bucket.py
+++ b/qiniu/services/storage/bucket.py
@@ -1,8 +1,12 @@
 # -*- coding: utf-8 -*-
-
 from qiniu import config, QiniuMacAuth
 from qiniu import http
 from qiniu.utils import urlsafe_base64_encode, entry, decode_entry
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.region import Region, ServiceName
+from qiniu.http.regions_provider import get_default_regions_provider
+
+from ._bucket_default_retrier import get_default_retrier
 
 
 class BucketManager(object):
@@ -15,17 +19,38 @@ class BucketManager(object):
         auth: 账号管理密钥对,Auth对象
     """
 
-    def __init__(self, auth, zone=None):
+    def __init__(
+        self,
+        auth,
+        zone=None,
+        regions=None,
+        query_regions_endpoints=None,
+        preferred_scheme='http'
+    ):
+        """
+        Parameters
+        ----------
+        auth: Auth
+        zone: LegacyRegion
+        regions: list[Region]
+        query_regions_endpoints: list[Endpoint]
+        preferred_scheme: str, default='http'
+        """
         self.auth = auth
         self.mac_auth = QiniuMacAuth(
             auth.get_access_key(),
             auth.get_secret_key(),
             auth.disable_qiniu_timestamp_signature)
-        if (zone is None):
+
+        if zone is None:
             self.zone = config.get_default('default_zone')
         else:
             self.zone = zone
 
+        self.regions = regions
+        self.query_regions_endpoints = query_regions_endpoints
+        self.preferred_scheme = preferred_scheme
+
     def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None):
         """前缀查询:
 
@@ -59,10 +84,13 @@ def list(self, bucket, prefix=None, marker=None, limit=None, delimiter=None):
         if delimiter is not None:
             options['delimiter'] = delimiter
 
-        ak = self.auth.get_access_key()
-        rsf_host = self.zone.get_rsf_host(ak, bucket)
-        url = '{0}/list'.format(rsf_host)
-        ret, info = self.__get(url, options)
+        ret, info = self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RSF],
+            '/list',
+            data=options,
+            method='GET'
+        )
 
         eof = False
         if ret and not ret.get('marker'):
@@ -81,7 +109,7 @@ def list_domains(self, bucket):
             resBody, respInfo
             resBody 为绑定的域名列表,格式:["example.com"]
         """
-        return self.__uc_do('v2/domains?tbl={0}'.format(bucket))
+        return self.__uc_do_with_retrier('/v2/domains?tbl={0}'.format(bucket))
 
     def stat(self, bucket, key):
         """获取文件信息:
@@ -105,7 +133,11 @@ def stat(self, bucket, key):
             一个ResponseInfo对象
         """
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'stat', resource)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/stat/{0}'.format(resource)
+        )
 
     def delete(self, bucket, key):
         """删除文件:
@@ -122,7 +154,11 @@ def delete(self, bucket, key):
             一个ResponseInfo对象
         """
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'delete', resource)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/delete/{0}'.format(resource)
+        )
 
     def rename(self, bucket, key, key_to, force='false'):
         """重命名文件:
@@ -133,6 +169,7 @@ def rename(self, bucket, key, key_to, force='false'):
             bucket: 待操作资源所在空间
             key:    待操作资源文件名
             key_to: 目标资源文件名
+            force:  是否强制覆盖
 
         Returns:
             一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"}
@@ -151,14 +188,23 @@ def move(self, bucket, key, bucket_to, key_to, force='false'):
             bucket_to:  目标资源空间名
             key:        待操作资源文件名
             key_to:     目标资源文件名
+            force:      是否强制覆盖
 
         Returns:
             一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"}
             一个ResponseInfo对象
         """
-        resource = entry(bucket, key)
-        to = entry(bucket_to, key_to)
-        return self.__rs_do(bucket, 'move', resource, to, 'force/{0}'.format(force))
+        src = entry(bucket, key)
+        dst = entry(bucket_to, key_to)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/move/{src}/{dst}/force/{force}'.format(
+                src=src,
+                dst=dst,
+                force=force
+            )
+        )
 
     def copy(self, bucket, key, bucket_to, key_to, force='false'):
         """复制文件:
@@ -171,14 +217,23 @@ def copy(self, bucket, key, bucket_to, key_to, force='false'):
             bucket_to:  目标资源空间名
             key:        待操作资源文件名
             key_to:     目标资源文件名
+            force:      是否强制覆盖
 
         Returns:
             一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"}
             一个ResponseInfo对象
         """
-        resource = entry(bucket, key)
-        to = entry(bucket_to, key_to)
-        return self.__rs_do(bucket, 'copy', resource, to, 'force/{0}'.format(force))
+        src = entry(bucket, key)
+        dst = entry(bucket_to, key_to)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/copy/{src}/{dst}/force/{force}'.format(
+                src=src,
+                dst=dst,
+                force=force
+            )
+        )
 
     def fetch(self, url, bucket, key=None, hostscache_dir=None):
         """抓取文件:
@@ -189,7 +244,8 @@ def fetch(self, url, bucket, key=None, hostscache_dir=None):
             url:      指定的URL
             bucket:   目标资源空间
             key:      目标资源文件名
-            hostscache_dir: host请求 缓存文件保存位置
+            hostscache_dir: deprecated, 此参数不再生效,可修改 get_default_regions_provider 返回对象的属性达成同样功能;
+                查询区域缓存文件保存位置
 
         Returns:
             一个dict变量:
@@ -199,7 +255,11 @@ def fetch(self, url, bucket, key=None, hostscache_dir=None):
         """
         resource = urlsafe_base64_encode(url)
         to = entry(bucket, key)
-        return self.__io_do(bucket, 'fetch', hostscache_dir, resource, 'to/{0}'.format(to))
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.IO],
+            '/fetch/{0}/to/{1}'.format(resource, to)
+        )
 
     def prefetch(self, bucket, key, hostscache_dir=None):
         """镜像回源预取文件:
@@ -210,14 +270,19 @@ def prefetch(self, bucket, key, hostscache_dir=None):
         Args:
             bucket: 待获取资源所在的空间
             key:    代获取资源文件名
-            hostscache_dir: host请求 缓存文件保存位置
+            hostscache_dir: deprecated, 此参数不再生效,可修改 get_default_regions_provider 返回对象的属性达成同样功能;
+                查询区域缓存文件保存位置
 
         Returns:
             一个dict变量,成功返回NULL,失败返回{"error": "<errMsg string>"}
             一个ResponseInfo对象
         """
         resource = entry(bucket, key)
-        return self.__io_do(bucket, 'prefetch', hostscache_dir, resource)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.IO],
+            '/prefetch/{0}'.format(resource)
+        )
 
     def change_mime(self, bucket, key, mime):
         """修改文件mimeType:
@@ -232,7 +297,11 @@ def change_mime(self, bucket, key, mime):
         """
         resource = entry(bucket, key)
         encode_mime = urlsafe_base64_encode(mime)
-        return self.__rs_do(bucket, 'chgm', resource, 'mime/{0}'.format(encode_mime))
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/chgm/{0}/mime/{1}'.format(resource, encode_mime)
+        )
 
     def change_type(self, bucket, key, storage_type):
         """修改文件的存储类型
@@ -246,21 +315,52 @@ def change_type(self, bucket, key, storage_type):
             storage_type:   待操作资源存储类型,0为普通存储,1为低频存储,2 为归档存储,3 为深度归档,4 为归档直读存储
         """
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'chtype', resource, 'type/{0}'.format(storage_type))
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/chtype/{0}/type/{1}'.format(resource, storage_type)
+        )
 
     def restoreAr(self, bucket, key, freezeAfter_days):
-        """解冻归档存储、深度归档存储文件
-
-        对归档存储、深度归档存储文件,进行解冻操作参考文档:
-        https://developer.qiniu.com/kodo/6380/restore-archive
+        """
+        restore_ar 的别名,用于兼容旧版本
 
         Args:
             bucket:         待操作资源所在空间
             key:            待操作资源文件名
             freezeAfter_days:   解冻有效时长,取值范围 1~7
         """
+        return self.restore_ar(
+            bucket,
+            key,
+            freezeAfter_days
+        )
+
+    def restore_ar(self, bucket, key, freeze_after_days):
+        """
+        解冻归档存储、深度归档存储文件
+
+        对归档存储、深度归档存储文件,进行解冻操作参考文档:
+        https://developer.qiniu.com/kodo/6380/restore-archive
+
+        Parameters
+        ----------
+        bucket: str
+        key: str
+        freeze_after_days: int
+
+        Returns
+        -------
+        ret: dict
+        resp: ResponseInfo
+        """
+
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'restoreAr', resource, 'freezeAfterDays/{0}'.format(freezeAfter_days))
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/restoreAr/{0}/freezeAfterDays/{1}'.format(resource, freeze_after_days)
+        )
 
     def change_status(self, bucket, key, status, cond):
         """修改文件的状态
@@ -270,16 +370,23 @@ def change_status(self, bucket, key, status, cond):
         Args:
             bucket:         待操作资源所在空间
             key:            待操作资源文件名
-            storage_type:   待操作资源存储类型,0为启用,1为禁用
+            status:   待操作资源状态,0为启用,1为禁用
         """
         resource = entry(bucket, key)
+        url_resource = '/chstatus/{0}/status/{1}'.format(resource, status)
         if cond and isinstance(cond, dict):
-            condstr = ""
-            for k, v in cond.items():
-                condstr += "{0}={1}&".format(k, v)
-            condstr = urlsafe_base64_encode(condstr[:-1])
-            return self.__rs_do(bucket, 'chstatus', resource, 'status/{0}'.format(status), 'cond', condstr)
-        return self.__rs_do(bucket, 'chstatus', resource, 'status/{0}'.format(status))
+            condstr = urlsafe_base64_encode(
+                '&'.join(
+                    '='.join([k, v])
+                    for k, v in cond.items()
+                )
+            )
+            url_resource += '/cond/{0}'.format(condstr)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            url_resource
+        )
 
     def set_object_lifecycle(
         self,
@@ -318,10 +425,17 @@ def set_object_lifecycle(
             'deleteAfterDays', str(delete_after_days)
         ]
         if cond and isinstance(cond, dict):
-            cond_str = '&'.join(["{0}={1}".format(k, v) for k, v in cond.items()])
+            cond_str = '&'.join(
+                '='.join([k, v])
+                for k, v in cond.items()
+            )
             options += ['cond', urlsafe_base64_encode(cond_str)]
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'lifecycle', resource, *options)
+        return self.__server_do_with_retrier(
+            bucket,
+            service_names=[ServiceName.RS],
+            url_resource='/lifecycle/{0}/{1}'.format(resource, '/'.join(options)),
+        )
 
     def batch(self, operations):
         """批量操作:
@@ -345,6 +459,7 @@ def batch(self, operations):
             一个ResponseInfo对象
         """
         if not operations:
+            # change to ValueError in the next breaking-change release
             raise Exception('operations is empty')
         bucket = ''
         for op in operations:
@@ -354,11 +469,15 @@ def batch(self, operations):
             if bucket:
                 break
         if not bucket:
+            # change to ValueError in the next breaking-change release
             raise Exception('bucket is empty')
-        ak = self.auth.get_access_key()
-        rs_host = self.zone.get_rs_host(ak, bucket)
-        url = '{0}/batch'.format(rs_host)
-        return self.__post(url, dict(op=operations))
+
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/batch',
+            {'op': operations}
+        )
 
     def buckets(self):
         """获取所有空间名:
@@ -370,7 +489,7 @@ def buckets(self):
                 [ <Bucket1>, <Bucket2>, ... ]
             一个ResponseInfo对象
         """
-        return self.__uc_do('buckets')
+        return self.__uc_do_with_retrier('/buckets')
 
     def delete_after_days(self, bucket, key, days):
         """更新文件生命周期
@@ -392,7 +511,11 @@ def delete_after_days(self, bucket, key, days):
             days:   指定天数
         """
         resource = entry(bucket, key)
-        return self.__rs_do(bucket, 'deleteAfterDays', resource, days)
+        return self.__server_do_with_retrier(
+            bucket,
+            [ServiceName.RS],
+            '/deleteAfterDays/{0}/{1}'.format(resource, days)
+        )
 
     def mkbucketv3(self, bucket_name, region):
         """
@@ -402,7 +525,9 @@ def mkbucketv3(self, bucket_name, region):
             bucket_name: 存储空间名
             region: 存储区域
         """
-        return self.__uc_do('mkbucketv3', bucket_name, 'region', region)
+        return self.__uc_do_with_retrier(
+            '/mkbucketv3/{0}/region/{1}'.format(bucket_name, region)
+        )
 
     def list_bucket(self, region):
         """
@@ -410,7 +535,7 @@ def list_bucket(self, region):
 
         Args:
         """
-        return self.__uc_do('v3/buckets?region={0}'.format(region))
+        return self.__uc_do_with_retrier('/v3/buckets?region={0}'.format(region))
 
     def bucket_info(self, bucket_name):
         """
@@ -419,7 +544,7 @@ def bucket_info(self, bucket_name):
         Args:
             bucket_name: 存储空间名
         """
-        return self.__uc_do('v2/bucketInfo?bucket={}'.format(bucket_name), )
+        return self.__uc_do_with_retrier('/v2/bucketInfo?bucket={0}'.format(bucket_name))
 
     def bucket_domain(self, bucket_name):
         """
@@ -437,32 +562,146 @@ def change_bucket_permission(self, bucket_name, private):
             bucket_name: 存储空间名
             private: 0 公开;1 私有 ,str类型
         """
-        url = "{0}/private?bucket={1}&private={2}".format(config.get_default("default_uc_host"), bucket_name, private)
-        return self.__post(url)
+        return self.__uc_do_with_retrier(
+            '/private?bucket={0}&private={1}'.format(bucket_name, private)
+        )
 
-    def __api_do(self, bucket, operation, data=None):
-        ak = self.auth.get_access_key()
-        api_host = self.zone.get_api_host(ak, bucket)
-        url = '{0}/{1}'.format(api_host, operation)
-        return self.__post(url, data)
+    def _get_regions_provider(self, bucket_name):
+        """
+        Parameters
+        ----------
+        bucket_name: str
 
-    def __uc_do(self, operation, *args):
-        return self.__server_do(config.get_default('default_uc_host'), operation, *args)
+        Returns
+        -------
+        Iterable[Region]
+        """
+        if self.regions:
+            return self.regions
+
+        # handle compatibility for legacy config
+        if self.zone and any(
+            hasattr(self.zone, attr_name) and getattr(self.zone, attr_name)
+            for attr_name in [
+                'io_host',
+                'rs_host',
+                'rsf_host',
+                'api_host'
+            ]
+        ):
+            return [self.zone]
+
+        # handle compatibility for default_query_region_host
+        query_regions_endpoints = self.query_regions_endpoints
+        if not query_regions_endpoints:
+            query_region_host = config.get_default('default_query_region_host')
+            query_region_backup_hosts = config.get_default('default_query_region_backup_hosts')
+            query_regions_endpoints = [
+                Endpoint.from_host(h)
+                for h in [query_region_host] + query_region_backup_hosts
+            ]
+
+        return get_default_regions_provider(
+            query_endpoints_provider=query_regions_endpoints,
+            access_key=self.auth.get_access_key(),
+            bucket_name=bucket_name,
+            preferred_scheme=self.preferred_scheme
+        )
+
+    def __uc_do_with_retrier(self, url_resource, data=None):
+        """
+        Parameters
+        ----------
+        url_resource: str
+        data: dict or None
+
+        Returns
+        -------
+        ret: dict or None
+        resp: ResponseInfo
+        """
+        regions = self.regions
+
+        # self.zone is ignored here because it carries no UC endpoints
+        # handle compatibility for default_uc_host
+        if not regions:
+            uc_host = config.get_default('default_uc_host')
+            uc_backup_hosts = config.get_default('default_uc_backup_hosts')
+            uc_endpoints = [
+                Endpoint.from_host(h)
+                for h in [uc_host] + uc_backup_hosts
+            ]
+            regions = [Region(services={ServiceName.UC: uc_endpoints})]
+
+        retrier = get_default_retrier(
+            regions_provider=regions,
+            service_names=[ServiceName.UC]
+        )
+
+        attempt = None
+        for attempt in retrier:
+            with attempt:
+                host = attempt.context.get('endpoint').get_value(scheme=self.preferred_scheme)
+                url = host + url_resource
+                attempt.result = self.__post(url, data)
+                ret, resp = attempt.result
+                if resp.ok() and ret:
+                    return attempt.result
+                if not resp.need_retry():
+                    return attempt.result
+
+        if attempt is None:
+            raise RuntimeError('Retrier is not working. attempt is None')
+
+        return attempt.result
+
+    def __server_do_with_retrier(self, bucket_name, service_names, url_resource, data=None, method='POST'):
+        """
+        Parameters
+        ----------
+        bucket_name: str
+        service_names: list[ServiceName]
+        url_resource: str
+        data: dict or None
+        method: str
+
+        Returns
+        -------
+        ret: dict or None
+        resp: ResponseInfo
+        """
+        if not service_names:
+            raise ValueError('service_names is empty')
+
+        retrier = get_default_retrier(
+            regions_provider=self._get_regions_provider(bucket_name=bucket_name),
+            service_names=service_names
+        )
+
+        method = method.upper()
+        if method == 'POST':
+            send_request = self.__post
+        elif method == 'GET':
+            send_request = self.__get
+        else:
+            raise ValueError('"method" must be "POST" or "GET"')
 
-    def __rs_do(self, bucket, operation, *args):
-        ak = self.auth.get_access_key()
-        rs_host = self.zone.get_rs_host(ak, bucket)
-        return self.__server_do(rs_host, operation, *args)
+        attempt = None
+        for attempt in retrier:
+            with attempt:
+                host = attempt.context.get('endpoint').get_value(scheme=self.preferred_scheme)
+                url = host + url_resource
+                attempt.result = send_request(url, data)
+                ret, resp = attempt.result
+                if resp.ok() and ret:
+                    return attempt.result
+                if not resp.need_retry():
+                    return attempt.result
 
-    def __io_do(self, bucket, operation, home_dir, *args):
-        ak = self.auth.get_access_key()
-        io_host = self.zone.get_io_host(ak, bucket, home_dir)
-        return self.__server_do(io_host, operation, *args)
+        if attempt is None:
+            raise RuntimeError('Retrier is not working. attempt is None')
 
-    def __server_do(self, host, operation, *args):
-        cmd = _build_op(operation, *args)
-        url = '{0}/{1}'.format(host, cmd)
-        return self.__post(url)
+        return attempt.result
 
     def __post(self, url, data=None):
         return http._post_with_qiniu_mac(url, data, self.mac_auth)
@@ -472,44 +711,200 @@ def __get(self, url, params=None):
 
 
 def _build_op(*args):
-    return '/'.join(args)
+    return '/'.join(map(str, args))
 
 
 def build_batch_copy(source_bucket, key_pairs, target_bucket, force='false'):
+    """
+    Parameters
+    ----------
+    source_bucket: str
+    key_pairs: dict
+    target_bucket: str
+    force: str
+
+    Returns
+    -------
+    list[str]
+    """
     return _two_key_batch('copy', source_bucket, key_pairs, target_bucket, force)
 
 
 def build_batch_rename(bucket, key_pairs, force='false'):
+    """
+    Parameters
+    ----------
+    bucket: str
+    key_pairs: dict
+    force: str
+
+    Returns
+    -------
+    list[str]
+    """
     return build_batch_move(bucket, key_pairs, bucket, force)
 
 
 def build_batch_move(source_bucket, key_pairs, target_bucket, force='false'):
+    """
+    Parameters
+    ----------
+    source_bucket: str
+    key_pairs: dict
+    target_bucket: str
+    force: str
+
+    Returns
+    -------
+    list[str]
+    """
     return _two_key_batch('move', source_bucket, key_pairs, target_bucket, force)
 
 
 def build_batch_restoreAr(bucket, keys):
-    return _three_key_batch('restoreAr', bucket, keys)
+    """
+    alias of build_batch_restore_ar, kept for compatibility with older versions
+
+    Parameters
+    ----------
+    bucket: str
+    keys: dict
+
+    Returns
+    -------
+    list[str]
+    """
+    return build_batch_restore_ar(bucket, keys)
+
+
+def build_batch_restore_ar(bucket, keys):
+    """
+    Parameters
+    ----------
+    bucket: str
+    keys: dict
+
+    Returns
+    -------
+    list[str]
+    """
+    keys = {
+        k: ['freezeAfterDays', v]
+        for k, v in keys.items()
+    }
+    return _one_key_batch('restoreAr', bucket, keys)
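+
+
+# Example (illustrative): thaw two archived objects for 7 days in a single
+# batch request; the bucket and key names are placeholders.
+#
+# ops = build_batch_restore_ar('bucket-name', {'a.bin': 7, 'b.bin': 7})
+# ret, resp = bucket_manager.batch(ops)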
 
 
 def build_batch_delete(bucket, keys):
+    """
+    Parameters
+    ----------
+    bucket: str
+    keys: list[str]
+
+    Returns
+    -------
+    list[str]
+    """
     return _one_key_batch('delete', bucket, keys)
 
 
 def build_batch_stat(bucket, keys):
+    """
+    Parameters
+    ----------
+    bucket: str
+    keys: list[str]
+
+    Returns
+    -------
+    list[str]
+    """
     return _one_key_batch('stat', bucket, keys)
 
 
 def _one_key_batch(operation, bucket, keys):
-    return [_build_op(operation, entry(bucket, key)) for key in keys]
+    """
+    Parameters
+    ----------
+    operation: str
+    bucket: str
+    keys: list[str] or dict
+
+    Returns
+    -------
+    list[str]
+    """
+    # refactor with functools.singledispatch when the min version of Python is >= 3.4
+    if isinstance(keys, list):
+        return [
+            _build_op(
+                operation,
+                entry(bucket, key),
+            )
+            for key in keys
+        ]
+    elif isinstance(keys, dict):
+        return [
+            _build_op(
+                operation,
+                entry(bucket, key),
+                *opts
+            )
+            for key, opts in keys.items()
+        ]
+    else:
+        raise TypeError('"keys" only support list or dict')
+
 
+def _two_key_batch(operation, source_bucket, key_pairs, target_bucket=None, force='false'):
+    """
 
-def _two_key_batch(operation, source_bucket, key_pairs, target_bucket, force='false'):
+    Parameters
+    ----------
+    operation: str
+    source_bucket: str
+    key_pairs: dict
+    target_bucket: str
+    force: str
+
+    Returns
+    -------
+    list[str]
+    """
     if target_bucket is None:
         target_bucket = source_bucket
-    return [_build_op(operation, entry(source_bucket, k), entry(target_bucket, v), 'force/{0}'.format(force)) for k, v
-            in key_pairs.items()]
+    return _one_key_batch(
+        operation,
+        source_bucket,
+        {
+            src_key: [
+                entry(target_bucket, dst_key),
+                'force',
+                force
+            ]
+            for src_key, dst_key in key_pairs.items()
+        }
+    )
 
 
 def _three_key_batch(operation, bucket, keys):
-    return [_build_op(operation, entry(bucket, k), 'freezeAfterDays/{0}'.format(v)) for k, v
-            in keys.items()]
+    """
+    .. deprecated::
+        Use `_one_key_batch` instead; `keys` may be `{key: ['freezeAfterDays', days]}`
+
+    Parameters
+    ----------
+    operation: str
+    bucket: str
+    keys: dict
+
+    Returns
+    -------
+    list[str]
+    """
+    keys = {
+        k: ['freezeAfterDays', v]
+        for k, v in keys.items()
+    }
+    return _one_key_batch(operation, bucket, keys)
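+
+
+# Example (illustrative sketch): construct a BucketManager with explicit
+# regions to skip region auto-query; the endpoint hosts are placeholders.
+#
+# from qiniu import Auth
+# from qiniu.http.endpoint import Endpoint
+# from qiniu.http.region import Region, ServiceName
+#
+# auth = Auth('access_key', 'secret_key')
+# region = Region(services={
+#     ServiceName.RS: [Endpoint.from_host('rs-z0.qiniuapi.com')],
+#     ServiceName.RSF: [Endpoint.from_host('rsf-z0.qiniuapi.com')],
+#     ServiceName.UC: [Endpoint.from_host('uc.qiniuapi.com')]
+# })
+# bucket_manager = BucketManager(auth, regions=[region])
+# ret, resp = bucket_manager.stat('bucket-name', 'key')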
diff --git a/qiniu/services/storage/upload_progress_recorder.py b/qiniu/services/storage/upload_progress_recorder.py
index 5be466d8..8673b198 100644
--- a/qiniu/services/storage/upload_progress_recorder.py
+++ b/qiniu/services/storage/upload_progress_recorder.py
@@ -1,5 +1,4 @@
 # -*- coding: utf-8 -*-
-
 import hashlib
 import json
 import os
@@ -8,16 +7,11 @@
 
 
 class UploadProgressRecorder(object):
-    """持久化上传记录类
+    """
+    持久化上传记录类
 
     该类默认保存每个文件的上传记录到文件系统中,用于断点续传
-    上传记录为json格式:
-    {
-        "size": file_size,
-        "offset": upload_offset,
-        "modify_time": file_modify_time,
-        "contexts": contexts
-    }
+    上传记录为json格式
 
     Attributes:
         record_folder: 保存上传记录的目录
@@ -26,46 +20,37 @@ class UploadProgressRecorder(object):
     def __init__(self, record_folder=tempfile.gettempdir()):
         self.record_folder = record_folder
 
-    def get_upload_record(self, file_name, key):
+    def __get_upload_record_file_path(self, file_name, key):
         record_key = '{0}/{1}'.format(key, file_name)
         if is_py2:
             record_file_name = hashlib.md5(record_key).hexdigest()
         else:
             record_file_name = hashlib.md5(record_key.encode('utf-8')).hexdigest()
+        return os.path.join(self.record_folder, record_file_name)
 
-        upload_record_file_path = os.path.join(self.record_folder, record_file_name)
-        if not os.path.isfile(upload_record_file_path):
+    def has_upload_record(self, file_name, key):
+        upload_record_file_path = self.__get_upload_record_file_path(file_name, key)
+        return os.path.isfile(upload_record_file_path)
+
+    def get_upload_record(self, file_name, key):
+        upload_record_file_path = self.__get_upload_record_file_path(file_name, key)
+        if not self.has_upload_record(file_name, key):
             return None
         try:
             with open(upload_record_file_path, 'r') as f:
-                try:
-                    json_data = json.load(f)
-                except ValueError:
-                    json_data = None
-        except IOError:
+                json_data = json.load(f)
+        except (IOError, ValueError):
             json_data = None
 
         return json_data
 
     def set_upload_record(self, file_name, key, data):
-        record_key = '{0}/{1}'.format(key, file_name)
-        if is_py2:
-            record_file_name = hashlib.md5(record_key).hexdigest()
-        else:
-            record_file_name = hashlib.md5(record_key.encode('utf-8')).hexdigest()
-
-        upload_record_file_path = os.path.join(self.record_folder, record_file_name)
+        upload_record_file_path = self.__get_upload_record_file_path(file_name, key)
         with open(upload_record_file_path, 'w') as f:
             json.dump(data, f)
 
     def delete_upload_record(self, file_name, key):
-        record_key = '{0}/{1}'.format(key, file_name)
-        if is_py2:
-            record_file_name = hashlib.md5(record_key).hexdigest()
-        else:
-            record_file_name = hashlib.md5(record_key.encode('utf-8')).hexdigest()
-
-        upload_record_file_path = os.path.join(self.record_folder, record_file_name)
+        upload_record_file_path = self.__get_upload_record_file_path(file_name, key)
         try:
             os.remove(upload_record_file_path)
         except OSError:
diff --git a/qiniu/services/storage/uploader.py b/qiniu/services/storage/uploader.py
index 049cc876..5bcdf4e5 100644
--- a/qiniu/services/storage/uploader.py
+++ b/qiniu/services/storage/uploader.py
@@ -9,13 +9,23 @@
 from qiniu.services.storage.uploaders import FormUploader, ResumeUploaderV1, ResumeUploaderV2
 from qiniu.services.storage.upload_progress_recorder import UploadProgressRecorder
 
-# for compact to old sdk
+# for compat with old SDKs (<= v7.11.1)
 from qiniu.services.storage.legacy import _Resume # noqa
 
 
 def put_data(
-    up_token, key, data, params=None, mime_type='application/octet-stream', check_crc=False, progress_handler=None,
-    fname=None, hostscache_dir=None, metadata=None
+    up_token,
+    key,
+    data,
+    params=None,
+    mime_type='application/octet-stream',
+    check_crc=False,
+    progress_handler=None,
+    fname=None,
+    hostscache_dir=None,
+    metadata=None,
+    regions=None,
+    accelerate_uploading=False
 ):
     """上传二进制流到七牛
 
@@ -27,8 +37,11 @@ def put_data(
         mime_type:        上传数据的mimeType
         check_crc:        是否校验crc32
         progress_handler: 上传进度
+        fname:            文件名
         hostscache_dir:   host请求 缓存文件保存位置
         metadata:         元数据
+        regions:          区域信息,默认自动查询
+        accelerate_uploading: 是否优先使用加速上传
 
     Returns:
         一个dict变量,类似 {"hash": "<Hash string>", "key": "<Key string>"}
@@ -48,7 +61,8 @@ def put_data(
     crc = crc32(final_data)
     return _form_put(
         up_token, key, final_data, params, mime_type,
-        crc, hostscache_dir, progress_handler, fname, metadata=metadata
+        crc, hostscache_dir, progress_handler, fname, metadata=metadata,
+        regions=regions, accelerate_uploading=accelerate_uploading
     )
 
 
@@ -56,7 +70,8 @@ def put_file(
     up_token, key, file_path, params=None,
     mime_type='application/octet-stream', check_crc=False,
     progress_handler=None, upload_progress_recorder=None, keep_last_modified=False, hostscache_dir=None,
-    part_size=None, version=None, bucket_name=None, metadata=None
+    part_size=None, version=None, bucket_name=None, metadata=None,
+    regions=None, accelerate_uploading=False
 ):
     """上传文件到七牛
 
@@ -69,11 +84,14 @@ def put_file(
         check_crc:                是否校验crc32
         progress_handler:         上传进度
         upload_progress_recorder: 记录上传进度,用于断点续传
+        keep_last_modified:       是否保留文件的最后修改时间
         hostscache_dir:           host请求 缓存文件保存位置
         version:                  分片上传版本 目前支持v1/v2版本 默认v1
         part_size:                分片上传v2必传字段 默认大小为4MB 分片大小范围为1 MB - 1 GB
         bucket_name:              分片上传v2字段 空间名称
         metadata:                 元数据信息
+        regions:                  region信息
+        accelerate_uploading:     是否开启加速上传
 
     Returns:
         一个dict变量,类似 {"hash": "<Hash string>", "key": "<Key string>"}
@@ -90,14 +108,16 @@ def put_file(
                 mime_type, progress_handler,
                 upload_progress_recorder=upload_progress_recorder,
                 modify_time=modify_time, keep_last_modified=keep_last_modified,
-                part_size=part_size, version=version, bucket_name=bucket_name, metadata=metadata
+                part_size=part_size, version=version, bucket_name=bucket_name, metadata=metadata,
+                regions=regions, accelerate_uploading=accelerate_uploading
             )
         else:
             crc = file_crc32(file_path)
             ret, info = _form_put(
                 up_token, key, input_stream, params, mime_type,
                 crc, hostscache_dir, progress_handler, file_name,
-                modify_time=modify_time, keep_last_modified=keep_last_modified, metadata=metadata
+                modify_time=modify_time, keep_last_modified=keep_last_modified, metadata=metadata,
+                regions=regions, accelerate_uploading=accelerate_uploading
             )
     return ret, info
 
@@ -114,13 +134,17 @@ def _form_put(
     file_name=None,
     modify_time=None,
     keep_last_modified=False,
-    metadata=None
+    metadata=None,
+    regions=None,
+    accelerate_uploading=False
 ):
     bucket_name = Auth.get_bucket_name(up_token)
     uploader = FormUploader(
         bucket_name,
         progress_handler=progress_handler,
-        hosts_cache_dir=hostscache_dir
+        regions=regions,
+        accelerate_uploading=accelerate_uploading,
+        preferred_scheme=get_default('default_zone').scheme
     )
 
     if modify_time and keep_last_modified:
@@ -156,7 +180,9 @@ def put_stream(
     part_size=None,
     version='v1',
     bucket_name=None,
-    metadata=None
+    metadata=None,
+    regions=None,
+    accelerate_uploading=False
 ):
     if not bucket_name:
         bucket_name = Auth.get_bucket_name(up_token)
@@ -172,7 +198,9 @@ def put_stream(
             bucket_name,
             progress_handler=progress_handler,
             upload_progress_recorder=upload_progress_recorder,
-            hosts_cache_dir=hostscache_dir
+            regions=regions,
+            accelerate_uploading=accelerate_uploading,
+            preferred_scheme=get_default('default_zone').scheme
         )
         if modify_time and keep_last_modified:
             metadata['x-qn-meta-!Last-Modified'] = rfc_from_timestamp(modify_time)
@@ -182,7 +210,9 @@ def put_stream(
             progress_handler=progress_handler,
             upload_progress_recorder=upload_progress_recorder,
             part_size=part_size,
-            hosts_cache_dir=hostscache_dir
+            regions=regions,
+            accelerate_uploading=accelerate_uploading,
+            preferred_scheme=get_default('default_zone').scheme
         )
     else:
         raise ValueError('version only could be v1 or v2')
diff --git a/qiniu/services/storage/uploaders/_default_retrier.py b/qiniu/services/storage/uploaders/_default_retrier.py
new file mode 100644
index 00000000..25e0b18f
--- /dev/null
+++ b/qiniu/services/storage/uploaders/_default_retrier.py
@@ -0,0 +1,210 @@
+from collections import namedtuple
+
+from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy
+from qiniu.http.region import ServiceName
+from qiniu.http.regions_retry_policy import RegionsRetryPolicy
+from qiniu.retry.abc import RetryPolicy
+from qiniu.retry import Retrier
+
+
+_TokenExpiredRetryState = namedtuple(
+    'TokenExpiredRetryState',
+    [
+        'retried_times',
+        'upload_api_version'
+    ]
+)
+
+
+class TokenExpiredRetryPolicy(RetryPolicy):
+    def __init__(
+        self,
+        upload_api_version,
+        record_delete_handler,
+        record_exists_handler,
+        max_retry_times=1
+    ):
+        """
+        Parameters
+        ----------
+        upload_api_version: str
+        record_delete_handler: callable
+            `() -> None`
+        record_exists_handler: callable
+            `() -> bool`
+        max_retry_times: int
+        """
+        self.upload_api_version = upload_api_version
+        self.record_delete_handler = record_delete_handler
+        self.record_exists_handler = record_exists_handler
+        self.max_retry_times = max_retry_times
+
+    def init_context(self, context):
+        """
+        Parameters
+        ----------
+        context: dict
+        """
+        context[self] = _TokenExpiredRetryState(
+            retried_times=0,
+            upload_api_version=self.upload_api_version
+        )
+
+    def should_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+
+        Returns
+        -------
+        bool
+        """
+        state = attempt.context[self]
+
+        if (
+            state.retried_times >= self.max_retry_times or
+            not self.record_exists_handler()
+        ):
+            return False
+
+        if not attempt.result:
+            return False
+
+        _ret, resp = attempt.result
+
+        if (
+            state.upload_api_version == 'v1' and
+            resp.status_code == 701
+        ):
+            return True
+
+        if (
+            state.upload_api_version == 'v2' and
+            resp.status_code == 612
+        ):
+            return True
+
+        return False
+
+    def prepare_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+        """
+        state = attempt.context[self]
+        attempt.context[self] = state._replace(retried_times=state.retried_times + 1)
+
+        if not self.record_exists_handler():
+            return
+
+        self.record_delete_handler()
+
+
+class AccUnavailableRetryPolicy(RetryPolicy):
+    def __init__(self):
+        pass
+
+    def init_context(self, context):
+        pass
+
+    def should_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+
+        Returns
+        -------
+        bool
+        """
+        if not attempt.result:
+            return False
+
+        region = attempt.context.get('region')
+        if not region:
+            return False
+
+        if all(
+            not region.services[sn]
+            for sn in attempt.context.get('alternative_service_names')
+        ):
+            return False
+
+        _ret, resp = attempt.result
+
+        return resp.status_code == 400 and \
+            'transfer acceleration is not configured on this bucket' in resp.text_body
+
+    def prepare_retry(self, attempt):
+        """
+        Parameters
+        ----------
+        attempt: qiniu.retry.Attempt
+        """
+        endpoints = []
+        while not endpoints:
+            if not attempt.context.get('alternative_service_names'):
+                raise RuntimeError('No alternative service available')
+            attempt.context['service_name'] = attempt.context.get('alternative_service_names').pop(0)
+            # shallow copy list
+            # switch to `list.copy` for readability once the minimum Python version is >= 3
+            endpoints = attempt.context['region'].services.get(attempt.context['service_name'], [])[:]
+        attempt.context['alternative_endpoints'] = endpoints
+        attempt.context['endpoint'] = attempt.context['alternative_endpoints'].pop(0)
+
+
+ProgressRecord = namedtuple(
+    'ProgressRecord',
+    [
+        'upload_api_version',
+        'exists',
+        'delete'
+    ]
+)
+
+
+def get_default_retrier(
+    regions_provider,
+    preferred_endpoints_provider=None,
+    progress_record=None,
+    accelerate_uploading=False
+):
+    """
+    Parameters
+    ----------
+    regions_provider: Iterable[Region]
+    preferred_endpoints_provider: Iterable[Endpoint]
+    progress_record: ProgressRecord
+    accelerate_uploading: bool
+
+    Returns
+    -------
+    Retrier
+    """
+    retry_policies = []
+    upload_service_names = [ServiceName.UP]
+
+    if accelerate_uploading:
+        retry_policies.append(AccUnavailableRetryPolicy())
+        upload_service_names.insert(0, ServiceName.UP_ACC)
+
+    if progress_record:
+        retry_policies.append(TokenExpiredRetryPolicy(
+            upload_api_version=progress_record.upload_api_version,
+            record_delete_handler=progress_record.delete,
+            record_exists_handler=progress_record.exists
+        ))
+
+    retry_policies += [
+        EndpointsRetryPolicy(skip_init_context=True),
+        RegionsRetryPolicy(
+            regions_provider=regions_provider,
+            service_names=upload_service_names,
+            preferred_endpoints_provider=preferred_endpoints_provider,
+            on_change_region=lambda _: progress_record.delete() if progress_record else None
+        )
+    ]
+
+    return Retrier(retry_policies)
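get_default_retrier stacks the policies in priority order: acceleration fallback, token expiry, endpoint rotation, then region rotation. The token-expiry decision above keys off one status code per API version (701 for v1, 612 for v2). A self-contained sketch mirroring should_retry:

    def should_retry_token_expired(api_version, status_code, record_exists,
                                   retried_times, max_retry_times=1):
        # give up once the budget is spent or there is no resume record to drop
        if retried_times >= max_retry_times or not record_exists:
            return False
        # each upload API signals an expired upload context with a different status
        expired_status = {'v1': 701, 'v2': 612}
        return status_code == expired_status.get(api_version)

    assert should_retry_token_expired('v1', 701, record_exists=True, retried_times=0)
    assert not should_retry_token_expired('v2', 701, record_exists=True, retried_times=0)
    assert not should_retry_token_expired('v1', 701, record_exists=True, retried_times=1)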
diff --git a/qiniu/services/storage/uploaders/abc/resume_uploader_base.py b/qiniu/services/storage/uploaders/abc/resume_uploader_base.py
index 06d827df..2965dee0 100644
--- a/qiniu/services/storage/uploaders/abc/resume_uploader_base.py
+++ b/qiniu/services/storage/uploaders/abc/resume_uploader_base.py
@@ -205,7 +205,7 @@ def complete_parts(
         up_token: str
         data_size: int
         context: any
-        kwargs: dictr
+        kwargs: dict
 
         Returns
         -------
diff --git a/qiniu/services/storage/uploaders/abc/uploader_base.py b/qiniu/services/storage/uploaders/abc/uploader_base.py
index bb39fbfc..5907aa1c 100644
--- a/qiniu/services/storage/uploaders/abc/uploader_base.py
+++ b/qiniu/services/storage/uploaders/abc/uploader_base.py
@@ -1,9 +1,13 @@
 import abc
 
 import qiniu.config as config
+from qiniu.region import LegacyRegion
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.regions_provider import get_default_regions_provider
 
 # type import
 from qiniu.auth import Auth # noqa
+from qiniu.http.region import Region, ServiceName  # noqa
 
 
 class UploaderBase(object):
@@ -33,21 +37,37 @@ def __init__(
         kwargs
             The others arguments may be used by subclass.
         """
+        # default bucket_name
         self.bucket_name = bucket_name
 
         # change the default when implements AuthProvider
         self.auth = kwargs.get('auth', None)
 
-        regions = kwargs.get('regions', [])
-        # remove the check when implement RegionsProvider
-        # if not regions:
-        #     raise TypeError('You must provide the regions')
+        # regions config
+        regions = kwargs.get('regions', None)
+        if not regions:
+            regions = []
         self.regions = regions
 
-        hosts_cache_dir = kwargs.get('hosts_cache_dir', None)
-        self.hosts_cache_dir = hosts_cache_dir
+        query_regions_endpoints = kwargs.get('query_regions_endpoints', None)
+        if not query_regions_endpoints:
+            query_regions_endpoints = []
+        self.query_regions_endpoints = query_regions_endpoints
+
+        self.preferred_scheme = kwargs.get('preferred_scheme', 'http')
+
+        # change the default value to False once config.get_default('default_zone') is removed
+        self.accelerate_uploading = kwargs.get('accelerate_uploading', None)
 
-    def get_up_token(self, **kwargs):
+    def get_up_token(
+        self,
+        bucket_name=None,
+        key=None,
+        expired=None,
+        policy=None,
+        strict_policy=None,
+        **_kwargs
+    ):
         """
         Generate up token
 
@@ -56,8 +76,11 @@ def get_up_token(self, **kwargs):
         bucket_name: str
         key: str
         expired: int
+            seconds
         policy: dict
         strict_policy: bool
+        _kwargs: dict
+            unused for now, kept for compatibility
 
         Returns
         -------
@@ -66,64 +89,148 @@ def get_up_token(self, **kwargs):
         if not self.auth:
             raise ValueError('can not get up_token by auth not provided')
 
-        bucket_name = kwargs.get('bucket_name', self.bucket_name)
+        bucket_name = bucket_name if bucket_name else self.bucket_name
 
         kwargs_for_up_token = {
-            k: kwargs[k]
-            for k in [
-                'key', 'expires', 'policy', 'strict_policy'
-            ] if k in kwargs
+            k: v
+            for k, v in {
+                'bucket': bucket_name,
+                'key': key,
+                'expires': expired,
+                'policy': policy,
+                'strict_policy': strict_policy
+            }.items()
+            if v is not None
         }
-        up_token = self.auth.upload_token(
-            bucket=bucket_name,
-            **kwargs_for_up_token
-        )
+        up_token = self.auth.upload_token(**kwargs_for_up_token)
         return up_token
 
-    def _get_regions(self):
+    def _get_regions_provider(self, access_key=None, bucket_name=None):
+        """
+        Parameters
+        ----------
+        access_key: str
+        bucket_name: str
+
+        Returns
+        -------
+        Iterable[Region or LegacyRegion]
+        """
         if self.regions:
             return self.regions
 
         # handle compatibility for default_zone
-        default_region = config.get_default('default_zone')
-        if default_region:
-            self.regions = [default_region]
+        if config.is_customized_default('default_zone'):
+            return [config.get_default('default_zone')]
 
-        return self.regions
+        # handle compatibility for default_query_region_host
+        query_regions_endpoints = self.query_regions_endpoints
+        if not query_regions_endpoints:
+            query_region_host = config.get_default('default_query_region_host')
+            query_region_backup_hosts = config.get_default('default_query_region_backup_hosts')
+            query_regions_endpoints = [
+                Endpoint.from_host(h)
+                for h in [query_region_host] + query_region_backup_hosts
+            ]
+
+        # get regions from default regions provider
+        if not self.auth and not access_key:
+            raise ValueError('Must provide access_key and bucket_name if auth is unavailable.')
+        if not access_key:
+            access_key = self.auth.get_access_key()
+        if not bucket_name:
+            bucket_name = self.bucket_name
 
-    def _get_up_hosts(self, access_key=None):
+        return get_default_regions_provider(
+            query_endpoints_provider=query_regions_endpoints,
+            access_key=access_key,
+            bucket_name=bucket_name,
+            accelerate_uploading=self.accelerate_uploading,
+            preferred_scheme=self.preferred_scheme,
+        )
+
+    def _get_regions(self, access_key=None, bucket_name=None):
         """
-        This will be deprecated when implement regions and endpoints
+        .. deprecated::
+            Deprecated in favor of the regions provider and endpoints implementation
+
+        Parameters
+        ----------
+        access_key: str
+        bucket_name: str
+
+        Returns
+        -------
+        list[LegacyRegion]
+        """
+        def get_legacy_region(r):
+            if isinstance(r, LegacyRegion):
+                return r
+            opts = {
+                'scheme': self.preferred_scheme,
+                'accelerate_uploading': self.accelerate_uploading
+            }
+            if r.services[ServiceName.UP]:
+                opts['up_host'] = r.services[ServiceName.UP][0].get_value(self.preferred_scheme)
+            if len(r.services[ServiceName.UP]) > 1:
+                opts['up_host_backup'] = [
+                    e.get_value(self.preferred_scheme)
+                    for e in r.services[ServiceName.UP][1:]
+                ]
+            if r.services[ServiceName.IO]:
+                opts['io_host'] = r.services[ServiceName.IO][0].get_value(self.preferred_scheme)
+            if r.services[ServiceName.RS]:
+                opts['rs_host'] = r.services[ServiceName.RS][0].get_value(self.preferred_scheme)
+            if r.services[ServiceName.RSF]:
+                opts['rsf_host'] = r.services[ServiceName.RSF][0].get_value(self.preferred_scheme)
+            if r.services[ServiceName.API]:
+                opts['api_host'] = r.services[ServiceName.API][0].get_value(self.preferred_scheme)
+            result = LegacyRegion(**opts)
+            result.services = r.services
+            result.region_id = r.region_id
+            result.s3_region_id = r.s3_region_id
+            result.ttl = r.ttl
+            result.create_time = r.create_time
+            return result
+
+        return [
+            get_legacy_region(r)
+            for r in self._get_regions_provider(access_key, bucket_name)
+        ]
+
+    def _get_up_hosts(self, access_key=None, bucket_name=None):
+        """
+        Get the upload hosts of the first region resolved for the access key
+
+        .. deprecated::
+            Deprecated in favor of the regions provider and endpoints implementation
 
         Returns
         -------
         list[str]
         """
+        if not bucket_name:
+            bucket_name = self.bucket_name
         if not self.auth and not access_key:
             raise ValueError('Must provide access_key if auth is unavailable.')
         if not access_key:
             access_key = self.auth.get_access_key()
 
-        regions = self._get_regions()
+        regions = self._get_regions(access_key, bucket_name)
 
         if not regions:
             raise ValueError('No region available.')
 
         # get up hosts in region
-        up_hosts = [
-            regions[0].up_host,
-            regions[0].up_host_backup
+        service_names = [ServiceName.UP]
+        if self.accelerate_uploading:
+            service_names.insert(0, ServiceName.UP_ACC)
+
+        return [
+            e.get_value()
+            for sn in service_names
+            for e in regions[0].services[sn]
         ]
-        up_hosts = [h for h in up_hosts if h]
-        if up_hosts:
-            return up_hosts
-
-        # this is correct, it does return hosts. bad function name by legacy
-        return regions[0].get_up_host(
-            ak=access_key,
-            bucket=self.bucket_name,
-            home_dir=self.hosts_cache_dir
-        )
 
     @abc.abstractmethod
     def upload(
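get_up_token above forwards only the options the caller actually set, so Auth.upload_token's own defaults stay in effect. A standalone sketch of that None-filtering (function name hypothetical):

    def build_token_kwargs(bucket, key=None, expired=None, policy=None, strict_policy=None):
        # drop unset options so upload_token's defaults apply
        return {
            k: v
            for k, v in {
                'bucket': bucket,
                'key': key,
                'expires': expired,
                'policy': policy,
                'strict_policy': strict_policy,
            }.items()
            if v is not None
        }

    assert build_token_kwargs('my-bucket') == {'bucket': 'my-bucket'}
    assert build_token_kwargs('my-bucket', key='a.txt', expired=3600) == \
        {'bucket': 'my-bucket', 'key': 'a.txt', 'expires': 3600}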
diff --git a/qiniu/services/storage/uploaders/form_uploader.py b/qiniu/services/storage/uploaders/form_uploader.py
index ea096f59..288a69da 100644
--- a/qiniu/services/storage/uploaders/form_uploader.py
+++ b/qiniu/services/storage/uploaders/form_uploader.py
@@ -7,7 +7,8 @@
 from qiniu.auth import Auth
 from qiniu.http import qn_http_client
 
-from qiniu.services.storage.uploaders.abc import UploaderBase
+from .abc import UploaderBase
+from ._default_retrier import get_default_retrier
 
 
 class FormUploader(UploaderBase):
@@ -54,8 +55,16 @@ def upload(
         file_name: str
         custom_vars: dict
         kwargs
-            up_token, crc32_int
-            bucket_name, key, expired, policy, strict_policy for get up_token
+            up_token: str
+            crc32_int: int
+            bucket_name: str
+                required when uploading to another bucket
+            expired: int
+                option for generating up_token when up_token is not provided, in seconds
+            policy: dict
+                option for generating up_token when up_token is not provided; see `auth.Auth` for details
+            strict_policy: bool
+                option for generating up_token when up_token is not provided
 
         Returns
         -------
@@ -63,14 +72,16 @@ def upload(
         resp: ResponseInfo
         """
         # check and initial arguments
-        # up_token and up_hosts
+        # bucket_name
+        bucket_name = kwargs.get('bucket_name', self.bucket_name)
+
+        # up_token
         up_token = kwargs.get('up_token', None)
         if not up_token:
             up_token = self.get_up_token(**kwargs)
-            up_hosts = self._get_up_hosts()
+            access_key = self.auth.get_access_key()
         else:
             access_key, _, _ = Auth.up_token_decode(up_token)
-            up_hosts = self._get_up_hosts(access_key)
 
         # crc32 from outside
         crc32_int = kwargs.get('crc32_int', None)
@@ -84,6 +95,7 @@ def upload(
         if file_path and data:
             raise TypeError('Must provide only one of file_path or data.')
 
+        # modify_time is not used by form upload
         if not modify_time:
             if file_path:
                 modify_time = int(path.getmtime(file_path))
@@ -104,15 +116,17 @@ def upload(
             if not crc32_int:
                 crc32_int = self.__get_crc32_int(data)
             fields = self.__get_form_fields(
-                up_hosts=up_hosts,
                 up_token=up_token,
                 key=key,
                 crc32_int=crc32_int,
                 custom_vars=custom_vars,
                 metadata=metadata
             )
-            ret, resp = self.__upload_data(
-                up_hosts=up_hosts,
+            ret, resp = self.__upload_data_with_retrier(
+                # retrier options
+                access_key=access_key,
+                bucket_name=bucket_name,
+                # upload_data options
                 fields=fields,
                 file_name=file_name,
                 data=data,
@@ -125,9 +139,45 @@ def upload(
 
         return ret, resp
 
+    def __upload_data_with_retrier(
+        self,
+        access_key,
+        bucket_name,
+        **upload_data_opts
+    ):
+        retrier = get_default_retrier(
+            regions_provider=self._get_regions_provider(
+                access_key=access_key,
+                bucket_name=bucket_name
+            ),
+            accelerate_uploading=self.accelerate_uploading
+        )
+        data = upload_data_opts.get('data')
+        attempt = None
+        for attempt in retrier:
+            with attempt:
+                attempt.result = self.__upload_data(
+                    up_endpoint=attempt.context.get('endpoint'),
+                    **upload_data_opts
+                )
+                ret, resp = attempt.result
+                if resp.ok() and ret:
+                    return attempt.result
+                if (
+                    not is_seekable(data) or
+                    not resp.need_retry()
+                ):
+                    return attempt.result
+                data.seek(0)
+
+        if attempt is None:
+            raise RuntimeError('Retrier is not working. attempt is None')
+
+        return attempt.result
+
     def __upload_data(
         self,
-        up_hosts,
+        up_endpoint,
         fields,
         file_name,
         data,
@@ -137,7 +187,7 @@ def __upload_data(
         """
         Parameters
         ----------
-        up_hosts: list[str]
+        up_endpoint: Endpoint
         fields: dict
         file_name: str
         data: IOBase
@@ -149,26 +199,17 @@ def __upload_data(
         ret: dict
         resp: ResponseInfo
         """
+        req_url = up_endpoint.get_value(scheme=self.preferred_scheme)
         if not file_name or not file_name.strip():
             file_name = 'file_name'
 
-        ret, resp = None, None
-        for up_host in up_hosts:
-            ret, resp = qn_http_client.post(
-                url=up_host,
-                data=fields,
-                files={
-                    'file': (file_name, data, mime_type)
-                }
-            )
-            if resp.ok() and ret:
-                return ret, resp
-            if (
-                not is_seekable(data) or
-                not resp.need_retry()
-            ):
-                return ret, resp
-            data.seek(0)
+        ret, resp = qn_http_client.post(
+            url=req_url,
+            data=fields,
+            files={
+                'file': (file_name, data, mime_type)
+            }
+        )
         return ret, resp
 
     def __get_form_fields(
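With the host loop moved out of __upload_data, each attempt issues exactly one request and the retrier decides where the next one goes; seekable payloads are rewound before every retry. A self-contained sketch of that control flow (all names illustrative):

    import io

    def post_with_rewind(data, endpoints, post_once):
        # post_once(endpoint, data) -> (ret, ok, need_retry)
        ret = None
        for endpoint in endpoints:
            ret, ok, need_retry = post_once(endpoint, data)
            if ok or not need_retry or not data.seekable():
                return ret
            data.seek(0)  # rewind so the next attempt re-reads from the start
        return ret

    payload = io.BytesIO(b'hello')
    calls = []

    def flaky(endpoint, data):
        calls.append((endpoint, data.read()))
        return 'ok', len(calls) > 1, True  # first endpoint fails, second succeeds

    assert post_with_rewind(payload, ['up-a', 'up-b'], flaky) == 'ok'
    assert calls == [('up-a', b'hello'), ('up-b', b'hello')]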
diff --git a/qiniu/services/storage/uploaders/resume_uploader_v1.py b/qiniu/services/storage/uploaders/resume_uploader_v1.py
index 7f1ae89f..8a0e9cfb 100644
--- a/qiniu/services/storage/uploaders/resume_uploader_v1.py
+++ b/qiniu/services/storage/uploaders/resume_uploader_v1.py
@@ -1,5 +1,6 @@
 import logging
 import math
+import functools
 from collections import namedtuple
 from concurrent import futures
 from io import BytesIO
@@ -11,10 +12,12 @@
 from qiniu.compat import is_seekable
 from qiniu.auth import Auth
 from qiniu.http import qn_http_client, ResponseInfo
+from qiniu.http.endpoint import Endpoint
 from qiniu.utils import b, io_crc32, urlsafe_base64_encode
 
-from qiniu.services.storage.uploaders.abc import ResumeUploaderBase
-from qiniu.services.storage.uploaders.io_chunked import IOChunked
+from ._default_retrier import ProgressRecord, get_default_retrier
+from .abc import ResumeUploaderBase
+from .io_chunked import IOChunked
 
 
 class ResumeUploaderV1(ResumeUploaderBase):
@@ -36,7 +39,7 @@ def _recover_from_record(
         _ResumeUploadV1Context
         """
         if not isinstance(context, _ResumeUploadV1Context):
-            raise TypeError('context must be an instance of _ResumeUploadV1Context')
+            raise TypeError('"context" must be an instance of _ResumeUploadV1Context')
 
         if not self.upload_progress_recorder or not any([file_name, key]):
             return context
@@ -54,7 +57,7 @@ def _recover_from_record(
         record_modify_time = record.get('modify_time', 0)
         record_context = record.get('contexts', [])
 
-        # compact with old sdk(<= v7.11.1)
+        # compat with old sdk (<= v7.11.1)
         if not record_up_hosts or not record_part_size:
             return context
 
@@ -128,23 +131,20 @@ def _try_delete_record(
         self,
         file_name,
         key,
-        context,
-        resp
+        context=None,
+        resp=None
     ):
         """
         Parameters
         ----------
-        file_name: str
-        key: str
+        file_name: str or None
+        key: str or None
         context: _ResumeUploadV1Context
         resp: ResponseInfo
         """
         if not self.upload_progress_recorder or not any([file_name, key]):
             return
-        if resp and context and not any([
-            resp.ok(),
-            resp.status_code == 701 and context.resumed
-        ]):
+        if resp and not resp.ok():
             return
         self.upload_progress_recorder.delete_upload_record(file_name, key)
 
@@ -167,8 +167,46 @@ def _progress_handler(
 
         """
         self._set_to_record(file_name, key, context)
-        if callable(self.progress_handler):
+        if not callable(self.progress_handler):
+            return
+        try:
             self.progress_handler(uploaded_size, total_size)
+        except Exception as err:
+            err.no_need_retry = True
+            raise err
+
+    def _initial_context(
+        self,
+        key,
+        file_name,
+        modify_time
+    ):
+        """
+        Parameters
+        ----------
+        key: str
+        file_name: str
+        modify_time: float
+
+        Returns
+        -------
+        _ResumeUploadV1Context
+        """
+        part_size = 4 * (1024 ** 2)
+        context = _ResumeUploadV1Context(
+            up_hosts=[],
+            part_size=part_size,
+            parts=[],
+            modify_time=modify_time,
+            resumed=False
+        )
+
+        # try to recover from record
+        return self._recover_from_record(
+            key=key,
+            file_name=file_name,
+            context=context
+        )
 
     def initial_parts(
         self,
@@ -176,23 +214,28 @@ def initial_parts(
         key,
         file_path=None,
         data=None,
-        modify_time=None,
         data_size=None,
+        modify_time=None,
+        part_size=None,
         file_name=None,
+        up_endpoint=None,
         **kwargs
     ):
         """
         Parameters
         ----------
-        up_token
-        key
-        file_path
-        data
-        modify_time
-        data_size
-        file_name
+        up_token: str
+        key: str
+        file_path: str or None
+        data: IOBase or None
+        modify_time: float
+        data_size: int
+        part_size: None
+            ignored for v1, which uses a fixed 4MB part size
+        file_name: str
+        up_endpoint: Endpoint
 
-        kwargs
+        kwargs: dict
 
         Returns
         -------
@@ -217,28 +260,20 @@ def initial_parts(
             else:
                 modify_time = int(time())
 
-        part_size = 4 * (1024 ** 2)
-
-        # -- initial context
-        context = _ResumeUploadV1Context(
-            up_hosts=[],
-            part_size=part_size,
-            parts=[],
-            modify_time=modify_time,
-            resumed=False
-        )
-
-        # try to recover from record
         if not file_name and file_path:
             file_name = path.basename(file_path)
-        context = self._recover_from_record(
-            file_name,
-            key,
-            context
+
+        context = self._initial_context(
+            key=key,
+            file_name=file_name,
+            modify_time=modify_time
         )
 
-        access_key, _, _ = Auth.up_token_decode(up_token)
+        if not context.up_hosts and up_endpoint:
+            context.up_hosts.append(up_endpoint.get_value(self.preferred_scheme))
+
         if not context.up_hosts:
+            access_key, _, _ = Auth.up_token_decode(up_token)
             context.up_hosts.extend(self._get_up_hosts(access_key))
 
         return context, None
@@ -285,7 +320,7 @@ def upload_parts(
         part, resp = None, None
         uploaded_size = context.part_size * len(context.parts)
         if math.ceil(data_size / context.part_size) in [p.part_no for p in context.parts]:
-            # if last part uploaded, should correct the uploaded size
+            # if last part has been uploaded, should correct the uploaded size
             uploaded_size += (data_size % context.part_size) - context.part_size
         lock = Lock()
 
@@ -424,57 +459,95 @@ def complete_parts(
         )
         return ret, resp
 
-    def upload(
+    def __upload_with_retrier(
         self,
-        key,
-        file_path=None,
-        data=None,
-        data_size=None,
-        modify_time=None,
-
-        part_size=None,
-        mime_type=None,
-        metadata=None,
-        file_name=None,
-        custom_vars=None,
-        **kwargs
+        access_key,
+        bucket_name,
+        **upload_opts
     ):
-        """
+        file_name = upload_opts.get('file_name', None)
+        key = upload_opts.get('key', None)
+        modify_time = upload_opts.get('modify_time', None)
 
-        Parameters
-        ----------
-        key
-        file_path
-        data
-        data_size
-        modify_time
+        context = self._initial_context(
+            key=key,
+            file_name=file_name,
+            modify_time=modify_time
+        )
+        preferred_endpoints = None
+        if context.up_hosts:
+            preferred_endpoints = [
+                Endpoint.from_host(h)
+                for h in context.up_hosts
+            ]
 
-        part_size
-        mime_type
-        metadata
-        file_name
-        custom_vars
+        progress_record = None
+        if all([
+            self.upload_progress_recorder,
+            file_name,
+            key
+        ]):
+            progress_record = ProgressRecord(
+                upload_api_version='v1',
+                exists=functools.partial(
+                    self.upload_progress_recorder.has_upload_record,
+                    file_name=file_name,
+                    key=key
+                ),
+                delete=functools.partial(
+                    self.upload_progress_recorder.delete_upload_record,
+                    file_name=file_name,
+                    key=key
+                )
+            )
 
-        kwargs:
-            up_token
-            bucket_name, expires, policy, strict_policy for generate `up_token`
+        retrier = get_default_retrier(
+            regions_provider=self._get_regions_provider(
+                access_key=access_key,
+                bucket_name=bucket_name
+            ),
+            preferred_endpoints_provider=preferred_endpoints,
+            progress_record=progress_record,
+            accelerate_uploading=self.accelerate_uploading,
+        )
 
-        Returns
-        -------
-            ret: dict
-            resp: ResponseInfo
-        """
-        # part_size
-        if part_size:
-            logging.warning('ResumeUploader not support part_size. It is fixed to 4MB.')
+        data = upload_opts.get('data')
+        attempt = None
+        for attempt in retrier:
+            with attempt:
+                upload_opts['up_endpoint'] = attempt.context.get('endpoint')
+                attempt.result = self.__upload(
+                    **upload_opts
+                )
+                ret, resp = attempt.result
+                if resp.ok() and ret:
+                    return attempt.result
+                if (
+                    not is_seekable(data) or
+                    not resp.need_retry()
+                ):
+                    return attempt.result
+                data.seek(0)
 
-        # up_token
-        up_token = kwargs.get('up_token', None)
-        if not up_token:
-            up_token = self.get_up_token(**kwargs)
-        if not file_name and file_path:
-            file_name = path.basename(file_path)
+        if attempt is None:
+            raise RuntimeError('Retrier is not working. attempt is None')
 
+        return attempt.result
+
+    def __upload(
+        self,
+        up_token,
+        key,
+        file_path,
+        file_name,
+        data,
+        data_size,
+        modify_time,
+        mime_type,
+        custom_vars,
+        metadata,
+        up_endpoint
+    ):
         # initial_parts
         context, resp = self.initial_parts(
             up_token,
@@ -484,6 +557,7 @@ def upload(
             data=data,
             data_size=data_size,
             modify_time=modify_time,
+            up_endpoint=up_endpoint
         )
 
         # upload_parts
@@ -526,23 +600,89 @@ def upload(
             metadata=metadata
         )
 
-        # retry if expired. the record file will be deleted by complete_parts
-        if resp.status_code == 701 and context.resumed:
-            return self.upload(
-                key,
-                file_path=file_path,
-                data=data,
-                data_size=data_size,
-                modify_time=modify_time,
+        return ret, resp
 
-                mime_type=mime_type,
-                metadata=metadata,
-                file_name=file_name,
-                custom_vars=custom_vars,
-                **kwargs
-            )
+    def upload(
+        self,
+        key,
+        file_path=None,
+        data=None,
+        data_size=None,
+        modify_time=None,
 
-        return ret, resp
+        part_size=None,
+        mime_type=None,
+        metadata=None,
+        file_name=None,
+        custom_vars=None,
+        **kwargs
+    ):
+        """
+
+        Parameters
+        ----------
+        key: str
+        file_path: str
+        data: IOBase
+        data_size: int
+        modify_time: int
+
+        part_size: int
+            ignored for v1, which uses a fixed 4MB part size
+        mime_type: str
+        metadata: dict
+        file_name: str
+        custom_vars: dict
+
+        kwargs:
+            up_token: str
+            crc32_int: int
+            bucket_name: str
+                required when uploading to another bucket
+            expired: int
+                option for generating up_token when up_token is not provided, in seconds
+            policy: dict
+                option for generating up_token when up_token is not provided; see `auth.Auth` for details
+            strict_policy: bool
+                option for generating up_token when up_token is not provided
+
+        Returns
+        -------
+        ret: dict
+        resp: ResponseInfo
+        """
+        # part_size
+        if part_size:
+            logging.warning('ResumeUploaderV1 does not support part_size; it is fixed to 4MB.')
+
+        # up_token
+        up_token = kwargs.get('up_token', None)
+        if not up_token:
+            kwargs.setdefault('up_token', self.get_up_token(**kwargs))
+            access_key = self.auth.get_access_key()
+        else:
+            access_key, _, _ = Auth.up_token_decode(up_token)
+
+        # bucket_name
+        kwargs['bucket_name'] = Auth.get_bucket_name(kwargs['up_token'])
+
+        # file_name
+        if not file_name and file_path:
+            file_name = path.basename(file_path)
+
+        # upload
+        return self.__upload_with_retrier(
+            access_key=access_key,
+            key=key,
+            file_path=file_path,
+            data=data,
+            data_size=data_size,
+            modify_time=modify_time,
+            mime_type=mime_type,
+            metadata=metadata,
+            file_name=file_name,
+            custom_vars=custom_vars,
+            **kwargs
+        )
 
     def __upload_part(
         self,
@@ -623,11 +763,11 @@ def __get_mkfile_url(
         ----------
         up_host: str
         data_size: int
-        mime_type: str
-        key: str
-        file_name: str
-        params: dict
-        metadata: dict
+        mime_type: str or None
+        key: str or None
+        file_name: str or None
+        params: dict or None
+        metadata: dict or None
 
         Returns
         -------
diff --git a/qiniu/services/storage/uploaders/resume_uploader_v2.py b/qiniu/services/storage/uploaders/resume_uploader_v2.py
index db73b182..3e165e2f 100644
--- a/qiniu/services/storage/uploaders/resume_uploader_v2.py
+++ b/qiniu/services/storage/uploaders/resume_uploader_v2.py
@@ -1,3 +1,4 @@
+import functools
 import math
 from collections import namedtuple
 from concurrent import futures
@@ -9,11 +10,13 @@
 from qiniu.compat import is_seekable
 from qiniu.auth import Auth
 from qiniu.http import qn_http_client, ResponseInfo
+from qiniu.http.endpoint import Endpoint
 from qiniu.utils import b, io_md5, urlsafe_base64_encode
 from qiniu.compat import json
 
-from qiniu.services.storage.uploaders.abc import ResumeUploaderBase
-from qiniu.services.storage.uploaders.io_chunked import IOChunked
+from ._default_retrier import ProgressRecord, get_default_retrier
+from .abc import ResumeUploaderBase
+from .io_chunked import IOChunked
 
 
 class ResumeUploaderV2(ResumeUploaderBase):
@@ -36,7 +39,7 @@ def _recover_from_record(
         _ResumeUploadV2Context
         """
         if not isinstance(context, _ResumeUploadV2Context):
-            raise TypeError('context must be an instance of _ResumeUploadV2Context')
+            raise TypeError('"context" must be an instance of _ResumeUploadV2Context')
 
         if (
             not self.upload_progress_recorder or
@@ -59,7 +62,7 @@ def _recover_from_record(
         record_modify_time = record.get('modify_time', 0)
         record_etags = record.get('etags', [])
 
-        # compact with old sdk(<= v7.11.1)
+        # compat with old sdk (<= v7.11.1)
         if not record_up_hosts or not record_part_size:
             return context
 
@@ -125,8 +128,8 @@ def _try_delete_record(
         self,
         file_name,
         key,
-        context,
-        resp
+        context=None,
+        resp=None
     ):
         """
         Parameters
@@ -138,10 +141,7 @@ def _try_delete_record(
         """
         if not self.upload_progress_recorder or not any([file_name, key]):
             return
-        if resp and context and not any([
-            resp.ok(),
-            resp.status_code == 612 and context.resumed
-        ]):
+        if resp and not resp.ok():
             return
         self.upload_progress_recorder.delete_upload_record(file_name, key)
 
@@ -163,8 +163,38 @@ def _progress_handler(
         total_size: int
         """
         self._set_to_record(file_name, key, context)
-        if callable(self.progress_handler):
+        if not callable(self.progress_handler):
+            return
+        try:
             self.progress_handler(uploaded_size, total_size)
+        except Exception as err:
+            err.no_need_retry = True
+            raise err
+
+    def _initial_context(
+        self,
+        key,
+        file_name,
+        modify_time,
+        part_size
+    ):
+        context = _ResumeUploadV2Context(
+            up_hosts=[],
+            upload_id='',
+            expired_at=0,
+            part_size=part_size,
+            parts=[],
+            modify_time=modify_time,
+            resumed=False
+        )
+
+        # try to recover from record
+
+        return self._recover_from_record(
+            file_name,
+            key,
+            context
+        )
 
     def initial_parts(
         self,
@@ -176,6 +206,7 @@ def initial_parts(
         modify_time=None,
         part_size=None,
         file_name=None,
+        up_endpoint=None,
         **kwargs
     ):
         """
@@ -190,6 +221,7 @@ def initial_parts(
         modify_time: int
         part_size: int
         file_name: str
+        up_endpoint: Endpoint
         kwargs
 
         Returns
@@ -218,23 +250,13 @@ def initial_parts(
             part_size = self.part_size
 
         # -- initial context
-        context = _ResumeUploadV2Context(
-            up_hosts=[],
-            upload_id='',
-            expired_at=0,
-            part_size=part_size,
-            parts=[],
-            modify_time=modify_time,
-            resumed=False
-        )
-
-        # try to recover from record
         if not file_name and file_path:
             file_name = path.basename(file_path)
-        context = self._recover_from_record(
-            file_name,
-            key,
-            context
+        context = self._initial_context(
+            key=key,
+            file_name=file_name,
+            modify_time=modify_time,
+            part_size=part_size
         )
 
         if (
@@ -245,8 +267,11 @@ def initial_parts(
             return context, None
 
         # -- get a new upload id
-        access_key, _, _ = Auth.up_token_decode(up_token)
+        if not context.up_hosts and up_endpoint:
+            context.up_hosts.append(up_endpoint.get_value(scheme=self.preferred_scheme))
+
         if not context.up_hosts:
+            access_key, _, _ = Auth.up_token_decode(up_token)
             context.up_hosts.extend(self._get_up_hosts(access_key))
 
         bucket_name = Auth.get_bucket_name(up_token)
@@ -463,49 +488,100 @@ def complete_parts(
         )
         return ret, resp
 
-    def upload(
+    def __upload_with_retrier(
         self,
-        key,
-        file_path=None,
-        data=None,
-        data_size=None,
-
-        part_size=None,
-        modify_time=None,
-        mime_type=None,
-        metadata=None,
-        file_name=None,
-        custom_vars=None,
-        **kwargs
+        access_key,
+        bucket_name,
+        **upload_opts
     ):
-        """
-        Parameters
-        ----------
-        key: str
-        file_path: str
-        data: IOBase
-        data_size: int
-        part_size: int
-        modify_time: int
-        mime_type: str
-        metadata: dict
-        file_name: str
-        custom_vars: dict
-        kwargs
-            up_token
-            bucket_name, expires, policy, strict_policy for generate `up_token`
+        file_name = upload_opts.get('file_name', None)
+        key = upload_opts.get('key', None)
+        modify_time = upload_opts.get('modify_time', None)
+        part_size = upload_opts.get('part_size') or self.part_size
 
-        Returns
-        -------
+        context = self._initial_context(
+            key=key,
+            file_name=file_name,
+            modify_time=modify_time,
+            part_size=part_size
+        )
+        preferred_endpoints = None
+        if context.up_hosts:
+            preferred_endpoints = [
+                Endpoint.from_host(h)
+                for h in context.up_hosts
+            ]
 
-        """
-        # up_token
-        up_token = kwargs.get('up_token', None)
-        if not up_token:
-            up_token = self.get_up_token(**kwargs)
-        if not file_name and file_path:
-            file_name = path.basename(file_path)
+        progress_record = None
+        if all(
+            [
+                self.upload_progress_recorder,
+                file_name,
+                key
+            ]
+        ):
+            progress_record = ProgressRecord(
+                upload_api_version='v2',
+                exists=functools.partial(
+                    self.upload_progress_recorder.has_upload_record,
+                    file_name=file_name,
+                    key=key
+                ),
+                delete=functools.partial(
+                    self.upload_progress_recorder.delete_upload_record,
+                    file_name=file_name,
+                    key=key
+                )
+            )
+
+        retrier = get_default_retrier(
+            regions_provider=self._get_regions_provider(
+                access_key=access_key,
+                bucket_name=bucket_name
+            ),
+            preferred_endpoints_provider=preferred_endpoints,
+            progress_record=progress_record,
+            accelerate_uploading=self.accelerate_uploading
+        )
+
+        data = upload_opts.get('data')
+        attempt = None
+        for attempt in retrier:
+            with attempt:
+                upload_opts['up_endpoint'] = attempt.context.get('endpoint')
+                attempt.result = self.__upload(
+                    **upload_opts
+                )
+                ret, resp = attempt.result
+                if resp.ok() and ret:
+                    return attempt.result
+                if (
+                    not is_seekable(data) or
+                    not resp.need_retry()
+                ):
+                    return attempt.result
+                data.seek(0)
+
+        if attempt is None:
+            raise RuntimeError('Retrier is not working. attempt is None')
 
+        return attempt.result
+
+    def __upload(
+        self,
+        up_token,
+        key,
+        file_path,
+        file_name,
+        data,
+        data_size,
+        part_size,
+        modify_time,
+        mime_type,
+        custom_vars,
+        metadata,
+        up_endpoint
+    ):
         # initial_parts
         context, resp = self.initial_parts(
             up_token,
@@ -515,7 +591,8 @@ def upload(
             data=data,
             data_size=data_size,
             modify_time=modify_time,
-            part_size=part_size
+            part_size=part_size,
+            up_endpoint=up_endpoint
         )
 
         if (
@@ -550,20 +627,6 @@ def upload(
                 data.close()
 
         if resp and not resp.ok():
-            if resp.status_code == 612 and context.resumed:
-                return self.upload(
-                    key,
-                    file_path=file_path,
-                    data=data,
-                    data_size=data_size,
-                    modify_time=modify_time,
-
-                    mime_type=mime_type,
-                    metadata=metadata,
-                    file_name=file_name,
-                    custom_vars=custom_vars,
-                    **kwargs
-                )
             return ret, resp
 
         # complete parts
@@ -579,23 +642,78 @@ def upload(
             metadata=metadata
         )
 
-        # retry if expired. the record file will be deleted by complete_parts
-        if resp.status_code == 612 and context.resumed:
-            return self.upload(
-                key,
-                file_path=file_path,
-                data=data,
-                data_size=data_size,
-                modify_time=modify_time,
+        return ret, resp
 
-                mime_type=mime_type,
-                metadata=metadata,
-                file_name=file_name,
-                custom_vars=custom_vars,
-                **kwargs
-            )
+    def upload(
+        self,
+        key,
+        file_path=None,
+        data=None,
+        data_size=None,
 
-        return ret, resp
+        part_size=None,
+        modify_time=None,
+        mime_type=None,
+        metadata=None,
+        file_name=None,
+        custom_vars=None,
+        **kwargs
+    ):
+        """
+        Parameters
+        ----------
+        key: str
+        file_path: str
+        data: IOBase
+        data_size: int
+        part_size: int
+        modify_time: int
+        mime_type: str
+        metadata: dict
+        file_name: str
+        custom_vars: dict
+        kwargs
+            up_token: str
+            bucket_name: str
+            expired: int
+            policy: dict
+            strict_policy: bool
+
+        Returns
+        -------
+        ret: dict
+        resp: ResponseInfo
+        """
+        # up_token
+        up_token = kwargs.get('up_token', None)
+        if not up_token:
+            kwargs.setdefault('up_token', self.get_up_token(**kwargs))
+            access_key = self.auth.get_access_key()
+        else:
+            access_key, _, _ = Auth.up_token_decode(up_token)
+
+        # bucket_name
+        kwargs['bucket_name'] = Auth.get_bucket_name(kwargs['up_token'])
+
+        # file_name
+        if not file_name and file_path:
+            file_name = path.basename(file_path)
+
+        # upload
+        return self.__upload_with_retrier(
+            access_key=access_key,
+            key=key,
+            file_path=file_path,
+            file_name=file_name,
+            data=data,
+            data_size=data_size,
+            part_size=part_size,
+            modify_time=modify_time,
+            mime_type=mime_type,
+            custom_vars=custom_vars,
+            metadata=metadata,
+            **kwargs
+        )
 
     def __get_url_for_upload(
         self,
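Both resume uploaders replay the up_hosts recovered from an old record as preferred endpoints, so a resumed upload keeps talking to the host that already holds its parts. A sketch (Endpoint.from_host is added by this PR; the host value is hypothetical):

    from qiniu.http.endpoint import Endpoint

    recovered_up_hosts = ['https://up-z0.qiniup.com']  # as stored in a resume record
    preferred_endpoints = [Endpoint.from_host(h) for h in recovered_up_hosts]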
diff --git a/qiniu/utils.py b/qiniu/utils.py
index fa750707..f8517e35 100644
--- a/qiniu/utils.py
+++ b/qiniu/utils.py
@@ -121,15 +121,21 @@ def _sha1(data):
 
 
 def etag_stream(input_stream):
-    """计算输入流的etag:
+    """
+    Calculate the etag of the input stream
 
-    etag规格参考 https://developer.qiniu.com/kodo/manual/1231/appendix#3
+    .. deprecated::
+        Does not work when v2 multipart upload uses a part size other than 4MB
 
-    Args:
-        input_stream: 待计算etag的二进制流
+    Parameters
+    ----------
+    input_stream: io.IOBase
+        a file-like object that supports random access
+
+    Returns
+    -------
+    str
 
-    Returns:
-        输入流的etag值
     """
     array = [_sha1(block) for block in _file_iter(input_stream, _BLOCK_SIZE)]
     if len(array) == 0:
@@ -145,12 +151,21 @@ def etag_stream(input_stream):
 
 
 def etag(filePath):
-    """计算文件的etag:
+    """
+    Calculate the etag of a file
 
-    Args:
-        filePath: 待计算etag的文件路径
+    .. deprecated::
+        Does not work when v2 multipart upload uses a part size other than 4MB
 
-    Returns:
+
+    Parameters
+    ----------
+    filePath: str
+        path of the file whose etag will be calculated
+
+    Returns
+    -------
+    str
         the etag of the input file
     """
     with open(filePath, 'rb') as f:
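Given the deprecation notes, etag()/etag_stream() still match the server-side hash for form uploads, v1 resumable uploads, and v2 uploads with the default 4MB part size. A quick sketch with an in-memory stream:

    from io import BytesIO
    from qiniu.utils import etag_stream

    # the result is a urlsafe-base64 string; it only equals the remote hash
    # when the object was uploaded with a 4MB part size (or in one shot)
    print(etag_stream(BytesIO(b'hello qiniu')))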
diff --git a/qiniu/zone.py b/qiniu/zone.py
index acb34d00..0a213eaa 100644
--- a/qiniu/zone.py
+++ b/qiniu/zone.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 
-from qiniu.region import Region
+from qiniu.region import LegacyRegion
 
 
-class Zone(Region):
+class Zone(LegacyRegion):
     pass
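Zone now inherits from LegacyRegion, so code that pins hosts through Zone keeps working unchanged. A sketch, assuming LegacyRegion accepts the constructor keywords this diff uses elsewhere (up_host, up_host_backup, io_host); the hosts are hypothetical:

    from qiniu import Zone, set_default

    zone = Zone(
        up_host='https://up.example.com',
        up_host_backup=['https://up-backup.example.com'],
        io_host='https://io.example.com'
    )
    set_default(default_zone=zone)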
diff --git a/setup.py b/setup.py
index 5ddc9c4d..cf97eae2 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,8 @@ def find_version(*file_paths):
     install_requires=[
         'requests; python_version >= "3.7"',
         'requests<2.28; python_version < "3.7"',
-        'futures; python_version == "2.7"'
+        'futures; python_version == "2.7"',
+        'enum34; python_version == "2.7"'
     ],
     extras_require={
         'dev': [
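enum34 backports the stdlib enum module to Python 2.7; the new ServiceName enum that the region and retry code imports depends on it. A quick check, assuming the SDK is installed:

    from qiniu.http.region import ServiceName

    # the two upload services the default retrier rotates between
    print(ServiceName.UP, ServiceName.UP_ACC)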
diff --git a/test_qiniu.py b/test_qiniu.py
index c048d4c3..2b71aa22 100644
--- a/test_qiniu.py
+++ b/test_qiniu.py
@@ -45,11 +45,6 @@
 hostscache_dir = None
 
 
-dummy_access_key = 'abcdefghklmnopq'
-dummy_secret_key = '1234567890'
-dummy_auth = Auth(dummy_access_key, dummy_secret_key)
-
-
 def rand_string(length):
     lib = string.ascii_uppercase
     return ''.join([random.choice(lib) for i in range(0, length)])
@@ -193,172 +188,6 @@ def test_decode_entry(self):
             assert key == c.get('expect').get('key'), c.get('msg')
 
 
-class AuthTestCase(unittest.TestCase):
-    def test_token(self):
-        token = dummy_auth.token('test')
-        assert token == 'abcdefghklmnopq:mSNBTR7uS2crJsyFr2Amwv1LaYg='
-
-    def test_token_with_data(self):
-        token = dummy_auth.token_with_data('test')
-        assert token == 'abcdefghklmnopq:-jP8eEV9v48MkYiBGs81aDxl60E=:dGVzdA=='
-
-    def test_noKey(self):
-        with pytest.raises(ValueError):
-            Auth(None, None).token('nokey')
-        with pytest.raises(ValueError):
-            Auth('', '').token('nokey')
-
-    def test_token_of_request(self):
-        token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', '')
-        assert token == 'abcdefghklmnopq:cFyRVoWrE3IugPIMP5YJFTO-O-Y='
-        token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', 'application/x-www-form-urlencoded')
-        assert token == 'abcdefghklmnopq:svWRNcacOE-YMsc70nuIYdaa1e4='
-
-    def test_QiniuMacRequestsAuth(self):
-        auth = QiniuMacAuth("ak", "sk")
-        test_cases = [
-            {
-                "method": "GET",
-                "host": None,
-                "url": "",
-                "qheaders": {
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                    "Content-Type": "application/x-www-form-urlencoded",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "{\"name\": \"test\"}",
-                "except_sign_token": "ak:0i1vKClRDWFyNkcTFzwcE7PzX74=",
-            },
-            {
-                "method": "GET",
-                "host": None,
-                "url": "",
-                "qheaders": {
-                    "Content-Type": "application/json",
-                },
-                "content_type": "application/json",
-                "body": "{\"name\": \"test\"}",
-                "except_sign_token": "ak:K1DI0goT05yhGizDFE5FiPJxAj4=",
-            },
-            {
-                "method": "POST",
-                "host": None,
-                "url": "",
-                "qheaders": {
-                    "Content-Type": "application/json",
-                    "X-Qiniu": "b",
-                },
-                "content_type": "application/json",
-                "body": "{\"name\": \"test\"}",
-                "except_sign_token": "ak:0ujEjW_vLRZxebsveBgqa3JyQ-w=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com",
-                "qheaders": {
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                    "Content-Type": "application/x-www-form-urlencoded",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "{\"name\": \"test\"}",
-                "except_sign_token": "ak:GShw5NitGmd5TLoo38nDkGUofRw=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com",
-                "qheaders": {
-                    "Content-Type": "application/json",
-                    "X-Qiniu-Bbb": "BBB",
-                    "X-Qiniu-Aaa": "DDD",
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                },
-                "content_type": "application/json",
-                "body": "{\"name\": \"test\"}",
-                "except_sign_token": "ak:DhNA1UCaBqSHCsQjMOLRfVn63GQ=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com",
-                "qheaders": {
-                    "Content-Type": "application/x-www-form-urlencoded",
-                    "X-Qiniu-Bbb": "BBB",
-                    "X-Qiniu-Aaa": "DDD",
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "name=test&language=go",
-                "except_sign_token": "ak:KUAhrYh32P9bv0COD8ugZjDCmII=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com",
-                "qheaders": {
-                    "Content-Type": "application/x-www-form-urlencoded",
-                    "X-Qiniu-Bbb": "BBB",
-                    "X-Qiniu-Aaa": "DDD",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "name=test&language=go",
-                "except_sign_token": "ak:KUAhrYh32P9bv0COD8ugZjDCmII=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com/mkfile/sdf.jpg",
-                "qheaders": {
-                    "Content-Type": "application/x-www-form-urlencoded",
-                    "X-Qiniu-Bbb": "BBB",
-                    "X-Qiniu-Aaa": "DDD",
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "name=test&language=go",
-                "except_sign_token": "ak:fkRck5_LeyfwdkyyLk-hyNwGKac=",
-            },
-            {
-                "method": "GET",
-                "host": "upload.qiniup.com",
-                "url": "http://upload.qiniup.com/mkfile/sdf.jpg?s=er3&df",
-                "qheaders": {
-                    "Content-Type": "application/x-www-form-urlencoded",
-                    "X-Qiniu-Bbb": "BBB",
-                    "X-Qiniu-Aaa": "DDD",
-                    "X-Qiniu-": "a",
-                    "X-Qiniu": "b",
-                },
-                "content_type": "application/x-www-form-urlencoded",
-                "body": "name=test&language=go",
-                "except_sign_token": "ak:PUFPWsEUIpk_dzUvvxTTmwhp3p4=",
-            },
-        ]
-
-        for test_case in test_cases:
-            sign_token = auth.token_of_request(
-                method=test_case["method"],
-                host=test_case["host"],
-                url=test_case["url"],
-                qheaders=auth.qiniu_headers(test_case["qheaders"]),
-                content_type=test_case["content_type"],
-                body=test_case["body"],
-            )
-            assert sign_token == test_case["except_sign_token"]
-
-    def test_verify_callback(self):
-        body = 'name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123'
-        url = 'test.qiniu.com/callback'
-        ok = dummy_auth.verify_callback('QBox abcdefghklmnopq:ZWyeM5ljWMRFwuPTPOwQ4RwSto4=', url, body)
-        assert ok
-
-
 class BucketTestCase(unittest.TestCase):
     q = Auth(access_key, secret_key)
     bucket = BucketManager(q)
@@ -368,8 +197,7 @@ def test_list(self):
         assert eof is False
         assert len(ret.get('items')) == 4
         ret, eof, info = self.bucket.list(bucket_name, limit=1000)
-        print(ret, eof, info)
-        assert info.status_code == 200
+        assert info.status_code == 200, info
 
     def test_buckets(self):
         ret, info = self.bucket.buckets()
@@ -606,18 +434,6 @@ def test_private_url(self):
         assert r.status_code == 200
 
 
-class MediaTestCase(unittest.TestCase):
-    def test_pfop(self):
-        q = Auth(access_key, secret_key)
-        pfop = PersistentFop(q, 'testres', 'sdktest')
-        op = op_save('avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240', 'pythonsdk', 'pfoptest')
-        ops = []
-        ops.append(op)
-        ret, info = pfop.execute('sintel_trailer.mp4', ops, 1)
-        print(info)
-        assert ret['persistentId'] is not None
-
-
 class EtagTestCase(unittest.TestCase):
     def test_zero_size(self):
         open("x", 'a').close()
@@ -648,80 +464,6 @@ def test_get_domain(self):
         assert info.status_code == 200
 
 
-class RegionTestCase(unittest.TestCase):
-    test_rs_host = 'test.region.compatible.config.rs'
-    test_rsf_host = 'test.region.compatible.config.rsf'
-
-    @staticmethod
-    def restore_hosts():
-        set_default(
-            default_rs_host=qiniu.config.RS_HOST,
-            default_rsf_host=qiniu.config.RSF_HOST,
-            default_uc_host=qiniu.config.UC_HOST,
-            default_query_region_host=qiniu.config.QUERY_REGION_HOST,
-            default_query_region_backup_hosts=[
-                'uc.qbox.me',
-                'api.qiniu.com'
-            ]
-        )
-        qiniu.config._is_customized_default['default_rs_host'] = False
-        qiniu.config._is_customized_default['default_rsf_host'] = False
-        qiniu.config._is_customized_default['default_uc_host'] = False
-        qiniu.config._is_customized_default['default_query_region_host'] = False
-        qiniu.config._is_customized_default['default_query_region_backup_hosts'] = False
-
-    def test_config_compatible(self):
-        try:
-            set_default(default_rs_host=self.test_rs_host)
-            set_default(default_rsf_host=self.test_rsf_host)
-            zone = Zone()
-            assert zone.get_rs_host("mock_ak", "mock_bucket") == self.test_rs_host
-            assert zone.get_rsf_host("mock_ak", "mock_bucket") == self.test_rsf_host
-        finally:
-            RegionTestCase.restore_hosts()
-
-    def test_query_region_with_custom_domain(self):
-        try:
-            set_default(
-                default_query_region_host='https://fake-uc.phpsdk.qiniu.com'
-            )
-            zone = Zone()
-            data = zone.bucket_hosts(access_key, bucket_name)
-            assert data != 'null'
-        finally:
-            RegionTestCase.restore_hosts()
-
-    def test_query_region_with_backup_domains(self):
-        try:
-            set_default(
-                default_query_region_host='https://fake-uc.phpsdk.qiniu.com',
-                default_query_region_backup_hosts=[
-                    'unavailable-uc.phpsdk.qiniu.com',
-                    'uc.qbox.me'
-                ]
-            )
-            zone = Zone()
-            data = zone.bucket_hosts(access_key, bucket_name)
-            assert data != 'null'
-        finally:
-            RegionTestCase.restore_hosts()
-
-    def test_query_region_with_uc_and_backup_domains(self):
-        try:
-            set_default(
-                default_uc_host='https://fake-uc.phpsdk.qiniu.com',
-                default_query_region_backup_hosts=[
-                    'unavailable-uc.phpsdk.qiniu.com',
-                    'uc.qbox.me'
-                ]
-            )
-            zone = Zone()
-            data = zone.bucket_hosts(access_key, bucket_name)
-            assert data != 'null'
-        finally:
-            RegionTestCase.restore_hosts()
-
-
 class ReadWithoutSeek(object):
     def __init__(self, str):
         self.str = str
diff --git a/tests/cases/conftest.py b/tests/cases/conftest.py
index dcf802f7..13f41618 100644
--- a/tests/cases/conftest.py
+++ b/tests/cases/conftest.py
@@ -1,10 +1,11 @@
 # -*- coding: utf-8 -*-
 import os
+import random
+import string
 
 import pytest
 
 from qiniu import config as qn_config
-from qiniu import region
 from qiniu import Auth
 
 
@@ -23,6 +24,16 @@ def bucket_name():
     yield os.getenv('QINIU_TEST_BUCKET')
 
 
+@pytest.fixture(scope='session')
+def no_acc_bucket_name():
+    yield os.getenv('QINIU_TEST_NO_ACC_BUCKET')
+
+
+@pytest.fixture(scope='session')
+def download_domain():
+    yield os.getenv('QINIU_TEST_DOMAIN')
+
+
 @pytest.fixture(scope='session')
 def upload_callback_url():
     yield os.getenv('QINIU_UPLOAD_CALLBACK_URL')
@@ -48,11 +59,12 @@ def set_conf_default(request):
         qn_config.set_default(**request.param)
     yield
     qn_config._config = {
-        'default_zone': region.Region(),
+        'default_zone': None,
         'default_rs_host': qn_config.RS_HOST,
         'default_rsf_host': qn_config.RSF_HOST,
         'default_api_host': qn_config.API_HOST,
         'default_uc_host': qn_config.UC_HOST,
+        'default_uc_backup_hosts': qn_config.UC_BACKUP_HOSTS,
         'default_query_region_host': qn_config.QUERY_REGION_HOST,
         'default_query_region_backup_hosts': [
             'uc.qbox.me',
@@ -79,3 +91,30 @@ def set_conf_default(request):
         'connection_pool': False,
         'default_upload_threshold': False
     }
+
+
+@pytest.fixture(scope='session')
+def rand_string():
+    def _rand_string(length):
+        # switch to random.choices once the minimum supported Python version is >= 3.6
+        return ''.join(
+            random.choice(string.ascii_letters + string.digits)
+            for _ in range(length)
+        )
+    yield _rand_string
+
+
+class Ref:
+    """
+    python2 does not support the nonlocal keyword
+    """
+    def __init__(self, value=None):
+        self.value = value
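+
+    # hypothetical usage sketch: mutate enclosing state from a closure
+    # without nonlocal:
+    #   counter = Ref(0)
+    #   def incr():
+    #       counter.value += 1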
+
+
+@pytest.fixture(scope='session')
+def use_ref():
+    def _use_ref(value):
+        return Ref(value)
+
+    yield _use_ref
diff --git a/tests/cases/test_auth.py b/tests/cases/test_auth.py
new file mode 100644
index 00000000..d294f018
--- /dev/null
+++ b/tests/cases/test_auth.py
@@ -0,0 +1,212 @@
+import pytest
+
+from qiniu.auth import Auth, QiniuMacAuth
+
+
+@pytest.fixture(scope="module")
+def dummy_auth():
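+    # fixed dummy keys keep the expected signatures below deterministic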
+    dummy_access_key = 'abcdefghklmnopq'
+    dummy_secret_key = '1234567890'
+    yield Auth(dummy_access_key, dummy_secret_key)
+
+
+class TestAuth:
+    def test_token(self, dummy_auth):
+        token = dummy_auth.token('test')
+        assert token == 'abcdefghklmnopq:mSNBTR7uS2crJsyFr2Amwv1LaYg='
+
+    def test_token_with_data(self, dummy_auth):
+        token = dummy_auth.token_with_data('test')
+        assert token == 'abcdefghklmnopq:-jP8eEV9v48MkYiBGs81aDxl60E=:dGVzdA=='
+
+    def test_nokey(self, dummy_auth):
+        with pytest.raises(ValueError):
+            Auth(None, None).token('nokey')
+        with pytest.raises(ValueError):
+            Auth('', '').token('nokey')
+
+    def test_token_of_request(self, dummy_auth):
+        token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', '')
+        assert token == 'abcdefghklmnopq:cFyRVoWrE3IugPIMP5YJFTO-O-Y='
+        token = dummy_auth.token_of_request('https://www.qiniu.com?go=1', 'test', 'application/x-www-form-urlencoded')
+        assert token == 'abcdefghklmnopq:svWRNcacOE-YMsc70nuIYdaa1e4='
+
+    @pytest.mark.parametrize(
+        'opts, expect_token',
+        [
+            (
+                {
+                    "method": "GET",
+                    "host": None,
+                    "url": "",
+                    "qheaders": {
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                        "Content-Type": "application/x-www-form-urlencoded",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "{\"name\": \"test\"}",
+                },
+                "ak:0i1vKClRDWFyNkcTFzwcE7PzX74=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": None,
+                    "url": "",
+                    "qheaders": {
+                        "Content-Type": "application/json",
+                    },
+                    "content_type": "application/json",
+                    "body": "{\"name\": \"test\"}",
+                },
+                "ak:K1DI0goT05yhGizDFE5FiPJxAj4=",
+            ),
+            (
+                {
+                    "method": "POST",
+                    "host": None,
+                    "url": "",
+                    "qheaders": {
+                        "Content-Type": "application/json",
+                        "X-Qiniu": "b",
+                    },
+                    "content_type": "application/json",
+                    "body": "{\"name\": \"test\"}",
+                },
+                "ak:0ujEjW_vLRZxebsveBgqa3JyQ-w=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com",
+                    "qheaders": {
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                        "Content-Type": "application/x-www-form-urlencoded",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "{\"name\": \"test\"}",
+                },
+                "ak:GShw5NitGmd5TLoo38nDkGUofRw=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com",
+                    "qheaders": {
+                        "Content-Type": "application/json",
+                        "X-Qiniu-Bbb": "BBB",
+                        "X-Qiniu-Aaa": "DDD",
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                    },
+                    "content_type": "application/json",
+                    "body": "{\"name\": \"test\"}",
+                },
+                "ak:DhNA1UCaBqSHCsQjMOLRfVn63GQ=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com",
+                    "qheaders": {
+                        "Content-Type": "application/x-www-form-urlencoded",
+                        "X-Qiniu-Bbb": "BBB",
+                        "X-Qiniu-Aaa": "DDD",
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "name=test&language=go",
+                },
+                "ak:KUAhrYh32P9bv0COD8ugZjDCmII=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com",
+                    "qheaders": {
+                        "Content-Type": "application/x-www-form-urlencoded",
+                        "X-Qiniu-Bbb": "BBB",
+                        "X-Qiniu-Aaa": "DDD",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "name=test&language=go",
+                },
+                "ak:KUAhrYh32P9bv0COD8ugZjDCmII=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com/mkfile/sdf.jpg",
+                    "qheaders": {
+                        "Content-Type": "application/x-www-form-urlencoded",
+                        "X-Qiniu-Bbb": "BBB",
+                        "X-Qiniu-Aaa": "DDD",
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "name=test&language=go",
+                },
+                "ak:fkRck5_LeyfwdkyyLk-hyNwGKac=",
+            ),
+            (
+                {
+                    "method": "GET",
+                    "host": "upload.qiniup.com",
+                    "url": "http://upload.qiniup.com/mkfile/sdf.jpg?s=er3&df",
+                    "qheaders": {
+                        "Content-Type": "application/x-www-form-urlencoded",
+                        "X-Qiniu-Bbb": "BBB",
+                        "X-Qiniu-Aaa": "DDD",
+                        "X-Qiniu-": "a",
+                        "X-Qiniu": "b",
+                    },
+                    "content_type": "application/x-www-form-urlencoded",
+                    "body": "name=test&language=go",
+                },
+                "ak:PUFPWsEUIpk_dzUvvxTTmwhp3p4=",
+            )
+        ]
+    )
+    def test_qiniu_mac_requests_auth(self, dummy_auth, opts, expect_token):
+        auth = QiniuMacAuth("ak", "sk")
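+        # the two form-encoded cases above share a token, so the bare
+        # "X-Qiniu"/"X-Qiniu-" headers evidently do not take part in signing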
+
+        sign_token = auth.token_of_request(
+            method=opts["method"],
+            host=opts["host"],
+            url=opts["url"],
+            qheaders=auth.qiniu_headers(opts["qheaders"]),
+            content_type=opts["content_type"],
+            body=opts["body"],
+        )
+        assert sign_token == expect_token
+
+    def test_qbox_verify_callback(self, dummy_auth):
+        ok = dummy_auth.verify_callback(
+            'QBox abcdefghklmnopq:T7F-SjxX7X2zI4Fc1vANiNt1AUE=',
+            url='https://test.qiniu.com/callback',
+            body='name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123'
+        )
+        assert ok
+
+    def test_qiniu_verify_token(self, dummy_auth):
+        ok = dummy_auth.verify_callback(
+            'Qiniu abcdefghklmnopq:ZqS7EZuAKrhZaEIxqNGxDJi41IQ=',
+            url='https://test.qiniu.com/callback',
+            body='name=sunflower.jpg&hash=Fn6qeQi4VDLQ347NiRm-RlQx_4O2&location=Shanghai&price=1500.00&uid=123',
+            content_type='application/x-www-form-urlencoded',
+            method='GET',
+            headers={
+                'X-Qiniu-Bbb': 'BBB',
+            }
+        )
+        assert ok
+
diff --git a/tests/cases/test_http/test_endpoint.py b/tests/cases/test_http/test_endpoint.py
new file mode 100644
index 00000000..9bfbeb66
--- /dev/null
+++ b/tests/cases/test_http/test_endpoint.py
@@ -0,0 +1,27 @@
+from qiniu.http.endpoint import Endpoint
+
+
+class TestEndpoint:
+    def test_endpoint_with_default_scheme(self):
+        endpoint = Endpoint('uc.python-sdk.qiniu.com')
+        assert endpoint.get_value() == 'https://uc.python-sdk.qiniu.com'
+
+    def test_endpoint_with_custom_scheme(self):
+        endpoint = Endpoint('uc.python-sdk.qiniu.com', default_scheme='http')
+        assert endpoint.get_value() == 'http://uc.python-sdk.qiniu.com'
+
+    def test_endpoint_with_get_value_with_custom_scheme(self):
+        endpoint = Endpoint('uc.python-sdk.qiniu.com', default_scheme='http')
+        assert endpoint.get_value('https') == 'https://uc.python-sdk.qiniu.com'
+
+    def test_create_endpoint_from_host_with_scheme(self):
+        endpoint = Endpoint.from_host('http://uc.python-sdk.qiniu.com')
+        assert endpoint.default_scheme == 'http'
+        assert endpoint.get_value() == 'http://uc.python-sdk.qiniu.com'
+
+    def test_clone_endpoint(self):
+        endpoint = Endpoint('uc.python-sdk.qiniu.com')
+        another_endpoint = endpoint.clone()
+        another_endpoint.host = 'another-uc.python-sdk.qiniu.com'
+        assert endpoint.get_value() == 'https://uc.python-sdk.qiniu.com'
+        assert another_endpoint.get_value() == 'https://another-uc.python-sdk.qiniu.com'
diff --git a/tests/cases/test_http/test_endpoints_retry_policy.py b/tests/cases/test_http/test_endpoints_retry_policy.py
new file mode 100644
index 00000000..a8135ca2
--- /dev/null
+++ b/tests/cases/test_http/test_endpoints_retry_policy.py
@@ -0,0 +1,75 @@
+import pytest
+
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.endpoints_retry_policy import EndpointsRetryPolicy
+from qiniu.retry.attempt import Attempt
+
+
+@pytest.fixture(scope='function')
+def mocked_endpoints_provider():
+    yield [
+        Endpoint('a'),
+        Endpoint('b'),
+        Endpoint('c')
+    ]
+
+
+class TestEndpointsRetryPolicy:
+    def test_init_context(self, mocked_endpoints_provider):
+        endpoints_retry_policy = EndpointsRetryPolicy(
+            endpoints_provider=mocked_endpoints_provider
+        )
+
+        mocked_context = {}
+        endpoints_retry_policy.init_context(mocked_context)
+
+        assert mocked_context['endpoint'].get_value() == mocked_endpoints_provider[0].get_value()
+        assert [
+            e.get_value()
+            for e in mocked_context['alternative_endpoints']
+        ] == [
+            e.get_value()
+            for e in mocked_endpoints_provider[1:]
+        ]
+
+    def test_should_retry(self, mocked_endpoints_provider):
+        mocked_attempt = Attempt()
+
+        endpoints_retry_policy = EndpointsRetryPolicy(
+            endpoints_provider=mocked_endpoints_provider
+        )
+        endpoints_retry_policy.init_context(mocked_attempt.context)
+        assert endpoints_retry_policy.should_retry(mocked_attempt)
+
+    def test_prepare_retry(self, mocked_endpoints_provider):
+        mocked_attempt = Attempt()
+
+        endpoints_retry_policy = EndpointsRetryPolicy(
+            endpoints_provider=mocked_endpoints_provider
+        )
+        endpoints_retry_policy.init_context(mocked_attempt.context)
+
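+        # drain the policy, recording each endpoint it yields; the sequence
+        # should cover every endpoint from the provider exactly once, in order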
+        actual_tried_endpoints = [
+            mocked_attempt.context.get('endpoint')
+        ]
+        while endpoints_retry_policy.should_retry(mocked_attempt):
+            endpoints_retry_policy.prepare_retry(mocked_attempt)
+            actual_tried_endpoints.append(mocked_attempt.context.get('endpoint'))
+
+        assert [
+            e.get_value() for e in actual_tried_endpoints
+        ] == [
+            e.get_value() for e in mocked_endpoints_provider
+        ]
+
+    def test_skip_init_context(self, mocked_endpoints_provider):
+        endpoints_retry_policy = EndpointsRetryPolicy(
+            endpoints_provider=mocked_endpoints_provider,
+            skip_init_context=True
+        )
+
+        mocked_context = {}
+        endpoints_retry_policy.init_context(mocked_context)
+
+        assert not mocked_context.get('endpoint')
+        assert not mocked_context.get('alternative_endpoints')
diff --git a/tests/cases/test_http/test_qiniu_conf.py b/tests/cases/test_http/test_qiniu_conf.py
index 3ce4c5a0..29c6fd05 100644
--- a/tests/cases/test_http/test_qiniu_conf.py
+++ b/tests/cases/test_http/test_qiniu_conf.py
@@ -44,7 +44,7 @@ def reset_session():
     yield
 
 
-class TestQiniuConf:
+class TestQiniuConfWithHTTP:
     @pytest.mark.usefixtures('reset_session')
     @pytest.mark.parametrize(
         'set_conf_default',
diff --git a/tests/cases/test_http/test_region.py b/tests/cases/test_http/test_region.py
new file mode 100644
index 00000000..13ac1035
--- /dev/null
+++ b/tests/cases/test_http/test_region.py
@@ -0,0 +1,192 @@
+from datetime import datetime, timedelta
+from itertools import chain
+
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.region import Region, ServiceName
+
+
+class TestRegion:
+    def test_default_options(self):
+        region = Region('z0')
+        assert region.region_id == 'z0'
+        assert region.s3_region_id == 'z0'
+        assert all(k in region.services for k in ServiceName)
+        assert datetime.now() - region.create_time < timedelta(seconds=1)
+        assert region.ttl == 86400
+        assert region.is_live
+
+    def test_custom_options(self):
+        region = Region(
+            region_id='z0',
+            s3_region_id='s3-z0',
+            services={
+                ServiceName.UP: [
+                    Endpoint('uc.python-sdk.qiniu.com')
+                ],
+                'custom-service': [
+                    Endpoint('custom-service.python-sdk.qiniu.com')
+                ]
+            },
+            create_time=datetime.now() - timedelta(days=1),
+            ttl=3600
+        )
+        assert region.region_id == 'z0'
+        assert region.s3_region_id == 's3-z0'
+        assert all(
+            k in region.services
+            for k in chain(ServiceName, ['custom-service'])
+        )
+        assert datetime.now() - region.create_time > timedelta(days=1)
+        assert region.ttl == 3600
+        assert not region.is_live
+
+    def test_from_region_id(self):
+        region = Region.from_region_id('z0')
+
+        expect_services_endpoint_value = {
+            ServiceName.UC: [
+                'https://uc.qiniuapi.com'
+            ],
+            ServiceName.UP: [
+                'https://upload.qiniup.com',
+                'https://up.qiniup.com'
+            ],
+            ServiceName.UP_ACC: [],
+            ServiceName.IO: [
+                'https://iovip.qiniuio.com',
+            ],
+            ServiceName.RS: [
+                'https://rs-z0.qiniuapi.com',
+            ],
+            ServiceName.RSF: [
+                'https://rsf-z0.qiniuapi.com',
+            ],
+            ServiceName.API: [
+                'https://api-z0.qiniuapi.com',
+            ],
+            ServiceName.S3: [
+                'https://s3.z0.qiniucs.com'
+            ]
+        }
+
+        assert region.region_id == 'z0'
+        assert region.s3_region_id == 'z0'
+
+        assert {
+            k: [
+                e.get_value()
+                for e in v
+            ]
+            for k, v in region.services.items()
+        } == expect_services_endpoint_value
+
+        assert datetime.now() - region.create_time < timedelta(seconds=1)
+        assert region.ttl == 86400
+        assert region.is_live
+
+    def test_from_region_id_with_custom_options(self):
+        preferred_scheme = 'http'
+        custom_service_endpoint = Endpoint('custom-service.python-sdk.qiniu.com')
+        region_z1 = Region.from_region_id(
+            'z1',
+            s3_region_id='s3-z1',
+            ttl=-1,
+            create_time=datetime.fromtimestamp(0),
+            extended_services={
+                'custom-service': [
+                    custom_service_endpoint
+                ]
+            },
+            preferred_scheme=preferred_scheme
+        )
+
+        expect_services_endpoint_value = {
+            ServiceName.UC: [
+                preferred_scheme + '://uc.qiniuapi.com'
+            ],
+            ServiceName.UP: [
+                preferred_scheme + '://upload-z1.qiniup.com',
+                preferred_scheme + '://up-z1.qiniup.com'
+            ],
+            ServiceName.UP_ACC: [],
+            ServiceName.IO: [
+                preferred_scheme + '://iovip-z1.qiniuio.com',
+            ],
+            ServiceName.RS: [
+                preferred_scheme + '://rs-z1.qiniuapi.com',
+            ],
+            ServiceName.RSF: [
+                preferred_scheme + '://rsf-z1.qiniuapi.com',
+            ],
+            ServiceName.API: [
+                preferred_scheme + '://api-z1.qiniuapi.com',
+            ],
+            ServiceName.S3: [
+                preferred_scheme + '://s3.z1.qiniucs.com'
+            ],
+            'custom-service': [
+                custom_service_endpoint.get_value()
+            ]
+        }
+
+        assert region_z1.region_id == 'z1'
+        assert region_z1.s3_region_id == 's3-z1'
+        assert {
+            k: [
+                e.get_value()
+                for e in v
+            ]
+            for k, v in region_z1.services.items()
+        } == expect_services_endpoint_value
+        assert region_z1.ttl == -1
+        assert region_z1.create_time == datetime.fromtimestamp(0)
+        assert region_z1.is_live
+
+    def test_clone(self):
+        region = Region.from_region_id('z0')
+        cloned_region = region.clone()
+        cloned_region.region_id = 'another'
+        cloned_region.services[ServiceName.UP][0].host = 'another-uc.qiniuapi.com'
+        assert region.region_id == 'z0'
+        assert region.services[ServiceName.UP][0].get_value() == 'https://upload.qiniup.com'
+        assert cloned_region.services[ServiceName.UP][0].get_value() == 'https://another-uc.qiniuapi.com'
+
+    def test_merge(self):
+        r1 = Region.from_region_id('z0')
+        r2 = Region(
+            region_id='r2',
+            s3_region_id='s3-r2',
+            services={
+                ServiceName.UP: [
+                    Endpoint('up-r2.python-sdk.qiniu.com')
+                ],
+                'custom-service': [
+                    Endpoint('custom-service-r2.python-sdk.qiniu.com')
+                ]
+            },
+            create_time=datetime.now() - timedelta(days=1),
+            ttl=3600
+        )
+
+        merged_region = Region.merge(r1, r2)
+
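+        # merge keeps the first region's metadata; a service present in both
+        # regions gets both endpoint lists, the first region's endpoints first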
+        assert merged_region.region_id == r1.region_id
+        assert merged_region.s3_region_id == r1.s3_region_id
+        assert merged_region.create_time == r1.create_time
+        assert merged_region.ttl == r1.ttl
+
+        assert all(
+            k in merged_region.services
+            for k in [
+                ServiceName.UP,
+                'custom-service'
+            ]
+        ), merged_region.services.keys()
+
+        for k, v in merged_region.services.items():
+            if k == ServiceName.UP:
+                assert v == list(chain(r1.services[k], r2.services[k]))
+            elif k == 'custom-service':
+                assert v == r2.services[k]
+            else:
+                assert v == r1.services[k]
diff --git a/tests/cases/test_http/test_regions_provider.py b/tests/cases/test_http/test_regions_provider.py
new file mode 100644
index 00000000..163f19d2
--- /dev/null
+++ b/tests/cases/test_http/test_regions_provider.py
@@ -0,0 +1,267 @@
+import os
+import datetime
+import tempfile
+import json
+
+import pytest
+
+from qiniu.compat import urlparse
+from qiniu.config import QUERY_REGION_HOST, QUERY_REGION_BACKUP_HOSTS
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.region import Region
+from qiniu.http.regions_provider import QueryRegionsProvider, CachedRegionsProvider, _global_cache_scope, _persist_region
+
+
+@pytest.fixture(scope='session')
+def query_regions_endpoints_provider():
+    query_region_host = urlparse(QUERY_REGION_HOST).hostname
+    endpoints_provider = [
+        Endpoint(h)
+        for h in [query_region_host] + QUERY_REGION_BACKUP_HOSTS
+    ]
+    yield endpoints_provider
+
+
+@pytest.fixture(scope='function')
+def query_regions_provider(access_key, bucket_name, query_regions_endpoints_provider):
+    query_regions_provider = QueryRegionsProvider(
+        access_key=access_key,
+        bucket_name=bucket_name,
+        endpoints_provider=query_regions_endpoints_provider
+    )
+    yield query_regions_provider
+
+
+class TestQueryRegionsProvider:
+    def test_getter(self, query_regions_provider):
+        ret = list(query_regions_provider)
+        assert len(ret) > 0
+
+    def test_error_with_bad_ak(self, query_regions_endpoints_provider):
+        query_regions_provider = QueryRegionsProvider(
+            access_key='fake',
+            bucket_name='fake',
+            endpoints_provider=query_regions_endpoints_provider
+        )
+        with pytest.raises(Exception) as exc:
+            list(query_regions_provider)
+        assert '612' in str(exc)
+
+    def test_error_with_bad_endpoint(self, query_regions_provider):
+        query_regions_provider.endpoints_provider = [
+            Endpoint('fake-uc.python.qiniu.com')
+        ]
+        with pytest.raises(Exception) as exc:
+            list(query_regions_provider)
+        assert '-1' in str(exc)
+
+    def test_getter_with_retried(self, query_regions_provider, query_regions_endpoints_provider):
+        query_regions_provider.endpoints_provider = [
+            Endpoint('fake-uc.python.qiniu.com'),
+        ] + list(query_regions_endpoints_provider)
+
+        ret = list(query_regions_provider)
+        assert len(ret) > 0
+
+    def test_getter_with_preferred_scheme(self, query_regions_provider):
+        query_regions_provider.preferred_scheme = 'http'
+        for region in query_regions_provider:
+            for endpoints in region.services.values():
+                assert all(
+                    e.get_value().startswith('http://')
+                    for e in endpoints
+                )
+
+
+@pytest.fixture(scope='function')
+def cached_regions_provider(request):
+    if not hasattr(request, 'param') or not isinstance(request.param, dict):
+        request.param = {}
+    request.param.setdefault('cache_key', 'test-cache-key')
+    request.param.setdefault('base_regions_provider', [])
+
+    cached_regions_provider = CachedRegionsProvider(
+        **request.param
+    )
+    yield cached_regions_provider
+
+    # clear the memo_cache, otherwise test cases sharing a cache_key would affect each other
+    _global_cache_scope.memo_cache.clear()
+    persist_path = request.param.get('persist_path')
+    if persist_path:
+        try:
+            os.remove(persist_path)
+        except OSError:
+            pass
+
+
+@pytest.fixture(scope='function')
+def bad_regions_provider():
+    regions_provider = QueryRegionsProvider(
+        access_key='fake',
+        bucket_name='fake',
+        endpoints_provider=[
+            Endpoint('fake-uc.python.qiniu.com')
+        ]
+    )
+    yield regions_provider
+
+
+class TestCachedQueryRegionsProvider:
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {'base_regions_provider': [Region.from_region_id('z0')]},
+        ],
+        indirect=True
+    )
+    def test_getter_normally(self, cached_regions_provider):
+        ret = list(cached_regions_provider)
+        assert len(ret) > 0
+
+    def test_setter(self, cached_regions_provider):
+        regions = [Region.from_region_id('z0')]
+        cached_regions_provider.set_regions(regions)
+        assert list(cached_regions_provider) == regions
+
+    def test_getter_with_expired_file_cache(self, cached_regions_provider):
+        expired_region = Region.from_region_id('z0')
+        expired_region.create_time = datetime.datetime.now()
+
+        r_z0 = Region.from_region_id('z0')
+        r_z0.ttl = 86400
+
+        with open(cached_regions_provider.persist_path, 'w') as f:
+            json.dump({
+                'cacheKey': cached_regions_provider.cache_key,
+                'regions': [_persist_region(r) for r in [expired_region]]
+            }, f)
+
+        cached_regions_provider._cache_scope.memo_cache[cached_regions_provider.cache_key] = [r_z0]
+
+        assert list(cached_regions_provider) == [r_z0]
+        try:
+            os.remove(cached_regions_provider.persist_path)
+        except OSError:
+            pass
+
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {
+                'persist_path': os.path.join(tempfile.gettempdir(), 'test-disable-persist.jsonl'),
+            },
+            {
+                'persist_path': None,
+            }
+        ],
+        indirect=True
+    )
+    def test_disable_persist(self, cached_regions_provider):
+        if cached_regions_provider.persist_path:
+            old_persist_path = cached_regions_provider.persist_path
+            cached_regions_provider.persist_path = None
+        else:
+            old_persist_path = _global_cache_scope.persist_path
+
+        regions = [Region.from_region_id('z0')]
+        cached_regions_provider.set_regions(regions)
+
+        assert list(cached_regions_provider) == regions
+        assert not os.path.exists(old_persist_path)
+
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {
+                'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl'),
+                'base_regions_provider': [Region.from_region_id('z0')]
+            }
+        ],
+        indirect=True
+    )
+    def test_getter_with_base_regions_provider(self, cached_regions_provider):
+        assert not os.path.exists(cached_regions_provider.persist_path)
+        regions = list(cached_regions_provider.base_regions_provider)
+        assert list(cached_regions_provider) == regions
+        line_num = 0
+        with open(cached_regions_provider.persist_path, 'r') as f:
+            for _ in f:
+                line_num += 1
+        assert line_num == 1
+
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {
+                'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl')
+            }
+        ],
+        indirect=True
+    )
+    def test_should_provide_memo_expired_regions_when_base_provider_failed(
+        self,
+        cached_regions_provider,
+        bad_regions_provider
+    ):
+        expired_region = Region.from_region_id('z0')
+        expired_region.create_time = datetime.datetime.fromtimestamp(0)
+        expired_region.ttl = 1
+        cached_regions_provider.set_regions([expired_region])
+        cached_regions_provider.base_regions_provider = bad_regions_provider
+        regions = list(cached_regions_provider)
+        assert len(regions) > 0
+        assert not regions[0].is_live
+
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {
+                'persist_path': os.path.join(tempfile.gettempdir(), 'test-base-provider.jsonl')
+            }
+        ],
+        indirect=True
+    )
+    def test_should_provide_file_expired_regions_when_base_provider_failed(
+        self,
+        cached_regions_provider,
+        bad_regions_provider
+    ):
+        expired_region = Region.from_region_id('z0')
+        expired_region.create_time = datetime.datetime.fromtimestamp(0)
+        expired_region.ttl = 1
+        cached_regions_provider.set_regions([expired_region])
+        cached_regions_provider._cache_scope.memo_cache.clear()
+        cached_regions_provider.base_regions_provider = bad_regions_provider
+        regions = list(cached_regions_provider)
+        assert len(regions) > 0
+        assert not regions[0].is_live
+
+    @pytest.mark.parametrize(
+        'cached_regions_provider',
+        [
+            {
+                'should_shrink_expired_regions': True
+            }
+        ],
+        indirect=True
+    )
+    def test_shrink_with_expired_regions(self, cached_regions_provider):
+        expired_region = Region.from_region_id('z0')
+        expired_region.create_time = datetime.datetime.fromtimestamp(0)
+        expired_region.ttl = 1
+        origin_cache_key = cached_regions_provider.cache_key
+        cached_regions_provider.set_regions([expired_region])
+        cached_regions_provider.cache_key = 'another-cache-key'
+        list(cached_regions_provider)  # trigger __shrink_cache()
+        assert len(cached_regions_provider._cache_scope.memo_cache[origin_cache_key]) == 0
+
+    def test_shrink_with_ignore_expired_regions(self, cached_regions_provider):
+        expired_region = Region.from_region_id('z0')
+        expired_region.create_time = datetime.datetime.fromtimestamp(0)
+        expired_region.ttl = 1
+        origin_cache_key = cached_regions_provider.cache_key
+        cached_regions_provider.set_regions([expired_region])
+        cached_regions_provider.cache_key = 'another-cache-key'
+        list(cached_regions_provider)  # trigger __shrink_cache()
+        assert len(cached_regions_provider._cache_scope.memo_cache[origin_cache_key]) > 0
diff --git a/tests/cases/test_http/test_regions_retry_policy.py b/tests/cases/test_http/test_regions_retry_policy.py
new file mode 100644
index 00000000..add39930
--- /dev/null
+++ b/tests/cases/test_http/test_regions_retry_policy.py
@@ -0,0 +1,263 @@
+import pytest
+
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.region import Region, ServiceName
+from qiniu.http.regions_retry_policy import RegionsRetryPolicy
+from qiniu.retry import Attempt
+
+
+@pytest.fixture(scope='function')
+def mocked_regions_provider():
+    yield [
+        Region.from_region_id('z0'),
+        Region.from_region_id('z1')
+    ]
+
+
+class TestRegionsRetryPolicy:
+    def test_init(self, mocked_regions_provider):
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=mocked_regions_provider,
+            service_names=[ServiceName.UP]
+        )
+
+        mocked_context = {}
+        regions_retry_policy.init_context(mocked_context)
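+        # init_context seeds the current region/service/endpoint and queues the
+        # alternatives that later retries consume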
+
+        assert mocked_context['region'] == mocked_regions_provider[0]
+        assert mocked_context['alternative_regions'] == mocked_regions_provider[1:]
+        assert mocked_context['service_name'] == ServiceName.UP
+        assert mocked_context['alternative_service_names'] == []
+        assert mocked_context['endpoint'] == mocked_regions_provider[0].services[ServiceName.UP][0]
+        assert mocked_context['alternative_endpoints'] == mocked_regions_provider[0].services[ServiceName.UP][1:]
+
+    @pytest.mark.parametrize(
+        'regions,service_names,expect_should_retry,msg',
+        [
+            (
+                [
+                    Region.from_region_id('z0'),
+                    Region.from_region_id('z1')
+                ],
+                [ServiceName.UP],
+                True,
+                'Should retry when there are alternative regions'
+            ),
+            (
+                [
+                    Region.from_region_id(
+                        'z0',
+                        extended_services={
+                            ServiceName.UP_ACC: [
+                                Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com')
+                            ]
+                        }
+                    )
+                ],
+                [ServiceName.UP_ACC, ServiceName.UP],
+                True,
+                'Should retry when there are alternative services'
+            ),
+            (
+                [
+                    Region.from_region_id('z0')
+                ],
+                [ServiceName.UP_ACC, ServiceName.UP],
+                False,
+                'Should not retry when there are no alternative regions or empty endpoint in services'
+            ),
+            (
+                [
+                    Region.from_region_id('z0')
+                ],
+                [ServiceName.UP],
+                False,
+                'Should not retry when there are no alternative regions or services'
+            ),
+        ],
+        ids=lambda v: v if isinstance(v, str) else ''
+    )
+    def test_should_retry(
+        self,
+        regions,
+        service_names,
+        expect_should_retry,
+        msg
+    ):
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=regions,
+            service_names=service_names
+        )
+
+        mocked_attempt = Attempt()
+        regions_retry_policy.init_context(mocked_attempt.context)
+
+        assert regions_retry_policy.should_retry(mocked_attempt) == expect_should_retry, msg
+
+    @pytest.mark.parametrize(
+        'regions,service_names',
+        [
+            (
+                [
+                    Region.from_region_id('z0'),
+                    Region.from_region_id('z1')
+                ],
+                [ServiceName.UP]
+            ),
+            (
+                [
+                    Region.from_region_id(
+                        'z0',
+                        extended_services={
+                            ServiceName.UP_ACC: [
+                                Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com')
+                            ]
+                        }
+                    )
+                ],
+                [ServiceName.UP_ACC, ServiceName.UP]
+            )
+        ]
+    )
+    def test_prepare_retry(self, regions, service_names):
+        mocked_attempt = Attempt()
+
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=regions,
+            service_names=service_names
+        )
+        regions_retry_policy.init_context(mocked_attempt.context)
+
+        actual_tried_endpoints = [
+            mocked_attempt.context.get('endpoint')
+        ]
+        while regions_retry_policy.should_retry(mocked_attempt):
+            regions_retry_policy.prepare_retry(mocked_attempt)
+            actual_tried_endpoints.append(mocked_attempt.context.get('endpoint'))
+
+        # no endpoints retry policy is attached, so only the first endpoint
+        # of each service gets tried
+        expect_tried_endpoints = [
+            r.services[sn][0]
+            for r in regions
+            for sn in service_names
+            if sn in r.services and r.services[sn]
+        ]
+
+        assert [
+            e.get_value()
+            for e in actual_tried_endpoints
+        ] == [
+            e.get_value()
+            for e in expect_tried_endpoints
+        ], (actual_tried_endpoints, expect_tried_endpoints)
+
+    @pytest.mark.parametrize(
+        'regions,service_names,expect_change_region_times',
+        [
+            # two regions: change region once
+            (
+                [
+                    Region.from_region_id('z0'),
+                    Region.from_region_id('z1')
+                ],
+                [ServiceName.UP],
+                1
+            ),
+            # one region, two services: retry the service once, change region zero times
+            (
+                [
+                    Region.from_region_id(
+                        'z0',
+                        extended_services={
+                            ServiceName.UP_ACC: [
+                                Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com')
+                            ]
+                        }
+                    )
+                ],
+                [ServiceName.UP_ACC, ServiceName.UP],
+                0
+            ),
+            # two regions, two services: retry the service once, change region once
+            (
+                [
+                    Region.from_region_id(
+                        'z0',
+                        extended_services={
+                            ServiceName.UP_ACC: [
+                                Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com')
+                            ]
+                        }
+                    ),
+                    Region.from_region_id('z1')
+                ],
+                [ServiceName.UP_ACC, ServiceName.UP],
+                1
+            )
+        ]
+    )
+    def test_on_change_region_option(
+        self,
+        regions,
+        service_names,
+        expect_change_region_times,
+        use_ref
+    ):
+        actual_change_region_times_ref = use_ref(0)
+
+        def handle_change_region(_context):
+            actual_change_region_times_ref.value += 1
+
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=regions,
+            service_names=service_names,
+            on_change_region=handle_change_region
+        )
+
+        mocked_attempt = Attempt()
+        regions_retry_policy.init_context(mocked_attempt.context)
+
+        while regions_retry_policy.should_retry(mocked_attempt):
+            regions_retry_policy.prepare_retry(mocked_attempt)
+
+        assert actual_change_region_times_ref.value == expect_change_region_times
+
+    def test_init_with_preferred_endpoints_option_new_temp_region(self, mocked_regions_provider):
+        preferred_endpoints = [
+            Endpoint('python-sdk.kodo-accelerate.cn-east-1.qiniucs.com')
+        ]
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=mocked_regions_provider,
+            service_names=[ServiceName.UP],
+            preferred_endpoints_provider=preferred_endpoints
+        )
+
+        mocked_context = {}
+        regions_retry_policy.init_context(mocked_context)
+
+        assert mocked_context['region'].region_id == 'preferred_region'
+        assert mocked_context['region'].services[ServiceName.UP] == preferred_endpoints
+        assert mocked_context['alternative_regions'] == list(mocked_regions_provider)
+
+    def test_init_with_preferred_endpoints_option_reorder_regions(self, mocked_regions_provider):
+        mocked_regions = list(mocked_regions_provider)
+        preferred_region_index = 1
+        preferred_endpoints = [
+            mocked_regions[preferred_region_index].services[ServiceName.UP][0]
+        ]
+        regions_retry_policy = RegionsRetryPolicy(
+            regions_provider=mocked_regions_provider,
+            service_names=[ServiceName.UP],
+            preferred_endpoints_provider=preferred_endpoints
+        )
+
+        mocked_context = {}
+        regions_retry_policy.init_context(mocked_context)
+
+        assert mocked_context['region'] == mocked_regions[preferred_region_index]
+        mocked_regions.pop(preferred_region_index)
+        assert mocked_context['alternative_regions'] == mocked_regions
diff --git a/tests/cases/test_retry/__init__.py b/tests/cases/test_retry/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/cases/test_retry/test_retrier.py b/tests/cases/test_retry/test_retrier.py
new file mode 100644
index 00000000..c4d1cd9b
--- /dev/null
+++ b/tests/cases/test_retry/test_retrier.py
@@ -0,0 +1,142 @@
+import qiniu.retry
+import qiniu.retry.abc
+
+
+class MaxRetryPolicy(qiniu.retry.abc.RetryPolicy):
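+    # minimal test policy: retry any raised exception up to max_times, keeping
+    # the per-policy retried count in the shared attempt context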
+    def __init__(self, max_times):
+        super(MaxRetryPolicy, self).__init__()
+        self.max_times = max_times
+
+    def is_important(self, attempt):
+        return attempt.context[self]['retriedTimes'] >= self.max_times
+
+    def init_context(self, context):
+        context[self] = {
+            'retriedTimes': 0
+        }
+
+    def should_retry(self, attempt):
+        if not attempt.exception:
+            return False
+        return attempt.context[self]['retriedTimes'] < self.max_times
+
+    def prepare_retry(self, attempt):
+        pass
+
+    def after_retry(self, attempt, policy):
+        attempt.context[self]['retriedTimes'] += 1
+
+
+class TestRetry:
+    def test_retrier_with_code_block(self, use_ref):
+        retried_times_ref = use_ref(0)
+
+        def handle_before_retry(_attempt, _policy):
+            retried_times_ref.value += 1
+            return True
+
+        max_retry_times = 3
+        retrier = qiniu.retry.Retrier(
+            policies=[
+                MaxRetryPolicy(max_times=max_retry_times)
+            ],
+            before_retry=handle_before_retry
+        )
+
+        tried_times = 0
+        try:
+            for attempt in retrier:
+                with attempt:
+                    tried_times += 1
+                    raise Exception('mocked error')
+        except Exception as err:
+            assert str(err) == 'mocked error'
+
+        assert tried_times == max_retry_times + 1
+        assert retried_times_ref.value == max_retry_times
+
+    def test_retrier_with_try_do(self, use_ref):
+        retried_times_ref = use_ref(0)
+
+        def handle_before_retry(_attempt, _policy):
+            retried_times_ref.value += 1
+            return True
+
+        max_retry_times = 3
+        retrier = qiniu.retry.Retrier(
+            policies=[
+                MaxRetryPolicy(max_times=max_retry_times)
+            ],
+            before_retry=handle_before_retry
+        )
+
+        tried_times_ref = use_ref(0)
+
+        def add_one(n):
+            tried_times_ref.value += 1
+            if tried_times_ref.value <= 3:
+                raise Exception('mock error')
+            return n + 1
+
+        result = retrier.try_do(add_one, 1)
+        assert result == 2
+        assert tried_times_ref.value == max_retry_times + 1
+        assert retried_times_ref.value == max_retry_times
+
+    def test_retrier_with_decorator(self, use_ref):
+        retried_times_ref = use_ref(0)
+
+        def handle_before_retry(_attempt, _policy):
+            retried_times_ref.value += 1
+            return True
+
+        max_retry_times = 3
+        retrier = qiniu.retry.Retrier(
+            policies=[
+                MaxRetryPolicy(max_times=max_retry_times)
+            ],
+            before_retry=handle_before_retry
+        )
+
+        tried_times_ref = use_ref(0)
+
+        @retrier.retry
+        def add_one(n):
+            tried_times_ref.value += 1
+            if tried_times_ref.value <= 3:
+                raise Exception('mock error')
+            return n + 1
+
+        result = add_one(1)
+        assert result == 2
+        assert tried_times_ref.value == max_retry_times + 1
+        assert retried_times_ref.value == max_retry_times
+
+    def test_retrier_with_no_need_retry_err(self, use_ref):
+        retried_times_ref = use_ref(0)
+
+        def handle_before_retry(_attempt, _policy):
+            retried_times_ref.value += 1
+            return True
+
+        max_retry_times = 3
+        retrier = qiniu.retry.Retrier(
+            policies=[
+                MaxRetryPolicy(max_times=max_retry_times)
+            ],
+            before_retry=handle_before_retry
+        )
+
+        tried_times = 0
+        try:
+            for attempt in retrier:
+                with attempt:
+                    tried_times += 1
+                    err = Exception('mocked error')
+                    err.no_need_retry = True
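+                    # no_need_retry marks the error as terminal: the retrier
+                    # re-raises it without retrying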
+                    raise err
+        except Exception as err:
+            assert str(err) == 'mocked error'
+
+        assert tried_times == 1
+        assert retried_times_ref.value == 0
diff --git a/tests/cases/test_services/test_processing/__init__.py b/tests/cases/test_services/test_processing/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/cases/test_services/test_processing/test_pfop.py b/tests/cases/test_services/test_processing/test_pfop.py
new file mode 100644
index 00000000..003be43f
--- /dev/null
+++ b/tests/cases/test_services/test_processing/test_pfop.py
@@ -0,0 +1,49 @@
+import pytest
+
+
+from qiniu import PersistentFop, op_save
+
+
+persistent_id = None
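+# shared across tests in this module: test_pfop_get_status depends on
+# test_pfop_execute running first to populate this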
+
+
+class TestPersistentFop:
+    def test_pfop_execute(self, qn_auth):
+        pfop = PersistentFop(qn_auth, 'testres', 'sdktest')
+        op = op_save('avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240', 'pythonsdk', 'pfoptest')
+        ops = [
+            op
+        ]
+        ret, resp = pfop.execute('sintel_trailer.mp4', ops, 1)
+        assert resp.status_code == 200, resp
+        assert ret['persistentId'] is not None, resp
+        global persistent_id
+        persistent_id = ret['persistentId']
+
+    def test_pfop_get_status(self, qn_auth):
+        assert persistent_id is not None
+        pfop = PersistentFop(qn_auth, 'testres', 'sdktest')
+        ret, resp = pfop.get_status(persistent_id)
+        assert resp.status_code == 200, resp
+        assert ret is not None, resp
+
+    def test_pfop_idle_time_task(self, set_conf_default, qn_auth):
+        persistence_key = 'python-sdk-pfop-test/test-pfop-by-api'
+
+        key = 'sintel_trailer.mp4'
+        pfop = PersistentFop(qn_auth, 'testres')
+        ops = [
+            op_save(
+                op='avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240',
+                bucket='pythonsdk',
+                key=persistence_key
+            )
+        ]
+        ret, resp = pfop.execute(key, ops, force=1, persistent_type=1)
+        assert resp.status_code == 200, resp
+        assert 'persistentId' in ret, resp
+
+        ret, resp = pfop.get_status(ret['persistentId'])
+        assert resp.status_code == 200, resp
+        assert ret['type'] == 1, resp
+        assert ret['creationDate'] is not None, resp
diff --git a/tests/cases/test_services/test_storage/conftest.py b/tests/cases/test_services/test_storage/conftest.py
index a791092c..64b81b20 100644
--- a/tests/cases/test_services/test_storage/conftest.py
+++ b/tests/cases/test_services/test_storage/conftest.py
@@ -1,8 +1,140 @@
+import os
+from collections import namedtuple
+from hashlib import new as hashlib_new
+import tempfile
+
 import pytest
 
+import requests
+
 from qiniu import BucketManager
+from qiniu.utils import io_md5
+from qiniu.config import QUERY_REGION_HOST, QUERY_REGION_BACKUP_HOSTS
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.regions_provider import Region, ServiceName, get_default_regions_provider
 
 
-@pytest.fixture()
+@pytest.fixture(scope='session')
 def bucket_manager(qn_auth):
     yield BucketManager(qn_auth)
+
+
+@pytest.fixture(scope='session')
+def get_remote_object_headers_and_md5(download_domain):
+    def fetch_calc_md5(key=None, scheme=None, url=None):
+        if not key and not url:
+            raise TypeError('Must provide key or url')
+
+        scheme = scheme if scheme is not None else 'http'
+        if url:
+            download_url = url
+        else:
+            download_url = '{}://{}/{}'.format(scheme, download_domain, key)
+
+        resp = requests.get(download_url, stream=True)
+        resp.raise_for_status()
+
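+        # hash the body as it streams so large objects are never fully buffered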
+        return resp.headers, io_md5(resp.iter_content(chunk_size=8192))
+
+    yield fetch_calc_md5
+
+
+@pytest.fixture(scope='session')
+def get_real_regions():
+    def _get_real_regions(access_key, bucket_name):
+        regions = list(
+            get_default_regions_provider(
+                query_endpoints_provider=[
+                    Endpoint.from_host(h)
+                    for h in [QUERY_REGION_HOST] + QUERY_REGION_BACKUP_HOSTS
+                ],
+                access_key=access_key,
+                bucket_name=bucket_name
+            )
+        )
+
+        if not regions:
+            raise RuntimeError('No regions found')
+
+        return regions
+
+    yield _get_real_regions
+
+
+@pytest.fixture(scope='function')
+def regions_with_real_endpoints(access_key, bucket_name, get_real_regions):
+    yield get_real_regions(access_key, bucket_name)
+
+
+@pytest.fixture(scope='function')
+def regions_with_fake_endpoints(regions_with_real_endpoints):
+    """
+    Returns
+    -------
+    list[Region]
+        The first element is the fake region with fake endpoints for every service.
+        The second element is the real region with first fake endpoint for every service.
+        The remaining elements are real regions with real endpoints, if any.
+    """
+    regions = regions_with_real_endpoints
+
+    regions[0].services = {
+        sn: [
+            Endpoint('fake-{0}.python-sdk.qiniu.com'.format(sn.value))
+        ] + endpoints
+        for sn, endpoints in regions[0].services.items()
+    }
+
+    regions.insert(0, Region(
+        'fake-id',
+        'fake-s3-id',
+        services={
+            sn: [
+                Endpoint('fake-region-{0}.python-sdk.qiniu.com'.format(sn.value))
+            ]
+            for sn in ServiceName
+        }
+    ))
+
+    yield regions
+
+
+TempFile = namedtuple(
+    'TempFile',
+    [
+        'path',
+        'md5',
+        'name',
+        'size'
+    ]
+)
+
+
+@pytest.fixture(scope='function')
+def temp_file(request):
+    size = 4 * 1024
+    if hasattr(request, 'param'):
+        size = request.param
+
+    tmp_file_path = tempfile.mktemp()
+    chunk_size = 4 * 1024
+
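+    # fill the file with random bytes chunk by chunk, hashing as we write, so
+    # large sizes never need to fit in memory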
+    md5_hasher = hashlib_new('md5')
+    with open(tmp_file_path, 'wb') as f:
+        remaining_bytes = size
+        while remaining_bytes > 0:
+            chunk = os.urandom(min(chunk_size, remaining_bytes))
+            f.write(chunk)
+            md5_hasher.update(chunk)
+            remaining_bytes -= len(chunk)
+
+    yield TempFile(
+        path=tmp_file_path,
+        md5=md5_hasher.hexdigest(),
+        name=os.path.basename(tmp_file_path),
+        size=size
+    )
+
+    try:
+        os.remove(tmp_file_path)
+    except Exception:
+        pass
diff --git a/tests/cases/test_services/test_storage/test_bucket_manager.py b/tests/cases/test_services/test_storage/test_bucket_manager.py
new file mode 100644
index 00000000..68455652
--- /dev/null
+++ b/tests/cases/test_services/test_storage/test_bucket_manager.py
@@ -0,0 +1,205 @@
+import pytest
+
+from qiniu.services.storage.bucket import BucketManager
+from qiniu.region import LegacyRegion
+from qiniu import build_batch_restore_ar
+
+
+@pytest.fixture(scope='function')
+def object_key(bucket_manager, bucket_name, rand_string):
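+    # copy the long-lived 'copyfrom' object to a unique key for this test;
+    # the copy is deleted again after the test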
+    key_to = 'copyto_' + rand_string(8)
+    bucket_manager.copy(
+        bucket=bucket_name,
+        key='copyfrom',
+        bucket_to=bucket_name,
+        key_to=key_to,
+        force='true'
+    )
+
+    yield key_to
+
+    bucket_manager.delete(bucket_name, key_to)
+
+
+class TestBucketManager:
+    # TODO(lihs): Move the remaining test cases here from test_qiniu.py
+    def test_restore_ar(self, bucket_manager, bucket_name, object_key):
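+        # restore_ar is only valid for Archive storage (type 2), so the first
+        # call must fail until change_type switches the object to Archive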
+        ret, resp = bucket_manager.restore_ar(bucket_name, object_key, 7)
+        assert not resp.ok(), resp
+        ret, resp = bucket_manager.change_type(bucket_name, object_key, 2)
+        assert resp.ok(), resp
+        ret, resp = bucket_manager.restore_ar(bucket_name, object_key, 7)
+        assert resp.ok(), resp
+
+    @pytest.mark.parametrize(
+        'cond,expect_ok',
+        [
+            (
+                None, True
+            ),
+            (
+                {
+                    'mime': 'text/plain'
+                },
+                True
+            ),
+            (
+                {
+                    'mime': 'application/json'
+                },
+                False
+            )
+        ]
+    )
+    def test_change_status(
+        self,
+        bucket_manager,
+        bucket_name,
+        object_key,
+        cond,
+        expect_ok
+    ):
+        ret, resp = bucket_manager.change_status(bucket_name, object_key, 1, cond)
+        assert resp.ok() == expect_ok, resp
+
+    def test_mkbucketv3(self, bucket_manager, rand_string):
+        # verified manually; there is no drop-bucket API to clean up automatically
+        # ret, resp = bucket_manager.mkbucketv3('py-test-' + rand_string(8).lower(), 'z0')
+        # assert resp.ok(), resp
+        pass
+
+    def test_list_bucket(self, bucket_manager, bucket_name):
+        ret, resp = bucket_manager.list_bucket('na0')
+        assert resp.ok(), resp
+        assert any(b.get('tbl') == bucket_name for b in ret)
+
+    def test_bucket_info(self, bucket_manager, bucket_name):
+        ret, resp = bucket_manager.bucket_info(bucket_name)
+        assert resp.ok(), resp
+        for k in [
+            'protected',
+            'private'
+        ]:
+            assert k in ret
+
+    def test_change_bucket_permission(self, bucket_manager, bucket_name):
+        ret, resp = bucket_manager.bucket_info(bucket_name)
+        assert resp.ok(), resp
+        original_private = ret['private']
+        # toggle the permission, then restore the original value below
+        ret, resp = bucket_manager.change_bucket_permission(
+            bucket_name,
+            0 if original_private == 1 else 1
+        )
+        assert resp.ok(), resp
+        ret, resp = bucket_manager.change_bucket_permission(
+            bucket_name,
+            original_private
+        )
+        assert resp.ok(), resp
+
+    def test_batch_restore_ar(
+        self,
+        bucket_manager,
+        bucket_name,
+        object_key
+    ):
+        bucket_manager.change_type(bucket_name, object_key, 2)
+        ops = build_batch_restore_ar(
+            bucket_name,
+            {
+                object_key: 7
+            }
+        )
+        ret, resp = bucket_manager.batch(ops)
+        assert resp.status_code == 200, resp
+        assert len(ret) > 0
+        assert ret[0].get('code') == 200, ret[0]
+
+    def test_compatible_with_zone(self, qn_auth, bucket_name, regions_with_real_endpoints):
+        r = LegacyRegion(
+            io_host='https://fake-io.python-sdk.qiniu.com',
+            rs_host='https://fake-rs.python-sdk.qiniu.com',
+            rsf_host='https://fake-rsf.python-sdk.qiniu.com',
+            api_host='https://fake-api.python-sdk.qiniu.com'
+        )
+        bucket_manager = BucketManager(
+            qn_auth,
+            zone=r
+        )
+
+        # rs host
+        ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html')
+        assert resp.status_code == -1
+        assert ret is None
+
+        # rsf host
+        ret, _eof, resp = bucket_manager.list(bucket_name, '', limit=10)
+        assert resp.status_code == -1
+        assert ret is None
+
+        # io host
+        ret, resp = bucket_manager.prefetch(bucket_name, 'python-sdk.html')
+        assert resp.status_code == -1
+        assert ret is None
+
+        # api host
+        # no API method to test
+
+    @pytest.mark.parametrize(
+        'preferred_scheme',
+        [
+            None,  # default 'http'
+            'http',
+            'https'
+        ]
+    )
+    def test_preferred_scheme(
+        self,
+        qn_auth,
+        bucket_name,
+        preferred_scheme
+    ):
+        bucket_manager = BucketManager(
+            auth=qn_auth,
+            preferred_scheme=preferred_scheme
+        )
+
+        ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html')
+
+        assert ret is not None, resp
+        assert resp.ok(), resp
+
+        expect_scheme = preferred_scheme if preferred_scheme else 'http'
+        assert resp.url.startswith(expect_scheme + '://'), resp.url
+
+    def test_operation_with_regions_and_retrier(
+        self,
+        qn_auth,
+        bucket_name,
+        regions_with_fake_endpoints
+    ):
+        bucket_manager = BucketManager(
+            auth=qn_auth,
+            regions=regions_with_fake_endpoints,
+        )
+
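+        # the fake endpoints are unreachable, so the retrier must fail over to a working endpoint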
+        ret, resp = bucket_manager.stat(bucket_name, 'python-sdk.html')
+
+        assert ret is not None, resp
+        assert resp.ok(), resp
+
+    def test_uc_service_with_retrier(
+        self,
+        qn_auth,
+        bucket_name,
+        regions_with_fake_endpoints
+    ):
+        bucket_manager = BucketManager(
+            auth=qn_auth,
+            regions=regions_with_fake_endpoints
+        )
+
+        ret, resp = bucket_manager.list_bucket('na0')
+        assert resp.ok(), resp
+        assert len(ret) > 0, resp
+        assert any(b.get('tbl') for b in ret), ret
diff --git a/tests/cases/test_services/test_storage/test_upload_pfop.py b/tests/cases/test_services/test_storage/test_upload_pfop.py
new file mode 100644
index 00000000..78818ba4
--- /dev/null
+++ b/tests/cases/test_services/test_storage/test_upload_pfop.py
@@ -0,0 +1,66 @@
+import pytest
+
+import qiniu
+
+
+KB = 1024
+MB = 1024 * KB
+GB = 1024 * MB
+
+
+# Set a bucket lifecycle rule manually to delete objects with the `test-pfop` prefix,
+# or this test will keep occupying bucket space.
+class TestPersistentFopByUpload:
+    @pytest.mark.parametrize('temp_file', [10 * MB], indirect=True)
+    @pytest.mark.parametrize('persistent_type', [None, 0, 1])
+    def test_pfop_with_upload(
+        self,
+        set_conf_default,
+        qn_auth,
+        bucket_name,
+        temp_file,
+        persistent_type
+    ):
+        key = 'test-pfop-upload-file'
+        persistent_key = '_'.join([
+            'test-pfop-by-upload',
+            'type',
+            str(persistent_type)
+        ])
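+        # op_save attaches a saveas target so the transcode result is written to persistent_key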
+        persistent_ops = ';'.join([
+            qiniu.op_save(
+                op='avthumb/m3u8/segtime/10/vcodec/libx264/s/320x240',
+                bucket=bucket_name,
+                key=persistent_key
+            )
+        ])
+
+        upload_policy = {
+            'persistentOps': persistent_ops
+        }
+
+        if persistent_type is not None:
+            upload_policy['persistentType'] = persistent_type
+
+        token = qn_auth.upload_token(
+            bucket_name,
+            key,
+            policy=upload_policy
+        )
+        ret, resp = qiniu.put_file(
+            token,
+            key,
+            temp_file.path,
+            check_crc=True
+        )
+
+        assert ret is not None, resp
+        assert ret['key'] == key, resp
+        assert 'persistentId' in ret, resp
+
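+        # poll the persistent job status with the persistentId returned from the upload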
+        pfop = qiniu.PersistentFop(qn_auth, bucket_name)
+        ret, resp = pfop.get_status(ret['persistentId'])
+        assert resp.status_code == 200, resp
+        if persistent_type == 1:
+            assert ret['type'] == 1, resp
+        assert ret['creationDate'] is not None, resp
diff --git a/tests/cases/test_services/test_storage/test_uploader.py b/tests/cases/test_services/test_storage/test_uploader.py
index 44b153e6..564146d5 100644
--- a/tests/cases/test_services/test_storage/test_uploader.py
+++ b/tests/cases/test_services/test_storage/test_uploader.py
@@ -1,19 +1,19 @@
-import os
 from collections import namedtuple
 
-import tempfile
 import pytest
 
 from qiniu.compat import json, is_py2
 from qiniu import (
     Zone,
-    etag,
+    config as qn_config,
     set_default,
     put_file,
     put_data,
-    put_stream
+    put_stream,
+    build_batch_delete
 )
-from qiniu import config as qn_config
+from qiniu.http.endpoint import Endpoint
+from qiniu.http.region import ServiceName
 from qiniu.services.storage.uploader import _form_put
 
 KB = 1024
@@ -54,11 +54,43 @@ def commonly_options(request):
             'x-qn-meta-age': '18'
         }
     )
-    if hasattr(request, 'params'):
-        res = res._replace(**request.params)
+    if hasattr(request, 'param'):
+        res = res._replace(**request.param)
     yield res
 
 
+@pytest.fixture(scope='class')
+def auto_remove(bucket_manager):
+    grouped_keys_by_bucket_name = {}
+
+    def _auto_remove(bucket_name, key):
+        if bucket_name not in grouped_keys_by_bucket_name:
+            grouped_keys_by_bucket_name[bucket_name] = []
+        grouped_keys_by_bucket_name[bucket_name].append(key)
+        return key
+
+    yield _auto_remove
+
+    for bkt_name, keys in grouped_keys_by_bucket_name.items():
+        try:
+            delete_ops = build_batch_delete(bkt_name, keys)
+            bucket_manager.batch(delete_ops)
+        except Exception as err:
+            print('Failed to delete keys {1} from bucket {0}: {2}'.format(bkt_name, keys, err))
+
+
+@pytest.fixture(scope='class')
+def get_key(bucket_name, rand_string, auto_remove):
+    def _get_key(key, no_rand_trail=False):
+        result = key if no_rand_trail else key + '-' + rand_string(8)
+        auto_remove(bucket_name, result)
+        return result
+
+    yield _get_key
+
+
 @pytest.fixture(scope='function')
 def set_default_up_host_zone(request, valid_up_host):
     zone_args = {
@@ -77,41 +109,17 @@ def set_default_up_host_zone(request, valid_up_host):
     qn_config._is_customized_default['default_zone'] = False
 
 
-@pytest.fixture(scope='function')
-def temp_file(request):
-    size = 4 * KB
-    if hasattr(request, 'param'):
-        size = request.param
-
-    tmp_file_path = tempfile.mktemp()
-    chunk_size = 4 * KB
-
-    with open(tmp_file_path, 'wb') as f:
-        remaining_bytes = size
-        while remaining_bytes > 0:
-            chunk = os.urandom(min(chunk_size, remaining_bytes))
-            f.write(chunk)
-            remaining_bytes -= len(chunk)
-
-    yield tmp_file_path
-
-    try:
-        os.remove(tmp_file_path)
-    except Exception:
-        pass
-
-
 class TestUploadFuncs:
-    def test_put(self, qn_auth, bucket_name):
-        key = 'a\\b\\c"hello'
+    def test_put(self, qn_auth, bucket_name, get_key):
+        key = get_key('a\\b\\c"hello', no_rand_trail=True)
         data = 'hello bubby!'
         token = qn_auth.upload_token(bucket_name)
         ret, info = put_data(token, key, data)
         print(info)
         assert ret['key'] == key
 
-    def test_put_crc(self, qn_auth, bucket_name):
-        key = ''
+    def test_put_crc(self, qn_auth, bucket_name, get_key):
+        key = get_key('', no_rand_trail=True)
         data = 'hello bubby!'
         token = qn_auth.upload_token(bucket_name, key)
         ret, info = put_data(token, key, data, check_crc=True)
@@ -119,23 +127,34 @@ def test_put_crc(self, qn_auth, bucket_name):
         assert ret['key'] == key
 
     @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True)
-    def test_put_file(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file'
+    def test_put_file(
+        self,
+        qn_auth,
+        bucket_name,
+        temp_file,
+        commonly_options,
+        get_remote_object_headers_and_md5,
+        get_key
+    ):
+        key = get_key('test_file')
 
         token = qn_auth.upload_token(bucket_name, key)
         ret, info = put_file(
             token,
             key,
-            temp_file,
+            temp_file.path,
             mime_type=commonly_options.mime_type,
             check_crc=True
         )
-        print(info)
-        assert ret['key'] == key
-        assert ret['hash'] == etag(temp_file)
 
-    def test_put_with_invalid_crc(self, qn_auth, bucket_name):
-        key = 'test_invalid'
+        _, actual_md5 = get_remote_object_headers_and_md5(key=key)
+
+        assert ret is not None, info
+        assert ret['key'] == key, info
+        assert actual_md5 == temp_file.md5
+
+    def test_put_with_invalid_crc(self, qn_auth, bucket_name, get_key):
+        key = get_key('test_invalid')
         data = 'hello bubby!'
         crc32 = 'wrong crc32'
         token = qn_auth.upload_token(bucket_name)
@@ -143,13 +162,14 @@ def test_put_with_invalid_crc(self, qn_auth, bucket_name):
         assert ret is None, info
         assert info.status_code == 400, info
 
-    def test_put_without_key(self, qn_auth, bucket_name):
+    def test_put_without_key(self, qn_auth, bucket_name, get_key):
         key = None
         data = 'hello bubby!'
         token = qn_auth.upload_token(bucket_name)
         ret, info = put_data(token, key, data)
-        print(info)
-        assert ret['hash'] == ret['key']
+        assert 'key' in ret, info
+        get_key(ret['key'], no_rand_trail=True)  # register the key for auto removal
+        assert ret['hash'] == ret['key'], info
 
         data = 'hello bubby!'
         token = qn_auth.upload_token(bucket_name, 'nokey2')
@@ -166,8 +186,8 @@ def test_put_without_key(self, qn_auth, bucket_name):
         ],
         indirect=True
     )
-    def test_without_read_without_seek_retry(self, set_default_up_host_zone, qn_auth, bucket_name):
-        key = 'retry'
+    def test_without_read_without_seek_retry(self, set_default_up_host_zone, qn_auth, bucket_name, get_key):
+        key = get_key('retry')
         data = 'hello retry!'
         token = qn_auth.upload_token(bucket_name)
         ret, info = put_data(token, key, data)
@@ -181,12 +201,13 @@ def test_put_data_without_fname(
         qn_auth,
         bucket_name,
         is_travis,
-        temp_file
+        temp_file,
+        get_key
     ):
         if is_travis:
             return
-        key = 'test_putData_without_fname'
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_putData_without_fname')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_data(token, key, input_stream)
             print(info)
@@ -199,12 +220,13 @@ def test_put_data_with_empty_fname(
         bucket_name,
         is_travis,
         temp_file,
-        commonly_options
+        commonly_options,
+        get_key
     ):
         if is_travis:
             return
-        key = 'test_putData_without_fname1'
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_putData_without_fname1')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_data(
                 token,
@@ -226,12 +248,13 @@ def test_put_data_with_space_only_fname(
         bucket_name,
         is_travis,
         temp_file,
-        commonly_options
+        commonly_options,
+        get_key
     ):
         if is_travis:
             return
-        key = 'test_putData_without_fname2'
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_putData_without_fname2')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_data(
                 token,
@@ -253,13 +276,17 @@ def test_put_file_with_metadata(
         bucket_name,
         temp_file,
         commonly_options,
-        bucket_manager
+        bucket_manager,
+        get_remote_object_headers_and_md5,
+        get_key
     ):
-        key = 'test_file_with_metadata'
+        key = get_key('test_file_with_metadata')
         token = qn_auth.upload_token(bucket_name, key)
-        ret, info = put_file(token, key, temp_file, metadata=commonly_options.metadata)
+        ret, info = put_file(token, key, temp_file.path, metadata=commonly_options.metadata)
+        _, actual_md5 = get_remote_object_headers_and_md5(key=key)
         assert ret['key'] == key
-        assert ret['hash'] == etag(temp_file)
+        assert actual_md5 == temp_file.md5
+
         ret, info = bucket_manager.stat(bucket_name, key)
         assert 'x-qn-meta' in ret
         assert ret['x-qn-meta']['name'] == 'qiniu'
@@ -270,9 +297,10 @@ def test_put_data_with_metadata(
         qn_auth,
         bucket_name,
         commonly_options,
-        bucket_manager
+        bucket_manager,
+        get_key
     ):
-        key = 'put_data_with_metadata'
+        key = get_key('put_data_with_metadata')
         data = 'hello metadata!'
         token = qn_auth.upload_token(bucket_name, key)
         ret, info = put_data(token, key, data, metadata=commonly_options.metadata)
@@ -290,9 +318,11 @@ def test_put_file_with_callback(
         temp_file,
         commonly_options,
         bucket_manager,
-        upload_callback_url
+        upload_callback_url,
+        get_remote_object_headers_and_md5,
+        get_key
     ):
-        key = 'test_file_with_callback'
+        key = get_key('test_file_with_callback')
         policy = {
             'callbackUrl': upload_callback_url,
             'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}',
@@ -302,13 +332,15 @@ def test_put_file_with_callback(
         ret, info = put_file(
             token,
             key,
-            temp_file,
+            temp_file.path,
             metadata=commonly_options.metadata,
             params=commonly_options.params,
         )
+        _, actual_md5 = get_remote_object_headers_and_md5(key=key)
         assert ret['key'] == key
-        assert ret['hash'] == etag(temp_file)
+        assert actual_md5 == temp_file.md5
         assert ret['custom_vars']['a'] == 'a'
+
         ret, info = bucket_manager.stat(bucket_name, key)
         assert 'x-qn-meta' in ret
         assert ret['x-qn-meta']['name'] == 'qiniu'
@@ -320,9 +352,10 @@ def test_put_data_with_callback(
         bucket_name,
         commonly_options,
         bucket_manager,
-        upload_callback_url
+        upload_callback_url,
+        get_key
     ):
-        key = 'put_data_with_metadata'
+        key = get_key('put_data_with_metadata')
         data = 'hello metadata!'
         policy = {
             'callbackUrl': upload_callback_url,
@@ -347,17 +380,16 @@ def test_put_data_with_callback(
 
 class TestResumableUploader:
     @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True)
-    def test_put_stream(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file_r'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+    def test_put_stream(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
+        key = get_key('test_file_r')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -368,17 +400,16 @@ def test_put_stream(self, qn_auth, bucket_name, temp_file, commonly_options):
             assert ret['key'] == key
 
     @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True)
-    def test_put_stream_v2_without_bucket_name(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file_r'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+    def test_put_stream_v2_without_bucket_name(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
+        key = get_key('test_file_r')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -401,17 +432,16 @@ def test_put_stream_v2_without_bucket_name(self, qn_auth, bucket_name, temp_file
         ],
         indirect=True
     )
-    def test_put_stream_v2(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file_r'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+    def test_put_stream_v2(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
+        key = get_key('test_file_r')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -422,18 +452,17 @@ def test_put_stream_v2(self, qn_auth, bucket_name, temp_file, commonly_options):
             assert ret['key'] == key
 
     @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True)
-    def test_put_stream_v2_without_key(self, qn_auth, bucket_name, temp_file, commonly_options):
+    def test_put_stream_v2_without_key(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
         part_size = 4 * MB
         key = None
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -441,21 +470,22 @@ def test_put_stream_v2_without_key(self, qn_auth, bucket_name, temp_file, common
                 version='v2',
                 bucket_name=bucket_name
             )
-            assert ret['key'] == ret['hash']
+        assert 'key' in ret
+        get_key(ret['key'], no_rand_trail=True)  # register the key for auto removal
+        assert ret['key'] == ret['hash']
 
     @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True)
-    def test_put_stream_v2_with_empty_return_body(self, qn_auth, bucket_name, temp_file, commonly_options):
+    def test_put_stream_v2_with_empty_return_body(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
         part_size = 4 * MB
-        key = 'test_file_empty_return_body'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_file_empty_return_body')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key, policy={'returnBody': ' '})
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -467,14 +497,14 @@ def test_put_stream_v2_with_empty_return_body(self, qn_auth, bucket_name, temp_f
             assert ret == {}
 
     @pytest.mark.parametrize('temp_file', [4 * MB + 1], indirect=True)
-    def test_big_file(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'big'
+    def test_big_file(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
+        key = get_key('big')
         token = qn_auth.upload_token(bucket_name, key)
 
         ret, info = put_file(
             token,
             key,
-            temp_file,
+            temp_file.path,
             commonly_options.params,
             commonly_options.mime_type,
             progress_handler=lambda progress, total: progress
@@ -491,32 +521,40 @@ def test_big_file(self, qn_auth, bucket_name, temp_file, commonly_options):
         indirect=True
     )
     @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True)
-    def test_retry(self, set_default_up_host_zone, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file_r_retry'
+    def test_legacy_retry(
+        self,
+        set_default_up_host_zone,
+        qn_auth,
+        bucket_name,
+        temp_file,
+        commonly_options,
+        get_remote_object_headers_and_md5,
+        get_key
+    ):
+        key = get_key('test_file_r_retry')
         token = qn_auth.upload_token(bucket_name, key)
         ret, info = put_file(
             token,
             key,
-            temp_file,
+            temp_file.path,
             commonly_options.params,
             commonly_options.mime_type
         )
-        print(info)
-        assert ret['key'] == key
-        assert ret['hash'] == etag(temp_file)
+        _, actual_md5 = get_remote_object_headers_and_md5(key=key)
+        assert ret['key'] == key, info
+        assert actual_md5 == temp_file.md5
 
     @pytest.mark.parametrize('temp_file', [64 * KB], indirect=True)
-    def test_put_stream_with_key_limits(self, qn_auth, bucket_name, temp_file, commonly_options):
-        key = 'test_file_r'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+    def test_put_stream_with_key_limits(self, qn_auth, bucket_name, temp_file, commonly_options, get_key):
+        key = get_key('test_file_r')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key, policy={'keylimit': ['test_file_d']})
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type
@@ -525,14 +563,14 @@ def test_put_stream_with_key_limits(self, qn_auth, bucket_name, temp_file, commo
             token = qn_auth.upload_token(
                 bucket_name,
                 key,
-                policy={'keylimit': ['test_file_d', 'test_file_r']}
+                policy={'keylimit': ['test_file_d', key]}
             )
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type
@@ -546,18 +584,18 @@ def test_put_stream_with_metadata(
         bucket_name,
         temp_file,
         commonly_options,
-        bucket_manager
+        bucket_manager,
+        get_key
     ):
-        key = 'test_put_stream_with_metadata'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_put_stream_with_metadata')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -579,19 +617,19 @@ def test_put_stream_v2_with_metadata(
         bucket_name,
         temp_file,
         commonly_options,
-        bucket_manager
+        bucket_manager,
+        get_key
     ):
         part_size = 4 * MB
-        key = 'test_put_stream_v2_with_metadata'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_put_stream_v2_with_metadata')
+        with open(temp_file.path, 'rb') as input_stream:
             token = qn_auth.upload_token(bucket_name, key)
             ret, info = put_stream(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -614,11 +652,11 @@ def test_put_stream_with_callback(
         temp_file,
         commonly_options,
         bucket_manager,
-        upload_callback_url
+        upload_callback_url,
+        get_key
     ):
-        key = 'test_put_stream_with_callback'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_put_stream_with_callback')
+        with open(temp_file.path, 'rb') as input_stream:
             policy = {
                 'callbackUrl': upload_callback_url,
                 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}',
@@ -629,8 +667,8 @@ def test_put_stream_with_callback(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -654,12 +692,12 @@ def test_put_stream_v2_with_callback(
         temp_file,
         commonly_options,
         bucket_manager,
-        upload_callback_url
+        upload_callback_url,
+        get_key
     ):
         part_size = 4 * MB
-        key = 'test_put_stream_v2_with_metadata'
-        size = os.stat(temp_file).st_size
-        with open(temp_file, 'rb') as input_stream:
+        key = get_key('test_put_stream_v2_with_metadata')
+        with open(temp_file.path, 'rb') as input_stream:
             policy = {
                 'callbackUrl': upload_callback_url,
                 'callbackBody': '{"custom_vars":{"a":$(x:a)},"key":$(key),"hash":$(etag)}',
@@ -670,8 +708,8 @@ def test_put_stream_v2_with_callback(
                 token,
                 key,
                 input_stream,
-                os.path.basename(temp_file),
-                size,
+                temp_file.name,
+                temp_file.size,
                 None,
                 commonly_options.params,
                 commonly_options.mime_type,
@@ -689,9 +727,8 @@ def test_put_stream_v2_with_callback(
 
     @pytest.mark.parametrize('temp_file', [30 * MB], indirect=True)
     @pytest.mark.parametrize('version', ['v1', 'v2'])
-    def test_resume_upload(self, bucket_name, qn_auth, temp_file, version):
-        key = 'test_resume_upload_{}'.format(version)
-        size = os.stat(temp_file).st_size
+    def test_resume_upload(self, bucket_name, qn_auth, temp_file, version, get_key):
+        key = get_key('test_resume_upload_' + version)
         part_size = 4 * MB
 
         def mock_fail(uploaded_size, _total_size):
@@ -704,7 +741,7 @@ def mock_fail(uploaded_size, _total_size):
                 _ret, _into = put_file(
                     up_token=token,
                     key=key,
-                    file_path=temp_file,
+                    file_path=temp_file.path,
                     hostscache_dir=None,
                     part_size=part_size,
                     version=version,
@@ -727,7 +764,7 @@ def should_start_from_resume(uploaded_size, _total_size):
         ret, into = put_file(
             up_token=token,
             key=key,
-            file_path=temp_file,
+            file_path=temp_file.path,
             hostscache_dir=None,
             part_size=part_size,
             version=version,
@@ -735,3 +772,94 @@ def should_start_from_resume(uploaded_size, _total_size):
             progress_handler=should_start_from_resume
         )
         assert ret['key'] == key
+
+    @pytest.mark.parametrize('temp_file', [
+        64 * KB,  # form
+        10 * MB  # resume
+    ], indirect=True)
+    @pytest.mark.parametrize('version', ['v1', 'v2'])
+    def test_upload_acc_normally(self, bucket_name, qn_auth, temp_file, version, get_key):
+        key = get_key('test_upload_acc_normally')
+
+        token = qn_auth.upload_token(bucket_name, key)
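+        # accelerate_uploading=True should route the upload through the kodo-accelerate domain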
+        ret, resp = put_file(
+            up_token=token,
+            key=key,
+            file_path=temp_file.path,
+            version=version,
+            accelerate_uploading=True
+        )
+
+        assert ret['key'] == key, resp
+        assert 'kodo-accelerate' in resp.url, resp
+
+    @pytest.mark.parametrize('temp_file', [
+        64 * KB,  # form
+        10 * MB  # resume
+    ], indirect=True)
+    @pytest.mark.parametrize('version', ['v1', 'v2'])
+    def test_upload_acc_fallback_src_by_network_err(
+        self,
+        bucket_name,
+        qn_auth,
+        temp_file,
+        version,
+        get_key,
+        get_real_regions
+    ):
+        regions = get_real_regions(qn_auth.get_access_key(), bucket_name)
+        r = regions[0]
+        r.services[ServiceName.UP_ACC] = [
+            Endpoint('qiniu-acc.fake.qiniu.com')
+        ]
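+        # the only acceleration endpoint is unreachable, so the uploader should fall back to the regular UP hosts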
+
+        key = get_key('test_upload_acc_fallback_src_by_network_err')
+
+        token = qn_auth.upload_token(bucket_name, key)
+        ret, resp = put_file(
+            up_token=token,
+            key=key,
+            file_path=temp_file.path,
+            version=version,
+            regions=[r],
+            accelerate_uploading=True
+        )
+
+        assert ret['key'] == key, resp
+
+    @pytest.mark.parametrize('temp_file', [
+        64 * KB,  # form
+        10 * MB  # resume
+    ], indirect=True)
+    @pytest.mark.parametrize('version', ['v1', 'v2'])
+    def test_upload_acc_fallback_src_by_acc_unavailable(
+        self,
+        no_acc_bucket_name,
+        qn_auth,
+        temp_file,
+        version,
+        rand_string,
+        auto_remove,
+        get_real_regions
+    ):
+        regions = get_real_regions(qn_auth.get_access_key(), no_acc_bucket_name)
+
+        region = regions[0]
+        region.services[ServiceName.UP_ACC] = [
+            Endpoint('{0}.kodo-accelerate.{1}.qiniucs.com'.format(no_acc_bucket_name, region.s3_region_id)),
+            Endpoint('fake-acc.python-sdk.qiniu.com')
+        ]
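+        # acceleration is not enabled on this bucket, so the server rejects the accelerated
+        # endpoints and the uploader must fall back to the source UP hosts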
+
+        key = 'test_upload_acc_fallback_src_by_acc_unavailable-' + rand_string(8)
+        auto_remove(no_acc_bucket_name, key)
+
+        token = qn_auth.upload_token(no_acc_bucket_name, key)
+        ret, resp = put_file(
+            up_token=token,
+            key=key,
+            file_path=temp_file.path,
+            version=version,
+            accelerate_uploading=True
+        )
+
+        assert ret['key'] == key, resp
diff --git a/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py b/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py
new file mode 100644
index 00000000..2aaa83ee
--- /dev/null
+++ b/tests/cases/test_services/test_storage/test_uploaders_default_retrier.py
@@ -0,0 +1,235 @@
+import pytest
+
+import os
+
+from qiniu.http.region import ServiceName, Region
+from qiniu.retry import Attempt
+from qiniu.services.storage.uploaders._default_retrier import (
+    ProgressRecord,
+    TokenExpiredRetryPolicy,
+    AccUnavailableRetryPolicy
+)
+
+
+@pytest.fixture(
+    scope='function',
+    params=[
+        {'api_version': 'v1'},
+        {'api_version': 'v2'}
+    ]
+)
+def fake_progress_record(request):
+    api_version = request.param.get('api_version')
+    file_path = os.path.join(os.getcwd(), 'fake-progress-record')
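+    # this file stands in for the resume-progress record the uploader keeps between attempts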
+
+    with open(file_path, 'w'):
+        pass
+
+    def _delete():
+        try:
+            os.remove(file_path)
+        except OSError:
+            pass
+
+    def _exists():
+        return os.path.exists(file_path)
+
+    yield ProgressRecord(
+        upload_api_version=api_version,
+        exists=_exists,
+        delete=_delete
+    )
+
+    _delete()
+
+
+class MockResponse:
+    def __init__(self, status_code, text_body=None):
+        self.status_code = status_code
+        self.text_body = text_body
+
+
+class TestTokenExpiredRetryPolicy:
+    def test_should_retry(self, fake_progress_record):
+        policy = TokenExpiredRetryPolicy(
+            upload_api_version=fake_progress_record.upload_api_version,
+            record_delete_handler=fake_progress_record.delete,
+            record_exists_handler=fake_progress_record.exists
+        )
+
+        attempt = Attempt()
+        policy.init_context(attempt.context)
+
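+        # the expired-upload status code differs by API version: 701 for v1, 612 for v2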
+        if fake_progress_record.upload_api_version == 'v1':
+            mocked_resp = MockResponse(status_code=701)
+        else:
+            mocked_resp = MockResponse(status_code=612)
+        attempt.result = (None, mocked_resp)
+
+        assert policy.should_retry(attempt)
+
+    def test_should_not_retry_by_no_result(self, fake_progress_record):
+        policy = TokenExpiredRetryPolicy(
+            upload_api_version=fake_progress_record.upload_api_version,
+            record_delete_handler=fake_progress_record.delete,
+            record_exists_handler=fake_progress_record.exists
+        )
+        attempt = Attempt()
+        policy.init_context(attempt.context)
+
+        assert not policy.should_retry(attempt)
+
+    def test_should_not_retry_by_default_max_retried_times(self, fake_progress_record):
+        policy = TokenExpiredRetryPolicy(
+            upload_api_version=fake_progress_record.upload_api_version,
+            record_delete_handler=fake_progress_record.delete,
+            record_exists_handler=fake_progress_record.exists
+        )
+        attempt = Attempt()
+        policy.init_context(attempt.context)
+        if fake_progress_record.upload_api_version == 'v1':
+            mocked_resp = MockResponse(status_code=701)
+        else:
+            mocked_resp = MockResponse(status_code=612)
+        attempt.result = (None, mocked_resp)
+        attempt.context[policy] = attempt.context[policy]._replace(retried_times=1)
+
+        assert not policy.should_retry(attempt)
+
+    def test_should_not_retry_by_file_no_exists(self, fake_progress_record):
+        policy = TokenExpiredRetryPolicy(
+            upload_api_version=fake_progress_record.upload_api_version,
+            record_delete_handler=fake_progress_record.delete,
+            record_exists_handler=fake_progress_record.exists
+        )
+
+        attempt = Attempt()
+        policy.init_context(attempt.context)
+        if fake_progress_record.upload_api_version == 'v1':
+            mocked_resp = MockResponse(status_code=701)
+        else:
+            mocked_resp = MockResponse(status_code=612)
+        attempt.result = (None, mocked_resp)
+        fake_progress_record.delete()
+
+        assert not policy.should_retry(attempt)
+
+    def test_prepare_retry(self, fake_progress_record):
+        policy = TokenExpiredRetryPolicy(
+            upload_api_version=fake_progress_record.upload_api_version,
+            record_delete_handler=fake_progress_record.delete,
+            record_exists_handler=fake_progress_record.exists
+        )
+
+        attempt = Attempt()
+        policy.init_context(attempt.context)
+        if fake_progress_record.upload_api_version == 'v1':
+            mocked_resp = MockResponse(status_code=701)
+        else:
+            mocked_resp = MockResponse(status_code=612)
+        attempt.result = (None, mocked_resp)
+
+        policy.prepare_retry(attempt)
+
+        assert not fake_progress_record.exists()
+
+
+class TestAccUnavailableRetryPolicy:
+    def test_should_retry(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+
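+        # an accelerated upload attempt with a plain UP fallback available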
+        attempt.context['service_name'] = ServiceName.UP_ACC
+        attempt.context['alternative_service_names'] = [ServiceName.UP]
+        attempt.context['region'] = Region.from_region_id('z0')
+
+        mocked_resp = MockResponse(
+            status_code=400,
+            text_body='{"error":"transfer acceleration is not configured on this bucket"}'
+        )
+        attempt.result = (None, mocked_resp)
+
+        assert policy.should_retry(attempt)
+
+    def test_should_not_retry_by_no_result(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+
+        attempt.context['service_name'] = ServiceName.UP_ACC
+        attempt.context['alternative_service_names'] = [ServiceName.UP]
+        attempt.context['region'] = Region.from_region_id('z0')
+
+        assert not policy.should_retry(attempt)
+
+    def test_should_not_retry_by_no_alternative_services(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+
+        attempt.context['service_name'] = ServiceName.UP
+        attempt.context['alternative_service_names'] = []
+        attempt.context['region'] = Region.from_region_id('z0')
+
+        mocked_resp = MockResponse(
+            status_code=400,
+            text_body='{"error":"transfer acceleration is not configured on this bucket"}'
+        )
+        attempt.result = (None, mocked_resp)
+
+        assert not policy.should_retry(attempt)
+
+    def test_should_not_retry_by_no_alternative_endpoints(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+
+        attempt.context['service_name'] = ServiceName.UP_ACC
+        attempt.context['alternative_service_names'] = [ServiceName.UP]
+        attempt.context['region'] = Region.from_region_id('z0')
+        attempt.context['region'].services[ServiceName.UP] = []
+
+        mocked_resp = MockResponse(
+            status_code=400,
+            text_body='{"error":"transfer acceleration is not configured on this bucket"}'
+        )
+        attempt.result = (None, mocked_resp)
+
+        assert not policy.should_retry(attempt)
+
+    def test_should_not_retry_by_other_error(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+
+        attempt.context['service_name'] = ServiceName.UP_ACC
+        attempt.context['alternative_service_names'] = [ServiceName.UP]
+        attempt.context['region'] = Region.from_region_id('z0')
+
+        mocked_resp = MockResponse(
+            status_code=400,
+            text_body='{"error":"Bad Request"}'
+        )
+        attempt.result = (None, mocked_resp)
+
+        assert not policy.should_retry(attempt)
+
+    def test_prepare_retry(self):
+        policy = AccUnavailableRetryPolicy()
+        attempt = Attempt()
+        region = Region.from_region_id('z0')
+
+        attempt.context['service_name'] = ServiceName.UP_ACC
+        attempt.context['alternative_service_names'] = [ServiceName.UP]
+        attempt.context['region'] = region
+
+        mocked_resp = MockResponse(
+            status_code=400,
+            text_body='{"error":"transfer acceleration is not configured on this bucket"}'
+        )
+        attempt.result = (None, mocked_resp)
+
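+        # prepare_retry should switch the attempt from UP_ACC to the UP service endpoints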
+        policy.prepare_retry(attempt)
+
+        assert attempt.context['service_name'] == ServiceName.UP
+        assert (
+            [attempt.context['endpoint']] + attempt.context['alternative_endpoints']
+            ==
+            region.services[ServiceName.UP]
+        )
diff --git a/tests/cases/test_zone/__init__.py b/tests/cases/test_zone/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/cases/test_zone/test_lagacy_region.py b/tests/cases/test_zone/test_lagacy_region.py
new file mode 100644
index 00000000..2fab2afc
--- /dev/null
+++ b/tests/cases/test_zone/test_lagacy_region.py
@@ -0,0 +1,64 @@
+from qiniu.http.region import Region, ServiceName
+from qiniu.region import LegacyRegion
+from qiniu.compat import json
+
+
+class TestLegacyRegion:
+    def test_compatible_with_http_region(self):
+        mocked_hosts = {
+            ServiceName.UP: ['https://up.python-example.qiniu.com', 'https://up-2.python-example.qiniu.com'],
+            ServiceName.IO: ['https://io.python-example.qiniu.com'],
+            ServiceName.RS: ['https://rs.python-example.qiniu.com'],
+            ServiceName.RSF: ['https://rsf.python-example.qiniu.com'],
+            ServiceName.API: ['https://api.python-example.qiniu.com']
+        }
+
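+        # legacy per-service host arguments should populate the new-style services map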
+        region = LegacyRegion(
+            up_host=mocked_hosts[ServiceName.UP][0],
+            up_host_backup=mocked_hosts[ServiceName.UP][1],
+            io_host=mocked_hosts[ServiceName.IO][0],
+            rs_host=mocked_hosts[ServiceName.RS][0],
+            rsf_host=mocked_hosts[ServiceName.RSF][0],
+            api_host=mocked_hosts[ServiceName.API][0]
+        )
+        assert isinstance(region, Region)
+        assert mocked_hosts == {
+            k: [
+                e.get_value()
+                for e in region.services[k]
+            ]
+            for k in mocked_hosts
+        }
+
+    def test_get_bucket_hosts(self, access_key, bucket_name):
+        region = LegacyRegion()
+        bucket_hosts = region.get_bucket_hosts(access_key, bucket_name)
+        for k in [
+            'upHosts',
+            'ioHosts',
+            'rsHosts',
+            'rsfHosts',
+            'apiHosts'
+        ]:
+            assert all(h.startswith('http') for h in bucket_hosts[k]), bucket_hosts[k]
+
+    def test_bucket_hosts(self, access_key, bucket_name):
+        region = LegacyRegion()
+        bucket_hosts_str = region.bucket_hosts(access_key, bucket_name)
+        bucket_hosts = json.loads(bucket_hosts_str)
+
+        region_hosts = bucket_hosts.get('hosts', [])
+
+        assert len(region_hosts) > 0
+
+        for r in region_hosts:
+            for k in [
+                'up',
+                'io',
+                'rs',
+                'rsf',
+                'api'
+            ]:
+                service_hosts = r[k].get('domains')
+                assert len(service_hosts) > 0
+                assert all(len(h) for h in service_hosts)
diff --git a/tests/cases/test_zone/test_qiniu_conf.py b/tests/cases/test_zone/test_qiniu_conf.py
new file mode 100644
index 00000000..c6bce5b8
--- /dev/null
+++ b/tests/cases/test_zone/test_qiniu_conf.py
@@ -0,0 +1,99 @@
+import pytest
+
+from qiniu import Zone
+from qiniu.config import get_default
+
+TEST_RS_HOST = 'rs.test.region.compatible.config.qiniu.com'
+TEST_RSF_HOST = 'rsf.test.region.compatible.config.qiniu.com'
+TEST_API_HOST = 'api.test.region.compatible.config.qiniu.com'
+
+
+class TestQiniuConfWithZone:
+    """
+    Test qiniu.conf with Zone (aka LegacyRegion)
+    """
+
+    @pytest.mark.parametrize(
+        'set_conf_default',
+        [
+            {
+                'default_uc_backup_hosts': [],
+            },
+            {
+                'default_uc_backup_hosts': [],
+                'default_query_region_backup_hosts': []
+            }
+        ],
+        indirect=True
+    )
+    def test_disable_backup_hosts(self, set_conf_default):
+        assert get_default('default_uc_backup_hosts') == []
+        assert get_default('default_query_region_backup_hosts') == []
+
+    @pytest.mark.parametrize(
+        'set_conf_default',
+        [
+            {
+                'default_rs_host': TEST_RS_HOST,
+                'default_rsf_host': TEST_RSF_HOST,
+                'default_api_host': TEST_API_HOST
+            }
+        ],
+        indirect=True
+    )
+    def test_config_compatible(self, set_conf_default):
+        zone = Zone()
+        assert zone.get_rs_host('mock_ak', 'mock_bucket') == TEST_RS_HOST
+        assert zone.get_rsf_host('mock_ak', 'mock_bucket') == TEST_RSF_HOST
+        assert zone.get_api_host('mock_ak', 'mock_bucket') == TEST_API_HOST
+
+    @pytest.mark.parametrize(
+        'set_conf_default',
+        [
+            {
+                'default_query_region_host': 'https://fake-uc.phpsdk.qiniu.com'
+            }
+        ],
+        indirect=True
+    )
+    def test_query_region_with_custom_domain(self, access_key, bucket_name, set_conf_default):
+        with pytest.raises(Exception) as exc:
+            zone = Zone()
+            zone.bucket_hosts(access_key, bucket_name)
+        assert 'HTTP Status Code -1' in str(exc)
+
+    @pytest.mark.parametrize(
+        'set_conf_default',
+        [
+            {
+                'default_query_region_host': 'https://fake-uc.phpsdk.qiniu.com',
+                'default_query_region_backup_hosts': [
+                    'unavailable-uc.phpsdk.qiniu.com',
+                    'uc.qbox.me'
+                ]
+            }
+        ],
+        indirect=True
+    )
+    def test_query_region_with_backup_domains(self, access_key, bucket_name, set_conf_default):
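+        # the primary query host is unreachable; region resolution should still succeed via the backup hosts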
+        zone = Zone()
+        data = zone.bucket_hosts(access_key, bucket_name)
+        assert data != 'null'
+
+    @pytest.mark.parametrize(
+        'set_conf_default',
+        [
+            {
+                'default_uc_host': 'https://fake-uc.phpsdk.qiniu.com',
+                'default_query_region_backup_hosts': [
+                    'unavailable-uc.phpsdk.qiniu.com',
+                    'uc.qbox.me'
+                ]
+            }
+        ],
+        indirect=True
+    )
+    def test_query_region_with_uc_and_backup_domains(self, access_key, bucket_name, set_conf_default):
+        zone = Zone()
+        data = zone.bucket_hosts(access_key, bucket_name)
+        assert data != 'null'