From e48a7a38ac01fe0db47a7898ae1401f25394402f Mon Sep 17 00:00:00 2001
From: Ziniu Yu
Date: Wed, 3 Aug 2022 12:50:11 +0800
Subject: [PATCH] fix: change onnx and trt default model name to
 ViT-B-32::openai (#793)

* fix: change onnx default model name to ViT-B-32::openai

* fix: trt default model name

* fix: update trt model list

* fix: trt file path
---
 server/clip_server/executors/clip_onnx.py     |  2 +-
 server/clip_server/executors/clip_tensorrt.py |  2 +-
 server/clip_server/model/clip_onnx.py         |  2 +-
 server/clip_server/model/clip_trt.py          | 18 +++++++++++++++++-
 tests/conftest.py                             |  4 +++-
 5 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/server/clip_server/executors/clip_onnx.py b/server/clip_server/executors/clip_onnx.py
index 0a6a3e6d4..24abd11ab 100644
--- a/server/clip_server/executors/clip_onnx.py
+++ b/server/clip_server/executors/clip_onnx.py
@@ -19,7 +19,7 @@
 class CLIPEncoder(Executor):
     def __init__(
         self,
-        name: str = 'ViT-B/32',
+        name: str = 'ViT-B-32::openai',
         device: Optional[str] = None,
         num_worker_preprocess: int = 4,
         minibatch_size: int = 32,
diff --git a/server/clip_server/executors/clip_tensorrt.py b/server/clip_server/executors/clip_tensorrt.py
index 60eaa50a6..7b519a649 100644
--- a/server/clip_server/executors/clip_tensorrt.py
+++ b/server/clip_server/executors/clip_tensorrt.py
@@ -18,7 +18,7 @@
 class CLIPEncoder(Executor):
     def __init__(
         self,
-        name: str = 'ViT-B/32',
+        name: str = 'ViT-B-32::openai',
         device: str = 'cuda',
         num_worker_preprocess: int = 4,
         minibatch_size: int = 32,
diff --git a/server/clip_server/model/clip_onnx.py b/server/clip_server/model/clip_onnx.py
index b01b19827..b02034629 100644
--- a/server/clip_server/model/clip_onnx.py
+++ b/server/clip_server/model/clip_onnx.py
@@ -155,7 +155,7 @@ def __init__(self, name: str, model_path: str = None):
         if name in _MODELS:
             if not model_path:
                 cache_dir = os.path.expanduser(
-                    f'~/.cache/clip/{name.replace("/", "-")}'
+                    f'~/.cache/clip/{name.replace("/", "-").replace("::", "-")}'
                 )
                 textual_model_name, textual_model_md5 = _MODELS[name][0]
                 self._textual_path = download_model(
diff --git a/server/clip_server/model/clip_trt.py b/server/clip_server/model/clip_trt.py
index 4d53400ef..b43a20c5b 100644
--- a/server/clip_server/model/clip_trt.py
+++ b/server/clip_server/model/clip_trt.py
@@ -21,6 +21,20 @@
 from clip_server.model.clip_onnx import _MODELS as ONNX_MODELS
 
 _MODELS = [
+    'RN50::openai',
+    'RN50::yfcc15m',
+    'RN50::cc12m',
+    'RN101::openai',
+    'RN101::yfcc15m',
+    'RN50x4::openai',
+    'ViT-B-32::openai',
+    'ViT-B-32::laion2b_e16',
+    'ViT-B-32::laion400m_e31',
+    'ViT-B-32::laion400m_e32',
+    'ViT-B-16::openai',
+    'ViT-B-16::laion400m_e31',
+    'ViT-B-16::laion400m_e32',
+    # older version name format
     'RN50',
     'RN101',
     'RN50x4',
@@ -41,7 +55,9 @@ def __init__(
         super().__init__(name)
 
         if name in _MODELS:
-            cache_dir = os.path.expanduser(f'~/.cache/clip/{name.replace("/", "-")}')
+            cache_dir = os.path.expanduser(
+                f'~/.cache/clip/{name.replace("/", "-").replace("::", "-")}'
+            )
 
             self._textual_path = os.path.join(
                 cache_dir,
diff --git a/tests/conftest.py b/tests/conftest.py
index a172d288f..280970a27 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -32,7 +32,9 @@ def make_flow(port_generator, request):
     f = Flow(port=port_generator()).add(
         name=request.param,
         uses=CLIPEncoder,
-        uses_with={'model_path': os.path.expanduser('~/.cache/clip/ViT-B-32')},
+        uses_with={
+            'model_path': os.path.expanduser('~/.cache/clip/ViT-B-32-openai')
+        },
     )
     with f:
         yield f
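
Note: the cache-directory normalization changed above is why the test fixture now
points model_path at ~/.cache/clip/ViT-B-32-openai. A minimal sketch of that mapping
follows; the helper name cache_dir_for is illustrative only, not part of the codebase:

import os

def cache_dir_for(name: str) -> str:
    # Same normalization as in clip_onnx.py / clip_trt.py: legacy 'ViT-B/32'
    # style names and new 'ViT-B-32::openai' style names both collapse to a
    # flat, filesystem-safe directory name under ~/.cache/clip.
    return os.path.expanduser(
        f'~/.cache/clip/{name.replace("/", "-").replace("::", "-")}'
    )

print(cache_dir_for('ViT-B-32::openai'))  # -> <home>/.cache/clip/ViT-B-32-openai
print(cache_dir_for('ViT-B/32'))          # -> <home>/.cache/clip/ViT-B-32 (legacy)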