Added support to load clip model from local file path (run-llama#12577)
chandrashekarvt authored and chrisalexiuk-nvidia committed Apr 25, 2024
1 parent 1725f63 commit 677274c
Showing 1 changed file with 3 additions and 1 deletion.
@@ -7,6 +7,7 @@
 from llama_index.core.embeddings.multi_modal_base import MultiModalEmbedding
 from llama_index.core.schema import ImageType
 from PIL import Image
+import os

 logger = logging.getLogger(__name__)

@@ -86,7 +87,8 @@ def __init__(

         try:
             self._device = "cuda" if torch.cuda.is_available() else "cpu"
-            if self.model_name not in AVAILABLE_CLIP_MODELS:
+            is_local_path = os.path.exists(self.model_name)
+            if not is_local_path and self.model_name not in AVAILABLE_CLIP_MODELS:
                 raise ValueError(
                     f"Model name {self.model_name} is not available in CLIP."
                 )
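With this change, model_name is first checked against the local filesystem via os.path.exists(); only when no such path exists does the AVAILABLE_CLIP_MODELS validation apply. A minimal usage sketch, assuming the llama-index-embeddings-clip integration is installed; "/path/to/clip-checkpoint.pt" is a hypothetical local checkpoint path, not from this commit:

    from llama_index.embeddings.clip import ClipEmbedding

    # Hypothetical local checkpoint: os.path.exists() must return True on
    # this path for the local-path branch added in this commit to be taken.
    local_embed = ClipEmbedding(model_name="/path/to/clip-checkpoint.pt")

    # Behavior for known model names is unchanged: names not found on disk
    # are still validated against AVAILABLE_CLIP_MODELS as before.
    hub_embed = ClipEmbedding(model_name="ViT-B/32")

Previously any model_name outside AVAILABLE_CLIP_MODELS raised a ValueError, so a local checkpoint path could never reach the model-loading step; the added is_local_path guard lets such paths through.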
