Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
18 changes: 5 additions & 13 deletions lmdeploy/turbomind/deploy/source_model/llama.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@

import torch
from safetensors.torch import load_file
from sentencepiece import SentencePieceProcessor

from lmdeploy.tokenizer import Tokenizer

Expand Down Expand Up @@ -168,18 +167,11 @@ def get_mgrs(self):

def tokenizer_info(self):
    """Read tokenizer info from the model directory.

    Reconstructed to the post-merge state of this diff: the old
    SentencePiece fast path was deleted together with its import
    (``from sentencepiece import SentencePieceProcessor``), so keeping
    it here would raise ``NameError`` at call time. Only the unified
    ``lmdeploy.tokenizer.Tokenizer`` path remains.

    Returns:
        tuple: ``(n_words, bos_id, eos_id)`` — vocabulary size, BOS
        token id and EOS token id as reported by ``Tokenizer``.
    """
    # The tokenizer now loads from the model *directory* (previously it
    # loaded a single tokenizer.model *file* via SentencePiece).
    # NOTE(review): `assert` is stripped under `python -O`; kept to match
    # the file's existing validation style.
    assert osp.isdir(self.model_path), self.model_path
    tk_model = Tokenizer(self.model_path)
    n_words = tk_model.vocab_size
    bos_id = tk_model.bos_token_id
    eos_id = tk_model.eos_token_id
    return n_words, bos_id, eos_id

def model_info(self):
Expand Down