From dedd2067e867919ba88645ca75aa5c8ef6092ab2 Mon Sep 17 00:00:00 2001
From: Aman Karmani
Date: Thu, 29 Jun 2023 19:08:57 -0700
Subject: [PATCH 1/3] convert: spike out xgen support

---
 convert.py | 31 ++++++++++++++++++++++++++++---
 1 file changed, 28 insertions(+), 3 deletions(-)

diff --git a/convert.py b/convert.py
index e340d2273f378..c593abeb41f07 100644
--- a/convert.py
+++ b/convert.py
@@ -4,6 +4,7 @@
 import enum
 import faulthandler
 import functools
+import importlib.util
 import io
 import itertools
 import json
@@ -201,6 +202,28 @@ def load(model_plus: 'ModelPlus') -> 'Params':
         return params
 
 
+class XgenVocab:
+    def __init__(self, path: Path) -> None:
+        self.fname_tokenizer = path
+        self.fname_added_tokens = None
+        path = str((path / "tokenization_xgen.py").absolute())
+        spec = importlib.util.spec_from_file_location(path, path)
+        module = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(module)
+        self.xt = module.XgenTokenizer()
+        self.vocab_size_base: int = self.xt.vocab_size
+        self.vocab_size: int = self.xt.vocab_size
+        self.added_tokens_list = []
+
+    def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
+        for index in range(0, self.vocab_size_base):
+            token = self.xt._convert_id_to_token(index)
+            yield (token, float(index))
+
+    def __repr__(self) -> str:
+        return f"<XgenVocab with {self.vocab_size_base} base tokens>"
+
+
 class SentencePieceVocab:
     def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
         self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
@@ -265,7 +288,7 @@ def __repr__(self) -> str:
         return f"<GGMLVocab with {self.vocab_size} tokens>"
 
 
-Vocab = Union[SentencePieceVocab, GGMLVocab]
+Vocab = Union[XgenVocab, SentencePieceVocab, GGMLVocab]
 
 
 def permute(weights: NDArray, n_head: int) -> NDArray:
@@ -948,7 +971,7 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc
 def check_vocab_size(params: Params, vocab: Vocab) -> None:
     if params.n_vocab != vocab.vocab_size:
         # GGMLVocab comes from the same file as the model so shouldn't mismatch:
-        assert isinstance(vocab, SentencePieceVocab)
+        assert isinstance(vocab, SentencePieceVocab) or isinstance(vocab, XgenVocab)
         if params.n_vocab == vocab.vocab_size_base:
             print("Ignoring added_tokens.json since model matches vocab size without it.")
             vocab.added_tokens_list = []
@@ -1133,11 +1156,13 @@ def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
     return {name: model[name] for name in TENSORS_LIST if name in model}
 
 
-def load_vocab(path: Path) -> SentencePieceVocab:
+def load_vocab(path: Path) -> Vocab:
     # Be extra-friendly and accept either a file or a directory.  Also, if it's
     # a directory, it might be the model directory, and tokenizer.model might
     # be in the parent of that.
     if path.is_dir():
+        if (path / "tokenization_xgen.py").exists():
+            return XgenVocab(path)
         path2 = path / "tokenizer.model"
         # Use `.parent` instead of /.. to handle the symlink case better.
         path3 = path.parent / "tokenizer.model"

From 58d663d32793fb476762212519341dfb80e17689 Mon Sep 17 00:00:00 2001
From: Aman Karmani
Date: Thu, 6 Jul 2023 14:08:32 -0700
Subject: [PATCH 2/3] hack in empty tokens for unknown vocab

The xgen model's n_vocab is larger than the vocab_size reported by its
tokenizer, so pad the tail of the vocab with empty tokens to make the
two sizes match.

---
 convert.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/convert.py b/convert.py
index c593abeb41f07..5d2f8aa55ff9c 100644
--- a/convert.py
+++ b/convert.py
@@ -219,6 +219,8 @@ def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
         for index in range(0, self.vocab_size_base):
             token = self.xt._convert_id_to_token(index)
             yield (token, float(index))
+        for index in range(self.vocab_size_base, self.vocab_size):
+            yield (b'', float(index))
 
     def __repr__(self) -> str:
         return f"<XgenVocab with {self.vocab_size_base} base tokens>"
@@ -977,6 +979,9 @@ def check_vocab_size(params: Params, vocab: Vocab) -> None:
             vocab.added_tokens_list = []
             vocab.vocab_size = vocab.vocab_size_base
             return
+        if isinstance(vocab, XgenVocab):
+            vocab.vocab_size = params.n_vocab
+            return
         msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer}"
         if vocab.fname_added_tokens is not None:
             msg += f" combined with {vocab.fname_added_tokens}"

From 45e5df66dacbb0e6ebf579ba4e7c6d667bd72772 Mon Sep 17 00:00:00 2001
From: Aman Karmani
Date: Mon, 10 Jul 2023 11:06:05 -0700
Subject: [PATCH 3/3] XgenVocab fix from @smdesai

all_tokens() is declared to yield bytes, but _convert_id_to_token
returns strings; read raw token bytes from the underlying tiktoken
encoder instead.

---
 convert.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/convert.py b/convert.py
index 5d2f8aa55ff9c..ee7148a65bf04 100644
--- a/convert.py
+++ b/convert.py
@@ -217,13 +217,13 @@ def __init__(self, path: Path) -> None:
 
     def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
         for index in range(0, self.vocab_size_base):
-            token = self.xt._convert_id_to_token(index)
+            token = self.xt.encoder.decode_single_token_bytes(index)
             yield (token, float(index))
         for index in range(self.vocab_size_base, self.vocab_size):
             yield (b'', float(index))
 
     def __repr__(self) -> str:
-        return f"<XgenVocab with {self.vocab_size_base} base tokens>"
+        return f"<XgenVocab with {self.vocab_size} tokens>"
 
 
 class SentencePieceVocab:
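
A minimal smoke test for the new code path, not part of the patches: it assumes a
llama.cpp checkout with this series applied and a local XGen model directory that
ships tokenization_xgen.py plus its tiktoken dependency (the models/xgen-7b-8k-base
path below is illustrative).

    # Smoke test: load_vocab() should return an XgenVocab for an XGen model dir.
    import itertools
    from pathlib import Path

    from convert import load_vocab

    vocab = load_vocab(Path("models/xgen-7b-8k-base"))
    print(vocab)  # expect an XgenVocab repr, not SentencePieceVocab
    for token, score in itertools.islice(vocab.all_tokens(), 5):
        print(token, score)  # token bytes and pseudo-score (the token's index)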