From d7aab2e9003977a0934d8bda2b4de2d91c89668d Mon Sep 17 00:00:00 2001
From: ldwang
Date: Sat, 15 Jul 2023 14:12:25 +0800
Subject: [PATCH 1/5] Support BPE tokenizer in convert.py

Signed-off-by: ldwang
---
 convert.py | 68 +++++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 47 insertions(+), 21 deletions(-)

diff --git a/convert.py b/convert.py
index 7a2705e5c506f..6d5db5368d6f8 100644
--- a/convert.py
+++ b/convert.py
@@ -208,14 +208,21 @@ def load(model_plus: 'ModelPlus') -> 'Params':
 
 
 class SentencePieceVocab:
-    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
-        self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+    def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
+        self.vocabtype = vocabtype
+        if self.vocabtype == "bpe":
+          self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
+        else:
+          self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
         added_tokens: Dict[str, int]
         if fname_added_tokens is not None:
             added_tokens = json.load(open(fname_added_tokens))
         else:
             added_tokens = {}
-        vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
+        if self.vocabtype == "bpe":
+          vocab_size: int = len(self.sentencepiece_tokenizer)
+        else:
+          vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
         expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
         actual_ids = sorted(added_tokens.values())
         if expected_ids != actual_ids:
@@ -229,22 +236,36 @@ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) ->
 
     def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
         tokenizer = self.sentencepiece_tokenizer
-        for i in range(tokenizer.vocab_size()):
+        if self.vocabtype == "bpe":
+          from transformers.models.gpt2 import tokenization_gpt2
+          byte_encoder = tokenization_gpt2.bytes_to_unicode()
+          byte_decoder = {v: k for k, v in byte_encoder.items()}
+          for i, item in enumerate(tokenizer):
             text: bytes
-            if tokenizer.is_unknown(i):
+            if i == 0:
                 text = " \u2047 ".encode("utf-8")
-            elif tokenizer.is_control(i):
-                text = b""
-            elif tokenizer.is_byte(i):
-                piece = tokenizer.id_to_piece(i)
-                if len(piece) != 6:
-                    raise Exception(f"Invalid token: {piece}")
-                byte_value = int(piece[3:-1], 16)
-                text = struct.pack("B", byte_value)
+                score = 0.0
             else:
-                text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
-            score: float = tokenizer.get_score(i)
+                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
+                score: float = -i
             yield text, score
+        else:
+          for i in range(tokenizer.vocab_size()):
+            text: bytes
+            if tokenizer.is_unknown(i):
+              text = " \u2047 ".encode("utf-8")
+            elif tokenizer.is_control(i):
+              text = b""
+            elif tokenizer.is_byte(i):
+              piece = tokenizer.id_to_piece(i)
+              if len(piece) != 6:
+                raise Exception(f"Invalid token: {piece}")
+              byte_value = int(piece[3:-1], 16)
+              text = struct.pack("B", byte_value)
+            else:
+              text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            score: float = tokenizer.get_score(i)
+            yield text, score
 
     def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
         for text in self.added_tokens_list:
@@ -1171,14 +1192,17 @@ def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
     return {name: model[name] for name in TENSORS_LIST if name in model}
 
 
-def load_vocab(path: Path) -> SentencePieceVocab:
+def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
     # Be extra-friendly and accept either a file or a directory. Also, if it's
     # a directory, it might be the model directory, and tokenizer.model might
     # be in the parent of that.
     if path.is_dir():
-        path2 = path / "tokenizer.model"
+        vocab_file = "tokenizer.model"
+        if vocabtype == 'bpe':
+          vocab_file = "vocab.json"
+        path2 = path / vocab_file
         # Use `.parent` instead of /.. to handle the symlink case better.
-        path3 = path.parent / "tokenizer.model"
+        path3 = path.parent / vocab_file
         if path2.exists():
             path = path2
         elif path3.exists():
@@ -1189,7 +1213,8 @@ def load_vocab(path: Path) -> SentencePieceVocab:
                 "if it's in another directory, pass the directory as --vocab-dir")
     added_tokens_path = path.parent / "added_tokens.json"
     print(f"Loading vocab file {path}")
-    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
+    return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
+                              vocab_file)
 
 
 def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
@@ -1227,6 +1252,7 @@ def main(args_in: Optional[List[str]] = None) -> None:
     parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
     parser.add_argument("model", type=Path,
                         help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
+    parser.add_argument("--vocabtype", default='spm', choices=["spm", "bpe"], help="vocab format (default: spm)")
     args = parser.parse_args(args_in)
 
     vocab: Vocab
@@ -1234,7 +1260,7 @@ def main(args_in: Optional[List[str]] = None) -> None:
         model_plus = lazy_load_file(args.model)
         do_dump_model(model_plus)
     elif args.vocab_only:
-        vocab = load_vocab(args.vocab_dir or args.model)
+        vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
         assert args.outfile, "need --outfile if using --vocab-only"
         outfile = args.outfile
         OutputFile.write_vocab_only(outfile, vocab)
@@ -1248,7 +1274,7 @@ def main(args_in: Optional[List[str]] = None) -> None:
         vocab = model_plus.vocab
     else:
         vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
-        vocab = load_vocab(vocab_dir)
+        vocab = load_vocab(vocab_dir, args.vocabtype)
     params = Params.load(model_plus)
     model = model_plus.model
     model = do_necessary_conversions(model, params)
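
The BPE path in patch 1 leans on the GPT-2 byte-to-unicode table: every possible byte value is mapped to a printable unicode character so arbitrary token bytes can be stored as strings in `vocab.json`, and the converter inverts that table to recover the raw bytes. A minimal standalone sketch of the round trip (not part of the patch; it assumes `transformers` is installed, and `"Ġhello"` is just an illustrative vocab entry):

```python
# Round trip of the byte-level encoding used by the BPE branch above.
# Assumes `transformers` is installed; "Ġhello" is an illustrative token.
from transformers.models.gpt2 import tokenization_gpt2

byte_encoder = tokenization_gpt2.bytes_to_unicode()     # byte value -> unicode char
byte_decoder = {v: k for k, v in byte_encoder.items()}  # unicode char -> byte value

token = "\u0120hello"  # "Ġhello"; "Ġ" (U+0120) encodes the space byte 0x20
raw = bytes(byte_decoder[ch] for ch in token)
print(raw)  # b' hello'
```

This is also why the patch scores tokens with `-i`: a byte-level BPE vocab carries no SentencePiece-style log probabilities, so rank order is the only signal available.
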
From ee6bc1426e607f89e629060d8acdbf6be3f500ce Mon Sep 17 00:00:00 2001
From: ldwang
Date: Sat, 15 Jul 2023 14:14:00 +0800
Subject: [PATCH 2/5] Support BPE tokenizer in convert.py: drop the token-0 special case

Signed-off-by: ldwang
---
 convert.py | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/convert.py b/convert.py
index 6d5db5368d6f8..45e59b933ed55 100644
--- a/convert.py
+++ b/convert.py
@@ -242,12 +242,8 @@ def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
           byte_decoder = {v: k for k, v in byte_encoder.items()}
           for i, item in enumerate(tokenizer):
             text: bytes
-            if i == 0:
-                text = " \u2047 ".encode("utf-8")
-                score = 0.0
-            else:
-                text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
-                score: float = -i
+            text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
+            score: float = -i
             yield text, score
         else:
           for i in range(tokenizer.vocab_size()):

From 64b8aafce1c2f475e7e60e2ab693091a0c4f1fe2 Mon Sep 17 00:00:00 2001
From: ldwang
Date: Tue, 18 Jul 2023 11:18:12 +0800
Subject: [PATCH 3/5] Support BPE tokenizer in convert.py: pass vocabtype, not vocab_file

Signed-off-by: ldwang
---
 convert.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/convert.py b/convert.py
index 45e59b933ed55..04422bb5f1818 100644
--- a/convert.py
+++ b/convert.py
@@ -1189,6 +1189,7 @@ def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
 
 
 def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
+    print(f"vocabtype: {vocabtype}")
     # Be extra-friendly and accept either a file or a directory. Also, if it's
     # a directory, it might be the model directory, and tokenizer.model might
     # be in the parent of that.
@@ -1210,7 +1211,7 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
     added_tokens_path = path.parent / "added_tokens.json"
     print(f"Loading vocab file {path}")
     return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
-                              vocab_file)
+                              vocabtype)
 
 
 def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
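
With patches 1-3 applied, the new vocab path can be exercised on its own before running a full conversion. A hypothetical smoke test (the model directory is an example name; it must contain a GPT-2-style `vocab.json`, and `load_vocab` comes from the patched convert.py):

```python
# Hypothetical smoke test for the BPE vocab path added by this series.
# "models/Aquila-7B" is an example path, not a file shipped with the repo.
from pathlib import Path

from convert import load_vocab

vocab = load_vocab(Path("models/Aquila-7B"), "bpe")
for i, (text, score) in enumerate(vocab.sentencepiece_tokens()):
    print(i, text, score)
    if i >= 4:  # peek at the first few entries only
        break
```
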
From 35ed27b1afb2a60d1c6210d0f7fc35aa567ef248 Mon Sep 17 00:00:00 2001
From: ldwang
Date: Wed, 2 Aug 2023 14:48:03 +0800
Subject: [PATCH 4/5] Add Aquila-7B models to README.md

Signed-off-by: ldwang
---
 README.md | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/README.md b/README.md
index 515c80c42ec85..c9ac95b893e48 100644
--- a/README.md
+++ b/README.md
@@ -88,6 +88,7 @@ as the main playground for developing new features for the [ggml](https://github
 - [X] [Pygmalion 7B / Metharme 7B](#using-pygmalion-7b--metharme-7b)
 - [X] [WizardLM](https://github.com/nlpxucan/WizardLM)
 - [X] [Baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B) and its derivations (such as [baichuan-7b-sft](https://huggingface.co/hiyouga/baichuan-7b-sft))
+- [X] [Aquila-7B](https://huggingface.co/BAAI/Aquila-7B) and its derivations (such as [AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B))
 
 **Bindings:**
 
@@ -492,6 +493,9 @@ Building the program with BLAS support may lead to some performance improvements
 # obtain the original LLaMA model weights and place them in ./models
 ls ./models
 65B 30B 13B 7B tokenizer_checklist.chk tokenizer.model
+# [Optional] for models using BPE tokenizers
+ls ./models
+65B 30B 13B 7B vocab.json
 
 # install Python dependencies
 python3 -m pip install -r requirements.txt
@@ -499,6 +503,9 @@ python3 -m pip install -r requirements.txt
 
 # convert the 7B model to ggml FP16 format
 python3 convert.py models/7B/
 
+# [Optional] for models using BPE tokenizers
+python3 convert.py models/7B/ --vocabtype bpe
+
 # quantize the model to 4-bits (using q4_0 method)
 ./quantize ./models/7B/ggml-model-f16.bin ./models/7B/ggml-model-q4_0.bin q4_0

From 803c2ff7bfe115844bb309f07967b7d00279ff6a Mon Sep 17 00:00:00 2001
From: ldwang
Date: Wed, 2 Aug 2023 15:18:36 +0800
Subject: [PATCH 5/5] Update Aquila-7B entry in README.md

Signed-off-by: ldwang
---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index c9ac95b893e48..2ece294b7c947 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ as the main playground for developing new features for the [ggml](https://github
 - [X] [Pygmalion 7B / Metharme 7B](#using-pygmalion-7b--metharme-7b)
 - [X] [WizardLM](https://github.com/nlpxucan/WizardLM)
 - [X] [Baichuan-7B](https://huggingface.co/baichuan-inc/baichuan-7B) and its derivations (such as [baichuan-7b-sft](https://huggingface.co/hiyouga/baichuan-7b-sft))
-- [X] [Aquila-7B](https://huggingface.co/BAAI/Aquila-7B) and its derivations (such as [AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B))
+- [X] [Aquila-7B](https://huggingface.co/BAAI/Aquila-7B) / [AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B)
 
 **Bindings:**
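
For reference, the vocab-file lookup the README steps rely on is small enough to state directly; a sketch mirroring the patched `load_vocab()` resolution rule (standalone, with a hypothetical model path):

```python
# Mirrors the vocab-file lookup in the patched load_vocab(): pick the filename
# by vocab type, then try the model directory and its parent.
from pathlib import Path

def resolve_vocab_file(model_dir: Path, vocabtype: str = "spm") -> Path:
    name = "vocab.json" if vocabtype == "bpe" else "tokenizer.model"
    for candidate in (model_dir / name, model_dir.parent / name):
        if candidate.exists():
            return candidate
    raise FileNotFoundError(f"could not find {name} in {model_dir} or its parent; "
                            "pass the right directory as --vocab-dir")

# Example (hypothetical path):
# print(resolve_vocab_file(Path("models/Aquila-7B"), vocabtype="bpe"))
```
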