Qwen bug fixes #639

Merged Jun 14, 2024 (149 commits)

Changes from all commits
7df08c4
Update llama.py
danielhanchen May 19, 2024
ba5b6ce
offload
danielhanchen May 19, 2024
a07057e
Update llama.py
danielhanchen May 19, 2024
4be9063
Update llama.py
danielhanchen May 19, 2024
3dc3d3f
Update llama.py
danielhanchen May 19, 2024
f1cc1e8
Update llama.py
danielhanchen May 19, 2024
5cb531a
Update llama.py
danielhanchen May 19, 2024
6bd8e60
Update llama.py
danielhanchen May 19, 2024
d1d57ff
Update llama.py
danielhanchen May 19, 2024
7470f67
continued pretraining trainer
danielhanchen May 20, 2024
da9c1a6
Update trainer.py
danielhanchen May 20, 2024
2c68f56
Update trainer.py
danielhanchen May 20, 2024
217bf9d
Update trainer.py
danielhanchen May 20, 2024
6e85384
Update trainer.py
danielhanchen May 21, 2024
77f9c51
is_bfloat16_supported
danielhanchen May 21, 2024
c0e1d27
Update __init__.py
danielhanchen May 21, 2024
2b23b93
Update README.md
danielhanchen May 21, 2024
902e23a
Update llama.py
danielhanchen May 21, 2024
98f41ce
Merge branch 'main' into nightly
danielhanchen May 22, 2024
3193cac
is_bfloat16_supported
danielhanchen May 22, 2024
dfeaf4b
Update __init__.py
danielhanchen May 22, 2024
1e84090
Mistral v3
danielhanchen May 22, 2024
f63f32b
Merge branch 'main' into nightly
danielhanchen May 23, 2024
57ad8e7
Phi 3 medium
danielhanchen May 23, 2024
2b994b2
Update chat_templates.py
danielhanchen May 23, 2024
ff8171f
Update chat_templates.py
danielhanchen May 23, 2024
5ca8b58
Phi-3
danielhanchen May 23, 2024
98c2e81
Merge branch 'main' into nightly
danielhanchen May 23, 2024
3817660
Merge branch 'main' into nightly
danielhanchen May 23, 2024
f858145
Merge branch 'main' into nightly
danielhanchen May 24, 2024
a1328f6
Update save.py
danielhanchen May 24, 2024
fb29673
Update README.md
shimmyshimmer May 25, 2024
fa85556
Untrained tokens
danielhanchen May 26, 2024
c511aca
Update tokenizer_utils.py
danielhanchen May 26, 2024
35e7355
Update tokenizer_utils.py
danielhanchen May 26, 2024
cc0bf44
Update tokenizer_utils.py
danielhanchen May 26, 2024
674ba66
Update tokenizer_utils.py
danielhanchen May 26, 2024
9823f52
Update tokenizer_utils.py
danielhanchen May 26, 2024
c0c761b
Update tokenizer_utils.py
danielhanchen May 26, 2024
e2850c0
Update tokenizer_utils.py
danielhanchen May 26, 2024
8e12780
Update tokenizer_utils.py
danielhanchen May 26, 2024
6f1855e
Update tokenizer_utils.py
danielhanchen May 26, 2024
d27b173
Update tokenizer_utils.py
danielhanchen May 26, 2024
7bf7399
Update tokenizer_utils.py
danielhanchen May 26, 2024
31ecef9
Update tokenizer_utils.py
danielhanchen May 26, 2024
b67d93f
Update tokenizer_utils.py
danielhanchen May 26, 2024
e874ccd
Update tokenizer_utils.py
danielhanchen May 26, 2024
d7b54ff
Update tokenizer_utils.py
danielhanchen May 27, 2024
5a4a512
Update tokenizer_utils.py
danielhanchen May 27, 2024
82c040e
Update tokenizer_utils.py
danielhanchen May 27, 2024
8e227b2
Update tokenizer_utils.py
danielhanchen May 27, 2024
250d386
Update tokenizer_utils.py
danielhanchen May 27, 2024
e6db3ba
Update llama.py
danielhanchen May 27, 2024
e673fa2
Update tokenizer_utils.py
danielhanchen May 27, 2024
222b835
Update tokenizer_utils.py
danielhanchen May 27, 2024
6404aa5
Update tokenizer_utils.py
danielhanchen May 27, 2024
cfea7b2
Update tokenizer_utils.py
danielhanchen May 27, 2024
083e5ba
Update save.py
danielhanchen May 27, 2024
6f2565c
Update save.py
danielhanchen May 27, 2024
c19b04e
Update save.py
danielhanchen May 27, 2024
64b12a2
checkpoint
danielhanchen May 28, 2024
4cd5a8a
Merge branch 'main' into nightly
danielhanchen May 28, 2024
196faec
Update _utils.py
danielhanchen May 28, 2024
235be40
Update tokenizer_utils.py
danielhanchen May 29, 2024
cf9090a
Update tokenizer_utils.py
danielhanchen May 29, 2024
1fb1110
Update tokenizer_utils.py
danielhanchen May 29, 2024
d1bd60c
Update llama.py
danielhanchen May 30, 2024
732ead0
accelerate
danielhanchen May 30, 2024
359ae5c
Update _utils.py
danielhanchen May 30, 2024
8dcfad3
Update _utils.py
danielhanchen May 30, 2024
2bafc57
Update _utils.py
danielhanchen May 30, 2024
90f6311
Update _utils.py
danielhanchen May 30, 2024
7b84ff7
Update _utils.py
danielhanchen May 30, 2024
60f4b9a
Update _utils.py
danielhanchen May 30, 2024
3ebe5a5
Update _utils.py
danielhanchen May 30, 2024
7bbc8ce
Update tokenizer_utils.py
danielhanchen May 30, 2024
6f5c84c
train_dataloader
danielhanchen May 30, 2024
0d269ca
Update llama.py
danielhanchen May 30, 2024
6b7c142
Update llama.py
danielhanchen May 30, 2024
54f3a74
Update llama.py
danielhanchen May 30, 2024
0bc96c5
use_fast_convert
danielhanchen May 30, 2024
02c91b0
Merge branch 'main' into nightly
danielhanchen May 30, 2024
b384ff0
Merge branch 'main' into nightly
danielhanchen May 30, 2024
a8b5d89
Update save.py
danielhanchen May 30, 2024
872d569
Update save.py
danielhanchen May 30, 2024
3a1f5f2
Update save.py
danielhanchen May 30, 2024
bcadc8c
Update save.py
danielhanchen Jun 2, 2024
1381820
remove_special_tokens
danielhanchen Jun 2, 2024
e01b87d
Ollama
danielhanchen Jun 2, 2024
b3479c7
Update chat_templates.py
danielhanchen Jun 3, 2024
86804dc
Update chat_templates.py
danielhanchen Jun 3, 2024
87fdd3a
Update chat_templates.py
danielhanchen Jun 3, 2024
5c5df69
Merge branch 'main' into nightly
danielhanchen Jun 7, 2024
6386d94
Update llama.py
danielhanchen Jun 7, 2024
b1a9551
Update chat_templates.py
danielhanchen Jun 9, 2024
344a05d
Support bfloat16 GGUF
danielhanchen Jun 9, 2024
6b11e0d
Update save.py
danielhanchen Jun 9, 2024
c6e4b5b
Update llama.py
danielhanchen Jun 9, 2024
57f29ab
fast_forward_inference
danielhanchen Jun 9, 2024
d32e972
Update mapper.py
danielhanchen Jun 9, 2024
e121fa5
Update loader.py
danielhanchen Jun 9, 2024
5eaa10f
Update llama.py
danielhanchen Jun 9, 2024
f57d28d
Update tokenizer_utils.py
danielhanchen Jun 10, 2024
8937507
info
danielhanchen Jun 11, 2024
8982edb
edits
danielhanchen Jun 11, 2024
8904605
Create chat template
danielhanchen Jun 11, 2024
2a374c2
Fix tokenizer
danielhanchen Jun 12, 2024
d704b73
Merge branch 'main' into nightly
danielhanchen Jun 13, 2024
8176155
Update tokenizer_utils.py
danielhanchen Jun 13, 2024
21a99f1
fix case where gguf saving fails due to first_conversion dtype (#630)
chrehall68 Jun 13, 2024
dbf2dcf
Support revision parameter in FastLanguageModel.from_pretrained (#629)
chrehall68 Jun 13, 2024
9016171
clears any selected_adapters before calling internal_model.save_pretr…
neph1 Jun 13, 2024
0428920
Update __init__.py (#602)
xyangk Jun 13, 2024
9fdd847
Fixed unsloth/tokenizer_utils.py for chat training (#604)
Oseltamivir Jun 13, 2024
b5fc6aa
Add GGML saving option to Unsloth for easier Ollama model creation an…
mahiatlinux Jun 13, 2024
3fafbf7
docs: Add LoraConfig parameters documentation (#619)
sebdg Jun 13, 2024
273a871
llama.cpp failing (#371)
bet0x Jun 13, 2024
b312b3f
fix libcuda_dirs import for triton 3.0 (#227)
t-vi Jun 13, 2024
1601dca
Update save.py
danielhanchen Jun 13, 2024
26dc502
Update __init__.py
danielhanchen Jun 13, 2024
6a51657
Update fast_lora.py
danielhanchen Jun 13, 2024
4a8ba90
Update save.py
danielhanchen Jun 13, 2024
0abb5ba
Update save.py
danielhanchen Jun 13, 2024
b24dd05
Update save.py
danielhanchen Jun 13, 2024
48c6d6d
Update loader.py
danielhanchen Jun 13, 2024
e35f608
Update save.py
danielhanchen Jun 13, 2024
4822eae
Update save.py
danielhanchen Jun 13, 2024
7d847ed
quantize now llama-quantize
danielhanchen Jun 13, 2024
82f10cb
Update chat_templates.py
danielhanchen Jun 13, 2024
08424f0
Update loader.py
danielhanchen Jun 13, 2024
eb906d0
Update mapper.py
danielhanchen Jun 13, 2024
0a304ae
Update __init__.py
danielhanchen Jun 13, 2024
71edc42
embedding size
danielhanchen Jun 13, 2024
411b881
Merge branch 'main' into nightly
danielhanchen Jun 13, 2024
b74e321
Update qwen2.py
danielhanchen Jun 13, 2024
9c6d415
Merge branch 'main' into nightly
danielhanchen Jun 14, 2024
b82277f
docs
danielhanchen Jun 14, 2024
d98e45e
Update README.md
danielhanchen Jun 14, 2024
b6f0fdb
Update qwen2.py
danielhanchen Jun 14, 2024
6c031e4
README: Fix minor typo. (#559)
shaper Jun 14, 2024
2401dee
Update mistral.py
danielhanchen Jun 14, 2024
1b93d7e
Update qwen2.py
danielhanchen Jun 14, 2024
3581037
Update qwen2.py
danielhanchen Jun 14, 2024
b56b8b8
Update qwen2.py
danielhanchen Jun 14, 2024
fe8c064
Update llama.py
danielhanchen Jun 14, 2024
d8d332a
Update llama.py
danielhanchen Jun 14, 2024
cdb1dbb
Update llama.py
danielhanchen Jun 14, 2024
e8b3cf0
Update README.md
danielhanchen Jun 14, 2024
7e6f000
FastMistralModel
danielhanchen Jun 14, 2024
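
Several of the commits above add an `is_bfloat16_supported` helper and export it from `unsloth/__init__.py`. A minimal sketch of the pattern it enables in a training config; the hyperparameter values here are illustrative, not taken from this PR:

```python
# Sketch only: picks bf16 on GPUs that support it (Ampere or newer),
# otherwise falls back to fp16. Values besides the dtype flags are arbitrary.
from unsloth import is_bfloat16_supported
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir = "outputs",
    per_device_train_batch_size = 2,
    max_steps = 60,
    logging_steps = 1,
    fp16 = not is_bfloat16_supported(),
    bf16 = is_bfloat16_supported(),
)
```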
87 changes: 0 additions & 87 deletions PARAMETERS.md

This file was deleted.

18 changes: 10 additions & 8 deletions README.md
@@ -35,7 +35,7 @@ All notebooks are **beginner friendly**! Add your dataset, click "Run All", and
- Run [Llama 3 conversational notebook](https://colab.research.google.com/drive/1XamvWYinY6FOSX9GLvnqSjjsNflxdhNc?usp=sharing) and [Mistral 7B v3 ChatML](https://colab.research.google.com/drive/15F1xyn8497_dUbxZP4zWmPZ3PJx1Oymv?usp=sharing)
- This [text completion notebook](https://colab.research.google.com/drive/1ef-tab5bhkvWmBOObepl1WgJvfvSzn5Q?usp=sharing) is for continued pretraining / raw text
- This [continued pretraining notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) is for learning another language

- Click [here](https://github.com/unslothai/unsloth/wiki) for detailed documentation for Unsloth.

## 🦥 Unsloth.ai News
- 📣 NEW! Continued Pretraining [notebook](https://colab.research.google.com/drive/1tEd1FrOXWMnCU9UIvdYhs61tkxdMuKZu?usp=sharing) for other languages like Korean!
@@ -76,7 +76,7 @@ model = FastLanguageModel.get_peft_model(


## 🥇 Performance Benchmarking
- For the full list of **reproducable** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables)
- For the full list of **reproducible** benchmarking tables, [go to our website](https://unsloth.ai/blog/mistral-benchmark#Benchmark%20tables)

| 1 A100 40GB | 🤗Hugging Face | Flash Attention | 🦥Unsloth Open Source | 🦥[Unsloth Pro](https://unsloth.ai/pricing) |
|--------------|--------------|-----------------|---------------------|-----------------|
@@ -100,14 +100,16 @@ model = FastLanguageModel.get_peft_model(
### Conda Installation
Select either `pytorch-cuda=11.8` for CUDA 11.8 or `pytorch-cuda=12.1` for CUDA 12.1. If you have `mamba`, use `mamba` instead of `conda` for faster solving. See this [Github issue](https://github.com/unslothai/unsloth/issues/73) for help on debugging Conda installs.
```bash
conda create --name unsloth_env python=3.10
conda create --name unsloth_env \
python=3.10 \
pytorch-cuda=<11.8/12.1> \
pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers \
-y
conda activate unsloth_env

conda install pytorch-cuda=<12.1/11.8> pytorch cudatoolkit xformers -c pytorch -c nvidia -c xformers

pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"

pip install --no-deps trl peft accelerate bitsandbytes
pip install --no-deps "trl<0.9.0" peft accelerate bitsandbytes
```

### Pip Installation
@@ -162,7 +164,7 @@ pip install --no-deps packaging ninja einops flash-attn xformers trl peft accele

# Pre Ampere RTX 2080, T4, GTX 1080 GPUs:
pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"
pip install --no-deps xformers trl peft accelerate bitsandbytes
pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes
```
7. For Pytorch 2.3.0: Use the `"ampere"` path for newer RTX 30xx GPUs or higher.
```bash
@@ -257,7 +259,7 @@ trainer.train()
# (1) Saving to GGUF / merging to 16bit for vLLM
# (2) Continued training from a saved LoRA adapter
# (3) Adding an evaluation loop / OOMs
# (4) Cutomized chat templates
# (4) Customized chat templates
```

<a name="DPO"></a>
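The training-example comments above point to GGUF export, and commits in this PR add bfloat16 GGUF support and switch `quantize` to `llama-quantize`. A hedged sketch of how that export path is typically invoked; the checkpoint name and quantization choices are assumptions, not values from this PR:

```python
# Sketch under assumptions: the checkpoint name is one of Unsloth's published
# 4-bit repos, and "bf16" relies on the bfloat16 GGUF support merged here.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "unsloth/mistral-7b-v0.3-bnb-4bit",
    max_seq_length = 2048,
    load_in_4bit = True,
)

# Common llama.cpp quantization target:
model.save_pretrained_gguf("gguf_model", tokenizer, quantization_method = "q4_k_m")

# Presumed unquantized bfloat16 export enabled by the "Support bfloat16 GGUF" commit:
model.save_pretrained_gguf("gguf_model_bf16", tokenizer, quantization_method = "bf16")
```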
4 changes: 3 additions & 1 deletion unsloth/models/mistral.py
@@ -512,7 +512,7 @@ def from_pretrained(
if "n_total_devices >" not in inner_training_loop:
raise RuntimeError(
"Our OSS was designed for people with few GPU resources to level the playing field.\n"
"The OSS Apache 2 license only supports four GPUs - please obtain a commercial license from our website.\n"
"The OSS Apache 2 license only supports one GPU - please obtain a commercial license.\n"
"We're a 2 person team, so we still have to fund our development costs - thanks!\n"
"If you don't, please consider at least sponsoring us through Ko-fi! Appreciate it!",
)
@@ -521,6 +521,7 @@
"is_sagemaker_mp_enabled()",
"False",
)
exec(inner_training_loop, globals())
Trainer._inner_training_loop = _fast_inner_training_loop

# Save max_seq_length
@@ -560,6 +561,7 @@

# Add save modules
patch_saving_functions(model)
Trainer._inner_training_loop = _fast_inner_training_loop

# Save tokenizer for inference purposes
tokenizer.padding_side = "left" # Force inference
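The mistral.py hunks above compile the patched training loop with `exec(inner_training_loop, globals())` and re-assign `Trainer._inner_training_loop` to `_fast_inner_training_loop`. A self-contained sketch of that source-patching technique; the toy `Trainer` and `is_sagemaker_mp_enabled` stand-ins below are illustrative, not Unsloth's or transformers' actual code:

```python
import inspect
import textwrap

def is_sagemaker_mp_enabled():
    # Stand-in for the real transformers helper.
    return True

class Trainer:
    # Toy stand-in for transformers.Trainer.
    def _inner_training_loop(self):
        if is_sagemaker_mp_enabled():
            return "slow path"
        return "fast path"

# Grab the method's source, rewrite the unwanted check textually, rename the
# function, exec the edited source, then swap the new function onto the class.
source = textwrap.dedent(inspect.getsource(Trainer._inner_training_loop))
source = source.replace("is_sagemaker_mp_enabled()", "False")
source = source.replace("_inner_training_loop", "_fast_inner_training_loop")

namespace = {}
exec(source, globals(), namespace)
Trainer._inner_training_loop = namespace["_fast_inner_training_loop"]

print(Trainer()._inner_training_loop())  # -> fast path
```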
8 changes: 3 additions & 5 deletions unsloth/models/qwen2.py
@@ -12,9 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from .llama import *
import os
from ._utils import __version__
from .mistral import *

from transformers.models.qwen2.modeling_qwen2 import (
Qwen2Attention,
@@ -34,7 +32,7 @@
pass


class FastQwen2Model(FastLlamaModel):
class FastQwen2Model(FastMistralModel):

@staticmethod
def pre_patch():
@@ -72,7 +70,7 @@ def from_pretrained(
trust_remote_code = False,
**kwargs,
):
return FastLlamaModel.from_pretrained(
return FastMistralModel.from_pretrained(
model_name = model_name,
max_seq_length = max_seq_length,
dtype = dtype,
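With qwen2.py importing from `.mistral` and `FastQwen2Model` subclassing `FastMistralModel`, the user-facing entry point stays the same. A minimal usage sketch; the checkpoint name is an assumption rather than something specified in this PR:

```python
# Loading a Qwen2 checkpoint through the generic entry point, which dispatches
# to FastQwen2Model internally. Model name and settings are illustrative.
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "Qwen/Qwen2-7B",
    max_seq_length = 2048,
    dtype = None,          # auto-detect; bfloat16 on Ampere or newer GPUs
    load_in_4bit = True,
)

FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path
```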